-
Notifications
You must be signed in to change notification settings - Fork 88
Open
Description
~> pip list (cuda128)
Package Version
------------------------ ------------
filelock 3.20.0
fsspec 2025.10.0
Jinja2 3.1.6
MarkupSafe 3.0.3
mpmath 1.3.0
networkx 3.6
numpy 2.3.5
nvidia-cublas-cu12 12.8.3.14
nvidia-cuda-cupti-cu12 12.8.57
nvidia-cuda-nvrtc-cu12 12.8.61
nvidia-cuda-runtime-cu12 12.8.57
nvidia-cudnn-cu12 9.7.1.26
nvidia-cufft-cu12 11.3.3.41
nvidia-cufile-cu12 1.13.0.11
nvidia-curand-cu12 10.3.9.55
nvidia-cusolver-cu12 11.7.2.55
nvidia-cusparse-cu12 12.5.7.53
nvidia-cusparselt-cu12 0.6.3
nvidia-nccl-cu12 2.26.2
nvidia-nvjitlink-cu12 12.8.61
nvidia-nvshmem-cu12 3.3.20
nvidia-nvtx-cu12 12.8.55
pillow 12.0.0
pip 25.3
setuptools 80.9.0
sympy 1.14.0
torch 2.7.1+cu128
torchvision 0.22.1+cu128
triton 3.3.1
typing_extensions 4.15.0
wheel 0.45.1
And here is the partial installation output (the failing nvcc compile command and the resulting error):
[1/2] /home/user/miniconda3/envs/cuda128/bin/nvcc --generate-dependencies-with-compile --dependency-output /tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/build/temp.linux-x86_64-cpython-311/difflogic/cuda/difflogic_kernel.o.d -I/home/user/miniconda3/envs/cuda128/lib/python3.11/site-packages/torch/include -I/home/user/miniconda3/envs/cuda128/lib/python3.11/site-packages/torch/include/torch/csrc/api/include -I/home/user/miniconda3/envs/cuda128/include -I/home/user/miniconda3/envs/cuda128/include/python3.11 -c -c /tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/difflogic/cuda/difflogic_kernel.cu -o /tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/build/temp.linux-x86_64-cpython-311/difflogic/cuda/difflogic_kernel.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options ''"'"'-fPIC'"'"'' -lineinfo -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1016"' -DTORCH_EXTENSION_NAME=difflogic_cuda -D_GLIBCXX_USE_CXX11_ABI=1 -gencode=arch=compute_89,code=compute_89 -gencode=arch=compute_89,code=sm_89 -std=c++17
FAILED: /tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/build/temp.linux-x86_64-cpython-311/difflogic/cuda/difflogic_kernel.o
/home/user/miniconda3/envs/cuda128/bin/nvcc --generate-dependencies-with-compile --dependency-output /tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/build/temp.linux-x86_64-cpython-311/difflogic/cuda/difflogic_kernel.o.d -I/home/user/miniconda3/envs/cuda128/lib/python3.11/site-packages/torch/include -I/home/user/miniconda3/envs/cuda128/lib/python3.11/site-packages/torch/include/torch/csrc/api/include -I/home/user/miniconda3/envs/cuda128/include -I/home/user/miniconda3/envs/cuda128/include/python3.11 -c -c /tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/difflogic/cuda/difflogic_kernel.cu -o /tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/build/temp.linux-x86_64-cpython-311/difflogic/cuda/difflogic_kernel.o -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr --compiler-options ''"'"'-fPIC'"'"'' -lineinfo -DTORCH_API_INCLUDE_EXTENSION_H '-DPYBIND11_COMPILER_TYPE="_gcc"' '-DPYBIND11_STDLIB="_libstdcpp"' '-DPYBIND11_BUILD_ABI="_cxxabi1016"' -DTORCH_EXTENSION_NAME=difflogic_cuda -D_GLIBCXX_USE_CXX11_ABI=1 -gencode=arch=compute_89,code=compute_89 -gencode=arch=compute_89,code=sm_89 -std=c++17
/tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/difflogic/cuda/difflogic_kernel.cu(283): error: no suitable conversion function from "const at::DeprecatedTypeProperties" to "c10::ScalarType" exists
[&] { const auto& the_type = x.type(); constexpr const char* at_dispatch_name = "logic_layer_cuda_forward"; at::ScalarType _st = ::detail::scalar_type(the_type); ; switch (_st) { case at::ScalarType::Double: { do { if constexpr (!at::should_include_kernel_dtype( at_dispatch_name, at::ScalarType::Double)) { if (!(false)) { ::c10::detail::torchCheckFail( __func__, "/tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/difflogic/cuda/difflogic_kernel.cu", static_cast<uint32_t>(283), (::c10::detail::torchCheckMsgImpl( "Expected " "false" " to be true, but got false. " "(Could this error message be improved? If so, " "please report an enhancement request to PyTorch.)", "dtype '", toString(at::ScalarType::Double), "' not selected for kernel tag ", at_dispatch_name))); }; } } while (0); using scalar_t [[maybe_unused]] = c10::impl::ScalarTypeToCPPTypeT<at::ScalarType::Double>; return ([&] { logic_layer_cuda_forward_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>( x.packed_accessor64<scalar_t, 2, torch::RestrictPtrTraits>(), a.packed_accessor64<int64_t, 1, torch::RestrictPtrTraits>(), b.packed_accessor64<int64_t, 1, torch::RestrictPtrTraits>(), w.packed_accessor64<scalar_t, 2, torch::RestrictPtrTraits>(), y.packed_accessor64<scalar_t, 2, torch::RestrictPtrTraits>() ); })(); } case at::ScalarType::Float: { do { if constexpr (!at::should_include_kernel_dtype( at_dispatch_name, at::ScalarType::Float)) { if (!(false)) { ::c10::detail::torchCheckFail( __func__, "/tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/difflogic/cuda/difflogic_kernel.cu", static_cast<uint32_t>(283), (::c10::detail::torchCheckMsgImpl( "Expected " "false" " to be true, but got false. " "(Could this error message be improved? 
If so, " "please report an enhancement request to PyTorch.)", "dtype '", toString(at::ScalarType::Float), "' not selected for kernel tag ", at_dispatch_name))); }; } } while (0); using scalar_t [[maybe_unused]] = c10::impl::ScalarTypeToCPPTypeT<at::ScalarType::Float>; return ([&] { logic_layer_cuda_forward_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>( x.packed_accessor64<scalar_t, 2, torch::RestrictPtrTraits>(), a.packed_accessor64<int64_t, 1, torch::RestrictPtrTraits>(), b.packed_accessor64<int64_t, 1, torch::RestrictPtrTraits>(), w.packed_accessor64<scalar_t, 2, torch::RestrictPtrTraits>(), y.packed_accessor64<scalar_t, 2, torch::RestrictPtrTraits>() ); })(); } case at::ScalarType::Half: { do { if constexpr (!at::should_include_kernel_dtype( at_dispatch_name, at::ScalarType::Half)) { if (!(false)) { ::c10::detail::torchCheckFail( __func__, "/tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/difflogic/cuda/difflogic_kernel.cu", static_cast<uint32_t>(283), (::c10::detail::torchCheckMsgImpl( "Expected " "false" " to be true, but got false. " "(Could this error message be improved? 
If so, " "please report an enhancement request to PyTorch.)", "dtype '", toString(at::ScalarType::Half), "' not selected for kernel tag ", at_dispatch_name))); }; } } while (0); using scalar_t [[maybe_unused]] = c10::impl::ScalarTypeToCPPTypeT<at::ScalarType::Half>; return ([&] { logic_layer_cuda_forward_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>( x.packed_accessor64<scalar_t, 2, torch::RestrictPtrTraits>(), a.packed_accessor64<int64_t, 1, torch::RestrictPtrTraits>(), b.packed_accessor64<int64_t, 1, torch::RestrictPtrTraits>(), w.packed_accessor64<scalar_t, 2, torch::RestrictPtrTraits>(), y.packed_accessor64<scalar_t, 2, torch::RestrictPtrTraits>() ); })(); } default: if (!(false)) { ::c10::detail::torchCheckFail( __func__, "/tmp/pip-install-5tkue1ee/difflogic_9b5c7ad5150f4e5a9519ddfe4c48103e/difflogic/cuda/difflogic_kernel.cu", static_cast<uint32_t>(283), (::c10::detail::torchCheckMsgImpl( "Expected " "false" " to be true, but got false. " "(Could this error message be improved? If so, " "please report an enhancement request to PyTorch.)", '"', at_dispatch_name, "\" not implemented for '", toString(_st), "'"))); }; } }()
^
Metadata
Metadata
Assignees
Labels
No labels