From 7536562ffa1a18ca714bb110bf4691b583ccee05 Mon Sep 17 00:00:00 2001
From: Andrew Bailey
Date: Sun, 6 Mar 2022 14:38:23 -0500
Subject: [PATCH] Updated examples 2 9 and 10 for Optix 7.0 and updated crates versions

---
 optix-sys/cuda_wrapper.rs                     | 17326 +---------------
 optix-sys/optix_wrapper.rs                    |     4 +-
 optix/Cargo.toml                              |    25 +-
 optix/examples/02_pipeline/LaunchParams.h     |     2 +-
 optix/examples/07_obj/main.rs                 |     4 +-
 optix/examples/08_texture/main.rs             |     6 +-
 optix/examples/09_shadow/devicePrograms.cu    |    51 +-
 optix/examples/09_shadow/main.rs              |    10 +-
 optix/examples/09_shadow/sample_renderer.rs   |   121 +-
 .../examples/10_softshadow/devicePrograms.cu  |    52 +-
 optix/examples/10_softshadow/main.rs          |    10 +-
 .../examples/10_softshadow/sample_renderer.rs |   126 +-
 optix/src/math.rs                             |     9 +-
 13 files changed, 278 insertions(+), 17468 deletions(-)

diff --git a/optix-sys/cuda_wrapper.rs b/optix-sys/cuda_wrapper.rs
index c98cd23..2dda455 100644
--- a/optix-sys/cuda_wrapper.rs
+++ b/optix-sys/cuda_wrapper.rs
@@ -145,139 +145,79 @@ pub struct CUuuid_st {
     pub bytes: [::std::os::raw::c_char; 16usize],
 }
 pub type CUuuid = CUuuid_st;
-#[doc = " CUDA IPC event handle"]
 #[repr(C)]
 #[derive(Copy, Clone)]
 pub struct CUipcEventHandle_st {
     pub reserved: [::std::os::raw::c_char; 64usize],
 }
 pub type CUipcEventHandle = CUipcEventHandle_st;
-#[doc = " CUDA IPC mem handle"]
 #[repr(C)]
 #[derive(Copy, Clone)]
 pub struct CUipcMemHandle_st {
     pub reserved: [::std::os::raw::c_char; 64usize],
 }
 pub type CUipcMemHandle = CUipcMemHandle_st;
-#[doc = "< Automatically enable peer access between remote devices as needed"]
 pub const CUipcMem_flags_enum_CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS: CUipcMem_flags_enum = 1;
-#[doc = " CUDA Ipc Mem Flags"]
 pub type CUipcMem_flags_enum = u32;
 pub use self::CUipcMem_flags_enum as CUipcMem_flags;
-#[doc = "< Memory can be accessed by any stream on any device"]
 pub const CUmemAttach_flags_enum_CU_MEM_ATTACH_GLOBAL: CUmemAttach_flags_enum = 1;
-#[doc = "< Memory cannot be accessed by any stream on any device"]
 pub const CUmemAttach_flags_enum_CU_MEM_ATTACH_HOST: CUmemAttach_flags_enum = 2;
-#[doc = "< Memory can only be accessed by a single stream on the associated device"]
 pub const CUmemAttach_flags_enum_CU_MEM_ATTACH_SINGLE: CUmemAttach_flags_enum = 4;
-#[doc = " CUDA Mem Attach Flags"]
 pub type CUmemAttach_flags_enum = u32;
 pub use self::CUmemAttach_flags_enum as CUmemAttach_flags;
-#[doc = "< Automatic scheduling"]
 pub const CUctx_flags_enum_CU_CTX_SCHED_AUTO: CUctx_flags_enum = 0;
-#[doc = "< Set spin as default scheduling"]
 pub const CUctx_flags_enum_CU_CTX_SCHED_SPIN: CUctx_flags_enum = 1;
-#[doc = "< Set yield as default scheduling"]
 pub const CUctx_flags_enum_CU_CTX_SCHED_YIELD: CUctx_flags_enum = 2;
-#[doc = "< Set blocking synchronization as default scheduling"]
 pub const CUctx_flags_enum_CU_CTX_SCHED_BLOCKING_SYNC: CUctx_flags_enum = 4;
-#[doc = "< Set blocking synchronization as default scheduling"]
-#[doc = " \\deprecated This flag was deprecated as of CUDA 4.0"]
-#[doc = " and was replaced with ::CU_CTX_SCHED_BLOCKING_SYNC."]
 pub const CUctx_flags_enum_CU_CTX_BLOCKING_SYNC: CUctx_flags_enum = 4;
 pub const CUctx_flags_enum_CU_CTX_SCHED_MASK: CUctx_flags_enum = 7;
-#[doc = "< Support mapped pinned allocations"]
 pub const CUctx_flags_enum_CU_CTX_MAP_HOST: CUctx_flags_enum = 8;
-#[doc = "< Keep local memory allocation after launch"]
 pub const CUctx_flags_enum_CU_CTX_LMEM_RESIZE_TO_MAX: CUctx_flags_enum = 16;
 pub const CUctx_flags_enum_CU_CTX_FLAGS_MASK: CUctx_flags_enum = 31;
-#[doc = " Context
creation flags"] pub type CUctx_flags_enum = u32; pub use self::CUctx_flags_enum as CUctx_flags; -#[doc = "< Default stream flag"] pub const CUstream_flags_enum_CU_STREAM_DEFAULT: CUstream_flags_enum = 0; -#[doc = "< Stream does not synchronize with stream 0 (the NULL stream)"] pub const CUstream_flags_enum_CU_STREAM_NON_BLOCKING: CUstream_flags_enum = 1; -#[doc = " Stream creation flags"] pub type CUstream_flags_enum = u32; pub use self::CUstream_flags_enum as CUstream_flags; -#[doc = "< Default event flag"] pub const CUevent_flags_enum_CU_EVENT_DEFAULT: CUevent_flags_enum = 0; -#[doc = "< Event uses blocking synchronization"] pub const CUevent_flags_enum_CU_EVENT_BLOCKING_SYNC: CUevent_flags_enum = 1; -#[doc = "< Event will not record timing data"] pub const CUevent_flags_enum_CU_EVENT_DISABLE_TIMING: CUevent_flags_enum = 2; -#[doc = "< Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set"] pub const CUevent_flags_enum_CU_EVENT_INTERPROCESS: CUevent_flags_enum = 4; -#[doc = " Event creation flags"] pub type CUevent_flags_enum = u32; pub use self::CUevent_flags_enum as CUevent_flags; -#[doc = "< Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit"] -#[doc = "values). Note this is a cyclic comparison which ignores wraparound."] -#[doc = "(Default behavior.)"] pub const CUstreamWaitValue_flags_enum_CU_STREAM_WAIT_VALUE_GEQ: CUstreamWaitValue_flags_enum = 0; -#[doc = "< Wait until *addr == value."] pub const CUstreamWaitValue_flags_enum_CU_STREAM_WAIT_VALUE_EQ: CUstreamWaitValue_flags_enum = 1; -#[doc = "< Wait until (*addr & value) != 0."] pub const CUstreamWaitValue_flags_enum_CU_STREAM_WAIT_VALUE_AND: CUstreamWaitValue_flags_enum = 2; -#[doc = "< Wait until ~(*addr | value) != 0. Support for this operation can be"] -#[doc = "queried with ::cuDeviceGetAttribute() and"] -#[doc = "::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR."] pub const CUstreamWaitValue_flags_enum_CU_STREAM_WAIT_VALUE_NOR: CUstreamWaitValue_flags_enum = 3; -#[doc = "< Follow the wait operation with a flush of outstanding remote writes. This"] -#[doc = "means that, if a remote write operation is guaranteed to have reached the"] -#[doc = "device before the wait can be satisfied, that write is guaranteed to be"] -#[doc = "visible to downstream device work. The device is permitted to reorder"] -#[doc = "remote writes internally. For example, this flag would be required if"] -#[doc = "two remote writes arrive in a defined order, the wait is satisfied by the"] -#[doc = "second write, and downstream work needs to observe the first write."] -#[doc = "Support for this operation is restricted to selected platforms and can be"] -#[doc = "queried with ::CU_DEVICE_ATTRIBUTE_CAN_USE_WAIT_VALUE_FLUSH."] pub const CUstreamWaitValue_flags_enum_CU_STREAM_WAIT_VALUE_FLUSH: CUstreamWaitValue_flags_enum = 1073741824; -#[doc = " Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64"] pub type CUstreamWaitValue_flags_enum = u32; pub use self::CUstreamWaitValue_flags_enum as CUstreamWaitValue_flags; -#[doc = "< Default behavior"] pub const CUstreamWriteValue_flags_enum_CU_STREAM_WRITE_VALUE_DEFAULT: CUstreamWriteValue_flags_enum = 0; -#[doc = "< Permits the write to be reordered with writes which were issued"] -#[doc = "before it, as a performance optimization. 
Normally,"] -#[doc = "::cuStreamWriteValue32 will provide a memory fence before the"] -#[doc = "write, which has similar semantics to"] -#[doc = "__threadfence_system() but is scoped to the stream"] -#[doc = "rather than a CUDA thread."] pub const CUstreamWriteValue_flags_enum_CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER : CUstreamWriteValue_flags_enum = 1 ; -#[doc = " Flags for ::cuStreamWriteValue32"] pub type CUstreamWriteValue_flags_enum = u32; pub use self::CUstreamWriteValue_flags_enum as CUstreamWriteValue_flags; -#[doc = "< Represents a ::cuStreamWaitValue32 operation"] pub const CUstreamBatchMemOpType_enum_CU_STREAM_MEM_OP_WAIT_VALUE_32: CUstreamBatchMemOpType_enum = 1; -#[doc = "< Represents a ::cuStreamWriteValue32 operation"] pub const CUstreamBatchMemOpType_enum_CU_STREAM_MEM_OP_WRITE_VALUE_32: CUstreamBatchMemOpType_enum = 2; -#[doc = "< Represents a ::cuStreamWaitValue64 operation"] pub const CUstreamBatchMemOpType_enum_CU_STREAM_MEM_OP_WAIT_VALUE_64: CUstreamBatchMemOpType_enum = 4; -#[doc = "< Represents a ::cuStreamWriteValue64 operation"] pub const CUstreamBatchMemOpType_enum_CU_STREAM_MEM_OP_WRITE_VALUE_64: CUstreamBatchMemOpType_enum = 5; -#[doc = "< This has the same effect as ::CU_STREAM_WAIT_VALUE_FLUSH, but as a"] -#[doc = "standalone operation."] pub const CUstreamBatchMemOpType_enum_CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES: CUstreamBatchMemOpType_enum = 3; -#[doc = " Operations for ::cuStreamBatchMemOp"] pub type CUstreamBatchMemOpType_enum = u32; pub use self::CUstreamBatchMemOpType_enum as CUstreamBatchMemOpType; -#[doc = " Per-operation parameters for ::cuStreamBatchMemOp"] #[repr(C)] pub struct CUstreamBatchMemOpParams_union { pub operation: __BindgenUnionField, @@ -294,7 +234,7 @@ pub struct CUstreamBatchMemOpParams_union { pub bindgen_union_field: [u64; 6usize], } #[repr(C)] -pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st { pub operation : CUstreamBatchMemOpType , pub address : CUdeviceptr , pub __bindgen_anon_1 : CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1 , pub flags : :: std :: os :: raw :: c_uint , # [ doc = "< For driver internal use. Initial value is unimportant." ] pub alias : CUdeviceptr , } +pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st { pub operation : CUstreamBatchMemOpType , pub address : CUdeviceptr , pub __bindgen_anon_1 : CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1 , pub flags : :: std :: os :: raw :: c_uint , pub alias : CUdeviceptr , } #[repr(C)] pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindgen_ty_1 { @@ -303,7 +243,7 @@ pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWaitValueParams_st__bindg pub bindgen_union_field: u64, } #[repr(C)] -pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st { pub operation : CUstreamBatchMemOpType , pub address : CUdeviceptr , pub __bindgen_anon_1 : CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1 , pub flags : :: std :: os :: raw :: c_uint , # [ doc = "< For driver internal use. Initial value is unimportant." 
] pub alias : CUdeviceptr , } +pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st { pub operation : CUstreamBatchMemOpType , pub address : CUdeviceptr , pub __bindgen_anon_1 : CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1 , pub flags : :: std :: os :: raw :: c_uint , pub alias : CUdeviceptr , } #[repr(C)] pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpWriteValueParams_st__bindgen_ty_1 { @@ -319,706 +259,357 @@ pub struct CUstreamBatchMemOpParams_union_CUstreamMemOpFlushRemoteWritesParams_s pub flags: ::std::os::raw::c_uint, } pub type CUstreamBatchMemOpParams = CUstreamBatchMemOpParams_union; -#[doc = "< Default behavior"] pub const CUoccupancy_flags_enum_CU_OCCUPANCY_DEFAULT: CUoccupancy_flags_enum = 0; -#[doc = "< Assume global caching is enabled and cannot be automatically turned off"] pub const CUoccupancy_flags_enum_CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE: CUoccupancy_flags_enum = 1; -#[doc = " Occupancy calculator flag"] pub type CUoccupancy_flags_enum = u32; pub use self::CUoccupancy_flags_enum as CUoccupancy_flags; -#[doc = "< Unsigned 8-bit integers"] pub const CUarray_format_enum_CU_AD_FORMAT_UNSIGNED_INT8: CUarray_format_enum = 1; -#[doc = "< Unsigned 16-bit integers"] pub const CUarray_format_enum_CU_AD_FORMAT_UNSIGNED_INT16: CUarray_format_enum = 2; -#[doc = "< Unsigned 32-bit integers"] pub const CUarray_format_enum_CU_AD_FORMAT_UNSIGNED_INT32: CUarray_format_enum = 3; -#[doc = "< Signed 8-bit integers"] pub const CUarray_format_enum_CU_AD_FORMAT_SIGNED_INT8: CUarray_format_enum = 8; -#[doc = "< Signed 16-bit integers"] pub const CUarray_format_enum_CU_AD_FORMAT_SIGNED_INT16: CUarray_format_enum = 9; -#[doc = "< Signed 32-bit integers"] pub const CUarray_format_enum_CU_AD_FORMAT_SIGNED_INT32: CUarray_format_enum = 10; -#[doc = "< 16-bit floating point"] pub const CUarray_format_enum_CU_AD_FORMAT_HALF: CUarray_format_enum = 16; -#[doc = "< 32-bit floating point"] pub const CUarray_format_enum_CU_AD_FORMAT_FLOAT: CUarray_format_enum = 32; -#[doc = " Array formats"] pub type CUarray_format_enum = u32; pub use self::CUarray_format_enum as CUarray_format; -#[doc = "< Wrapping address mode"] pub const CUaddress_mode_enum_CU_TR_ADDRESS_MODE_WRAP: CUaddress_mode_enum = 0; -#[doc = "< Clamp to edge address mode"] pub const CUaddress_mode_enum_CU_TR_ADDRESS_MODE_CLAMP: CUaddress_mode_enum = 1; -#[doc = "< Mirror address mode"] pub const CUaddress_mode_enum_CU_TR_ADDRESS_MODE_MIRROR: CUaddress_mode_enum = 2; -#[doc = "< Border address mode"] pub const CUaddress_mode_enum_CU_TR_ADDRESS_MODE_BORDER: CUaddress_mode_enum = 3; -#[doc = " Texture reference addressing modes"] pub type CUaddress_mode_enum = u32; pub use self::CUaddress_mode_enum as CUaddress_mode; -#[doc = "< Point filter mode"] pub const CUfilter_mode_enum_CU_TR_FILTER_MODE_POINT: CUfilter_mode_enum = 0; -#[doc = "< Linear filter mode"] pub const CUfilter_mode_enum_CU_TR_FILTER_MODE_LINEAR: CUfilter_mode_enum = 1; -#[doc = " Texture reference filtering modes"] pub type CUfilter_mode_enum = u32; pub use self::CUfilter_mode_enum as CUfilter_mode; -#[doc = "< Maximum number of threads per block"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUdevice_attribute_enum = 1; -#[doc = "< Maximum block dimension X"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: CUdevice_attribute_enum = 2; -#[doc = "< Maximum block dimension Y"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: 
CUdevice_attribute_enum = 3; -#[doc = "< Maximum block dimension Z"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: CUdevice_attribute_enum = 4; -#[doc = "< Maximum grid dimension X"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: CUdevice_attribute_enum = 5; -#[doc = "< Maximum grid dimension Y"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: CUdevice_attribute_enum = 6; -#[doc = "< Maximum grid dimension Z"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: CUdevice_attribute_enum = 7; -#[doc = "< Maximum shared memory available per block in bytes"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK : CUdevice_attribute_enum = 8 ; -#[doc = "< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK : CUdevice_attribute_enum = 8 ; -#[doc = "< Memory available on device for __constant__ variables in a CUDA C kernel in bytes"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: CUdevice_attribute_enum = 9; -#[doc = "< Warp size in threads"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_WARP_SIZE: CUdevice_attribute_enum = 10; -#[doc = "< Maximum pitch in bytes allowed by memory copies"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_PITCH: CUdevice_attribute_enum = 11; -#[doc = "< Maximum number of 32-bit registers available per block"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK : CUdevice_attribute_enum = 12 ; -#[doc = "< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK: CUdevice_attribute_enum = 12; -#[doc = "< Typical clock frequency in kilohertz"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_CLOCK_RATE: CUdevice_attribute_enum = 13; -#[doc = "< Alignment requirement for textures"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: CUdevice_attribute_enum = 14; -#[doc = "< Device can possibly copy memory and execute a kernel concurrently. Deprecated. 
Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT."] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: CUdevice_attribute_enum = 15; -#[doc = "< Number of multiprocessors on device"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: CUdevice_attribute_enum = 16; -#[doc = "< Specifies whether there is a run time limit on kernels"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: CUdevice_attribute_enum = 17; -#[doc = "< Device is integrated with host memory"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_INTEGRATED: CUdevice_attribute_enum = 18; -#[doc = "< Device can map host memory into CUDA address space"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: CUdevice_attribute_enum = 19; -#[doc = "< Compute mode (See ::CUcomputemode for details)"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: CUdevice_attribute_enum = 20; -#[doc = "< Maximum 1D texture width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH : CUdevice_attribute_enum = 21 ; -#[doc = "< Maximum 2D texture width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH : CUdevice_attribute_enum = 22 ; -#[doc = "< Maximum 2D texture height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT : CUdevice_attribute_enum = 23 ; -#[doc = "< Maximum 3D texture width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH : CUdevice_attribute_enum = 24 ; -#[doc = "< Maximum 3D texture height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT : CUdevice_attribute_enum = 25 ; -#[doc = "< Maximum 3D texture depth"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH : CUdevice_attribute_enum = 26 ; -#[doc = "< Maximum 2D layered texture width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH : CUdevice_attribute_enum = 27 ; -#[doc = "< Maximum 2D layered texture height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT : CUdevice_attribute_enum = 28 ; -#[doc = "< Maximum layers in a 2D layered texture"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS : CUdevice_attribute_enum = 29 ; -#[doc = "< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH : CUdevice_attribute_enum = 27 ; -#[doc = "< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT : CUdevice_attribute_enum = 28 ; -#[doc = "< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES : CUdevice_attribute_enum = 29 ; -#[doc = "< Alignment requirement for surfaces"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT: CUdevice_attribute_enum = 30; -#[doc = "< Device can possibly execute multiple kernels concurrently"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: CUdevice_attribute_enum = 31; -#[doc = "< Device has ECC support enabled"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_ECC_ENABLED: CUdevice_attribute_enum = 32; -#[doc = "< PCI bus ID of the device"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: 
CUdevice_attribute_enum = 33; -#[doc = "< PCI device ID of the device"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: CUdevice_attribute_enum = 34; -#[doc = "< Device is using TCC driver model"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_TCC_DRIVER: CUdevice_attribute_enum = 35; -#[doc = "< Peak memory clock frequency in kilohertz"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: CUdevice_attribute_enum = 36; -#[doc = "< Global memory bus width in bits"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH : CUdevice_attribute_enum = 37 ; -#[doc = "< Size of L2 cache in bytes"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: CUdevice_attribute_enum = 38; -#[doc = "< Maximum resident threads per multiprocessor"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR : CUdevice_attribute_enum = 39 ; -#[doc = "< Number of asynchronous engines"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT: CUdevice_attribute_enum = 40; -#[doc = "< Device shares a unified address space with the host"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: CUdevice_attribute_enum = 41; -#[doc = "< Maximum 1D layered texture width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH : CUdevice_attribute_enum = 42 ; -#[doc = "< Maximum layers in a 1D layered texture"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS : CUdevice_attribute_enum = 43 ; -#[doc = "< Deprecated, do not use."] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER: CUdevice_attribute_enum = 44; -#[doc = "< Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH : CUdevice_attribute_enum = 45 ; -#[doc = "< Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT : CUdevice_attribute_enum = 46 ; -#[doc = "< Alternate maximum 3D texture width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE : CUdevice_attribute_enum = 47 ; -#[doc = "< Alternate maximum 3D texture height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE : CUdevice_attribute_enum = 48 ; -#[doc = "< Alternate maximum 3D texture depth"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE : CUdevice_attribute_enum = 49 ; -#[doc = "< PCI domain ID of the device"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: CUdevice_attribute_enum = 50; -#[doc = "< Pitch alignment requirement for textures"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT : CUdevice_attribute_enum = 51 ; -#[doc = "< Maximum cubemap texture width/height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH : CUdevice_attribute_enum = 52 ; -#[doc = "< Maximum cubemap layered texture width/height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH : CUdevice_attribute_enum = 53 ; -#[doc = "< Maximum layers in a cubemap layered texture"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS : CUdevice_attribute_enum = 54 ; -#[doc = "< Maximum 1D surface width"] pub const 
CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH : CUdevice_attribute_enum = 55 ; -#[doc = "< Maximum 2D surface width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH : CUdevice_attribute_enum = 56 ; -#[doc = "< Maximum 2D surface height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT : CUdevice_attribute_enum = 57 ; -#[doc = "< Maximum 3D surface width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH : CUdevice_attribute_enum = 58 ; -#[doc = "< Maximum 3D surface height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT : CUdevice_attribute_enum = 59 ; -#[doc = "< Maximum 3D surface depth"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH : CUdevice_attribute_enum = 60 ; -#[doc = "< Maximum 1D layered surface width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH : CUdevice_attribute_enum = 61 ; -#[doc = "< Maximum layers in a 1D layered surface"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS : CUdevice_attribute_enum = 62 ; -#[doc = "< Maximum 2D layered surface width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH : CUdevice_attribute_enum = 63 ; -#[doc = "< Maximum 2D layered surface height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT : CUdevice_attribute_enum = 64 ; -#[doc = "< Maximum layers in a 2D layered surface"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS : CUdevice_attribute_enum = 65 ; -#[doc = "< Maximum cubemap surface width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH : CUdevice_attribute_enum = 66 ; -#[doc = "< Maximum cubemap layered surface width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH : CUdevice_attribute_enum = 67 ; -#[doc = "< Maximum layers in a cubemap layered surface"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS : CUdevice_attribute_enum = 68 ; -#[doc = "< Maximum 1D linear texture width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH : CUdevice_attribute_enum = 69 ; -#[doc = "< Maximum 2D linear texture width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH : CUdevice_attribute_enum = 70 ; -#[doc = "< Maximum 2D linear texture height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT : CUdevice_attribute_enum = 71 ; -#[doc = "< Maximum 2D linear texture pitch in bytes"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH : CUdevice_attribute_enum = 72 ; -#[doc = "< Maximum mipmapped 2D texture width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH : CUdevice_attribute_enum = 73 ; -#[doc = "< Maximum mipmapped 2D texture height"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT : CUdevice_attribute_enum = 74 ; -#[doc = "< Major compute capability version number"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR : CUdevice_attribute_enum = 75 ; -#[doc = "< Minor compute capability version number"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR : CUdevice_attribute_enum 
= 76 ; -#[doc = "< Maximum mipmapped 1D texture width"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH : CUdevice_attribute_enum = 77 ; -#[doc = "< Device supports stream priorities"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED : CUdevice_attribute_enum = 78 ; -#[doc = "< Device supports caching globals in L1"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED : CUdevice_attribute_enum = 79 ; -#[doc = "< Device supports caching locals in L1"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED : CUdevice_attribute_enum = 80 ; -#[doc = "< Maximum shared memory available per multiprocessor in bytes"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR : CUdevice_attribute_enum = 81 ; -#[doc = "< Maximum number of 32-bit registers available per multiprocessor"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR : CUdevice_attribute_enum = 82 ; -#[doc = "< Device can allocate managed memory on this system"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: CUdevice_attribute_enum = 83; -#[doc = "< Device is on a multi-GPU board"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: CUdevice_attribute_enum = 84; -#[doc = "< Unique id for a group of devices on the same multi-GPU board"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID : CUdevice_attribute_enum = 85 ; -#[doc = "< Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED : CUdevice_attribute_enum = 86 ; -#[doc = "< Ratio of single precision performance (in floating-point operations per second) to double precision performance"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO : CUdevice_attribute_enum = 87 ; -#[doc = "< Device supports coherently accessing pageable memory without calling cudaHostRegister on it"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: CUdevice_attribute_enum = 88; -#[doc = "< Device can coherently access managed memory concurrently with the CPU"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS : CUdevice_attribute_enum = 89 ; -#[doc = "< Device supports compute preemption."] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED : CUdevice_attribute_enum = 90 ; -#[doc = "< Device can access host registered memory at the same virtual address as the CPU"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM : CUdevice_attribute_enum = 91 ; -#[doc = "< ::cuStreamBatchMemOp and related APIs are supported."] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS: CUdevice_attribute_enum = 92; -#[doc = "< 64-bit operations are supported in ::cuStreamBatchMemOp and related APIs."] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS : CUdevice_attribute_enum = 93 ; -#[doc = "< ::CU_STREAM_WAIT_VALUE_NOR is supported."] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR : CUdevice_attribute_enum = 94 ; -#[doc = "< Device supports launching cooperative kernels via ::cuLaunchCooperativeKernel"] pub const 
CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH: CUdevice_attribute_enum = 95; -#[doc = "< Device can participate in cooperative kernels launched via ::cuLaunchCooperativeKernelMultiDevice"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH : CUdevice_attribute_enum = 96 ; -#[doc = "< Maximum optin shared memory per block"] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN : CUdevice_attribute_enum = 97 ; -#[doc = "< Both the ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. See \\ref CUDA_MEMOP for additional details."] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES : CUdevice_attribute_enum = 98 ; -#[doc = "< Device supports host memory registration via ::cudaHostRegister."] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED : CUdevice_attribute_enum = 99 ; -#[doc = "< Device accesses pageable memory via the host's page tables."] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES : CUdevice_attribute_enum = 100 ; -#[doc = "< The host can directly access managed memory on the device without migration."] pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST : CUdevice_attribute_enum = 101 ; pub const CUdevice_attribute_enum_CU_DEVICE_ATTRIBUTE_MAX: CUdevice_attribute_enum = 102; -#[doc = " Device properties"] pub type CUdevice_attribute_enum = u32; pub use self::CUdevice_attribute_enum as CUdevice_attribute; -#[doc = " Legacy device properties"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUdevprop_st { - #[doc = "< Maximum number of threads per block"] pub maxThreadsPerBlock: ::std::os::raw::c_int, - #[doc = "< Maximum size of each dimension of a block"] pub maxThreadsDim: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum size of each dimension of a grid"] pub maxGridSize: [::std::os::raw::c_int; 3usize], - #[doc = "< Shared memory available per block in bytes"] pub sharedMemPerBlock: ::std::os::raw::c_int, - #[doc = "< Constant memory available on device in bytes"] pub totalConstantMemory: ::std::os::raw::c_int, - #[doc = "< Warp size in threads"] pub SIMDWidth: ::std::os::raw::c_int, - #[doc = "< Maximum pitch in bytes allowed by memory copies"] pub memPitch: ::std::os::raw::c_int, - #[doc = "< 32-bit registers available per block"] pub regsPerBlock: ::std::os::raw::c_int, - #[doc = "< Clock frequency in kilohertz"] pub clockRate: ::std::os::raw::c_int, - #[doc = "< Alignment requirement for textures"] pub textureAlign: ::std::os::raw::c_int, } pub type CUdevprop = CUdevprop_st; -#[doc = "< The ::CUcontext on which a pointer was allocated or registered"] pub const CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_CONTEXT: CUpointer_attribute_enum = 1; -#[doc = "< The ::CUmemorytype describing the physical location of a pointer"] pub const CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_MEMORY_TYPE: CUpointer_attribute_enum = 2; -#[doc = "< The address at which a pointer's memory may be accessed on the device"] pub const CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_DEVICE_POINTER: CUpointer_attribute_enum = 3; -#[doc = "< The address at which a pointer's memory may be accessed on the host"] pub const CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_HOST_POINTER: CUpointer_attribute_enum = 4; -#[doc = "< A pair of tokens for use with the nv-p2p.h Linux kernel interface"] pub const 
CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_P2P_TOKENS: CUpointer_attribute_enum = 5; -#[doc = "< Synchronize every synchronous memory operation initiated on this region"] pub const CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: CUpointer_attribute_enum = 6; -#[doc = "< A process-wide unique ID for an allocated memory region"] pub const CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_BUFFER_ID: CUpointer_attribute_enum = 7; -#[doc = "< Indicates if the pointer points to managed memory"] pub const CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_IS_MANAGED: CUpointer_attribute_enum = 8; -#[doc = "< A device ordinal of a device on which a pointer was allocated or registered"] pub const CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: CUpointer_attribute_enum = 9; -#[doc = " Pointer information"] pub type CUpointer_attribute_enum = u32; pub use self::CUpointer_attribute_enum as CUpointer_attribute; -#[doc = " The maximum number of threads per block, beyond which a launch of the"] -#[doc = " function would fail. This number depends on both the function and the"] -#[doc = " device on which the function is currently loaded."] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: CUfunction_attribute_enum = 0; -#[doc = " The size in bytes of statically-allocated shared memory required by"] -#[doc = " this function. This does not include dynamically-allocated shared"] -#[doc = " memory requested by the user at runtime."] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: CUfunction_attribute_enum = 1; -#[doc = " The size in bytes of user-allocated constant memory required by this"] -#[doc = " function."] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: CUfunction_attribute_enum = 2; -#[doc = " The size in bytes of local memory used by each thread of this function."] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: CUfunction_attribute_enum = 3; -#[doc = " The number of registers used by each thread of this function."] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_NUM_REGS: CUfunction_attribute_enum = 4; -#[doc = " The PTX virtual architecture version for which the function was"] -#[doc = " compiled. This value is the major PTX version * 10 + the minor PTX"] -#[doc = " version, so a PTX version 1.3 function would return the value 13."] -#[doc = " Note that this may return the undefined value of 0 for cubins"] -#[doc = " compiled prior to CUDA 3.0."] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_PTX_VERSION: CUfunction_attribute_enum = 5; -#[doc = " The binary architecture version for which the function was compiled."] -#[doc = " This value is the major binary version * 10 + the minor binary version,"] -#[doc = " so a binary version 1.3 function would return the value 13. Note that"] -#[doc = " this will return a value of 10 for legacy cubins that do not have a"] -#[doc = " properly-encoded binary architecture version."] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_BINARY_VERSION: CUfunction_attribute_enum = 6; -#[doc = " The attribute to indicate whether the function has been compiled with"] -#[doc = " user specified option \"-Xptxas --dlcm=ca\" set ."] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_CACHE_MODE_CA: CUfunction_attribute_enum = 7; -#[doc = " The maximum size in bytes of dynamically-allocated shared memory that can be used by"] -#[doc = " this function. 
If the user-specified dynamic shared memory size is larger than this"] -#[doc = " value, the launch will fail."] -#[doc = " See ::cuFuncSetAttribute"] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES : CUfunction_attribute_enum = 8 ; -#[doc = " On devices where the L1 cache and shared memory use the same hardware resources,"] -#[doc = " this sets the shared memory carveout preference, in percent of the total shared memory."] -#[doc = " Refer to ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR."] -#[doc = " This is only a hint, and the driver can choose a different ratio if required to execute the function."] -#[doc = " See ::cuFuncSetAttribute"] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT : CUfunction_attribute_enum = 9 ; -#[doc = " On devices where the L1 cache and shared memory use the same hardware resources,"] -#[doc = " this sets the shared memory carveout preference, in percent of the total shared memory."] -#[doc = " Refer to ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR."] -#[doc = " This is only a hint, and the driver can choose a different ratio if required to execute the function."] -#[doc = " See ::cuFuncSetAttribute"] pub const CUfunction_attribute_enum_CU_FUNC_ATTRIBUTE_MAX: CUfunction_attribute_enum = 10; -#[doc = " Function properties"] pub type CUfunction_attribute_enum = u32; pub use self::CUfunction_attribute_enum as CUfunction_attribute; -#[doc = "< no preference for shared memory or L1 (default)"] pub const CUfunc_cache_enum_CU_FUNC_CACHE_PREFER_NONE: CUfunc_cache_enum = 0; -#[doc = "< prefer larger shared memory and smaller L1 cache"] pub const CUfunc_cache_enum_CU_FUNC_CACHE_PREFER_SHARED: CUfunc_cache_enum = 1; -#[doc = "< prefer larger L1 cache and smaller shared memory"] pub const CUfunc_cache_enum_CU_FUNC_CACHE_PREFER_L1: CUfunc_cache_enum = 2; -#[doc = "< prefer equal sized L1 cache and shared memory"] pub const CUfunc_cache_enum_CU_FUNC_CACHE_PREFER_EQUAL: CUfunc_cache_enum = 3; -#[doc = " Function cache configurations"] pub type CUfunc_cache_enum = u32; pub use self::CUfunc_cache_enum as CUfunc_cache; -#[doc = "< set default shared memory bank size"] pub const CUsharedconfig_enum_CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: CUsharedconfig_enum = 0; -#[doc = "< set shared memory bank width to four bytes"] pub const CUsharedconfig_enum_CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: CUsharedconfig_enum = 1; -#[doc = "< set shared memory bank width to eight bytes"] pub const CUsharedconfig_enum_CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: CUsharedconfig_enum = 2; -#[doc = " Shared memory configurations"] pub type CUsharedconfig_enum = u32; pub use self::CUsharedconfig_enum as CUsharedconfig; -#[doc = "< No preference for shared memory or L1 (default)"] pub const CUshared_carveout_enum_CU_SHAREDMEM_CARVEOUT_DEFAULT: CUshared_carveout_enum = -1; -#[doc = "< Prefer maximum available shared memory, minimum L1 cache"] pub const CUshared_carveout_enum_CU_SHAREDMEM_CARVEOUT_MAX_SHARED: CUshared_carveout_enum = 100; -#[doc = "< Prefer maximum available L1 cache, minimum shared memory"] pub const CUshared_carveout_enum_CU_SHAREDMEM_CARVEOUT_MAX_L1: CUshared_carveout_enum = 0; -#[doc = " Shared memory carveout configurations. 
These may be passed to ::cuFuncSetAttribute"] pub type CUshared_carveout_enum = i32; pub use self::CUshared_carveout_enum as CUshared_carveout; -#[doc = "< Host memory"] pub const CUmemorytype_enum_CU_MEMORYTYPE_HOST: CUmemorytype_enum = 1; -#[doc = "< Device memory"] pub const CUmemorytype_enum_CU_MEMORYTYPE_DEVICE: CUmemorytype_enum = 2; -#[doc = "< Array memory"] pub const CUmemorytype_enum_CU_MEMORYTYPE_ARRAY: CUmemorytype_enum = 3; -#[doc = "< Unified device or host memory"] pub const CUmemorytype_enum_CU_MEMORYTYPE_UNIFIED: CUmemorytype_enum = 4; -#[doc = " Memory types"] pub type CUmemorytype_enum = u32; pub use self::CUmemorytype_enum as CUmemorytype; -#[doc = "< Default compute mode (Multiple contexts allowed per device)"] pub const CUcomputemode_enum_CU_COMPUTEMODE_DEFAULT: CUcomputemode_enum = 0; -#[doc = "< Compute-prohibited mode (No contexts can be created on this device at this time)"] pub const CUcomputemode_enum_CU_COMPUTEMODE_PROHIBITED: CUcomputemode_enum = 2; -#[doc = "< Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time)"] pub const CUcomputemode_enum_CU_COMPUTEMODE_EXCLUSIVE_PROCESS: CUcomputemode_enum = 3; -#[doc = " Compute Modes"] pub type CUcomputemode_enum = u32; pub use self::CUcomputemode_enum as CUcomputemode; -#[doc = "< Data will mostly be read and only occassionally be written to"] pub const CUmem_advise_enum_CU_MEM_ADVISE_SET_READ_MOSTLY: CUmem_advise_enum = 1; -#[doc = "< Undo the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY"] pub const CUmem_advise_enum_CU_MEM_ADVISE_UNSET_READ_MOSTLY: CUmem_advise_enum = 2; -#[doc = "< Set the preferred location for the data as the specified device"] pub const CUmem_advise_enum_CU_MEM_ADVISE_SET_PREFERRED_LOCATION: CUmem_advise_enum = 3; -#[doc = "< Clear the preferred location for the data"] pub const CUmem_advise_enum_CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: CUmem_advise_enum = 4; -#[doc = "< Data will be accessed by the specified device, so prevent page faults as much as possible"] pub const CUmem_advise_enum_CU_MEM_ADVISE_SET_ACCESSED_BY: CUmem_advise_enum = 5; -#[doc = "< Let the Unified Memory subsystem decide on the page faulting policy for the specified device"] pub const CUmem_advise_enum_CU_MEM_ADVISE_UNSET_ACCESSED_BY: CUmem_advise_enum = 6; -#[doc = " Memory advise values"] pub type CUmem_advise_enum = u32; pub use self::CUmem_advise_enum as CUmem_advise; -#[doc = "< Whether the range will mostly be read and only occassionally be written to"] pub const CUmem_range_attribute_enum_CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: CUmem_range_attribute_enum = 1; -#[doc = "< The preferred location of the range"] pub const CUmem_range_attribute_enum_CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION : CUmem_range_attribute_enum = 2 ; -#[doc = "< Memory range has ::CU_MEM_ADVISE_SET_ACCESSED_BY set for specified device"] pub const CUmem_range_attribute_enum_CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: CUmem_range_attribute_enum = 3; -#[doc = "< The last location to which the range was prefetched"] pub const CUmem_range_attribute_enum_CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION : CUmem_range_attribute_enum = 4 ; pub type CUmem_range_attribute_enum = u32; pub use self::CUmem_range_attribute_enum as CUmem_range_attribute; -#[doc = " Max number of registers that a thread may use.\\n"] -#[doc = " Option type: unsigned int\\n"] -#[doc = " Applies to: compiler only"] pub const CUjit_option_enum_CU_JIT_MAX_REGISTERS: CUjit_option_enum = 0; -#[doc = " IN: Specifies minimum number of threads per 
block to target compilation"] -#[doc = " for\\n"] -#[doc = " OUT: Returns the number of threads the compiler actually targeted."] -#[doc = " This restricts the resource utilization fo the compiler (e.g. max"] -#[doc = " registers) such that a block with the given number of threads should be"] -#[doc = " able to launch based on register limitations. Note, this option does not"] -#[doc = " currently take into account any other resource limitations, such as"] -#[doc = " shared memory utilization.\\n"] -#[doc = " Cannot be combined with ::CU_JIT_TARGET.\\n"] -#[doc = " Option type: unsigned int\\n"] -#[doc = " Applies to: compiler only"] pub const CUjit_option_enum_CU_JIT_THREADS_PER_BLOCK: CUjit_option_enum = 1; -#[doc = " Overwrites the option value with the total wall clock time, in"] -#[doc = " milliseconds, spent in the compiler and linker\\n"] -#[doc = " Option type: float\\n"] -#[doc = " Applies to: compiler and linker"] pub const CUjit_option_enum_CU_JIT_WALL_TIME: CUjit_option_enum = 2; -#[doc = " Pointer to a buffer in which to print any log messages"] -#[doc = " that are informational in nature (the buffer size is specified via"] -#[doc = " option ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)\\n"] -#[doc = " Option type: char *\\n"] -#[doc = " Applies to: compiler and linker"] pub const CUjit_option_enum_CU_JIT_INFO_LOG_BUFFER: CUjit_option_enum = 3; -#[doc = " IN: Log buffer size in bytes. Log messages will be capped at this size"] -#[doc = " (including null terminator)\\n"] -#[doc = " OUT: Amount of log buffer filled with messages\\n"] -#[doc = " Option type: unsigned int\\n"] -#[doc = " Applies to: compiler and linker"] pub const CUjit_option_enum_CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = 4; -#[doc = " Pointer to a buffer in which to print any log messages that"] -#[doc = " reflect errors (the buffer size is specified via option"] -#[doc = " ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)\\n"] -#[doc = " Option type: char *\\n"] -#[doc = " Applies to: compiler and linker"] pub const CUjit_option_enum_CU_JIT_ERROR_LOG_BUFFER: CUjit_option_enum = 5; -#[doc = " IN: Log buffer size in bytes. Log messages will be capped at this size"] -#[doc = " (including null terminator)\\n"] -#[doc = " OUT: Amount of log buffer filled with messages\\n"] -#[doc = " Option type: unsigned int\\n"] -#[doc = " Applies to: compiler and linker"] pub const CUjit_option_enum_CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: CUjit_option_enum = 6; -#[doc = " Level of optimizations to apply to generated code (0 - 4), with 4"] -#[doc = " being the default and highest level of optimizations.\\n"] -#[doc = " Option type: unsigned int\\n"] -#[doc = " Applies to: compiler only"] pub const CUjit_option_enum_CU_JIT_OPTIMIZATION_LEVEL: CUjit_option_enum = 7; -#[doc = " No option value required. Determines the target based on the current"] -#[doc = " attached context (default)\\n"] -#[doc = " Option type: No option value needed\\n"] -#[doc = " Applies to: compiler and linker"] pub const CUjit_option_enum_CU_JIT_TARGET_FROM_CUCONTEXT: CUjit_option_enum = 8; -#[doc = " Target is chosen based on supplied ::CUjit_target. Cannot be"] -#[doc = " combined with ::CU_JIT_THREADS_PER_BLOCK.\\n"] -#[doc = " Option type: unsigned int for enumerated type ::CUjit_target\\n"] -#[doc = " Applies to: compiler and linker"] pub const CUjit_option_enum_CU_JIT_TARGET: CUjit_option_enum = 9; -#[doc = " Specifies choice of fallback strategy if matching cubin is not found."] -#[doc = " Choice is based on supplied ::CUjit_fallback. 
This option cannot be"] -#[doc = " used with cuLink* APIs as the linker requires exact matches.\\n"] -#[doc = " Option type: unsigned int for enumerated type ::CUjit_fallback\\n"] -#[doc = " Applies to: compiler only"] pub const CUjit_option_enum_CU_JIT_FALLBACK_STRATEGY: CUjit_option_enum = 10; -#[doc = " Specifies whether to create debug information in output (-g)"] -#[doc = " (0: false, default)\\n"] -#[doc = " Option type: int\\n"] -#[doc = " Applies to: compiler and linker"] pub const CUjit_option_enum_CU_JIT_GENERATE_DEBUG_INFO: CUjit_option_enum = 11; -#[doc = " Generate verbose log messages (0: false, default)\\n"] -#[doc = " Option type: int\\n"] -#[doc = " Applies to: compiler and linker"] pub const CUjit_option_enum_CU_JIT_LOG_VERBOSE: CUjit_option_enum = 12; -#[doc = " Generate line number information (-lineinfo) (0: false, default)\\n"] -#[doc = " Option type: int\\n"] -#[doc = " Applies to: compiler only"] pub const CUjit_option_enum_CU_JIT_GENERATE_LINE_INFO: CUjit_option_enum = 13; -#[doc = " Specifies whether to enable caching explicitly (-dlcm) \\n"] -#[doc = " Choice is based on supplied ::CUjit_cacheMode_enum.\\n"] -#[doc = " Option type: unsigned int for enumerated type ::CUjit_cacheMode_enum\\n"] -#[doc = " Applies to: compiler only"] pub const CUjit_option_enum_CU_JIT_CACHE_MODE: CUjit_option_enum = 14; -#[doc = " The below jit options are used for internal purposes only, in this version of CUDA"] pub const CUjit_option_enum_CU_JIT_NEW_SM3X_OPT: CUjit_option_enum = 15; -#[doc = " The below jit options are used for internal purposes only, in this version of CUDA"] pub const CUjit_option_enum_CU_JIT_FAST_COMPILE: CUjit_option_enum = 16; -#[doc = " Array of device symbol names that will be relocated to the corresponing"] -#[doc = " host addresses stored in ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES.\\n"] -#[doc = " Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\\n"] -#[doc = " When loding a device module, driver will relocate all encountered"] -#[doc = " unresolved symbols to the host addresses.\\n"] -#[doc = " It is only allowed to register symbols that correspond to unresolved"] -#[doc = " global variables.\\n"] -#[doc = " It is illegal to register the same device symbol at multiple addresses.\\n"] -#[doc = " Option type: const char **\\n"] -#[doc = " Applies to: dynamic linker only"] pub const CUjit_option_enum_CU_JIT_GLOBAL_SYMBOL_NAMES: CUjit_option_enum = 17; -#[doc = " Array of host addresses that will be used to relocate corresponding"] -#[doc = " device symbols stored in ::CU_JIT_GLOBAL_SYMBOL_NAMES.\\n"] -#[doc = " Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\\n"] -#[doc = " Option type: void **\\n"] -#[doc = " Applies to: dynamic linker only"] pub const CUjit_option_enum_CU_JIT_GLOBAL_SYMBOL_ADDRESSES: CUjit_option_enum = 18; -#[doc = " Number of entries in ::CU_JIT_GLOBAL_SYMBOL_NAMES and"] -#[doc = " ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.\\n"] -#[doc = " Option type: unsigned int\\n"] -#[doc = " Applies to: dynamic linker only"] pub const CUjit_option_enum_CU_JIT_GLOBAL_SYMBOL_COUNT: CUjit_option_enum = 19; -#[doc = " Number of entries in ::CU_JIT_GLOBAL_SYMBOL_NAMES and"] -#[doc = " ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.\\n"] -#[doc = " Option type: unsigned int\\n"] -#[doc = " Applies to: dynamic linker only"] pub const CUjit_option_enum_CU_JIT_NUM_OPTIONS: CUjit_option_enum = 20; -#[doc = " Online compiler and linker options"] pub type CUjit_option_enum = u32; pub use self::CUjit_option_enum as CUjit_option; -#[doc = "< Compute device class 2.0"] pub 
const CUjit_target_enum_CU_TARGET_COMPUTE_20: CUjit_target_enum = 20; -#[doc = "< Compute device class 2.1"] pub const CUjit_target_enum_CU_TARGET_COMPUTE_21: CUjit_target_enum = 21; -#[doc = "< Compute device class 3.0"] pub const CUjit_target_enum_CU_TARGET_COMPUTE_30: CUjit_target_enum = 30; -#[doc = "< Compute device class 3.2"] pub const CUjit_target_enum_CU_TARGET_COMPUTE_32: CUjit_target_enum = 32; -#[doc = "< Compute device class 3.5"] pub const CUjit_target_enum_CU_TARGET_COMPUTE_35: CUjit_target_enum = 35; -#[doc = "< Compute device class 3.7"] pub const CUjit_target_enum_CU_TARGET_COMPUTE_37: CUjit_target_enum = 37; -#[doc = "< Compute device class 5.0"] pub const CUjit_target_enum_CU_TARGET_COMPUTE_50: CUjit_target_enum = 50; -#[doc = "< Compute device class 5.2"] pub const CUjit_target_enum_CU_TARGET_COMPUTE_52: CUjit_target_enum = 52; -#[doc = "< Compute device class 5.3"] pub const CUjit_target_enum_CU_TARGET_COMPUTE_53: CUjit_target_enum = 53; -#[doc = "< Compute device class 6.0."] pub const CUjit_target_enum_CU_TARGET_COMPUTE_60: CUjit_target_enum = 60; -#[doc = "< Compute device class 6.1."] pub const CUjit_target_enum_CU_TARGET_COMPUTE_61: CUjit_target_enum = 61; -#[doc = "< Compute device class 6.2."] pub const CUjit_target_enum_CU_TARGET_COMPUTE_62: CUjit_target_enum = 62; -#[doc = "< Compute device class 7.0."] pub const CUjit_target_enum_CU_TARGET_COMPUTE_70: CUjit_target_enum = 70; -#[doc = "< Compute device class 7.2."] pub const CUjit_target_enum_CU_TARGET_COMPUTE_72: CUjit_target_enum = 72; -#[doc = "< Compute device class 7.5."] pub const CUjit_target_enum_CU_TARGET_COMPUTE_75: CUjit_target_enum = 75; -#[doc = " Online compilation targets"] pub type CUjit_target_enum = u32; pub use self::CUjit_target_enum as CUjit_target; -#[doc = "< Prefer to compile ptx if exact binary match not found"] pub const CUjit_fallback_enum_CU_PREFER_PTX: CUjit_fallback_enum = 0; -#[doc = "< Prefer to fall back to compatible binary code if exact match not found"] pub const CUjit_fallback_enum_CU_PREFER_BINARY: CUjit_fallback_enum = 1; -#[doc = " Cubin matching fallback strategies"] pub type CUjit_fallback_enum = u32; pub use self::CUjit_fallback_enum as CUjit_fallback; -#[doc = "< Compile with no -dlcm flag specified"] pub const CUjit_cacheMode_enum_CU_JIT_CACHE_OPTION_NONE: CUjit_cacheMode_enum = 0; -#[doc = "< Compile with L1 cache disabled"] pub const CUjit_cacheMode_enum_CU_JIT_CACHE_OPTION_CG: CUjit_cacheMode_enum = 1; -#[doc = "< Compile with L1 cache enabled"] pub const CUjit_cacheMode_enum_CU_JIT_CACHE_OPTION_CA: CUjit_cacheMode_enum = 2; -#[doc = " Caching modes for dlcm"] pub type CUjit_cacheMode_enum = u32; pub use self::CUjit_cacheMode_enum as CUjit_cacheMode; -#[doc = " Compiled device-class-specific device code\\n"] -#[doc = " Applicable options: none"] pub const CUjitInputType_enum_CU_JIT_INPUT_CUBIN: CUjitInputType_enum = 0; -#[doc = " PTX source code\\n"] -#[doc = " Applicable options: PTX compiler options"] pub const CUjitInputType_enum_CU_JIT_INPUT_PTX: CUjitInputType_enum = 1; -#[doc = " Bundle of multiple cubins and/or PTX of some device code\\n"] -#[doc = " Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY"] pub const CUjitInputType_enum_CU_JIT_INPUT_FATBINARY: CUjitInputType_enum = 2; -#[doc = " Host object with embedded device code\\n"] -#[doc = " Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY"] pub const CUjitInputType_enum_CU_JIT_INPUT_OBJECT: CUjitInputType_enum = 3; -#[doc = " Archive of host objects with embedded 
device code\\n"] -#[doc = " Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY"] pub const CUjitInputType_enum_CU_JIT_INPUT_LIBRARY: CUjitInputType_enum = 4; -#[doc = " Archive of host objects with embedded device code\\n"] -#[doc = " Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY"] pub const CUjitInputType_enum_CU_JIT_NUM_INPUT_TYPES: CUjitInputType_enum = 5; -#[doc = " Device code formats"] pub type CUjitInputType_enum = u32; pub use self::CUjitInputType_enum as CUjitInputType; #[repr(C)] @@ -1034,157 +625,101 @@ pub const CUgraphicsRegisterFlags_enum_CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY: pub const CUgraphicsRegisterFlags_enum_CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD : CUgraphicsRegisterFlags_enum = 2 ; pub const CUgraphicsRegisterFlags_enum_CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST : CUgraphicsRegisterFlags_enum = 4 ; pub const CUgraphicsRegisterFlags_enum_CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER : CUgraphicsRegisterFlags_enum = 8 ; -#[doc = " Flags to register a graphics resource"] pub type CUgraphicsRegisterFlags_enum = u32; pub use self::CUgraphicsRegisterFlags_enum as CUgraphicsRegisterFlags; pub const CUgraphicsMapResourceFlags_enum_CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE : CUgraphicsMapResourceFlags_enum = 0 ; pub const CUgraphicsMapResourceFlags_enum_CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY : CUgraphicsMapResourceFlags_enum = 1 ; pub const CUgraphicsMapResourceFlags_enum_CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD : CUgraphicsMapResourceFlags_enum = 2 ; -#[doc = " Flags for mapping and unmapping interop resources"] pub type CUgraphicsMapResourceFlags_enum = u32; pub use self::CUgraphicsMapResourceFlags_enum as CUgraphicsMapResourceFlags; -#[doc = "< Positive X face of cubemap"] pub const CUarray_cubemap_face_enum_CU_CUBEMAP_FACE_POSITIVE_X: CUarray_cubemap_face_enum = 0; -#[doc = "< Negative X face of cubemap"] pub const CUarray_cubemap_face_enum_CU_CUBEMAP_FACE_NEGATIVE_X: CUarray_cubemap_face_enum = 1; -#[doc = "< Positive Y face of cubemap"] pub const CUarray_cubemap_face_enum_CU_CUBEMAP_FACE_POSITIVE_Y: CUarray_cubemap_face_enum = 2; -#[doc = "< Negative Y face of cubemap"] pub const CUarray_cubemap_face_enum_CU_CUBEMAP_FACE_NEGATIVE_Y: CUarray_cubemap_face_enum = 3; -#[doc = "< Positive Z face of cubemap"] pub const CUarray_cubemap_face_enum_CU_CUBEMAP_FACE_POSITIVE_Z: CUarray_cubemap_face_enum = 4; -#[doc = "< Negative Z face of cubemap"] pub const CUarray_cubemap_face_enum_CU_CUBEMAP_FACE_NEGATIVE_Z: CUarray_cubemap_face_enum = 5; -#[doc = " Array indices for cube faces"] pub type CUarray_cubemap_face_enum = u32; pub use self::CUarray_cubemap_face_enum as CUarray_cubemap_face; -#[doc = "< GPU thread stack size"] pub const CUlimit_enum_CU_LIMIT_STACK_SIZE: CUlimit_enum = 0; -#[doc = "< GPU printf FIFO size"] pub const CUlimit_enum_CU_LIMIT_PRINTF_FIFO_SIZE: CUlimit_enum = 1; -#[doc = "< GPU malloc heap size"] pub const CUlimit_enum_CU_LIMIT_MALLOC_HEAP_SIZE: CUlimit_enum = 2; -#[doc = "< GPU device runtime launch synchronize depth"] pub const CUlimit_enum_CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: CUlimit_enum = 3; -#[doc = "< GPU device runtime pending launch count"] pub const CUlimit_enum_CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: CUlimit_enum = 4; -#[doc = "< A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). 
This is a hint"] pub const CUlimit_enum_CU_LIMIT_MAX_L2_FETCH_GRANULARITY: CUlimit_enum = 5; pub const CUlimit_enum_CU_LIMIT_MAX: CUlimit_enum = 6; -#[doc = " Limits"] pub type CUlimit_enum = u32; pub use self::CUlimit_enum as CUlimit; -#[doc = "< Array resoure"] pub const CUresourcetype_enum_CU_RESOURCE_TYPE_ARRAY: CUresourcetype_enum = 0; -#[doc = "< Mipmapped array resource"] pub const CUresourcetype_enum_CU_RESOURCE_TYPE_MIPMAPPED_ARRAY: CUresourcetype_enum = 1; -#[doc = "< Linear resource"] pub const CUresourcetype_enum_CU_RESOURCE_TYPE_LINEAR: CUresourcetype_enum = 2; -#[doc = "< Pitch 2D resource"] pub const CUresourcetype_enum_CU_RESOURCE_TYPE_PITCH2D: CUresourcetype_enum = 3; -#[doc = " Resource types"] pub type CUresourcetype_enum = u32; pub use self::CUresourcetype_enum as CUresourcetype; -#[doc = " CUDA host function"] -#[doc = " \\param userData Argument value passed to the function"] pub type CUhostFn = ::std::option::Option< unsafe extern "C" fn(userData: *mut ::std::os::raw::c_void), >; -#[doc = " GPU kernel node parameters"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_KERNEL_NODE_PARAMS_st { - #[doc = "< Kernel to launch"] pub func: CUfunction, - #[doc = "< Width of grid in blocks"] pub gridDimX: ::std::os::raw::c_uint, - #[doc = "< Height of grid in blocks"] pub gridDimY: ::std::os::raw::c_uint, - #[doc = "< Depth of grid in blocks"] pub gridDimZ: ::std::os::raw::c_uint, - #[doc = "< X dimension of each thread block"] pub blockDimX: ::std::os::raw::c_uint, - #[doc = "< Y dimension of each thread block"] pub blockDimY: ::std::os::raw::c_uint, - #[doc = "< Z dimension of each thread block"] pub blockDimZ: ::std::os::raw::c_uint, - #[doc = "< Dynamic shared-memory size per thread block in bytes"] pub sharedMemBytes: ::std::os::raw::c_uint, - #[doc = "< Array of pointers to kernel parameters"] pub kernelParams: *mut *mut ::std::os::raw::c_void, - #[doc = "< Extra options"] pub extra: *mut *mut ::std::os::raw::c_void, } pub type CUDA_KERNEL_NODE_PARAMS = CUDA_KERNEL_NODE_PARAMS_st; -#[doc = " Memset node parameters"] #[repr(C)] pub struct CUDA_MEMSET_NODE_PARAMS_st { - #[doc = "< Destination device pointer"] pub dst: CUdeviceptr, - #[doc = "< Pitch of destination device pointer. Unused if height is 1"] pub pitch: usize, - #[doc = "< Value to be set"] pub value: ::std::os::raw::c_uint, - #[doc = "< Size of each element in bytes. 
Must be 1, 2, or 4."] pub elementSize: ::std::os::raw::c_uint, - #[doc = "< Width in bytes, of the row"] pub width: usize, - #[doc = "< Number of rows"] pub height: usize, } pub type CUDA_MEMSET_NODE_PARAMS = CUDA_MEMSET_NODE_PARAMS_st; -#[doc = " Host node parameters"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_HOST_NODE_PARAMS_st { - #[doc = "< The function to call when the node executes"] pub fn_: CUhostFn, - #[doc = "< Argument to pass to the function"] pub userData: *mut ::std::os::raw::c_void, } pub type CUDA_HOST_NODE_PARAMS = CUDA_HOST_NODE_PARAMS_st; -#[doc = "< GPU kernel node"] pub const CUgraphNodeType_enum_CU_GRAPH_NODE_TYPE_KERNEL: CUgraphNodeType_enum = 0; -#[doc = "< Memcpy node"] pub const CUgraphNodeType_enum_CU_GRAPH_NODE_TYPE_MEMCPY: CUgraphNodeType_enum = 1; -#[doc = "< Memset node"] pub const CUgraphNodeType_enum_CU_GRAPH_NODE_TYPE_MEMSET: CUgraphNodeType_enum = 2; -#[doc = "< Host (executable) node"] pub const CUgraphNodeType_enum_CU_GRAPH_NODE_TYPE_HOST: CUgraphNodeType_enum = 3; -#[doc = "< Node which executes an embedded graph"] pub const CUgraphNodeType_enum_CU_GRAPH_NODE_TYPE_GRAPH: CUgraphNodeType_enum = 4; -#[doc = "< Empty (no-op) node"] pub const CUgraphNodeType_enum_CU_GRAPH_NODE_TYPE_EMPTY: CUgraphNodeType_enum = 5; pub const CUgraphNodeType_enum_CU_GRAPH_NODE_TYPE_COUNT: CUgraphNodeType_enum = 6; -#[doc = " Graph node types"] pub type CUgraphNodeType_enum = u32; pub use self::CUgraphNodeType_enum as CUgraphNodeType; -#[doc = "< Stream is not capturing"] pub const CUstreamCaptureStatus_enum_CU_STREAM_CAPTURE_STATUS_NONE: CUstreamCaptureStatus_enum = 0; -#[doc = "< Stream is actively capturing"] pub const CUstreamCaptureStatus_enum_CU_STREAM_CAPTURE_STATUS_ACTIVE: CUstreamCaptureStatus_enum = 1; -#[doc = "< Stream is part of a capture sequence that"] -#[doc = "has been invalidated, but not terminated"] pub const CUstreamCaptureStatus_enum_CU_STREAM_CAPTURE_STATUS_INVALIDATED: CUstreamCaptureStatus_enum = 2; -#[doc = " Possible stream capture statuses returned by ::cuStreamIsCapturing"] pub type CUstreamCaptureStatus_enum = u32; pub use self::CUstreamCaptureStatus_enum as CUstreamCaptureStatus; pub const CUstreamCaptureMode_enum_CU_STREAM_CAPTURE_MODE_GLOBAL: @@ -1193,305 +728,93 @@ pub const CUstreamCaptureMode_enum_CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: CUstreamCaptureMode_enum = 1; pub const CUstreamCaptureMode_enum_CU_STREAM_CAPTURE_MODE_RELAXED: CUstreamCaptureMode_enum = 2; -#[doc = " Possible modes for stream capture thread interactions. For more details see"] -#[doc = " ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode"] pub type CUstreamCaptureMode_enum = u32; pub use self::CUstreamCaptureMode_enum as CUstreamCaptureMode; pub mod cudaError_enum { - #[doc = " Error codes"] pub type Type = u32; - #[doc = " The API call returned with no errors. 
In the case of query calls, this"] - #[doc = " also means that the operation being queried is complete (see"] - #[doc = " ::cuEventQuery() and ::cuStreamQuery())."] pub const CUDA_SUCCESS: Type = 0; - #[doc = " This indicates that one or more of the parameters passed to the API call"] - #[doc = " is not within an acceptable range of values."] pub const CUDA_ERROR_INVALID_VALUE: Type = 1; - #[doc = " The API call failed because it was unable to allocate enough memory to"] - #[doc = " perform the requested operation."] pub const CUDA_ERROR_OUT_OF_MEMORY: Type = 2; - #[doc = " This indicates that the CUDA driver has not been initialized with"] - #[doc = " ::cuInit() or that initialization has failed."] pub const CUDA_ERROR_NOT_INITIALIZED: Type = 3; - #[doc = " This indicates that the CUDA driver is in the process of shutting down."] pub const CUDA_ERROR_DEINITIALIZED: Type = 4; - #[doc = " This indicates profiler is not initialized for this run. This can"] - #[doc = " happen when the application is running with external profiling tools"] - #[doc = " like visual profiler."] pub const CUDA_ERROR_PROFILER_DISABLED: Type = 5; - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 5.0. It is no longer an error"] - #[doc = " to attempt to enable/disable the profiling via ::cuProfilerStart or"] - #[doc = " ::cuProfilerStop without initialization."] pub const CUDA_ERROR_PROFILER_NOT_INITIALIZED: Type = 6; - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 5.0. It is no longer an error"] - #[doc = " to call cuProfilerStart() when profiling is already enabled."] pub const CUDA_ERROR_PROFILER_ALREADY_STARTED: Type = 7; - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 5.0. It is no longer an error"] - #[doc = " to call cuProfilerStop() when profiling is already disabled."] pub const CUDA_ERROR_PROFILER_ALREADY_STOPPED: Type = 8; - #[doc = " This indicates that no CUDA-capable devices were detected by the installed"] - #[doc = " CUDA driver."] pub const CUDA_ERROR_NO_DEVICE: Type = 100; - #[doc = " This indicates that the device ordinal supplied by the user does not"] - #[doc = " correspond to a valid CUDA device."] pub const CUDA_ERROR_INVALID_DEVICE: Type = 101; - #[doc = " This indicates that the device kernel image is invalid. This can also"] - #[doc = " indicate an invalid CUDA module."] pub const CUDA_ERROR_INVALID_IMAGE: Type = 200; - #[doc = " This most frequently indicates that there is no context bound to the"] - #[doc = " current thread. This can also be returned if the context passed to an"] - #[doc = " API call is not a valid handle (such as a context that has had"] - #[doc = " ::cuCtxDestroy() invoked on it). This can also be returned if a user"] - #[doc = " mixes different API versions (i.e. 3010 context with 3020 API calls)."] - #[doc = " See ::cuCtxGetApiVersion() for more details."] pub const CUDA_ERROR_INVALID_CONTEXT: Type = 201; - #[doc = " This indicated that the context being supplied as a parameter to the"] - #[doc = " API call was already the active context."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 3.2. 
It is no longer an"] - #[doc = " error to attempt to push the active context via ::cuCtxPushCurrent()."] pub const CUDA_ERROR_CONTEXT_ALREADY_CURRENT: Type = 202; - #[doc = " This indicates that a map or register operation has failed."] pub const CUDA_ERROR_MAP_FAILED: Type = 205; - #[doc = " This indicates that an unmap or unregister operation has failed."] pub const CUDA_ERROR_UNMAP_FAILED: Type = 206; - #[doc = " This indicates that the specified array is currently mapped and thus"] - #[doc = " cannot be destroyed."] pub const CUDA_ERROR_ARRAY_IS_MAPPED: Type = 207; - #[doc = " This indicates that the resource is already mapped."] pub const CUDA_ERROR_ALREADY_MAPPED: Type = 208; - #[doc = " This indicates that there is no kernel image available that is suitable"] - #[doc = " for the device. This can occur when a user specifies code generation"] - #[doc = " options for a particular CUDA source file that do not include the"] - #[doc = " corresponding device configuration."] pub const CUDA_ERROR_NO_BINARY_FOR_GPU: Type = 209; - #[doc = " This indicates that a resource has already been acquired."] pub const CUDA_ERROR_ALREADY_ACQUIRED: Type = 210; - #[doc = " This indicates that a resource is not mapped."] pub const CUDA_ERROR_NOT_MAPPED: Type = 211; - #[doc = " This indicates that a mapped resource is not available for access as an"] - #[doc = " array."] pub const CUDA_ERROR_NOT_MAPPED_AS_ARRAY: Type = 212; - #[doc = " This indicates that a mapped resource is not available for access as a"] - #[doc = " pointer."] pub const CUDA_ERROR_NOT_MAPPED_AS_POINTER: Type = 213; - #[doc = " This indicates that an uncorrectable ECC error was detected during"] - #[doc = " execution."] pub const CUDA_ERROR_ECC_UNCORRECTABLE: Type = 214; - #[doc = " This indicates that the ::CUlimit passed to the API call is not"] - #[doc = " supported by the active device."] pub const CUDA_ERROR_UNSUPPORTED_LIMIT: Type = 215; - #[doc = " This indicates that the ::CUcontext passed to the API call can"] - #[doc = " only be bound to a single CPU thread at a time but is already"] - #[doc = " bound to a CPU thread."] pub const CUDA_ERROR_CONTEXT_ALREADY_IN_USE: Type = 216; - #[doc = " This indicates that peer access is not supported across the given"] - #[doc = " devices."] pub const CUDA_ERROR_PEER_ACCESS_UNSUPPORTED: Type = 217; - #[doc = " This indicates that a PTX JIT compilation failed."] pub const CUDA_ERROR_INVALID_PTX: Type = 218; - #[doc = " This indicates an error with OpenGL or DirectX context."] pub const CUDA_ERROR_INVALID_GRAPHICS_CONTEXT: Type = 219; - #[doc = " This indicates that an uncorrectable NVLink error was detected during the"] - #[doc = " execution."] pub const CUDA_ERROR_NVLINK_UNCORRECTABLE: Type = 220; - #[doc = " This indicates that the PTX JIT compiler library was not found."] pub const CUDA_ERROR_JIT_COMPILER_NOT_FOUND: Type = 221; - #[doc = " This indicates that the device kernel source is invalid."] pub const CUDA_ERROR_INVALID_SOURCE: Type = 300; - #[doc = " This indicates that the file specified was not found."] pub const CUDA_ERROR_FILE_NOT_FOUND: Type = 301; - #[doc = " This indicates that a link to a shared object failed to resolve."] pub const CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND: Type = 302; - #[doc = " This indicates that initialization of a shared object failed."] pub const CUDA_ERROR_SHARED_OBJECT_INIT_FAILED: Type = 303; - #[doc = " This indicates that an OS call failed."] pub const CUDA_ERROR_OPERATING_SYSTEM: Type = 304; - #[doc = " This indicates that a resource handle 
passed to the API call was not"] - #[doc = " valid. Resource handles are opaque types like ::CUstream and ::CUevent."] pub const CUDA_ERROR_INVALID_HANDLE: Type = 400; - #[doc = " This indicates that a resource required by the API call is not in a"] - #[doc = " valid state to perform the requested operation."] pub const CUDA_ERROR_ILLEGAL_STATE: Type = 401; - #[doc = " This indicates that a named symbol was not found. Examples of symbols"] - #[doc = " are global/constant variable names, texture names, and surface names."] pub const CUDA_ERROR_NOT_FOUND: Type = 500; - #[doc = " This indicates that asynchronous operations issued previously have not"] - #[doc = " completed yet. This result is not actually an error, but must be indicated"] - #[doc = " differently than ::CUDA_SUCCESS (which indicates completion). Calls that"] - #[doc = " may return this value include ::cuEventQuery() and ::cuStreamQuery()."] pub const CUDA_ERROR_NOT_READY: Type = 600; - #[doc = " While executing a kernel, the device encountered a"] - #[doc = " load or store instruction on an invalid memory address."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const CUDA_ERROR_ILLEGAL_ADDRESS: Type = 700; - #[doc = " This indicates that a launch did not occur because it did not have"] - #[doc = " appropriate resources. This error usually indicates that the user has"] - #[doc = " attempted to pass too many arguments to the device kernel, or the"] - #[doc = " kernel launch specifies too many threads for the kernel's register"] - #[doc = " count. Passing arguments of the wrong size (i.e. a 64-bit pointer"] - #[doc = " when a 32-bit int is expected) is equivalent to passing too many"] - #[doc = " arguments and can also result in this error."] pub const CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES: Type = 701; - #[doc = " This indicates that the device kernel took too long to execute. This can"] - #[doc = " only occur if timeouts are enabled - see the device attribute"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT for more information."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. 
To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const CUDA_ERROR_LAUNCH_TIMEOUT: Type = 702; - #[doc = " This error indicates a kernel launch that uses an incompatible texturing"] - #[doc = " mode."] pub const CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING: Type = 703; - #[doc = " This error indicates that a call to ::cuCtxEnablePeerAccess() is"] - #[doc = " trying to re-enable peer access to a context which has already"] - #[doc = " had peer access to it enabled."] pub const CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED: Type = 704; - #[doc = " This error indicates that ::cuCtxDisablePeerAccess() is"] - #[doc = " trying to disable peer access which has not been enabled yet"] - #[doc = " via ::cuCtxEnablePeerAccess()."] pub const CUDA_ERROR_PEER_ACCESS_NOT_ENABLED: Type = 705; - #[doc = " This error indicates that the primary context for the specified device"] - #[doc = " has already been initialized."] pub const CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE: Type = 708; - #[doc = " This error indicates that the context current to the calling thread"] - #[doc = " has been destroyed using ::cuCtxDestroy, or is a primary context which"] - #[doc = " has not yet been initialized."] pub const CUDA_ERROR_CONTEXT_IS_DESTROYED: Type = 709; - #[doc = " A device-side assert triggered during kernel execution. The context"] - #[doc = " cannot be used anymore, and must be destroyed. All existing device"] - #[doc = " memory allocations from this context are invalid and must be"] - #[doc = " reconstructed if the program is to continue using CUDA."] pub const CUDA_ERROR_ASSERT: Type = 710; - #[doc = " This error indicates that the hardware resources required to enable"] - #[doc = " peer access have been exhausted for one or more of the devices"] - #[doc = " passed to ::cuCtxEnablePeerAccess()."] pub const CUDA_ERROR_TOO_MANY_PEERS: Type = 711; - #[doc = " This error indicates that the memory range passed to ::cuMemHostRegister()"] - #[doc = " has already been registered."] pub const CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED: Type = 712; - #[doc = " This error indicates that the pointer passed to ::cuMemHostUnregister()"] - #[doc = " does not correspond to any currently registered memory region."] pub const CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED: Type = 713; - #[doc = " While executing a kernel, the device encountered a stack error."] - #[doc = " This can be due to stack corruption or exceeding the stack size limit."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const CUDA_ERROR_HARDWARE_STACK_ERROR: Type = 714; - #[doc = " While executing a kernel, the device encountered an illegal instruction."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const CUDA_ERROR_ILLEGAL_INSTRUCTION: Type = 715; - #[doc = " While executing a kernel, the device encountered a load or store instruction"] - #[doc = " on a memory address which is not aligned."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. 
To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const CUDA_ERROR_MISALIGNED_ADDRESS: Type = 716; - #[doc = " While executing a kernel, the device encountered an instruction"] - #[doc = " which can only operate on memory locations in certain address spaces"] - #[doc = " (global, shared, or local), but was supplied a memory address not"] - #[doc = " belonging to an allowed address space."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const CUDA_ERROR_INVALID_ADDRESS_SPACE: Type = 717; - #[doc = " While executing a kernel, the device program counter wrapped its address space."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const CUDA_ERROR_INVALID_PC: Type = 718; - #[doc = " An exception occurred on the device while executing a kernel. Common"] - #[doc = " causes include dereferencing an invalid device pointer and accessing"] - #[doc = " out of bounds shared memory. Less common cases can be system specific - more"] - #[doc = " information about these cases can be found in the system specific user guide."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const CUDA_ERROR_LAUNCH_FAILED: Type = 719; - #[doc = " This error indicates that the number of blocks launched per grid for a kernel that was"] - #[doc = " launched via either ::cuLaunchCooperativeKernel or ::cuLaunchCooperativeKernelMultiDevice"] - #[doc = " exceeds the maximum number of blocks as allowed by ::cuOccupancyMaxActiveBlocksPerMultiprocessor"] - #[doc = " or ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors"] - #[doc = " as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT."] pub const CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE: Type = 720; - #[doc = " This error indicates that the attempted operation is not permitted."] pub const CUDA_ERROR_NOT_PERMITTED: Type = 800; - #[doc = " This error indicates that the attempted operation is not supported"] - #[doc = " on the current system or device."] pub const CUDA_ERROR_NOT_SUPPORTED: Type = 801; - #[doc = " This error indicates that the system is not yet ready to start any CUDA"] - #[doc = " work. To continue using CUDA, verify the system configuration is in a"] - #[doc = " valid state and all required driver daemons are actively running."] - #[doc = " More information about this error can be found in the system specific"] - #[doc = " user guide."] pub const CUDA_ERROR_SYSTEM_NOT_READY: Type = 802; - #[doc = " This error indicates that there is a mismatch between the versions of"] - #[doc = " the display driver and the CUDA driver. 
Refer to the compatibility documentation"] - #[doc = " for supported versions."] pub const CUDA_ERROR_SYSTEM_DRIVER_MISMATCH: Type = 803; - #[doc = " This error indicates that the system was upgraded to run with forward compatibility"] - #[doc = " but the visible hardware detected by CUDA does not support this configuration."] - #[doc = " Refer to the compatibility documentation for the supported hardware matrix or ensure"] - #[doc = " that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES"] - #[doc = " environment variable."] pub const CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE: Type = 804; - #[doc = " This error indicates that the operation is not permitted when"] - #[doc = " the stream is capturing."] pub const CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED: Type = 900; - #[doc = " This error indicates that the current capture sequence on the stream"] - #[doc = " has been invalidated due to a previous error."] pub const CUDA_ERROR_STREAM_CAPTURE_INVALIDATED: Type = 901; - #[doc = " This error indicates that the operation would have resulted in a merge"] - #[doc = " of two independent capture sequences."] pub const CUDA_ERROR_STREAM_CAPTURE_MERGE: Type = 902; - #[doc = " This error indicates that the capture was not initiated in this stream."] pub const CUDA_ERROR_STREAM_CAPTURE_UNMATCHED: Type = 903; - #[doc = " This error indicates that the capture sequence contains a fork that was"] - #[doc = " not joined to the primary stream."] pub const CUDA_ERROR_STREAM_CAPTURE_UNJOINED: Type = 904; - #[doc = " This error indicates that a dependency would have been created which"] - #[doc = " crosses the capture sequence boundary. Only implicit in-stream ordering"] - #[doc = " dependencies are allowed to cross the boundary."] pub const CUDA_ERROR_STREAM_CAPTURE_ISOLATION: Type = 905; - #[doc = " This error indicates a disallowed implicit dependency on a current capture"] - #[doc = " sequence from cudaStreamLegacy."] pub const CUDA_ERROR_STREAM_CAPTURE_IMPLICIT: Type = 906; - #[doc = " This error indicates that the operation is not permitted on an event which"] - #[doc = " was last recorded in a capturing stream."] pub const CUDA_ERROR_CAPTURED_EVENT: Type = 907; - #[doc = " A stream capture sequence not initiated with the ::CU_STREAM_CAPTURE_MODE_RELAXED"] - #[doc = " argument to ::cuStreamBeginCapture was passed to ::cuStreamEndCapture in a"] - #[doc = " different thread."] pub const CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD: Type = 908; - #[doc = " This indicates that an unknown internal error has occurred."] pub const CUDA_ERROR_UNKNOWN: Type = 999; } pub use self::cudaError_enum::Type as CUresult; -#[doc = "< A relative value indicating the performance of the link between two devices"] pub const CUdevice_P2PAttribute_enum_CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK : CUdevice_P2PAttribute_enum = 1 ; -#[doc = "< P2P Access is enable"] pub const CUdevice_P2PAttribute_enum_CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED : CUdevice_P2PAttribute_enum = 2 ; -#[doc = "< Atomic operation over the link supported"] pub const CUdevice_P2PAttribute_enum_CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED : CUdevice_P2PAttribute_enum = 3 ; -#[doc = "< \\deprecated use CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED instead"] pub const CUdevice_P2PAttribute_enum_CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED : CUdevice_P2PAttribute_enum = 4 ; -#[doc = "< Accessing CUDA arrays over the link supported"] pub const CUdevice_P2PAttribute_enum_CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED : 
CUdevice_P2PAttribute_enum = 4 ; -#[doc = " P2P Attributes"] pub type CUdevice_P2PAttribute_enum = u32; pub use self::CUdevice_P2PAttribute_enum as CUdevice_P2PAttribute; -#[doc = " CUDA stream callback"] -#[doc = " \\param hStream The stream the callback was added to, as passed to ::cuStreamAddCallback. May be NULL."] -#[doc = " \\param status ::CUDA_SUCCESS or any persistent error on the stream."] -#[doc = " \\param userData User parameter provided at registration."] pub type CUstreamCallback = ::std::option::Option< unsafe extern "C" fn( hStream: CUstream, @@ -1499,197 +822,109 @@ pub type CUstreamCallback = ::std::option::Option< userData: *mut ::std::os::raw::c_void, ), >; -#[doc = " Block size to per-block dynamic shared memory mapping for a certain"] -#[doc = " kernel \\param blockSize Block size of the kernel."] -#[doc = ""] -#[doc = " \\return The dynamic shared memory needed by a block."] pub type CUoccupancyB2DSize = ::std::option::Option< unsafe extern "C" fn(blockSize: ::std::os::raw::c_int) -> usize, >; -#[doc = " 2D memory copy parameters"] #[repr(C)] pub struct CUDA_MEMCPY2D_st { - #[doc = "< Source X in bytes"] pub srcXInBytes: usize, - #[doc = "< Source Y"] pub srcY: usize, - #[doc = "< Source memory type (host, device, array)"] pub srcMemoryType: CUmemorytype, - #[doc = "< Source host pointer"] pub srcHost: *const ::std::os::raw::c_void, - #[doc = "< Source device pointer"] pub srcDevice: CUdeviceptr, - #[doc = "< Source array reference"] pub srcArray: CUarray, - #[doc = "< Source pitch (ignored when src is array)"] pub srcPitch: usize, - #[doc = "< Destination X in bytes"] pub dstXInBytes: usize, - #[doc = "< Destination Y"] pub dstY: usize, - #[doc = "< Destination memory type (host, device, array)"] pub dstMemoryType: CUmemorytype, - #[doc = "< Destination host pointer"] pub dstHost: *mut ::std::os::raw::c_void, - #[doc = "< Destination device pointer"] pub dstDevice: CUdeviceptr, - #[doc = "< Destination array reference"] pub dstArray: CUarray, - #[doc = "< Destination pitch (ignored when dst is array)"] pub dstPitch: usize, - #[doc = "< Width of 2D memory copy in bytes"] pub WidthInBytes: usize, - #[doc = "< Height of 2D memory copy"] pub Height: usize, } pub type CUDA_MEMCPY2D = CUDA_MEMCPY2D_st; -#[doc = " 3D memory copy parameters"] #[repr(C)] pub struct CUDA_MEMCPY3D_st { - #[doc = "< Source X in bytes"] pub srcXInBytes: usize, - #[doc = "< Source Y"] pub srcY: usize, - #[doc = "< Source Z"] pub srcZ: usize, - #[doc = "< Source LOD"] pub srcLOD: usize, - #[doc = "< Source memory type (host, device, array)"] pub srcMemoryType: CUmemorytype, - #[doc = "< Source host pointer"] pub srcHost: *const ::std::os::raw::c_void, - #[doc = "< Source device pointer"] pub srcDevice: CUdeviceptr, - #[doc = "< Source array reference"] pub srcArray: CUarray, - #[doc = "< Must be NULL"] pub reserved0: *mut ::std::os::raw::c_void, - #[doc = "< Source pitch (ignored when src is array)"] pub srcPitch: usize, - #[doc = "< Source height (ignored when src is array; may be 0 if Depth==1)"] pub srcHeight: usize, - #[doc = "< Destination X in bytes"] pub dstXInBytes: usize, - #[doc = "< Destination Y"] pub dstY: usize, - #[doc = "< Destination Z"] pub dstZ: usize, - #[doc = "< Destination LOD"] pub dstLOD: usize, - #[doc = "< Destination memory type (host, device, array)"] pub dstMemoryType: CUmemorytype, - #[doc = "< Destination host pointer"] pub dstHost: *mut ::std::os::raw::c_void, - #[doc = "< Destination device pointer"] pub dstDevice: CUdeviceptr, - #[doc = "< Destination array 
reference"] pub dstArray: CUarray, - #[doc = "< Must be NULL"] pub reserved1: *mut ::std::os::raw::c_void, - #[doc = "< Destination pitch (ignored when dst is array)"] pub dstPitch: usize, - #[doc = "< Destination height (ignored when dst is array; may be 0 if Depth==1)"] pub dstHeight: usize, - #[doc = "< Width of 3D memory copy in bytes"] pub WidthInBytes: usize, - #[doc = "< Height of 3D memory copy"] pub Height: usize, - #[doc = "< Depth of 3D memory copy"] pub Depth: usize, } pub type CUDA_MEMCPY3D = CUDA_MEMCPY3D_st; -#[doc = " 3D memory cross-context copy parameters"] #[repr(C)] pub struct CUDA_MEMCPY3D_PEER_st { - #[doc = "< Source X in bytes"] pub srcXInBytes: usize, - #[doc = "< Source Y"] pub srcY: usize, - #[doc = "< Source Z"] pub srcZ: usize, - #[doc = "< Source LOD"] pub srcLOD: usize, - #[doc = "< Source memory type (host, device, array)"] pub srcMemoryType: CUmemorytype, - #[doc = "< Source host pointer"] pub srcHost: *const ::std::os::raw::c_void, - #[doc = "< Source device pointer"] pub srcDevice: CUdeviceptr, - #[doc = "< Source array reference"] pub srcArray: CUarray, - #[doc = "< Source context (ignored with srcMemoryType is ::CU_MEMORYTYPE_ARRAY)"] pub srcContext: CUcontext, - #[doc = "< Source pitch (ignored when src is array)"] pub srcPitch: usize, - #[doc = "< Source height (ignored when src is array; may be 0 if Depth==1)"] pub srcHeight: usize, - #[doc = "< Destination X in bytes"] pub dstXInBytes: usize, - #[doc = "< Destination Y"] pub dstY: usize, - #[doc = "< Destination Z"] pub dstZ: usize, - #[doc = "< Destination LOD"] pub dstLOD: usize, - #[doc = "< Destination memory type (host, device, array)"] pub dstMemoryType: CUmemorytype, - #[doc = "< Destination host pointer"] pub dstHost: *mut ::std::os::raw::c_void, - #[doc = "< Destination device pointer"] pub dstDevice: CUdeviceptr, - #[doc = "< Destination array reference"] pub dstArray: CUarray, - #[doc = "< Destination context (ignored with dstMemoryType is ::CU_MEMORYTYPE_ARRAY)"] pub dstContext: CUcontext, - #[doc = "< Destination pitch (ignored when dst is array)"] pub dstPitch: usize, - #[doc = "< Destination height (ignored when dst is array; may be 0 if Depth==1)"] pub dstHeight: usize, - #[doc = "< Width of 3D memory copy in bytes"] pub WidthInBytes: usize, - #[doc = "< Height of 3D memory copy"] pub Height: usize, - #[doc = "< Depth of 3D memory copy"] pub Depth: usize, } pub type CUDA_MEMCPY3D_PEER = CUDA_MEMCPY3D_PEER_st; -#[doc = " Array descriptor"] #[repr(C)] pub struct CUDA_ARRAY_DESCRIPTOR_st { - #[doc = "< Width of array"] pub Width: usize, - #[doc = "< Height of array"] pub Height: usize, - #[doc = "< Array format"] pub Format: CUarray_format, - #[doc = "< Channels per array element"] pub NumChannels: ::std::os::raw::c_uint, } pub type CUDA_ARRAY_DESCRIPTOR = CUDA_ARRAY_DESCRIPTOR_st; -#[doc = " 3D array descriptor"] #[repr(C)] pub struct CUDA_ARRAY3D_DESCRIPTOR_st { - #[doc = "< Width of 3D array"] pub Width: usize, - #[doc = "< Height of 3D array"] pub Height: usize, - #[doc = "< Depth of 3D array"] pub Depth: usize, - #[doc = "< Array format"] pub Format: CUarray_format, - #[doc = "< Channels per array element"] pub NumChannels: ::std::os::raw::c_uint, - #[doc = "< Flags"] pub Flags: ::std::os::raw::c_uint, } pub type CUDA_ARRAY3D_DESCRIPTOR = CUDA_ARRAY3D_DESCRIPTOR_st; -#[doc = " CUDA Resource descriptor"] #[repr(C)] pub struct CUDA_RESOURCE_DESC_st { - #[doc = "< Resource type"] pub resType: CUresourcetype, pub res: CUDA_RESOURCE_DESC_st__bindgen_ty_1, - #[doc = "< Flags (must be 
zero)"] pub flags: ::std::os::raw::c_uint, } #[repr(C)] @@ -1709,39 +944,27 @@ pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1 { #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_1 { - #[doc = "< CUDA array"] pub hArray: CUarray, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_2 { - #[doc = "< CUDA mipmapped array"] pub hMipmappedArray: CUmipmappedArray, } #[repr(C)] pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_3 { - #[doc = "< Device pointer"] pub devPtr: CUdeviceptr, - #[doc = "< Array format"] pub format: CUarray_format, - #[doc = "< Channels per array element"] pub numChannels: ::std::os::raw::c_uint, - #[doc = "< Size in bytes"] pub sizeInBytes: usize, } #[repr(C)] pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_4 { - #[doc = "< Device pointer"] pub devPtr: CUdeviceptr, - #[doc = "< Array format"] pub format: CUarray_format, - #[doc = "< Channels per array element"] pub numChannels: ::std::os::raw::c_uint, - #[doc = "< Width of the array in elements"] pub width: usize, - #[doc = "< Height of the array in elements"] pub height: usize, - #[doc = "< Pitch between two rows in bytes"] pub pitchInBytes: usize, } #[repr(C)] @@ -1750,162 +973,106 @@ pub struct CUDA_RESOURCE_DESC_st__bindgen_ty_1__bindgen_ty_5 { pub reserved: [::std::os::raw::c_int; 32usize], } pub type CUDA_RESOURCE_DESC = CUDA_RESOURCE_DESC_st; -#[doc = " Texture descriptor"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_TEXTURE_DESC_st { - #[doc = "< Address modes"] pub addressMode: [CUaddress_mode; 3usize], - #[doc = "< Filter mode"] pub filterMode: CUfilter_mode, - #[doc = "< Flags"] pub flags: ::std::os::raw::c_uint, - #[doc = "< Maximum anisotropy ratio"] pub maxAnisotropy: ::std::os::raw::c_uint, - #[doc = "< Mipmap filter mode"] pub mipmapFilterMode: CUfilter_mode, - #[doc = "< Mipmap level bias"] pub mipmapLevelBias: f32, - #[doc = "< Mipmap minimum level clamp"] pub minMipmapLevelClamp: f32, - #[doc = "< Mipmap maximum level clamp"] pub maxMipmapLevelClamp: f32, - #[doc = "< Border Color"] pub borderColor: [f32; 4usize], pub reserved: [::std::os::raw::c_int; 12usize], } pub type CUDA_TEXTURE_DESC = CUDA_TEXTURE_DESC_st; -#[doc = "< No resource view format (use underlying resource format)"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_NONE: CUresourceViewFormat_enum = 0; -#[doc = "< 1 channel unsigned 8-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UINT_1X8: CUresourceViewFormat_enum = 1; -#[doc = "< 2 channel unsigned 8-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UINT_2X8: CUresourceViewFormat_enum = 2; -#[doc = "< 4 channel unsigned 8-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UINT_4X8: CUresourceViewFormat_enum = 3; -#[doc = "< 1 channel signed 8-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SINT_1X8: CUresourceViewFormat_enum = 4; -#[doc = "< 2 channel signed 8-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SINT_2X8: CUresourceViewFormat_enum = 5; -#[doc = "< 4 channel signed 8-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SINT_4X8: CUresourceViewFormat_enum = 6; -#[doc = "< 1 channel unsigned 16-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UINT_1X16: CUresourceViewFormat_enum = 7; -#[doc = "< 2 channel unsigned 16-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UINT_2X16: 
CUresourceViewFormat_enum = 8; -#[doc = "< 4 channel unsigned 16-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UINT_4X16: CUresourceViewFormat_enum = 9; -#[doc = "< 1 channel signed 16-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SINT_1X16: CUresourceViewFormat_enum = 10; -#[doc = "< 2 channel signed 16-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SINT_2X16: CUresourceViewFormat_enum = 11; -#[doc = "< 4 channel signed 16-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SINT_4X16: CUresourceViewFormat_enum = 12; -#[doc = "< 1 channel unsigned 32-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UINT_1X32: CUresourceViewFormat_enum = 13; -#[doc = "< 2 channel unsigned 32-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UINT_2X32: CUresourceViewFormat_enum = 14; -#[doc = "< 4 channel unsigned 32-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UINT_4X32: CUresourceViewFormat_enum = 15; -#[doc = "< 1 channel signed 32-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SINT_1X32: CUresourceViewFormat_enum = 16; -#[doc = "< 2 channel signed 32-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SINT_2X32: CUresourceViewFormat_enum = 17; -#[doc = "< 4 channel signed 32-bit integers"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SINT_4X32: CUresourceViewFormat_enum = 18; -#[doc = "< 1 channel 16-bit floating point"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_FLOAT_1X16: CUresourceViewFormat_enum = 19; -#[doc = "< 2 channel 16-bit floating point"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_FLOAT_2X16: CUresourceViewFormat_enum = 20; -#[doc = "< 4 channel 16-bit floating point"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_FLOAT_4X16: CUresourceViewFormat_enum = 21; -#[doc = "< 1 channel 32-bit floating point"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_FLOAT_1X32: CUresourceViewFormat_enum = 22; -#[doc = "< 2 channel 32-bit floating point"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_FLOAT_2X32: CUresourceViewFormat_enum = 23; -#[doc = "< 4 channel 32-bit floating point"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_FLOAT_4X32: CUresourceViewFormat_enum = 24; -#[doc = "< Block compressed 1"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UNSIGNED_BC1: CUresourceViewFormat_enum = 25; -#[doc = "< Block compressed 2"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UNSIGNED_BC2: CUresourceViewFormat_enum = 26; -#[doc = "< Block compressed 3"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UNSIGNED_BC3: CUresourceViewFormat_enum = 27; -#[doc = "< Block compressed 4 unsigned"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UNSIGNED_BC4: CUresourceViewFormat_enum = 28; -#[doc = "< Block compressed 4 signed"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SIGNED_BC4: CUresourceViewFormat_enum = 29; -#[doc = "< Block compressed 5 unsigned"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UNSIGNED_BC5: CUresourceViewFormat_enum = 30; -#[doc = "< Block compressed 5 signed"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SIGNED_BC5: CUresourceViewFormat_enum = 31; -#[doc = "< Block compressed 6 unsigned half-float"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UNSIGNED_BC6H: CUresourceViewFormat_enum = 32; -#[doc = "< Block compressed 6 signed half-float"] pub const 
CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_SIGNED_BC6H: CUresourceViewFormat_enum = 33; -#[doc = "< Block compressed 7"] pub const CUresourceViewFormat_enum_CU_RES_VIEW_FORMAT_UNSIGNED_BC7: CUresourceViewFormat_enum = 34; -#[doc = " Resource view format"] pub type CUresourceViewFormat_enum = u32; pub use self::CUresourceViewFormat_enum as CUresourceViewFormat; -#[doc = " Resource view descriptor"] #[repr(C)] pub struct CUDA_RESOURCE_VIEW_DESC_st { - #[doc = "< Resource view format"] pub format: CUresourceViewFormat, - #[doc = "< Width of the resource view"] pub width: usize, - #[doc = "< Height of the resource view"] pub height: usize, - #[doc = "< Depth of the resource view"] pub depth: usize, - #[doc = "< First defined mipmap level"] pub firstMipmapLevel: ::std::os::raw::c_uint, - #[doc = "< Last defined mipmap level"] pub lastMipmapLevel: ::std::os::raw::c_uint, - #[doc = "< First layer index"] pub firstLayer: ::std::os::raw::c_uint, - #[doc = "< Last layer index"] pub lastLayer: ::std::os::raw::c_uint, pub reserved: [::std::os::raw::c_uint; 16usize], } pub type CUDA_RESOURCE_VIEW_DESC = CUDA_RESOURCE_VIEW_DESC_st; -#[doc = " GPU Direct v3 tokens"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st { @@ -1914,174 +1081,103 @@ pub struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st { } pub type CUDA_POINTER_ATTRIBUTE_P2P_TOKENS = CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st; -#[doc = " Kernel launch parameters"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_LAUNCH_PARAMS_st { - #[doc = "< Kernel to launch"] pub function: CUfunction, - #[doc = "< Width of grid in blocks"] pub gridDimX: ::std::os::raw::c_uint, - #[doc = "< Height of grid in blocks"] pub gridDimY: ::std::os::raw::c_uint, - #[doc = "< Depth of grid in blocks"] pub gridDimZ: ::std::os::raw::c_uint, - #[doc = "< X dimension of each thread block"] pub blockDimX: ::std::os::raw::c_uint, - #[doc = "< Y dimension of each thread block"] pub blockDimY: ::std::os::raw::c_uint, - #[doc = "< Z dimension of each thread block"] pub blockDimZ: ::std::os::raw::c_uint, - #[doc = "< Dynamic shared-memory size per thread block in bytes"] pub sharedMemBytes: ::std::os::raw::c_uint, - #[doc = "< Stream identifier"] pub hStream: CUstream, - #[doc = "< Array of pointers to kernel parameters"] pub kernelParams: *mut *mut ::std::os::raw::c_void, } pub type CUDA_LAUNCH_PARAMS = CUDA_LAUNCH_PARAMS_st; -#[doc = " Handle is an opaque file descriptor"] pub const CUexternalMemoryHandleType_enum_CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD : CUexternalMemoryHandleType_enum = 1 ; -#[doc = " Handle is an opaque shared NT handle"] pub const CUexternalMemoryHandleType_enum_CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 : CUexternalMemoryHandleType_enum = 2 ; -#[doc = " Handle is an opaque, globally shared handle"] pub const CUexternalMemoryHandleType_enum_CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT : CUexternalMemoryHandleType_enum = 3 ; -#[doc = " Handle is a D3D12 heap object"] pub const CUexternalMemoryHandleType_enum_CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP : CUexternalMemoryHandleType_enum = 4 ; -#[doc = " Handle is a D3D12 committed resource"] pub const CUexternalMemoryHandleType_enum_CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE : CUexternalMemoryHandleType_enum = 5 ; -#[doc = " External memory handle types"] pub type CUexternalMemoryHandleType_enum = u32; pub use self::CUexternalMemoryHandleType_enum as CUexternalMemoryHandleType; -#[doc = " External memory handle descriptor"] #[repr(C)] #[derive(Copy, Clone)] pub 
struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { - #[doc = " Type of the handle"] pub type_: CUexternalMemoryHandleType, pub handle: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1, - #[doc = " Size of the memory allocation"] pub size: ::std::os::raw::c_ulonglong, - #[doc = " Flags must either be zero or ::CUDA_EXTERNAL_MEMORY_DEDICATED"] pub flags: ::std::os::raw::c_uint, pub reserved: [::std::os::raw::c_uint; 16usize], } #[repr(C)] #[derive(Copy, Clone)] pub union CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1 { - #[doc = " File descriptor referencing the memory object. Valid"] - #[doc = " when type is"] - #[doc = " ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD"] pub fd: ::std::os::raw::c_int, pub win32: CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1, _bindgen_union_align: [u64; 2usize], } -#[doc = " Win32 handle referencing the semaphore object. Valid when"] -#[doc = " type is one of the following:"] -#[doc = " - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32"] -#[doc = " - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT"] -#[doc = " - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP"] -#[doc = " - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE"] -#[doc = " Exactly one of 'handle' and 'name' must be non-NULL. If"] -#[doc = " type is"] -#[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT"] -#[doc = " then 'name' must be NULL."] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 { - #[doc = " Valid NT handle. Must be NULL if 'name' is non-NULL"] pub handle: *mut ::std::os::raw::c_void, - #[doc = " Name of a valid memory object."] - #[doc = " Must be NULL if 'handle' is non-NULL."] pub name: *const ::std::os::raw::c_void, } pub type CUDA_EXTERNAL_MEMORY_HANDLE_DESC = CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st; -#[doc = " External memory buffer descriptor"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { - #[doc = " Offset into the memory object where the buffer's base is"] pub offset: ::std::os::raw::c_ulonglong, - #[doc = " Size of the buffer"] pub size: ::std::os::raw::c_ulonglong, - #[doc = " Flags reserved for future use. 
Must be zero."] pub flags: ::std::os::raw::c_uint, pub reserved: [::std::os::raw::c_uint; 16usize], } pub type CUDA_EXTERNAL_MEMORY_BUFFER_DESC = CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st; -#[doc = " External memory mipmap descriptor"] #[repr(C)] pub struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { - #[doc = " Offset into the memory object where the base level of the"] - #[doc = " mipmap chain is."] pub offset: ::std::os::raw::c_ulonglong, - #[doc = " Format, dimension and type of base level of the mipmap chain"] pub arrayDesc: CUDA_ARRAY3D_DESCRIPTOR, - #[doc = " Total number of levels in the mipmap chain"] pub numLevels: ::std::os::raw::c_uint, pub reserved: [::std::os::raw::c_uint; 16usize], } pub type CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC = CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st; -#[doc = " Handle is an opaque file descriptor"] pub const CUexternalSemaphoreHandleType_enum_CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD : CUexternalSemaphoreHandleType_enum = 1 ; -#[doc = " Handle is an opaque shared NT handle"] pub const CUexternalSemaphoreHandleType_enum_CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 : CUexternalSemaphoreHandleType_enum = 2 ; -#[doc = " Handle is an opaque, globally shared handle"] pub const CUexternalSemaphoreHandleType_enum_CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT : CUexternalSemaphoreHandleType_enum = 3 ; -#[doc = " Handle is a shared NT handle referencing a D3D12 fence object"] pub const CUexternalSemaphoreHandleType_enum_CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE : CUexternalSemaphoreHandleType_enum = 4 ; -#[doc = " External semaphore handle types"] pub type CUexternalSemaphoreHandleType_enum = u32; pub use self::CUexternalSemaphoreHandleType_enum as CUexternalSemaphoreHandleType; -#[doc = " External semaphore handle descriptor"] #[repr(C)] #[derive(Copy, Clone)] pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { - #[doc = " Type of the handle"] pub type_: CUexternalSemaphoreHandleType, pub handle: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1, - #[doc = " Flags reserved for the future. Must be zero."] pub flags: ::std::os::raw::c_uint, pub reserved: [::std::os::raw::c_uint; 16usize], } #[repr(C)] #[derive(Copy, Clone)] pub union CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1 { - #[doc = " File descriptor referencing the semaphore object. Valid"] - #[doc = " when type is"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD"] pub fd: ::std::os::raw::c_int, pub win32: CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1, _bindgen_union_align: [u64; 2usize], } -#[doc = " Win32 handle referencing the semaphore object. Valid when"] -#[doc = " type is one of the following:"] -#[doc = " - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32"] -#[doc = " - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT"] -#[doc = " - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE"] -#[doc = " Exactly one of 'handle' and 'name' must be non-NULL. If"] -#[doc = " type is"] -#[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT"] -#[doc = " then 'name' must be NULL."] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st__bindgen_ty_1__bindgen_ty_1 { - #[doc = " Valid NT handle. 
Must be NULL if 'name' is non-NULL"] pub handle: *mut ::std::os::raw::c_void, - #[doc = " Name of a valid synchronization primitive."] - #[doc = " Must be NULL if 'handle' is non-NULL."] pub name: *const ::std::os::raw::c_void, } pub type CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC = CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st; -#[doc = " External semaphore signal parameters"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st { pub params: CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1, - #[doc = " Flags reserved for the future. Must be zero."] pub flags: ::std::os::raw::c_uint, pub reserved: [::std::os::raw::c_uint; 16usize], } @@ -2092,22 +1188,18 @@ pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1 { CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1, pub reserved: [::std::os::raw::c_uint; 16usize], } -#[doc = " Parameters for fence objects"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st__bindgen_ty_1__bindgen_ty_1 { - #[doc = " Value of fence to be signaled"] pub value: ::std::os::raw::c_ulonglong, } pub type CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS = CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st; -#[doc = " External semaphore wait parameters"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st { pub params: CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1, - #[doc = " Flags reserved for the future. Must be zero."] pub flags: ::std::os::raw::c_uint, pub reserved: [::std::os::raw::c_uint; 16usize], } @@ -2118,190 +1210,43 @@ pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1 { CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1, pub reserved: [::std::os::raw::c_uint; 16usize], } -#[doc = " Parameters for fence objects"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st__bindgen_ty_1__bindgen_ty_1 { - #[doc = " Value of fence to be waited on"] pub value: ::std::os::raw::c_ulonglong, } pub type CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS = CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st; extern "C" { - #[doc = " \\brief Gets the string description of an error code"] - #[doc = ""] - #[doc = " Sets \\p *pStr to the address of a NULL-terminated string description"] - #[doc = " of the error code \\p error."] - #[doc = " If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " will be returned and \\p *pStr will be set to the NULL address."] - #[doc = ""] - #[doc = " \\param error - Error code to convert to string"] - #[doc = " \\param pStr - Address of the string pointer."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::CUresult,"] - #[doc = " ::cudaGetErrorString"] pub fn cuGetErrorString( error: CUresult, pStr: *mut *const ::std::os::raw::c_char, ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the string representation of an error code enum name"] - #[doc = ""] - #[doc = " Sets \\p *pStr to the address of a NULL-terminated string representation"] - #[doc = " of the name of the enum error code \\p error."] - #[doc = " If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " will be returned and \\p *pStr will be set to the NULL address."] - #[doc = ""] - #[doc = " \\param error - Error code to convert to string"] - #[doc = " \\param pStr - Address of the string pointer."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " 
::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::CUresult,"] - #[doc = " ::cudaGetErrorName"] pub fn cuGetErrorName( error: CUresult, pStr: *mut *const ::std::os::raw::c_char, ) -> CUresult; } extern "C" { - #[doc = " \\brief Initialize the CUDA driver API"] - #[doc = ""] - #[doc = " Initializes the driver API and must be called before any other function from"] - #[doc = " the driver API. Currently, the \\p Flags parameter must be 0. If ::cuInit()"] - #[doc = " has not been called, any function from the driver API will return"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED."] - #[doc = ""] - #[doc = " \\param Flags - Initialization flag for CUDA."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE,"] - #[doc = " ::CUDA_ERROR_SYSTEM_DRIVER_MISMATCH,"] - #[doc = " ::CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE"] - #[doc = " \\notefnerr"] pub fn cuInit(Flags: ::std::os::raw::c_uint) -> CUresult; } extern "C" { - #[doc = " \\brief Returns the latest CUDA version supported by driver"] - #[doc = ""] - #[doc = " Returns in \\p *driverVersion the version of CUDA supported by"] - #[doc = " the driver. The version is returned as"] - #[doc = " (1000 × major + 10 × minor). For example, CUDA 9.2"] - #[doc = " would be represented by 9020."] - #[doc = ""] - #[doc = " This function automatically returns ::CUDA_ERROR_INVALID_VALUE if"] - #[doc = " \\p driverVersion is NULL."] - #[doc = ""] - #[doc = " \\param driverVersion - Returns the CUDA driver version"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaDriverGetVersion,"] - #[doc = " ::cudaRuntimeGetVersion"] pub fn cuDriverGetVersion( driverVersion: *mut ::std::os::raw::c_int, ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a handle to a compute device"] - #[doc = ""] - #[doc = " Returns in \\p *device a device handle given an ordinal in the range [0,"] - #[doc = " ::cuDeviceGetCount()-1]."] - #[doc = ""] - #[doc = " \\param device - Returned device handle"] - #[doc = " \\param ordinal - Device number to get handle for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceGetAttribute,"] - #[doc = " ::cuDeviceGetCount,"] - #[doc = " ::cuDeviceGetName,"] - #[doc = " ::cuDeviceGetUuid,"] - #[doc = " ::cuDeviceGetLuid,"] - #[doc = " ::cuDeviceTotalMem"] pub fn cuDeviceGet( device: *mut CUdevice, ordinal: ::std::os::raw::c_int, ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns the number of compute-capable devices"] - #[doc = ""] - #[doc = " Returns in \\p *count the number of devices with compute capability greater"] - #[doc = " than or equal to 2.0 that are available for execution. 
If there is no such"] - #[doc = " device, ::cuDeviceGetCount() returns 0."] - #[doc = ""] - #[doc = " \\param count - Returned number of compute-capable devices"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceGetAttribute,"] - #[doc = " ::cuDeviceGetName,"] - #[doc = " ::cuDeviceGetUuid,"] - #[doc = " ::cuDeviceGetLuid,"] - #[doc = " ::cuDeviceGet,"] - #[doc = " ::cuDeviceTotalMem,"] - #[doc = " ::cudaGetDeviceCount"] pub fn cuDeviceGetCount(count: *mut ::std::os::raw::c_int) -> CUresult; } extern "C" { - #[doc = " \\brief Returns an identifer string for the device"] - #[doc = ""] - #[doc = " Returns an ASCII string identifying the device \\p dev in the NULL-terminated"] - #[doc = " string pointed to by \\p name. \\p len specifies the maximum length of the"] - #[doc = " string that may be returned."] - #[doc = ""] - #[doc = " \\param name - Returned identifier string for the device"] - #[doc = " \\param len - Maximum length of string to store in \\p name"] - #[doc = " \\param dev - Device to get identifier string for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceGetAttribute,"] - #[doc = " ::cuDeviceGetUuid,"] - #[doc = " ::cuDeviceGetLuid,"] - #[doc = " ::cuDeviceGetCount,"] - #[doc = " ::cuDeviceGet,"] - #[doc = " ::cuDeviceTotalMem,"] - #[doc = " ::cudaGetDeviceProperties"] pub fn cuDeviceGetName( name: *mut ::std::os::raw::c_char, len: ::std::os::raw::c_int, @@ -2309,236 +1254,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Return an UUID for the device"] - #[doc = ""] - #[doc = " Returns 16-octets identifing the device \\p dev in the structure"] - #[doc = " pointed by the \\p uuid."] - #[doc = ""] - #[doc = " \\param uuid - Returned UUID"] - #[doc = " \\param dev - Device to get identifier string for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceGetAttribute,"] - #[doc = " ::cuDeviceGetCount,"] - #[doc = " ::cuDeviceGetName,"] - #[doc = " ::cuDeviceGetLuid,"] - #[doc = " ::cuDeviceGet,"] - #[doc = " ::cuDeviceTotalMem,"] - #[doc = " ::cudaGetDeviceProperties"] pub fn cuDeviceGetUuid(uuid: *mut CUuuid, dev: CUdevice) -> CUresult; } extern "C" { pub fn cuDeviceTotalMem_v2(bytes: *mut usize, dev: CUdevice) -> CUresult; } extern "C" { - #[doc = " \\brief Returns information about the device"] - #[doc = ""] - #[doc = " Returns in \\p *pi the integer value of the attribute \\p attrib on device"] - #[doc = " \\p dev. 
The supported attributes are:"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: Maximum number of threads per"] - #[doc = " block;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: Maximum x-dimension of a block;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: Maximum y-dimension of a block;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: Maximum z-dimension of a block;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: Maximum x-dimension of a grid;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: Maximum y-dimension of a grid;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: Maximum z-dimension of a grid;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: Maximum amount of"] - #[doc = " shared memory available to a thread block in bytes;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: Memory available on device for"] - #[doc = " __constant__ variables in a CUDA C kernel in bytes;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_WARP_SIZE: Warp size in threads;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_PITCH: Maximum pitch in bytes allowed by the"] - #[doc = " memory copy functions that involve memory regions allocated through"] - #[doc = " ::cuMemAllocPitch();"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: Maximum 1D"] - #[doc = " texture width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: Maximum width"] - #[doc = " for a 1D texture bound to linear memory;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: Maximum"] - #[doc = " mipmapped 1D texture width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: Maximum 2D"] - #[doc = " texture width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: Maximum 2D"] - #[doc = " texture height;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: Maximum width"] - #[doc = " for a 2D texture bound to linear memory;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: Maximum height"] - #[doc = " for a 2D texture bound to linear memory;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: Maximum pitch"] - #[doc = " in bytes for a 2D texture bound to linear memory;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: Maximum"] - #[doc = " mipmapped 2D texture width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: Maximum"] - #[doc = " mipmapped 2D texture height;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: Maximum 3D"] - #[doc = " texture width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: Maximum 3D"] - #[doc = " texture height;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: Maximum 3D"] - #[doc = " texture depth;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE:"] - #[doc = " Alternate maximum 3D texture width, 0 if no alternate"] - #[doc = " maximum 3D texture size is supported;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE:"] - #[doc = " Alternate maximum 3D texture height, 0 if no alternate"] - #[doc = " maximum 3D texture size is supported;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE:"] - #[doc = " Alternate maximum 3D texture depth, 0 if no alternate"] - #[doc = " maximum 3D texture size is supported;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH:"] - #[doc = " Maximum cubemap texture width or height;"] - #[doc = " - 
::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH:"] - #[doc = " Maximum 1D layered texture width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS:"] - #[doc = " Maximum layers in a 1D layered texture;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH:"] - #[doc = " Maximum 2D layered texture width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT:"] - #[doc = " Maximum 2D layered texture height;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS:"] - #[doc = " Maximum layers in a 2D layered texture;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH:"] - #[doc = " Maximum cubemap layered texture width or height;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS:"] - #[doc = " Maximum layers in a cubemap layered texture;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH:"] - #[doc = " Maximum 1D surface width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH:"] - #[doc = " Maximum 2D surface width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT:"] - #[doc = " Maximum 2D surface height;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH:"] - #[doc = " Maximum 3D surface width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT:"] - #[doc = " Maximum 3D surface height;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH:"] - #[doc = " Maximum 3D surface depth;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH:"] - #[doc = " Maximum 1D layered surface width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS:"] - #[doc = " Maximum layers in a 1D layered surface;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH:"] - #[doc = " Maximum 2D layered surface width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT:"] - #[doc = " Maximum 2D layered surface height;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS:"] - #[doc = " Maximum layers in a 2D layered surface;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH:"] - #[doc = " Maximum cubemap surface width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH:"] - #[doc = " Maximum cubemap layered surface width;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS:"] - #[doc = " Maximum layers in a cubemap layered surface;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: Maximum number of 32-bit"] - #[doc = " registers available to a thread block;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_CLOCK_RATE: The typical clock frequency in kilohertz;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: Alignment requirement; texture"] - #[doc = " base addresses aligned to ::textureAlign bytes do not need an offset"] - #[doc = " applied to texture fetches;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: Pitch alignment requirement"] - #[doc = " for 2D texture references bound to pitched memory;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: 1 if the device can concurrently copy"] - #[doc = " memory between host and device while executing a kernel, or 0 if not;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: Number of multiprocessors on"] - #[doc = " the device;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: 1 if there is a run time limit"] - #[doc = " for kernels executed on the device, or 0 if not;"] - #[doc = " - 
::CU_DEVICE_ATTRIBUTE_INTEGRATED: 1 if the device is integrated with the"] - #[doc = " memory subsystem, or 0 if not;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: 1 if the device can map host"] - #[doc = " memory into the CUDA address space, or 0 if not;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: Compute mode that device is currently"] - #[doc = " in. Available modes are as follows:"] - #[doc = " - ::CU_COMPUTEMODE_DEFAULT: Default mode - Device is not restricted and"] - #[doc = " can have multiple CUDA contexts present at a single time."] - #[doc = " - ::CU_COMPUTEMODE_PROHIBITED: Compute-prohibited mode - Device is"] - #[doc = " prohibited from creating new CUDA contexts."] - #[doc = " - ::CU_COMPUTEMODE_EXCLUSIVE_PROCESS: Compute-exclusive-process mode - Device"] - #[doc = " can have only one context used by a single process at a time."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: 1 if the device supports"] - #[doc = " executing multiple kernels within the same context simultaneously, or 0 if"] - #[doc = " not. It is not guaranteed that multiple kernels will be resident"] - #[doc = " on the device concurrently so this feature should not be relied upon for"] - #[doc = " correctness;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_ECC_ENABLED: 1 if error correction is enabled on the"] - #[doc = " device, 0 if error correction is disabled or not supported by the device;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: PCI bus identifier of the device;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: PCI device (also known as slot) identifier"] - #[doc = " of the device;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: PCI domain identifier of the device"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_TCC_DRIVER: 1 if the device is using a TCC driver. TCC"] - #[doc = " is only available on Tesla hardware running Windows Vista or later;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: Peak memory clock frequency in kilohertz;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: Global memory bus width in bits;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: Size of L2 cache in bytes. 
0 if the device doesn't have L2 cache;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: Maximum resident threads per multiprocessor;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: 1 if the device shares a unified address space with"] - #[doc = " the host, or 0 if not;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: Major compute capability version number;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: Minor compute capability version number;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: 1 if device supports caching globals"] - #[doc = " in L1 cache, 0 if caching globals in L1 cache is not supported by the device;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: 1 if device supports caching locals"] - #[doc = " in L1 cache, 0 if caching locals in L1 cache is not supported by the device;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: Maximum amount of"] - #[doc = " shared memory available to a multiprocessor in bytes; this amount is shared"] - #[doc = " by all thread blocks simultaneously resident on a multiprocessor;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: Maximum number of 32-bit"] - #[doc = " registers available to a multiprocessor; this number is shared by all thread"] - #[doc = " blocks simultaneously resident on a multiprocessor;"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: 1 if device supports allocating managed memory"] - #[doc = " on this system, 0 if allocating managed memory is not supported by the device on this system."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: 1 if device is on a multi-GPU board, 0 if not."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: Unique identifier for a group of devices"] - #[doc = " associated with the same board. Devices on the same multi-GPU board will share the same identifier."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: 1 if Link between the device and the host"] - #[doc = " supports native atomic operations."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: Ratio of single precision performance"] - #[doc = " (in floating-point operations per second) to double precision performance."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: Device suppports coherently accessing"] - #[doc = " pageable memory without calling cudaHostRegister on it."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: Device can coherently access managed memory"] - #[doc = " concurrently with the CPU."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: Device supports Compute Preemption."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: Device can access host registered"] - #[doc = " memory at the same virtual address as the CPU."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: The maximum per block shared memory size"] - #[doc = " suported on this device. 
This is the maximum value that can be opted into when using the cuFuncSetAttribute() call."] - #[doc = " For more details see ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES"] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: Device accesses pageable memory via the host's"] - #[doc = " page tables."] - #[doc = " - ::CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: The host can directly access managed memory on the device without migration."] - #[doc = ""] - #[doc = " \\param pi - Returned device attribute value"] - #[doc = " \\param attrib - Device attribute to query"] - #[doc = " \\param dev - Device handle"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceGetCount,"] - #[doc = " ::cuDeviceGetName,"] - #[doc = " ::cuDeviceGetUuid,"] - #[doc = " ::cuDeviceGet,"] - #[doc = " ::cuDeviceTotalMem,"] - #[doc = " ::cudaDeviceGetAttribute,"] - #[doc = " ::cudaGetDeviceProperties"] pub fn cuDeviceGetAttribute( pi: *mut ::std::os::raw::c_int, attrib: CUdevice_attribute, @@ -2546,102 +1267,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns properties for a selected device"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " This function was deprecated as of CUDA 5.0 and replaced by ::cuDeviceGetAttribute()."] - #[doc = ""] - #[doc = " Returns in \\p *prop the properties of device \\p dev. The ::CUdevprop"] - #[doc = " structure is defined as:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef struct CUdevprop_st {"] - #[doc = "int maxThreadsPerBlock;"] - #[doc = "int maxThreadsDim[3];"] - #[doc = "int maxGridSize[3];"] - #[doc = "int sharedMemPerBlock;"] - #[doc = "int totalConstantMemory;"] - #[doc = "int SIMDWidth;"] - #[doc = "int memPitch;"] - #[doc = "int regsPerBlock;"] - #[doc = "int clockRate;"] - #[doc = "int textureAlign"] - #[doc = "} CUdevprop;"] - #[doc = " \\endcode"] - #[doc = " where:"] - #[doc = ""] - #[doc = " - ::maxThreadsPerBlock is the maximum number of threads per block;"] - #[doc = " - ::maxThreadsDim[3] is the maximum sizes of each dimension of a block;"] - #[doc = " - ::maxGridSize[3] is the maximum sizes of each dimension of a grid;"] - #[doc = " - ::sharedMemPerBlock is the total amount of shared memory available per"] - #[doc = " block in bytes;"] - #[doc = " - ::totalConstantMemory is the total amount of constant memory available on"] - #[doc = " the device in bytes;"] - #[doc = " - ::SIMDWidth is the warp size;"] - #[doc = " - ::memPitch is the maximum pitch allowed by the memory copy functions that"] - #[doc = " involve memory regions allocated through ::cuMemAllocPitch();"] - #[doc = " - ::regsPerBlock is the total number of registers available per block;"] - #[doc = " - ::clockRate is the clock frequency in kilohertz;"] - #[doc = " - ::textureAlign is the alignment requirement; texture base addresses that"] - #[doc = " are aligned to ::textureAlign bytes do not need an offset applied to"] - #[doc = " texture fetches."] - #[doc = ""] - #[doc = " \\param prop - Returned properties of device"] - #[doc = " \\param dev - Device to get properties for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " 
::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceGetAttribute,"] - #[doc = " ::cuDeviceGetCount,"] - #[doc = " ::cuDeviceGetName,"] - #[doc = " ::cuDeviceGetUuid,"] - #[doc = " ::cuDeviceGet,"] - #[doc = " ::cuDeviceTotalMem"] pub fn cuDeviceGetProperties( prop: *mut CUdevprop, dev: CUdevice, ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns the compute capability of the device"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " This function was deprecated as of CUDA 5.0 and its functionality superceded"] - #[doc = " by ::cuDeviceGetAttribute()."] - #[doc = ""] - #[doc = " Returns in \\p *major and \\p *minor the major and minor revision numbers that"] - #[doc = " define the compute capability of the device \\p dev."] - #[doc = ""] - #[doc = " \\param major - Major revision number"] - #[doc = " \\param minor - Minor revision number"] - #[doc = " \\param dev - Device handle"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceGetAttribute,"] - #[doc = " ::cuDeviceGetCount,"] - #[doc = " ::cuDeviceGetName,"] - #[doc = " ::cuDeviceGetUuid,"] - #[doc = " ::cuDeviceGet,"] - #[doc = " ::cuDeviceTotalMem"] pub fn cuDeviceComputeCapability( major: *mut ::std::os::raw::c_int, minor: *mut ::std::os::raw::c_int, @@ -2649,179 +1280,21 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Retain the primary context on the GPU"] - #[doc = ""] - #[doc = " Retains the primary context on the device, creating it if necessary,"] - #[doc = " increasing its usage count. The caller must call"] - #[doc = " ::cuDevicePrimaryCtxRelease() when done using the context."] - #[doc = " Unlike ::cuCtxCreate() the newly created context is not pushed onto the stack."] - #[doc = ""] - #[doc = " Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of"] - #[doc = " the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute()"] - #[doc = " can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the compute mode"] - #[doc = " of the device."] - #[doc = " The nvidia-smi tool can be used to set the compute mode for"] - #[doc = " devices. Documentation for nvidia-smi can be obtained by passing a"] - #[doc = " -h option to it."] - #[doc = ""] - #[doc = " Please note that the primary context always supports pinned allocations. 
Other"] - #[doc = " flags can be specified by ::cuDevicePrimaryCtxSetFlags()."] - #[doc = ""] - #[doc = " \\param pctx - Returned context handle of the new context"] - #[doc = " \\param dev - Device for which primary context is requested"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY,"] - #[doc = " ::CUDA_ERROR_UNKNOWN"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuDevicePrimaryCtxRelease,"] - #[doc = " ::cuDevicePrimaryCtxSetFlags,"] - #[doc = " ::cuCtxCreate,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize"] pub fn cuDevicePrimaryCtxRetain( pctx: *mut CUcontext, dev: CUdevice, ) -> CUresult; } extern "C" { - #[doc = " \\brief Release the primary context on the GPU"] - #[doc = ""] - #[doc = " Releases the primary context interop on the device by decreasing the usage"] - #[doc = " count by 1. If the usage drops to 0 the primary context of device \\p dev"] - #[doc = " will be destroyed regardless of how many threads it is current to."] - #[doc = ""] - #[doc = " Please note that unlike ::cuCtxDestroy() this method does not pop the context"] - #[doc = " from stack in any circumstances."] - #[doc = ""] - #[doc = " \\param dev - Device which primary context is released"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuDevicePrimaryCtxRetain,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize"] pub fn cuDevicePrimaryCtxRelease(dev: CUdevice) -> CUresult; } extern "C" { - #[doc = " \\brief Set flags for the primary context"] - #[doc = ""] - #[doc = " Sets the flags for the primary context on the device overwriting perviously"] - #[doc = " set ones. If the primary context is already created"] - #[doc = " ::CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE is returned."] - #[doc = ""] - #[doc = " The three LSBs of the \\p flags parameter can be used to control how the OS"] - #[doc = " thread, which owns the CUDA context at the time of an API call, interacts"] - #[doc = " with the OS scheduler when waiting for results from the GPU. Only one of"] - #[doc = " the scheduling flags can be set when creating a context."] - #[doc = ""] - #[doc = " - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for"] - #[doc = " results from the GPU. 
This can decrease latency when waiting for the GPU,"] - #[doc = " but may lower the performance of CPU threads if they are performing work in"] - #[doc = " parallel with the CUDA thread."] - #[doc = ""] - #[doc = " - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for"] - #[doc = " results from the GPU. This can increase latency when waiting for the GPU,"] - #[doc = " but can increase the performance of CPU threads performing work in parallel"] - #[doc = " with the GPU."] - #[doc = ""] - #[doc = " - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a"] - #[doc = " synchronization primitive when waiting for the GPU to finish work."] - #[doc = ""] - #[doc = " - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a"] - #[doc = " synchronization primitive when waiting for the GPU to finish work.
"] - #[doc = " Deprecated: This flag was deprecated as of CUDA 4.0 and was"] - #[doc = " replaced with ::CU_CTX_SCHED_BLOCKING_SYNC."] - #[doc = ""] - #[doc = " - ::CU_CTX_SCHED_AUTO: The default value if the \\p flags parameter is zero,"] - #[doc = " uses a heuristic based on the number of active CUDA contexts in the"] - #[doc = " process \\e C and the number of logical processors in the system \\e P. If"] - #[doc = " \\e C > \\e P, then CUDA will yield to other OS threads when waiting for"] - #[doc = " the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while"] - #[doc = " waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN)."] - #[doc = " Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on"] - #[doc = " the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC"] - #[doc = " for low-powered devices."] - #[doc = ""] - #[doc = " - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory"] - #[doc = " after resizing local memory for a kernel. This can prevent thrashing by"] - #[doc = " local memory allocations when launching many kernels with high local"] - #[doc = " memory usage at the cost of potentially increased memory usage."] - #[doc = ""] - #[doc = " \\param dev - Device for which the primary context flags are set"] - #[doc = " \\param flags - New flags for the device"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuDevicePrimaryCtxRetain,"] - #[doc = " ::cuDevicePrimaryCtxGetState,"] - #[doc = " ::cuCtxCreate,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cudaSetDeviceFlags"] pub fn cuDevicePrimaryCtxSetFlags( dev: CUdevice, flags: ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Get the state of the primary context"] - #[doc = ""] - #[doc = " Returns in \\p *flags the flags for the primary context of \\p dev, and in"] - #[doc = " \\p *active whether it is active. See ::cuDevicePrimaryCtxSetFlags for flag"] - #[doc = " values."] - #[doc = ""] - #[doc = " \\param dev - Device to get primary context flags for"] - #[doc = " \\param flags - Pointer to store flags"] - #[doc = " \\param active - Pointer to store context state; 0 = inactive, 1 = active"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDevicePrimaryCtxSetFlags,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cudaGetDeviceFlags"] pub fn cuDevicePrimaryCtxGetState( dev: CUdevice, flags: *mut ::std::os::raw::c_uint, @@ -2829,40 +1302,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Destroy all allocations and reset all state on the primary context"] - #[doc = ""] - #[doc = " Explicitly destroys and cleans up all resources associated with the current"] - #[doc = " device in the current process."] - #[doc = ""] - #[doc = " Note that it is responsibility of the calling function to ensure that no"] - #[doc = " other module in the process is using the device any more. 
For that reason"] - #[doc = " it is recommended to use ::cuDevicePrimaryCtxRelease() in most cases."] - #[doc = " However it is safe for other modules to call ::cuDevicePrimaryCtxRelease()"] - #[doc = " even after resetting the device."] - #[doc = ""] - #[doc = " \\param dev - Device for which primary context is destroyed"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE,"] - #[doc = " ::CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuDevicePrimaryCtxRetain,"] - #[doc = " ::cuDevicePrimaryCtxRelease,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize,"] - #[doc = " ::cudaDeviceReset"] pub fn cuDevicePrimaryCtxReset(dev: CUdevice) -> CUresult; } extern "C" { @@ -2882,739 +1321,72 @@ extern "C" { pub fn cuCtxPopCurrent_v2(pctx: *mut CUcontext) -> CUresult; } extern "C" { - #[doc = " \\brief Binds the specified CUDA context to the calling CPU thread"] - #[doc = ""] - #[doc = " Binds the specified CUDA context to the calling CPU thread."] - #[doc = " If \\p ctx is NULL then the CUDA context previously bound to the"] - #[doc = " calling CPU thread is unbound and ::CUDA_SUCCESS is returned."] - #[doc = ""] - #[doc = " If there exists a CUDA context stack on the calling CPU thread, this"] - #[doc = " will replace the top of that stack with \\p ctx."] - #[doc = " If \\p ctx is NULL then this will be equivalent to popping the top"] - #[doc = " of the calling CPU thread's CUDA context stack (or a no-op if the"] - #[doc = " calling CPU thread's CUDA context stack is empty)."] - #[doc = ""] - #[doc = " \\param ctx - Context to bind to the calling CPU thread"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuCtxGetCurrent,"] - #[doc = " ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cudaSetDevice"] pub fn cuCtxSetCurrent(ctx: CUcontext) -> CUresult; } extern "C" { - #[doc = " \\brief Returns the CUDA context bound to the calling CPU thread."] - #[doc = ""] - #[doc = " Returns in \\p *pctx the CUDA context bound to the calling CPU thread."] - #[doc = " If no context is bound to the calling CPU thread then \\p *pctx is"] - #[doc = " set to NULL and ::CUDA_SUCCESS is returned."] - #[doc = ""] - #[doc = " \\param pctx - Returned context handle"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuCtxSetCurrent,"] - #[doc = " ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cudaGetDevice"] pub fn cuCtxGetCurrent(pctx: *mut CUcontext) -> CUresult; } extern "C" { - #[doc = " \\brief Returns the device ID for the current context"] - #[doc = ""] - #[doc = " Returns in \\p *device the ordinal of the current context's device."] - #[doc = ""] - #[doc = " \\param device - Returned device ID for the current context"] - #[doc = ""] 
- #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize,"] - #[doc = " ::cudaGetDevice"] pub fn cuCtxGetDevice(device: *mut CUdevice) -> CUresult; } extern "C" { - #[doc = " \\brief Returns the flags for the current context"] - #[doc = ""] - #[doc = " Returns in \\p *flags the flags of the current context. See ::cuCtxCreate"] - #[doc = " for flag values."] - #[doc = ""] - #[doc = " \\param flags - Pointer to store flags of current context"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetCurrent,"] - #[doc = " ::cuCtxGetDevice"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxGetSharedMemConfig,"] - #[doc = " ::cuCtxGetStreamPriorityRange,"] - #[doc = " ::cudaGetDeviceFlags"] pub fn cuCtxGetFlags(flags: *mut ::std::os::raw::c_uint) -> CUresult; } extern "C" { - #[doc = " \\brief Block for a context's tasks to complete"] - #[doc = ""] - #[doc = " Blocks until the device has completed all preceding requested tasks."] - #[doc = " ::cuCtxSynchronize() returns an error if one of the preceding tasks failed."] - #[doc = " If the context was created with the ::CU_CTX_SCHED_BLOCKING_SYNC flag, the"] - #[doc = " CPU thread will block until the GPU context has finished its work."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cudaDeviceSynchronize"] pub fn cuCtxSynchronize() -> CUresult; } extern "C" { - #[doc = " \\brief Set resource limits"] - #[doc = ""] - #[doc = " Setting \\p limit to \\p value is a request by the application to update"] - #[doc = " the current limit maintained by the context. The driver is free to"] - #[doc = " modify the requested value to meet h/w requirements (this could be"] - #[doc = " clamping to minimum or maximum values, rounding up to nearest element"] - #[doc = " size, etc). 
The application can use ::cuCtxGetLimit() to find out exactly"] - #[doc = " what the limit has been set to."] - #[doc = ""] - #[doc = " Setting each ::CUlimit has its own specific restrictions, so each is"] - #[doc = " discussed here."] - #[doc = ""] - #[doc = " - ::CU_LIMIT_STACK_SIZE controls the stack size in bytes of each GPU thread."] - #[doc = " Note that the CUDA driver will set the \\p limit to the maximum of \\p value"] - #[doc = " and what the kernel function requires."] - #[doc = ""] - #[doc = " - ::CU_LIMIT_PRINTF_FIFO_SIZE controls the size in bytes of the FIFO used"] - #[doc = " by the ::printf() device system call. Setting ::CU_LIMIT_PRINTF_FIFO_SIZE"] - #[doc = " must be performed before launching any kernel that uses the ::printf()"] - #[doc = " device system call, otherwise ::CUDA_ERROR_INVALID_VALUE will be returned."] - #[doc = ""] - #[doc = " - ::CU_LIMIT_MALLOC_HEAP_SIZE controls the size in bytes of the heap used"] - #[doc = " by the ::malloc() and ::free() device system calls. Setting"] - #[doc = " ::CU_LIMIT_MALLOC_HEAP_SIZE must be performed before launching any kernel"] - #[doc = " that uses the ::malloc() or ::free() device system calls, otherwise"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE will be returned."] - #[doc = ""] - #[doc = " - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH controls the maximum nesting depth of"] - #[doc = " a grid at which a thread can safely call ::cudaDeviceSynchronize(). Setting"] - #[doc = " this limit must be performed before any launch of a kernel that uses the"] - #[doc = " device runtime and calls ::cudaDeviceSynchronize() above the default sync"] - #[doc = " depth, two levels of grids. Calls to ::cudaDeviceSynchronize() will fail"] - #[doc = " with error code ::cudaErrorSyncDepthExceeded if the limitation is"] - #[doc = " violated. This limit can be set smaller than the default or up the maximum"] - #[doc = " launch depth of 24. When setting this limit, keep in mind that additional"] - #[doc = " levels of sync depth require the driver to reserve large amounts of device"] - #[doc = " memory which can no longer be used for user allocations. If these"] - #[doc = " reservations of device memory fail, ::cuCtxSetLimit will return"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value."] - #[doc = " This limit is only applicable to devices of compute capability 3.5 and"] - #[doc = " higher. Attempting to set this limit on devices of compute capability less"] - #[doc = " than 3.5 will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being"] - #[doc = " returned."] - #[doc = ""] - #[doc = " - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT controls the maximum number of"] - #[doc = " outstanding device runtime launches that can be made from the current"] - #[doc = " context. A grid is outstanding from the point of launch up until the grid"] - #[doc = " is known to have been completed. Device runtime launches which violate"] - #[doc = " this limitation fail and return ::cudaErrorLaunchPendingCountExceeded when"] - #[doc = " ::cudaGetLastError() is called after launch. If more pending launches than"] - #[doc = " the default (2048 launches) are needed for a module using the device"] - #[doc = " runtime, this limit can be increased. Keep in mind that being able to"] - #[doc = " sustain additional pending launches will require the driver to reserve"] - #[doc = " larger amounts of device memory upfront which can no longer be used for"] - #[doc = " allocations. 
If these reservations fail, ::cuCtxSetLimit will return"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value."] - #[doc = " This limit is only applicable to devices of compute capability 3.5 and"] - #[doc = " higher. Attempting to set this limit on devices of compute capability less"] - #[doc = " than 3.5 will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being"] - #[doc = " returned."] - #[doc = ""] - #[doc = " - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY controls the L2 cache fetch granularity."] - #[doc = " Values can range from 0B to 128B. This is purely a performance hint and"] - #[doc = " it can be ignored or clamped depending on the platform."] - #[doc = ""] - #[doc = " \\param limit - Limit to set"] - #[doc = " \\param value - Size of limit"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_UNSUPPORTED_LIMIT,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSynchronize,"] - #[doc = " ::cudaDeviceSetLimit"] pub fn cuCtxSetLimit(limit: CUlimit, value: usize) -> CUresult; } extern "C" { - #[doc = " \\brief Returns resource limits"] - #[doc = ""] - #[doc = " Returns in \\p *pvalue the current size of \\p limit. The supported"] - #[doc = " ::CUlimit values are:"] - #[doc = " - ::CU_LIMIT_STACK_SIZE: stack size in bytes of each GPU thread."] - #[doc = " - ::CU_LIMIT_PRINTF_FIFO_SIZE: size in bytes of the FIFO used by the"] - #[doc = " ::printf() device system call."] - #[doc = " - ::CU_LIMIT_MALLOC_HEAP_SIZE: size in bytes of the heap used by the"] - #[doc = " ::malloc() and ::free() device system calls."] - #[doc = " - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: maximum grid depth at which a thread"] - #[doc = " can issue the device runtime call ::cudaDeviceSynchronize() to wait on"] - #[doc = " child grid launches to complete."] - #[doc = " - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: maximum number of outstanding"] - #[doc = " device runtime launches that can be made from this context."] - #[doc = " - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY: L2 cache fetch granularity."] - #[doc = ""] - #[doc = " \\param limit - Limit to query"] - #[doc = " \\param pvalue - Returned size of limit"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_UNSUPPORTED_LIMIT"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize,"] - #[doc = " ::cudaDeviceGetLimit"] pub fn cuCtxGetLimit(pvalue: *mut usize, limit: CUlimit) -> CUresult; } extern "C" { - #[doc = " \\brief Returns the preferred cache configuration for the current context."] - #[doc = ""] - #[doc = " On devices where the L1 cache and shared memory use the same hardware"] - #[doc = " resources, 
this function returns through \\p pconfig the preferred cache configuration"] - #[doc = " for the current context. This is only a preference. The driver will use"] - #[doc = " the requested configuration if possible, but it is free to choose a different"] - #[doc = " configuration if required to execute functions."] - #[doc = ""] - #[doc = " This will return a \\p pconfig of ::CU_FUNC_CACHE_PREFER_NONE on devices"] - #[doc = " where the size of the L1 cache and shared memory are fixed."] - #[doc = ""] - #[doc = " The supported cache configurations are:"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default)"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory"] - #[doc = ""] - #[doc = " \\param pconfig - Returned cache configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cudaDeviceGetCacheConfig"] pub fn cuCtxGetCacheConfig(pconfig: *mut CUfunc_cache) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the preferred cache configuration for the current context."] - #[doc = ""] - #[doc = " On devices where the L1 cache and shared memory use the same hardware"] - #[doc = " resources, this sets through \\p config the preferred cache configuration for"] - #[doc = " the current context. This is only a preference. The driver will use"] - #[doc = " the requested configuration if possible, but it is free to choose a different"] - #[doc = " configuration if required to execute the function. Any function preference"] - #[doc = " set via ::cuFuncSetCacheConfig() will be preferred over this context-wide"] - #[doc = " setting. 
Setting the context-wide cache configuration to"] - #[doc = " ::CU_FUNC_CACHE_PREFER_NONE will cause subsequent kernel launches to prefer"] - #[doc = " to not change the cache configuration unless required to launch the kernel."] - #[doc = ""] - #[doc = " This setting does nothing on devices where the size of the L1 cache and"] - #[doc = " shared memory are fixed."] - #[doc = ""] - #[doc = " Launching a kernel with a different preference than the most recent"] - #[doc = " preference setting may insert a device-side synchronization point."] - #[doc = ""] - #[doc = " The supported cache configurations are:"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default)"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory"] - #[doc = ""] - #[doc = " \\param config - Requested cache configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cudaDeviceSetCacheConfig"] pub fn cuCtxSetCacheConfig(config: CUfunc_cache) -> CUresult; } extern "C" { - #[doc = " \\brief Returns the current shared memory configuration for the current context."] - #[doc = ""] - #[doc = " This function will return in \\p pConfig the current size of shared memory banks"] - #[doc = " in the current context. On devices with configurable shared memory banks,"] - #[doc = " ::cuCtxSetSharedMemConfig can be used to change this setting, so that all"] - #[doc = " subsequent kernel launches will by default use the new bank size. 
When"] - #[doc = " ::cuCtxGetSharedMemConfig is called on devices without configurable shared"] - #[doc = " memory, it will return the fixed bank size of the hardware."] - #[doc = ""] - #[doc = " The returned bank configurations can be either:"] - #[doc = " - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: shared memory bank width is"] - #[doc = " four bytes."] - #[doc = " - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: shared memory bank width will"] - #[doc = " eight bytes."] - #[doc = ""] - #[doc = " \\param pConfig - returned shared memory configuration"] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize,"] - #[doc = " ::cuCtxGetSharedMemConfig,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cudaDeviceGetSharedMemConfig"] pub fn cuCtxGetSharedMemConfig(pConfig: *mut CUsharedconfig) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the shared memory configuration for the current context."] - #[doc = ""] - #[doc = " On devices with configurable shared memory banks, this function will set"] - #[doc = " the context's shared memory bank size which is used for subsequent kernel"] - #[doc = " launches."] - #[doc = ""] - #[doc = " Changed the shared memory configuration between launches may insert a device"] - #[doc = " side synchronization point between those launches."] - #[doc = ""] - #[doc = " Changing the shared memory bank size will not increase shared memory usage"] - #[doc = " or affect occupancy of kernels, but may have major effects on performance."] - #[doc = " Larger bank sizes will allow for greater potential bandwidth to shared memory,"] - #[doc = " but will change what kinds of accesses to shared memory will result in bank"] - #[doc = " conflicts."] - #[doc = ""] - #[doc = " This function will do nothing on devices with fixed shared memory bank size."] - #[doc = ""] - #[doc = " The supported bank configurations are:"] - #[doc = " - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: set bank width to the default initial"] - #[doc = " setting (currently, four bytes)."] - #[doc = " - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to"] - #[doc = " be natively four bytes."] - #[doc = " - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to"] - #[doc = " be natively eight bytes."] - #[doc = ""] - #[doc = " \\param config - requested shared memory configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " 
::cuCtxSynchronize,"] - #[doc = " ::cuCtxGetSharedMemConfig,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cudaDeviceSetSharedMemConfig"] pub fn cuCtxSetSharedMemConfig(config: CUsharedconfig) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the context's API version."] - #[doc = ""] - #[doc = " Returns a version number in \\p version corresponding to the capabilities of"] - #[doc = " the context (e.g. 3010 or 3020), which library developers can use to direct"] - #[doc = " callers to a specific API version. If \\p ctx is NULL, returns the API version"] - #[doc = " used to create the currently bound context."] - #[doc = ""] - #[doc = " Note that new API versions are only introduced when context capabilities are"] - #[doc = " changed that break binary compatibility, so the API version and driver version"] - #[doc = " may be different. For example, it is valid for the API version to be 3020 while"] - #[doc = " the driver version is 4020."] - #[doc = ""] - #[doc = " \\param ctx - Context to check"] - #[doc = " \\param version - Pointer to version"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_UNKNOWN"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize"] pub fn cuCtxGetApiVersion( ctx: CUcontext, version: *mut ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns numerical values that correspond to the least and"] - #[doc = " greatest stream priorities."] - #[doc = ""] - #[doc = " Returns in \\p *leastPriority and \\p *greatestPriority the numerical values that correspond"] - #[doc = " to the least and greatest stream priorities respectively. Stream priorities"] - #[doc = " follow a convention where lower numbers imply greater priorities. The range of"] - #[doc = " meaningful stream priorities is given by [\\p *greatestPriority, \\p *leastPriority]."] - #[doc = " If the user attempts to create a stream with a priority value that is"] - #[doc = " outside the meaningful range as specified by this API, the priority is"] - #[doc = " automatically clamped down or up to either \\p *leastPriority or \\p *greatestPriority"] - #[doc = " respectively. 
See ::cuStreamCreateWithPriority for details on creating a"] - #[doc = " priority stream."] - #[doc = " A NULL may be passed in for \\p *leastPriority or \\p *greatestPriority if the value"] - #[doc = " is not desired."] - #[doc = ""] - #[doc = " This function will return '0' in both \\p *leastPriority and \\p *greatestPriority if"] - #[doc = " the current context's device does not support stream priorities"] - #[doc = " (see ::cuDeviceGetAttribute)."] - #[doc = ""] - #[doc = " \\param leastPriority - Pointer to an int in which the numerical value for least"] - #[doc = " stream priority is returned"] - #[doc = " \\param greatestPriority - Pointer to an int in which the numerical value for greatest"] - #[doc = " stream priority is returned"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamCreateWithPriority,"] - #[doc = " ::cuStreamGetPriority,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize,"] - #[doc = " ::cudaDeviceGetStreamPriorityRange"] pub fn cuCtxGetStreamPriorityRange( leastPriority: *mut ::std::os::raw::c_int, greatestPriority: *mut ::std::os::raw::c_int, ) -> CUresult; } extern "C" { - #[doc = " \\brief Increment a context's usage-count"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Note that this function is deprecated and should not be used."] - #[doc = ""] - #[doc = " Increments the usage count of the context and passes back a context handle"] - #[doc = " in \\p *pctx that must be passed to ::cuCtxDetach() when the application is"] - #[doc = " done with the context. ::cuCtxAttach() fails if there is no context current"] - #[doc = " to the thread."] - #[doc = ""] - #[doc = " Currently, the \\p flags parameter must be 0."] - #[doc = ""] - #[doc = " \\param pctx - Returned context handle of the current context"] - #[doc = " \\param flags - Context attach flags (must be 0)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxDetach,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize"] pub fn cuCtxAttach( pctx: *mut CUcontext, flags: ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Decrement a context's usage-count"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Note that this function is deprecated and should not be used."] - #[doc = ""] - #[doc = " Decrements the usage count of the context \\p ctx, and destroys the context"] - #[doc = " if the usage count goes to 0. 
The context must be a handle that was passed"] - #[doc = " back by ::cuCtxCreate() or ::cuCtxAttach(), and must be current to the"] - #[doc = " calling thread."] - #[doc = ""] - #[doc = " \\param ctx - Context to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxCreate,"] - #[doc = " ::cuCtxDestroy,"] - #[doc = " ::cuCtxGetApiVersion,"] - #[doc = " ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxGetDevice,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuCtxGetLimit,"] - #[doc = " ::cuCtxPopCurrent,"] - #[doc = " ::cuCtxPushCurrent,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxSetLimit,"] - #[doc = " ::cuCtxSynchronize"] pub fn cuCtxDetach(ctx: CUcontext) -> CUresult; } extern "C" { - #[doc = " \\brief Loads a compute module"] - #[doc = ""] - #[doc = " Takes a filename \\p fname and loads the corresponding module \\p module into"] - #[doc = " the current context. The CUDA driver API does not attempt to lazily"] - #[doc = " allocate the resources needed by a module; if the memory for functions and"] - #[doc = " data (constant and global) needed by the module cannot be allocated,"] - #[doc = " ::cuModuleLoad() fails. The file should be a \\e cubin file as output by"] - #[doc = " \\b nvcc, or a \\e PTX file either as output by \\b nvcc or handwritten, or"] - #[doc = " a \\e fatbin file as output by \\b nvcc from toolchain 4.0 or later."] - #[doc = ""] - #[doc = " \\param module - Returned module"] - #[doc = " \\param fname - Filename of module to load"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_PTX,"] - #[doc = " ::CUDA_ERROR_NOT_FOUND,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY,"] - #[doc = " ::CUDA_ERROR_FILE_NOT_FOUND,"] - #[doc = " ::CUDA_ERROR_NO_BINARY_FOR_GPU,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED,"] - #[doc = " ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuModuleGetFunction,"] - #[doc = " ::cuModuleGetGlobal,"] - #[doc = " ::cuModuleGetTexRef,"] - #[doc = " ::cuModuleLoadData,"] - #[doc = " ::cuModuleLoadDataEx,"] - #[doc = " ::cuModuleLoadFatBinary,"] - #[doc = " ::cuModuleUnload"] pub fn cuModuleLoad( module: *mut CUmodule, fname: *const ::std::os::raw::c_char, ) -> CUresult; } extern "C" { - #[doc = " \\brief Load a module's data"] - #[doc = ""] - #[doc = " Takes a pointer \\p image and loads the corresponding module \\p module into"] - #[doc = " the current context. 
The pointer may be obtained by mapping a \\e cubin or"] - #[doc = " \\e PTX or \\e fatbin file, passing a \\e cubin or \\e PTX or \\e fatbin file"] - #[doc = " as a NULL-terminated text string, or incorporating a \\e cubin or \\e fatbin"] - #[doc = " object into the executable resources and using operating system calls such"] - #[doc = " as Windows \\c FindResource() to obtain the pointer."] - #[doc = ""] - #[doc = " \\param module - Returned module"] - #[doc = " \\param image - Module data to load"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_PTX,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY,"] - #[doc = " ::CUDA_ERROR_NO_BINARY_FOR_GPU,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED,"] - #[doc = " ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuModuleGetFunction,"] - #[doc = " ::cuModuleGetGlobal,"] - #[doc = " ::cuModuleGetTexRef,"] - #[doc = " ::cuModuleLoad,"] - #[doc = " ::cuModuleLoadDataEx,"] - #[doc = " ::cuModuleLoadFatBinary,"] - #[doc = " ::cuModuleUnload"] pub fn cuModuleLoadData( module: *mut CUmodule, image: *const ::std::os::raw::c_void, ) -> CUresult; } extern "C" { - #[doc = " \\brief Load a module's data with options"] - #[doc = ""] - #[doc = " Takes a pointer \\p image and loads the corresponding module \\p module into"] - #[doc = " the current context. The pointer may be obtained by mapping a \\e cubin or"] - #[doc = " \\e PTX or \\e fatbin file, passing a \\e cubin or \\e PTX or \\e fatbin file"] - #[doc = " as a NULL-terminated text string, or incorporating a \\e cubin or \\e fatbin"] - #[doc = " object into the executable resources and using operating system calls such"] - #[doc = " as Windows \\c FindResource() to obtain the pointer. Options are passed as"] - #[doc = " an array via \\p options and any corresponding parameters are passed in"] - #[doc = " \\p optionValues. 
The number of total options is supplied via \\p numOptions."] - #[doc = " Any outputs will be returned via \\p optionValues."] - #[doc = ""] - #[doc = " \\param module - Returned module"] - #[doc = " \\param image - Module data to load"] - #[doc = " \\param numOptions - Number of options"] - #[doc = " \\param options - Options for JIT"] - #[doc = " \\param optionValues - Option values for JIT"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_PTX,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY,"] - #[doc = " ::CUDA_ERROR_NO_BINARY_FOR_GPU,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED,"] - #[doc = " ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuModuleGetFunction,"] - #[doc = " ::cuModuleGetGlobal,"] - #[doc = " ::cuModuleGetTexRef,"] - #[doc = " ::cuModuleLoad,"] - #[doc = " ::cuModuleLoadData,"] - #[doc = " ::cuModuleLoadFatBinary,"] - #[doc = " ::cuModuleUnload"] pub fn cuModuleLoadDataEx( module: *mut CUmodule, image: *const ::std::os::raw::c_void, @@ -3624,100 +1396,15 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Load a module's data"] - #[doc = ""] - #[doc = " Takes a pointer \\p fatCubin and loads the corresponding module \\p module"] - #[doc = " into the current context. The pointer represents a fat binary object,"] - #[doc = " which is a collection of different \\e cubin and/or \\e PTX files, all"] - #[doc = " representing the same device code, but compiled and optimized for different"] - #[doc = " architectures."] - #[doc = ""] - #[doc = " Prior to CUDA 4.0, there was no documented API for constructing and using"] - #[doc = " fat binary objects by programmers. 
Starting with CUDA 4.0, fat binary"] - #[doc = " objects can be constructed by providing the -fatbin option to \\b nvcc."] - #[doc = " More information can be found in the \\b nvcc document."] - #[doc = ""] - #[doc = " \\param module - Returned module"] - #[doc = " \\param fatCubin - Fat binary to load"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_PTX,"] - #[doc = " ::CUDA_ERROR_NOT_FOUND,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY,"] - #[doc = " ::CUDA_ERROR_NO_BINARY_FOR_GPU,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED,"] - #[doc = " ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuModuleGetFunction,"] - #[doc = " ::cuModuleGetGlobal,"] - #[doc = " ::cuModuleGetTexRef,"] - #[doc = " ::cuModuleLoad,"] - #[doc = " ::cuModuleLoadData,"] - #[doc = " ::cuModuleLoadDataEx,"] - #[doc = " ::cuModuleUnload"] pub fn cuModuleLoadFatBinary( module: *mut CUmodule, fatCubin: *const ::std::os::raw::c_void, ) -> CUresult; } extern "C" { - #[doc = " \\brief Unloads a module"] - #[doc = ""] - #[doc = " Unloads a module \\p hmod from the current context."] - #[doc = ""] - #[doc = " \\param hmod - Module to unload"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuModuleGetFunction,"] - #[doc = " ::cuModuleGetGlobal,"] - #[doc = " ::cuModuleGetTexRef,"] - #[doc = " ::cuModuleLoad,"] - #[doc = " ::cuModuleLoadData,"] - #[doc = " ::cuModuleLoadDataEx,"] - #[doc = " ::cuModuleLoadFatBinary"] pub fn cuModuleUnload(hmod: CUmodule) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a function handle"] - #[doc = ""] - #[doc = " Returns in \\p *hfunc the handle of the function of name \\p name located in"] - #[doc = " module \\p hmod. If no function of that name exists, ::cuModuleGetFunction()"] - #[doc = " returns ::CUDA_ERROR_NOT_FOUND."] - #[doc = ""] - #[doc = " \\param hfunc - Returned function handle"] - #[doc = " \\param hmod - Module to retrieve function from"] - #[doc = " \\param name - Name of function to retrieve"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_NOT_FOUND"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuModuleGetGlobal,"] - #[doc = " ::cuModuleGetTexRef,"] - #[doc = " ::cuModuleLoad,"] - #[doc = " ::cuModuleLoadData,"] - #[doc = " ::cuModuleLoadDataEx,"] - #[doc = " ::cuModuleLoadFatBinary,"] - #[doc = " ::cuModuleUnload"] pub fn cuModuleGetFunction( hfunc: *mut CUfunction, hmod: CUmodule, @@ -3733,36 +1420,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a handle to a texture reference"] - #[doc = ""] - #[doc = " Returns in \\p *pTexRef the handle of the texture reference of name \\p name"] - #[doc = " in the module \\p hmod. If no texture reference of that name exists,"] - #[doc = " ::cuModuleGetTexRef() returns ::CUDA_ERROR_NOT_FOUND. 
This texture reference"] - #[doc = " handle should not be destroyed, since it will be destroyed when the module"] - #[doc = " is unloaded."] - #[doc = ""] - #[doc = " \\param pTexRef - Returned texture reference"] - #[doc = " \\param hmod - Module to retrieve texture reference from"] - #[doc = " \\param name - Name of texture reference to retrieve"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_NOT_FOUND"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuModuleGetFunction,"] - #[doc = " ::cuModuleGetGlobal,"] - #[doc = " ::cuModuleGetSurfRef,"] - #[doc = " ::cuModuleLoad,"] - #[doc = " ::cuModuleLoadData,"] - #[doc = " ::cuModuleLoadDataEx,"] - #[doc = " ::cuModuleLoadFatBinary,"] - #[doc = " ::cuModuleUnload,"] - #[doc = " ::cudaGetTextureReference"] pub fn cuModuleGetTexRef( pTexRef: *mut CUtexref, hmod: CUmodule, @@ -3770,34 +1427,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a handle to a surface reference"] - #[doc = ""] - #[doc = " Returns in \\p *pSurfRef the handle of the surface reference of name \\p name"] - #[doc = " in the module \\p hmod. If no surface reference of that name exists,"] - #[doc = " ::cuModuleGetSurfRef() returns ::CUDA_ERROR_NOT_FOUND."] - #[doc = ""] - #[doc = " \\param pSurfRef - Returned surface reference"] - #[doc = " \\param hmod - Module to retrieve surface reference from"] - #[doc = " \\param name - Name of surface reference to retrieve"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_NOT_FOUND"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuModuleGetFunction,"] - #[doc = " ::cuModuleGetGlobal,"] - #[doc = " ::cuModuleGetTexRef,"] - #[doc = " ::cuModuleLoad,"] - #[doc = " ::cuModuleLoadData,"] - #[doc = " ::cuModuleLoadDataEx,"] - #[doc = " ::cuModuleLoadFatBinary,"] - #[doc = " ::cuModuleUnload,"] - #[doc = " ::cudaGetSurfaceReference"] pub fn cuModuleGetSurfRef( pSurfRef: *mut CUsurfref, hmod: CUmodule, @@ -3835,27 +1464,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Complete a pending linker invocation"] - #[doc = ""] - #[doc = " Completes the pending linker action and returns the cubin image for the linked"] - #[doc = " device code, which can be used with ::cuModuleLoadData. 
The cubin is owned by"] - #[doc = " \\p state, so it should be loaded before \\p state is destroyed via ::cuLinkDestroy."] - #[doc = " This call does not destroy \\p state."] - #[doc = ""] - #[doc = " \\param state A pending linker invocation"] - #[doc = " \\param cubinOut On success, this will point to the output image"] - #[doc = " \\param sizeOut Optional parameter to receive the size of the generated image"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = ""] - #[doc = " \\sa ::cuLinkCreate,"] - #[doc = " ::cuLinkAddData,"] - #[doc = " ::cuLinkAddFile,"] - #[doc = " ::cuLinkDestroy,"] - #[doc = " ::cuModuleLoadData"] pub fn cuLinkComplete( state: CUlinkState, cubinOut: *mut *mut ::std::os::raw::c_void, @@ -3863,15 +1471,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Destroys state for a JIT linker invocation."] - #[doc = ""] - #[doc = " \\param state State object for the linker invocation"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = ""] - #[doc = " \\sa ::cuLinkCreate"] pub fn cuLinkDestroy(state: CUlinkState) -> CUresult; } extern "C" { @@ -3906,113 +1505,9 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Frees page-locked host memory"] - #[doc = ""] - #[doc = " Frees the memory space pointed to by \\p p, which must have been returned by"] - #[doc = " a previous call to ::cuMemAllocHost()."] - #[doc = ""] - #[doc = " \\param p - Pointer to memory to free"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,"] - #[doc = " ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,"] - #[doc = " ::cudaFreeHost"] pub fn cuMemFreeHost(p: *mut ::std::os::raw::c_void) -> CUresult; } extern "C" { - #[doc = " \\brief Allocates page-locked host memory"] - #[doc = ""] - #[doc = " Allocates \\p bytesize bytes of host memory that is page-locked and accessible"] - #[doc = " to the device. The driver tracks the virtual memory ranges allocated with"] - #[doc = " this function and automatically accelerates calls to functions such as"] - #[doc = " ::cuMemcpyHtoD(). Since the memory can be accessed directly by the device,"] - #[doc = " it can be read or written with much higher bandwidth than pageable memory"] - #[doc = " obtained with functions such as ::malloc(). 
Allocating excessive amounts of"] - #[doc = " pinned memory may degrade system performance, since it reduces the amount"] - #[doc = " of memory available to the system for paging. As a result, this function is"] - #[doc = " best used sparingly to allocate staging areas for data exchange between"] - #[doc = " host and device."] - #[doc = ""] - #[doc = " The \\p Flags parameter enables different options to be specified that"] - #[doc = " affect the allocation, as follows."] - #[doc = ""] - #[doc = " - ::CU_MEMHOSTALLOC_PORTABLE: The memory returned by this call will be"] - #[doc = " considered as pinned memory by all CUDA contexts, not just the one that"] - #[doc = " performed the allocation."] - #[doc = ""] - #[doc = " - ::CU_MEMHOSTALLOC_DEVICEMAP: Maps the allocation into the CUDA address"] - #[doc = " space. The device pointer to the memory may be obtained by calling"] - #[doc = " ::cuMemHostGetDevicePointer()."] - #[doc = ""] - #[doc = " - ::CU_MEMHOSTALLOC_WRITECOMBINED: Allocates the memory as write-combined"] - #[doc = " (WC). WC memory can be transferred across the PCI Express bus more"] - #[doc = " quickly on some system configurations, but cannot be read efficiently by"] - #[doc = " most CPUs. WC memory is a good option for buffers that will be written by"] - #[doc = " the CPU and read by the GPU via mapped pinned memory or host->device"] - #[doc = " transfers."] - #[doc = ""] - #[doc = " All of these flags are orthogonal to one another: a developer may allocate"] - #[doc = " memory that is portable, mapped and/or write-combined with no restrictions."] - #[doc = ""] - #[doc = " The CUDA context must have been created with the ::CU_CTX_MAP_HOST flag in"] - #[doc = " order for the ::CU_MEMHOSTALLOC_DEVICEMAP flag to have any effect."] - #[doc = ""] - #[doc = " The ::CU_MEMHOSTALLOC_DEVICEMAP flag may be specified on CUDA contexts for"] - #[doc = " devices that do not support mapped pinned memory. The failure is deferred"] - #[doc = " to ::cuMemHostGetDevicePointer() because the memory may be mapped into"] - #[doc = " other CUDA contexts via the ::CU_MEMHOSTALLOC_PORTABLE flag."] - #[doc = ""] - #[doc = " The memory allocated by this function must be freed with ::cuMemFreeHost()."] - #[doc = ""] - #[doc = " Note all host memory allocated using ::cuMemHostAlloc() will automatically"] - #[doc = " be immediately accessible to all contexts on all devices which support unified"] - #[doc = " addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING)."] - #[doc = " Unless the flag ::CU_MEMHOSTALLOC_WRITECOMBINED is specified, the device pointer"] - #[doc = " that may be used to access this host memory from those contexts is always equal"] - #[doc = " to the returned host pointer \\p *pp. 
If the flag ::CU_MEMHOSTALLOC_WRITECOMBINED"] - #[doc = " is specified, then the function ::cuMemHostGetDevicePointer() must be used"] - #[doc = " to query the device pointer, even if the context supports unified addressing."] - #[doc = " See \\ref CUDA_UNIFIED for additional details."] - #[doc = ""] - #[doc = " \\param pp - Returned host pointer to page-locked memory"] - #[doc = " \\param bytesize - Requested allocation size in bytes"] - #[doc = " \\param Flags - Flags for allocation request"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,"] - #[doc = " ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,"] - #[doc = " ::cudaHostAlloc"] pub fn cuMemHostAlloc( pp: *mut *mut ::std::os::raw::c_void, bytesize: usize, @@ -4027,141 +1522,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Passes back flags that were used for a pinned allocation"] - #[doc = ""] - #[doc = " Passes back the flags \\p pFlags that were specified when allocating"] - #[doc = " the pinned host buffer \\p p allocated by ::cuMemHostAlloc."] - #[doc = ""] - #[doc = " ::cuMemHostGetFlags() will fail if the pointer does not reside in"] - #[doc = " an allocation performed by ::cuMemAllocHost() or ::cuMemHostAlloc()."] - #[doc = ""] - #[doc = " \\param pFlags - Returned flags word"] - #[doc = " \\param p - Host pointer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemAllocHost,"] - #[doc = " ::cuMemHostAlloc,"] - #[doc = " ::cudaHostGetFlags"] pub fn cuMemHostGetFlags( pFlags: *mut ::std::os::raw::c_uint, p: *mut ::std::os::raw::c_void, ) -> CUresult; } extern "C" { - #[doc = " \\brief Allocates memory that will be automatically managed by the Unified Memory system"] - #[doc = ""] - #[doc = " Allocates \\p bytesize bytes of managed memory on the device and returns in"] - #[doc = " \\p *dptr a pointer to the allocated memory. If the device doesn't support"] - #[doc = " allocating managed memory, ::CUDA_ERROR_NOT_SUPPORTED is returned. Support"] - #[doc = " for managed memory can be queried using the device attribute"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY. The allocated memory is suitably"] - #[doc = " aligned for any kind of variable. The memory is not cleared. 
If \\p bytesize"] - #[doc = " is 0, ::cuMemAllocManaged returns ::CUDA_ERROR_INVALID_VALUE. The pointer"] - #[doc = " is valid on the CPU and on all GPUs in the system that support managed memory."] - #[doc = " All accesses to this pointer must obey the Unified Memory programming model."] - #[doc = ""] - #[doc = " \\p flags specifies the default stream association for this allocation."] - #[doc = " \\p flags must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST. If"] - #[doc = " ::CU_MEM_ATTACH_GLOBAL is specified, then this memory is accessible from"] - #[doc = " any stream on any device. If ::CU_MEM_ATTACH_HOST is specified, then the"] - #[doc = " allocation should not be accessed from devices that have a zero value for the"] - #[doc = " device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS; an explicit call to"] - #[doc = " ::cuStreamAttachMemAsync will be required to enable access on such devices."] - #[doc = ""] - #[doc = " If the association is later changed via ::cuStreamAttachMemAsync to"] - #[doc = " a single stream, the default association as specifed during ::cuMemAllocManaged"] - #[doc = " is restored when that stream is destroyed. For __managed__ variables, the"] - #[doc = " default association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a"] - #[doc = " stream is an asynchronous operation, and as a result, the change to default"] - #[doc = " association won't happen until all work in the stream has completed."] - #[doc = ""] - #[doc = " Memory allocated with ::cuMemAllocManaged should be released with ::cuMemFree."] - #[doc = ""] - #[doc = " Device memory oversubscription is possible for GPUs that have a non-zero value for the"] - #[doc = " device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Managed memory on"] - #[doc = " such GPUs may be evicted from device memory to host memory at any time by the Unified"] - #[doc = " Memory driver in order to make room for other allocations."] - #[doc = ""] - #[doc = " In a multi-GPU system where all GPUs have a non-zero value for the device attribute"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, managed memory may not be populated when this"] - #[doc = " API returns and instead may be populated on access. In such systems, managed memory can"] - #[doc = " migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to"] - #[doc = " maintain data locality and prevent excessive page faults to the extent possible. The application"] - #[doc = " can also guide the driver about memory usage patterns via ::cuMemAdvise. The application"] - #[doc = " can also explicitly migrate memory to a desired processor's memory via"] - #[doc = " ::cuMemPrefetchAsync."] - #[doc = ""] - #[doc = " In a multi-GPU system where all of the GPUs have a zero value for the device attribute"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS and all the GPUs have peer-to-peer support"] - #[doc = " with each other, the physical storage for managed memory is created on the GPU which is active"] - #[doc = " at the time ::cuMemAllocManaged is called. All other GPUs will reference the data at reduced"] - #[doc = " bandwidth via peer mappings over the PCIe bus. 
The Unified Memory driver does not migrate"] - #[doc = " memory among such GPUs."] - #[doc = ""] - #[doc = " In a multi-GPU system where not all GPUs have peer-to-peer support with each other and"] - #[doc = " where the value of the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS"] - #[doc = " is zero for at least one of those GPUs, the location chosen for physical storage of managed"] - #[doc = " memory is system-dependent."] - #[doc = " - On Linux, the location chosen will be device memory as long as the current set of active"] - #[doc = " contexts are on devices that either have peer-to-peer support with each other or have a"] - #[doc = " non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS."] - #[doc = " If there is an active context on a GPU that does not have a non-zero value for that device"] - #[doc = " attribute and it does not have peer-to-peer support with the other devices that have active"] - #[doc = " contexts on them, then the location for physical storage will be 'zero-copy' or host memory."] - #[doc = " Note that this means that managed memory that is located in device memory is migrated to"] - #[doc = " host memory if a new context is created on a GPU that doesn't have a non-zero value for"] - #[doc = " the device attribute and does not support peer-to-peer with at least one of the other devices"] - #[doc = " that has an active context. This in turn implies that context creation may fail if there is"] - #[doc = " insufficient host memory to migrate all managed allocations."] - #[doc = " - On Windows, the physical storage is always created in 'zero-copy' or host memory."] - #[doc = " All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these"] - #[doc = " circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to"] - #[doc = " restrict CUDA to only use those GPUs that have peer-to-peer support."] - #[doc = " Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a"] - #[doc = " non-zero value to force the driver to always use device memory for physical storage."] - #[doc = " When this environment variable is set to a non-zero value, all contexts created in"] - #[doc = " that process on devices that support managed memory have to be peer-to-peer compatible"] - #[doc = " with each other. Context creation will fail if a context is created on a device that"] - #[doc = " supports managed memory and is not peer-to-peer compatible with any of the other"] - #[doc = " managed memory supporting devices on which contexts were previously created, even if"] - #[doc = " those contexts have been destroyed. 
These environment variables are described"] - #[doc = " in the CUDA programming guide under the \"CUDA environment variables\" section."] - #[doc = " - On ARM, managed memory is not available on discrete gpu with Drive PX-2."] - #[doc = ""] - #[doc = " \\param dptr - Returned device pointer"] - #[doc = " \\param bytesize - Requested allocation size in bytes"] - #[doc = " \\param flags - Must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_NOT_SUPPORTED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,"] - #[doc = " ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,"] - #[doc = " ::cuDeviceGetAttribute, ::cuStreamAttachMemAsync,"] - #[doc = " ::cudaMallocManaged"] pub fn cuMemAllocManaged( dptr: *mut CUdeviceptr, bytesize: usize, @@ -4169,65 +1535,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a handle to a compute device"] - #[doc = ""] - #[doc = " Returns in \\p *device a device handle given a PCI bus ID string."] - #[doc = ""] - #[doc = " \\param dev - Returned device handle"] - #[doc = ""] - #[doc = " \\param pciBusId - String in one of the following forms:"] - #[doc = " [domain]:[bus]:[device].[function]"] - #[doc = " [domain]:[bus]:[device]"] - #[doc = " [bus]:[device].[function]"] - #[doc = " where \\p domain, \\p bus, \\p device, and \\p function are all hexadecimal values"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceGet,"] - #[doc = " ::cuDeviceGetAttribute,"] - #[doc = " ::cuDeviceGetPCIBusId,"] - #[doc = " ::cudaDeviceGetByPCIBusId"] pub fn cuDeviceGetByPCIBusId( dev: *mut CUdevice, pciBusId: *const ::std::os::raw::c_char, ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a PCI Bus Id string for the device"] - #[doc = ""] - #[doc = " Returns an ASCII string identifying the device \\p dev in the NULL-terminated"] - #[doc = " string pointed to by \\p pciBusId. 
\\p len specifies the maximum length of the"] - #[doc = " string that may be returned."] - #[doc = ""] - #[doc = " \\param pciBusId - Returned identifier string for the device in the following format"] - #[doc = " [domain]:[bus]:[device].[function]"] - #[doc = " where \\p domain, \\p bus, \\p device, and \\p function are all hexadecimal values."] - #[doc = " pciBusId should be large enough to store 13 characters including the NULL-terminator."] - #[doc = ""] - #[doc = " \\param len - Maximum length of string to store in \\p name"] - #[doc = ""] - #[doc = " \\param dev - Device to get identifier string for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceGet,"] - #[doc = " ::cuDeviceGetAttribute,"] - #[doc = " ::cuDeviceGetByPCIBusId,"] - #[doc = " ::cudaDeviceGetPCIBusId"] pub fn cuDeviceGetPCIBusId( pciBusId: *mut ::std::os::raw::c_char, len: ::std::os::raw::c_int, @@ -4235,190 +1548,24 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets an interprocess handle for a previously allocated event"] - #[doc = ""] - #[doc = " Takes as input a previously allocated event. This event must have been"] - #[doc = " created with the ::CU_EVENT_INTERPROCESS and ::CU_EVENT_DISABLE_TIMING"] - #[doc = " flags set. This opaque handle may be copied into other processes and"] - #[doc = " opened with ::cuIpcOpenEventHandle to allow efficient hardware"] - #[doc = " synchronization between GPU work in different processes."] - #[doc = ""] - #[doc = " After the event has been opened in the importing process,"] - #[doc = " ::cuEventRecord, ::cuEventSynchronize, ::cuStreamWaitEvent and"] - #[doc = " ::cuEventQuery may be used in either process. 
Performing operations"] - #[doc = " on the imported event after the exported event has been freed"] - #[doc = " with ::cuEventDestroy will result in undefined behavior."] - #[doc = ""] - #[doc = " IPC functionality is restricted to devices with support for unified"] - #[doc = " addressing on Linux and Windows operating systems."] - #[doc = " IPC functionality on Windows is restricted to GPUs in TCC mode"] - #[doc = ""] - #[doc = " \\param pHandle - Pointer to a user allocated CUipcEventHandle"] - #[doc = " in which to return the opaque event handle"] - #[doc = " \\param event - Event allocated with ::CU_EVENT_INTERPROCESS and"] - #[doc = " ::CU_EVENT_DISABLE_TIMING flags."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY,"] - #[doc = " ::CUDA_ERROR_MAP_FAILED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuEventCreate,"] - #[doc = " ::cuEventDestroy,"] - #[doc = " ::cuEventSynchronize,"] - #[doc = " ::cuEventQuery,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuIpcOpenEventHandle,"] - #[doc = " ::cuIpcGetMemHandle,"] - #[doc = " ::cuIpcOpenMemHandle,"] - #[doc = " ::cuIpcCloseMemHandle,"] - #[doc = " ::cudaIpcGetEventHandle"] pub fn cuIpcGetEventHandle( pHandle: *mut CUipcEventHandle, event: CUevent, ) -> CUresult; } extern "C" { - #[doc = " \\brief Opens an interprocess event handle for use in the current process"] - #[doc = ""] - #[doc = " Opens an interprocess event handle exported from another process with"] - #[doc = " ::cuIpcGetEventHandle. This function returns a ::CUevent that behaves like"] - #[doc = " a locally created event with the ::CU_EVENT_DISABLE_TIMING flag specified."] - #[doc = " This event must be freed with ::cuEventDestroy."] - #[doc = ""] - #[doc = " Performing operations on the imported event after the exported event has"] - #[doc = " been freed with ::cuEventDestroy will result in undefined behavior."] - #[doc = ""] - #[doc = " IPC functionality is restricted to devices with support for unified"] - #[doc = " addressing on Linux and Windows operating systems."] - #[doc = " IPC functionality on Windows is restricted to GPUs in TCC mode"] - #[doc = ""] - #[doc = " \\param phEvent - Returns the imported event"] - #[doc = " \\param handle - Interprocess handle to open"] - #[doc = ""] - #[doc = " \\returns"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_MAP_FAILED,"] - #[doc = " ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuEventCreate,"] - #[doc = " ::cuEventDestroy,"] - #[doc = " ::cuEventSynchronize,"] - #[doc = " ::cuEventQuery,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuIpcGetEventHandle,"] - #[doc = " ::cuIpcGetMemHandle,"] - #[doc = " ::cuIpcOpenMemHandle,"] - #[doc = " ::cuIpcCloseMemHandle,"] - #[doc = " ::cudaIpcOpenEventHandle"] pub fn cuIpcOpenEventHandle( phEvent: *mut CUevent, handle: CUipcEventHandle, ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets an interprocess memory handle for an existing device memory"] - #[doc = " allocation"] - #[doc = ""] - #[doc = " Takes a pointer to the base of an existing device memory allocation created"] - #[doc = " with ::cuMemAlloc and exports it for use in another process. 
This is a"] - #[doc = " lightweight operation and may be called multiple times on an allocation"] - #[doc = " without adverse effects."] - #[doc = ""] - #[doc = " If a region of memory is freed with ::cuMemFree and a subsequent call"] - #[doc = " to ::cuMemAlloc returns memory with the same device address,"] - #[doc = " ::cuIpcGetMemHandle will return a unique handle for the"] - #[doc = " new memory."] - #[doc = ""] - #[doc = " IPC functionality is restricted to devices with support for unified"] - #[doc = " addressing on Linux and Windows operating systems."] - #[doc = " IPC functionality on Windows is restricted to GPUs in TCC mode"] - #[doc = ""] - #[doc = " \\param pHandle - Pointer to user allocated ::CUipcMemHandle to return"] - #[doc = " the handle in."] - #[doc = " \\param dptr - Base pointer to previously allocated device memory"] - #[doc = ""] - #[doc = " \\returns"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY,"] - #[doc = " ::CUDA_ERROR_MAP_FAILED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemAlloc,"] - #[doc = " ::cuMemFree,"] - #[doc = " ::cuIpcGetEventHandle,"] - #[doc = " ::cuIpcOpenEventHandle,"] - #[doc = " ::cuIpcOpenMemHandle,"] - #[doc = " ::cuIpcCloseMemHandle,"] - #[doc = " ::cudaIpcGetMemHandle"] pub fn cuIpcGetMemHandle( pHandle: *mut CUipcMemHandle, dptr: CUdeviceptr, ) -> CUresult; } extern "C" { - #[doc = " \\brief Opens an interprocess memory handle exported from another process"] - #[doc = " and returns a device pointer usable in the local process."] - #[doc = ""] - #[doc = " Maps memory exported from another process with ::cuIpcGetMemHandle into"] - #[doc = " the current device address space. For contexts on different devices"] - #[doc = " ::cuIpcOpenMemHandle can attempt to enable peer access between the"] - #[doc = " devices as if the user called ::cuCtxEnablePeerAccess. This behavior is"] - #[doc = " controlled by the ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS flag."] - #[doc = " ::cuDeviceCanAccessPeer can determine if a mapping is possible."] - #[doc = ""] - #[doc = " ::cuIpcOpenMemHandle can open handles to devices that may not be visible"] - #[doc = " in the process calling the API."] - #[doc = ""] - #[doc = " Contexts that may open ::CUipcMemHandles are restricted in the following way."] - #[doc = " ::CUipcMemHandles from each ::CUdevice in a given process may only be opened"] - #[doc = " by one ::CUcontext per ::CUdevice per other process."] - #[doc = ""] - #[doc = " Memory returned from ::cuIpcOpenMemHandle must be freed with"] - #[doc = " ::cuIpcCloseMemHandle."] - #[doc = ""] - #[doc = " Calling ::cuMemFree on an exported memory region before calling"] - #[doc = " ::cuIpcCloseMemHandle in the importing context will result in undefined"] - #[doc = " behavior."] - #[doc = ""] - #[doc = " IPC functionality is restricted to devices with support for unified"] - #[doc = " addressing on Linux and Windows operating systems."] - #[doc = " IPC functionality on Windows is restricted to GPUs in TCC mode"] - #[doc = ""] - #[doc = " \\param pdptr - Returned device pointer"] - #[doc = " \\param handle - ::CUipcMemHandle to open"] - #[doc = " \\param Flags - Flags for this operation. 
Must be specified as ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS"] - #[doc = ""] - #[doc = " \\returns"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_MAP_FAILED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_TOO_MANY_PEERS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\note No guarantees are made about the address returned in \\p *pdptr."] - #[doc = " In particular, multiple processes may not receive the same address for the same \\p handle."] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemAlloc,"] - #[doc = " ::cuMemFree,"] - #[doc = " ::cuIpcGetEventHandle,"] - #[doc = " ::cuIpcOpenEventHandle,"] - #[doc = " ::cuIpcGetMemHandle,"] - #[doc = " ::cuIpcCloseMemHandle,"] - #[doc = " ::cuCtxEnablePeerAccess,"] - #[doc = " ::cuDeviceCanAccessPeer,"] - #[doc = " ::cudaIpcOpenMemHandle"] pub fn cuIpcOpenMemHandle( pdptr: *mut CUdeviceptr, handle: CUipcMemHandle, @@ -4426,35 +1573,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Close memory mapped with ::cuIpcOpenMemHandle"] - #[doc = ""] - #[doc = " Unmaps memory returnd by ::cuIpcOpenMemHandle. The original allocation"] - #[doc = " in the exporting process as well as imported mappings in other processes"] - #[doc = " will be unaffected."] - #[doc = ""] - #[doc = " Any resources used to enable peer access will be freed if this is the"] - #[doc = " last mapping using them."] - #[doc = ""] - #[doc = " IPC functionality is restricted to devices with support for unified"] - #[doc = " addressing on Linux and Windows operating systems."] - #[doc = " IPC functionality on Windows is restricted to GPUs in TCC mode"] - #[doc = ""] - #[doc = " \\param dptr - Device pointer returned by ::cuIpcOpenMemHandle"] - #[doc = ""] - #[doc = " \\returns"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_MAP_FAILED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\sa"] - #[doc = " ::cuMemAlloc,"] - #[doc = " ::cuMemFree,"] - #[doc = " ::cuIpcGetEventHandle,"] - #[doc = " ::cuIpcOpenEventHandle,"] - #[doc = " ::cuIpcGetMemHandle,"] - #[doc = " ::cuIpcOpenMemHandle,"] - #[doc = " ::cudaIpcCloseMemHandle"] pub fn cuIpcCloseMemHandle(dptr: CUdeviceptr) -> CUresult; } extern "C" { @@ -4465,66 +1583,9 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Unregisters a memory range that was registered with cuMemHostRegister."] - #[doc = ""] - #[doc = " Unmaps the memory range whose base address is specified by \\p p, and makes"] - #[doc = " it pageable again."] - #[doc = ""] - #[doc = " The base address must be the same one specified to ::cuMemHostRegister()."] - #[doc = ""] - #[doc = " \\param p - Host pointer to memory to unregister"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY,"] - #[doc = " ::CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED,"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemHostRegister,"] - #[doc = " ::cudaHostUnregister"] pub fn cuMemHostUnregister(p: *mut ::std::os::raw::c_void) -> CUresult; } extern "C" { - #[doc = " \\brief Copies memory"] - #[doc = ""] - #[doc = " Copies data between two pointers."] - #[doc = " \\p dst and \\p src are base pointers of the destination and 
source, respectively."] - #[doc = " \\p ByteCount specifies the number of bytes to copy."] - #[doc = " Note that this function infers the type of the transfer (host to host, host to"] - #[doc = " device, device to device, or device to host) from the pointer values. This"] - #[doc = " function is only allowed in contexts which support unified addressing."] - #[doc = ""] - #[doc = " \\param dst - Destination unified virtual address space pointer"] - #[doc = " \\param src - Source unified virtual address space pointer"] - #[doc = " \\param ByteCount - Size of memory copy in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,"] - #[doc = " ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,"] - #[doc = " ::cudaMemcpy,"] - #[doc = " ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol"] pub fn cuMemcpy( dst: CUdeviceptr, src: CUdeviceptr, @@ -4532,32 +1593,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Copies device memory between two contexts"] - #[doc = ""] - #[doc = " Copies from device memory in one context to device memory in another"] - #[doc = " context. \\p dstDevice is the base device pointer of the destination memory"] - #[doc = " and \\p dstContext is the destination context. \\p srcDevice is the base"] - #[doc = " device pointer of the source memory and \\p srcContext is the source pointer."] - #[doc = " \\p ByteCount specifies the number of bytes to copy."] - #[doc = ""] - #[doc = " \\param dstDevice - Destination device pointer"] - #[doc = " \\param dstContext - Destination context"] - #[doc = " \\param srcDevice - Source device pointer"] - #[doc = " \\param srcContext - Source context"] - #[doc = " \\param ByteCount - Size of memory copy in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = ""] - #[doc = " \\sa ::cuMemcpyDtoD, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync,"] - #[doc = " ::cuMemcpy3DPeerAsync,"] - #[doc = " ::cudaMemcpyPeer"] pub fn cuMemcpyPeer( dstDevice: CUdeviceptr, dstContext: CUcontext, @@ -4638,69 +1673,9 @@ extern "C" { pub fn cuMemcpy3D_v2(pCopy: *const CUDA_MEMCPY3D) -> CUresult; } extern "C" { - #[doc = " \\brief Copies memory between contexts"] - #[doc = ""] - #[doc = " Perform a 3D memory copy according to the parameters specified in"] - #[doc = " \\p pCopy. 
See the definition of the ::CUDA_MEMCPY3D_PEER structure"] - #[doc = " for documentation of its parameters."] - #[doc = ""] - #[doc = " \\param pCopy - Parameters for the memory copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = ""] - #[doc = " \\sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync,"] - #[doc = " ::cuMemcpy3DPeerAsync,"] - #[doc = " ::cudaMemcpy3DPeer"] pub fn cuMemcpy3DPeer(pCopy: *const CUDA_MEMCPY3D_PEER) -> CUresult; } extern "C" { - #[doc = " \\brief Copies memory asynchronously"] - #[doc = ""] - #[doc = " Copies data between two pointers."] - #[doc = " \\p dst and \\p src are base pointers of the destination and source, respectively."] - #[doc = " \\p ByteCount specifies the number of bytes to copy."] - #[doc = " Note that this function infers the type of the transfer (host to host, host to"] - #[doc = " device, device to device, or device to host) from the pointer values. This"] - #[doc = " function is only allowed in contexts which support unified addressing."] - #[doc = ""] - #[doc = " \\param dst - Destination unified virtual address space pointer"] - #[doc = " \\param src - Source unified virtual address space pointer"] - #[doc = " \\param ByteCount - Size of memory copy in bytes"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,"] - #[doc = " ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,"] - #[doc = " ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,"] - #[doc = " ::cuMemsetD32, ::cuMemsetD32Async,"] - #[doc = " ::cudaMemcpyAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync,"] - #[doc = " ::cudaMemcpyFromSymbolAsync"] pub fn cuMemcpyAsync( dst: CUdeviceptr, src: CUdeviceptr, @@ -4709,35 +1684,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Copies device memory between two contexts asynchronously."] - #[doc = ""] - #[doc = " Copies from device memory in one context to device memory in another"] - #[doc = " context. \\p dstDevice is the base device pointer of the destination memory"] - #[doc = " and \\p dstContext is the destination context. 
\\p srcDevice is the base"] - #[doc = " device pointer of the source memory and \\p srcContext is the source pointer."] - #[doc = " \\p ByteCount specifies the number of bytes to copy."] - #[doc = ""] - #[doc = " \\param dstDevice - Destination device pointer"] - #[doc = " \\param dstContext - Destination context"] - #[doc = " \\param srcDevice - Source device pointer"] - #[doc = " \\param srcContext - Source context"] - #[doc = " \\param ByteCount - Size of memory copy in bytes"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpy3DPeerAsync,"] - #[doc = " ::cudaMemcpyPeerAsync"] pub fn cuMemcpyPeerAsync( dstDevice: CUdeviceptr, dstContext: CUcontext, @@ -4802,28 +1748,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Copies memory between contexts asynchronously."] - #[doc = ""] - #[doc = " Perform a 3D memory copy according to the parameters specified in"] - #[doc = " \\p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure"] - #[doc = " for documentation of its parameters."] - #[doc = ""] - #[doc = " \\param pCopy - Parameters for the memory copy"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync,"] - #[doc = " ::cuMemcpy3DPeerAsync,"] - #[doc = " ::cudaMemcpy3DPeerAsync"] pub fn cuMemcpy3DPeerAsync( pCopy: *const CUDA_MEMCPY3D_PEER, hStream: CUstream, @@ -4878,39 +1802,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets device memory"] - #[doc = ""] - #[doc = " Sets the memory range of \\p N 8-bit values to the specified value"] - #[doc = " \\p uc."] - #[doc = ""] - #[doc = " \\param dstDevice - Destination device pointer"] - #[doc = " \\param uc - Value to set"] - #[doc = " \\param N - Number of elements"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " 
::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,"] - #[doc = " ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,"] - #[doc = " ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD16Async,"] - #[doc = " ::cuMemsetD32, ::cuMemsetD32Async,"] - #[doc = " ::cudaMemsetAsync"] pub fn cuMemsetD8Async( dstDevice: CUdeviceptr, uc: ::std::os::raw::c_uchar, @@ -4919,39 +1810,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets device memory"] - #[doc = ""] - #[doc = " Sets the memory range of \\p N 16-bit values to the specified value"] - #[doc = " \\p us. The \\p dstDevice pointer must be two byte aligned."] - #[doc = ""] - #[doc = " \\param dstDevice - Destination device pointer"] - #[doc = " \\param us - Value to set"] - #[doc = " \\param N - Number of elements"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,"] - #[doc = " ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,"] - #[doc = " ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16,"] - #[doc = " ::cuMemsetD32, ::cuMemsetD32Async,"] - #[doc = " ::cudaMemsetAsync"] pub fn cuMemsetD16Async( dstDevice: CUdeviceptr, us: ::std::os::raw::c_ushort, @@ -4960,38 +1818,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets device memory"] - #[doc = ""] - #[doc = " Sets the memory range of \\p N 32-bit values to the specified value"] - #[doc = " \\p ui. 
The \\p dstDevice pointer must be four byte aligned."] - #[doc = ""] - #[doc = " \\param dstDevice - Destination device pointer"] - #[doc = " \\param ui - Value to set"] - #[doc = " \\param N - Number of elements"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,"] - #[doc = " ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,"] - #[doc = " ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, ::cuMemsetD32,"] - #[doc = " ::cudaMemsetAsync"] pub fn cuMemsetD32Async( dstDevice: CUdeviceptr, ui: ::std::os::raw::c_uint, @@ -5000,44 +1826,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets device memory"] - #[doc = ""] - #[doc = " Sets the 2D memory range of \\p Width 8-bit values to the specified value"] - #[doc = " \\p uc. \\p Height specifies the number of rows to set, and \\p dstPitch"] - #[doc = " specifies the number of bytes between each row. 
This function performs"] - #[doc = " fastest when the pitch is one that has been passed back by"] - #[doc = " ::cuMemAllocPitch()."] - #[doc = ""] - #[doc = " \\param dstDevice - Destination device pointer"] - #[doc = " \\param dstPitch - Pitch of destination device pointer"] - #[doc = " \\param uc - Value to set"] - #[doc = " \\param Width - Width of row"] - #[doc = " \\param Height - Number of rows"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8,"] - #[doc = " ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async,"] - #[doc = " ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,"] - #[doc = " ::cuMemsetD32, ::cuMemsetD32Async,"] - #[doc = " ::cudaMemset2DAsync"] pub fn cuMemsetD2D8Async( dstDevice: CUdeviceptr, dstPitch: usize, @@ -5048,45 +1836,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets device memory"] - #[doc = ""] - #[doc = " Sets the 2D memory range of \\p Width 16-bit values to the specified value"] - #[doc = " \\p us. \\p Height specifies the number of rows to set, and \\p dstPitch"] - #[doc = " specifies the number of bytes between each row. The \\p dstDevice pointer"] - #[doc = " and \\p dstPitch offset must be two byte aligned. 
This function performs"] - #[doc = " fastest when the pitch is one that has been passed back by"] - #[doc = " ::cuMemAllocPitch()."] - #[doc = ""] - #[doc = " \\param dstDevice - Destination device pointer"] - #[doc = " \\param dstPitch - Pitch of destination device pointer"] - #[doc = " \\param us - Value to set"] - #[doc = " \\param Width - Width of row"] - #[doc = " \\param Height - Number of rows"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,"] - #[doc = " ::cuMemsetD2D16, ::cuMemsetD2D32, ::cuMemsetD2D32Async,"] - #[doc = " ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,"] - #[doc = " ::cuMemsetD32, ::cuMemsetD32Async,"] - #[doc = " ::cudaMemset2DAsync"] pub fn cuMemsetD2D16Async( dstDevice: CUdeviceptr, dstPitch: usize, @@ -5097,45 +1846,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets device memory"] - #[doc = ""] - #[doc = " Sets the 2D memory range of \\p Width 32-bit values to the specified value"] - #[doc = " \\p ui. \\p Height specifies the number of rows to set, and \\p dstPitch"] - #[doc = " specifies the number of bytes between each row. The \\p dstDevice pointer"] - #[doc = " and \\p dstPitch offset must be four byte aligned. 
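The pitched variants above take a row pitch in bytes and perform best with a pitch returned by cuMemAllocPitch. A hedged sketch for the 16-bit case, with an illustrative helper name:

/// Set a `width` x `height` region of 16-bit values; `pitch` is the byte distance
/// between the start of consecutive rows. `dst` and `pitch` must be 2-byte aligned.
unsafe fn memset2d_u16_async(
    dst: CUdeviceptr,
    pitch: usize,
    value: u16,
    width: usize,  // row width in 16-bit elements
    height: usize, // number of rows
    stream: CUstream,
) -> CUresult {
    cuMemsetD2D16Async(dst, pitch, value, width, height, stream)
}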
This function performs"] - #[doc = " fastest when the pitch is one that has been passed back by"] - #[doc = " ::cuMemAllocPitch()."] - #[doc = ""] - #[doc = " \\param dstDevice - Destination device pointer"] - #[doc = " \\param dstPitch - Pitch of destination device pointer"] - #[doc = " \\param ui - Value to set"] - #[doc = " \\param Width - Width of row"] - #[doc = " \\param Height - Number of rows"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async,"] - #[doc = " ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32,"] - #[doc = " ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async,"] - #[doc = " ::cuMemsetD32, ::cuMemsetD32Async,"] - #[doc = " ::cudaMemset2DAsync"] pub fn cuMemsetD2D32Async( dstDevice: CUdeviceptr, dstPitch: usize, @@ -5158,33 +1868,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Destroys a CUDA array"] - #[doc = ""] - #[doc = " Destroys the CUDA array \\p hArray."] - #[doc = ""] - #[doc = " \\param hArray - Array to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_ARRAY_IS_MAPPED,"] - #[doc = " ::CUDA_ERROR_CONTEXT_IS_DESTROYED"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate,"] - #[doc = " ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost,"] - #[doc = " ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned,"] - #[doc = " ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD,"] - #[doc = " ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync,"] - #[doc = " ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost,"] - #[doc = " ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16,"] - #[doc = " ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32,"] - #[doc = " ::cudaFreeArray"] pub fn cuArrayDestroy(hArray: CUarray) -> CUresult; } extern "C" { @@ -5200,144 +1883,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates a CUDA mipmapped array"] - #[doc = ""] - #[doc = " Creates a CUDA mipmapped 
array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure"] - #[doc = " \\p pMipmappedArrayDesc and returns a handle to the new CUDA mipmapped array in \\p *pHandle."] - #[doc = " \\p numMipmapLevels specifies the number of mipmap levels to be allocated. This value is"] - #[doc = " clamped to the range [1, 1 + floor(log2(max(width, height, depth)))]."] - #[doc = ""] - #[doc = " The ::CUDA_ARRAY3D_DESCRIPTOR is defined as:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef struct {"] - #[doc = "unsigned int Width;"] - #[doc = "unsigned int Height;"] - #[doc = "unsigned int Depth;"] - #[doc = "CUarray_format Format;"] - #[doc = "unsigned int NumChannels;"] - #[doc = "unsigned int Flags;"] - #[doc = "} CUDA_ARRAY3D_DESCRIPTOR;"] - #[doc = " \\endcode"] - #[doc = " where:"] - #[doc = ""] - #[doc = " - \\p Width, \\p Height, and \\p Depth are the width, height, and depth of the"] - #[doc = " CUDA array (in elements); the following types of CUDA arrays can be allocated:"] - #[doc = " - A 1D mipmapped array is allocated if \\p Height and \\p Depth extents are both zero."] - #[doc = " - A 2D mipmapped array is allocated if only \\p Depth extent is zero."] - #[doc = " - A 3D mipmapped array is allocated if all three extents are non-zero."] - #[doc = " - A 1D layered CUDA mipmapped array is allocated if only \\p Height is zero and the"] - #[doc = " ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number"] - #[doc = " of layers is determined by the depth extent."] - #[doc = " - A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and"] - #[doc = " the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number"] - #[doc = " of layers is determined by the depth extent."] - #[doc = " - A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the"] - #[doc = " ::CUDA_ARRAY3D_CUBEMAP flag is set. \\p Width must be equal to \\p Height, and"] - #[doc = " \\p Depth must be six. A cubemap is a special type of 2D layered CUDA array,"] - #[doc = " where the six layers represent the six faces of a cube. The order of the six"] - #[doc = " layers in memory is the same as that listed in ::CUarray_cubemap_face."] - #[doc = " - A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero,"] - #[doc = " and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set."] - #[doc = " \\p Width must be equal to \\p Height, and \\p Depth must be a multiple of six."] - #[doc = " A cubemap layered CUDA array is a special type of 2D layered CUDA array that"] - #[doc = " consists of a collection of cubemaps. 
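The descriptor rules quoted so far are enough for a small usage sketch before the extent table and the cuMipmappedArrayCreate declaration below. Field names follow this file's bindgen output; the CU_AD_FORMAT_FLOAT constant name and the helper itself are assumptions to verify against the generated bindings:

/// Create a 1024x1024 RGBA32F mipmapped array with up to 8 levels (the driver
/// clamps the level count to the valid range described above) and return it.
unsafe fn make_2d_mipmapped_array() -> CUmipmappedArray {
    let desc = CUDA_ARRAY3D_DESCRIPTOR {
        Width: 1024,
        Height: 1024,
        Depth: 0, // Depth == 0 with a non-zero Height selects the plain 2D case
        Format: CUarray_format_enum_CU_AD_FORMAT_FLOAT, // assumed constant name
        NumChannels: 4, // 1, 2 or 4 packed components per element
        Flags: 0,
    };
    let mut handle: CUmipmappedArray = std::ptr::null_mut();
    let _status = cuMipmappedArrayCreate(&mut handle, &desc, 8);
    // Pair with cuMipmappedArrayGetLevel to fetch a level and with
    // cuMipmappedArrayDestroy(handle) when finished (both declared below).
    handle
}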
The first six layers represent the first"] - #[doc = " cubemap, the next six layers form the second cubemap, and so on."] - #[doc = ""] - #[doc = " - ::Format specifies the format of the elements; ::CUarray_format is"] - #[doc = " defined as:"] - #[doc = " \\code"] - #[doc = "typedef enum CUarray_format_enum {"] - #[doc = "CU_AD_FORMAT_UNSIGNED_INT8 = 0x01,"] - #[doc = "CU_AD_FORMAT_UNSIGNED_INT16 = 0x02,"] - #[doc = "CU_AD_FORMAT_UNSIGNED_INT32 = 0x03,"] - #[doc = "CU_AD_FORMAT_SIGNED_INT8 = 0x08,"] - #[doc = "CU_AD_FORMAT_SIGNED_INT16 = 0x09,"] - #[doc = "CU_AD_FORMAT_SIGNED_INT32 = 0x0a,"] - #[doc = "CU_AD_FORMAT_HALF = 0x10,"] - #[doc = "CU_AD_FORMAT_FLOAT = 0x20"] - #[doc = "} CUarray_format;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " - \\p NumChannels specifies the number of packed components per CUDA array"] - #[doc = " element; it may be 1, 2, or 4;"] - #[doc = ""] - #[doc = " - ::Flags may be set to"] - #[doc = " - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA mipmapped arrays. If this flag is set,"] - #[doc = " \\p Depth specifies the number of layers, not the depth of a 3D array."] - #[doc = " - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to individual mipmap levels of"] - #[doc = " the CUDA mipmapped array. If this flag is not set, ::cuSurfRefSetArray will fail when attempting to"] - #[doc = " bind a mipmap level of the CUDA mipmapped array to a surface reference."] - #[doc = " - ::CUDA_ARRAY3D_CUBEMAP to enable creation of mipmapped cubemaps. If this flag is set, \\p Width must be"] - #[doc = " equal to \\p Height, and \\p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set,"] - #[doc = " then \\p Depth must be a multiple of six."] - #[doc = " - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA mipmapped array will be used for texture gather."] - #[doc = " Texture gather can only be performed on 2D CUDA mipmapped arrays."] - #[doc = ""] - #[doc = " \\p Width, \\p Height and \\p Depth must meet certain size requirements as listed in the following table."] - #[doc = " All values are specified in elements. Note that for brevity's sake, the full name of the device attribute"] - #[doc = " is not specified. For ex., TEXTURE1D_MIPMAPPED_WIDTH refers to the device attribute"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH."] - #[doc = ""] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = "
 CUDA array type | Valid extents that must always be met {(width range in elements), (height range), (depth range)} | Valid extents with CUDA_ARRAY3D_SURFACE_LDST set {(width range in elements), (height range), (depth range)}"]
- #[doc = " 1D              | { (1,TEXTURE1D_MIPMAPPED_WIDTH), 0, 0 } | { (1,SURFACE1D_WIDTH), 0, 0 }"]
- #[doc = " 2D              | { (1,TEXTURE2D_MIPMAPPED_WIDTH), (1,TEXTURE2D_MIPMAPPED_HEIGHT), 0 } | { (1,SURFACE2D_WIDTH), (1,SURFACE2D_HEIGHT), 0 }"]
- #[doc = " 3D              | { (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT), (1,TEXTURE3D_DEPTH) } OR { (1,TEXTURE3D_WIDTH_ALTERNATE), (1,TEXTURE3D_HEIGHT_ALTERNATE), (1,TEXTURE3D_DEPTH_ALTERNATE) } | { (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT), (1,SURFACE3D_DEPTH) }"]
- #[doc = " 1D Layered      | { (1,TEXTURE1D_LAYERED_WIDTH), 0, (1,TEXTURE1D_LAYERED_LAYERS) } | { (1,SURFACE1D_LAYERED_WIDTH), 0, (1,SURFACE1D_LAYERED_LAYERS) }"]
- #[doc = " 2D Layered      | { (1,TEXTURE2D_LAYERED_WIDTH), (1,TEXTURE2D_LAYERED_HEIGHT), (1,TEXTURE2D_LAYERED_LAYERS) } | { (1,SURFACE2D_LAYERED_WIDTH), (1,SURFACE2D_LAYERED_HEIGHT), (1,SURFACE2D_LAYERED_LAYERS) }"]
- #[doc = " Cubemap         | { (1,TEXTURECUBEMAP_WIDTH), (1,TEXTURECUBEMAP_WIDTH), 6 } | { (1,SURFACECUBEMAP_WIDTH), (1,SURFACECUBEMAP_WIDTH), 6 }"]
- #[doc = " Cubemap Layered | { (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_WIDTH), (1,TEXTURECUBEMAP_LAYERED_LAYERS) } | { (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_WIDTH), (1,SURFACECUBEMAP_LAYERED_LAYERS) }
"] - #[doc = ""] - #[doc = ""] - #[doc = " \\param pHandle - Returned mipmapped array"] - #[doc = " \\param pMipmappedArrayDesc - mipmapped array descriptor"] - #[doc = " \\param numMipmapLevels - Number of mipmap levels"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY,"] - #[doc = " ::CUDA_ERROR_UNKNOWN"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMipmappedArrayDestroy,"] - #[doc = " ::cuMipmappedArrayGetLevel,"] - #[doc = " ::cuArrayCreate,"] - #[doc = " ::cudaMallocMipmappedArray"] pub fn cuMipmappedArrayCreate( pHandle: *mut CUmipmappedArray, pMipmappedArrayDesc: *const CUDA_ARRAY3D_DESCRIPTOR, @@ -5345,32 +1890,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets a mipmap level of a CUDA mipmapped array"] - #[doc = ""] - #[doc = " Returns in \\p *pLevelArray a CUDA array that represents a single mipmap level"] - #[doc = " of the CUDA mipmapped array \\p hMipmappedArray."] - #[doc = ""] - #[doc = " If \\p level is greater than the maximum number of levels in this mipmapped array,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE is returned."] - #[doc = ""] - #[doc = " \\param pLevelArray - Returned mipmap level CUDA array"] - #[doc = " \\param hMipmappedArray - CUDA mipmapped array"] - #[doc = " \\param level - Mipmap level"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMipmappedArrayCreate,"] - #[doc = " ::cuMipmappedArrayDestroy,"] - #[doc = " ::cuArrayCreate,"] - #[doc = " ::cudaGetMipmappedArrayLevel"] pub fn cuMipmappedArrayGetLevel( pLevelArray: *mut CUarray, hMipmappedArray: CUmipmappedArray, @@ -5378,182 +1897,11 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Destroys a CUDA mipmapped array"] - #[doc = ""] - #[doc = " Destroys the CUDA mipmapped array \\p hMipmappedArray."] - #[doc = ""] - #[doc = " \\param hMipmappedArray - Mipmapped array to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_ARRAY_IS_MAPPED,"] - #[doc = " ::CUDA_ERROR_CONTEXT_IS_DESTROYED"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMipmappedArrayCreate,"] - #[doc = " ::cuMipmappedArrayGetLevel,"] - #[doc = " ::cuArrayCreate,"] - #[doc = " ::cudaFreeMipmappedArray"] pub fn cuMipmappedArrayDestroy( hMipmappedArray: CUmipmappedArray, ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns information about a pointer"] - #[doc = ""] - #[doc = " The supported attributes are:"] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_CONTEXT:"] - #[doc = ""] - #[doc = " Returns in \\p *data the ::CUcontext in which \\p ptr was allocated or"] - #[doc = " registered."] - #[doc = " The type of \\p data must be ::CUcontext *."] - #[doc = ""] - #[doc = " If \\p ptr was not allocated by, mapped by, or registered with"] - #[doc = " a ::CUcontext which uses unified virtual 
addressing then"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE is returned."] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE:"] - #[doc = ""] - #[doc = " Returns in \\p *data the physical memory type of the memory that"] - #[doc = " \\p ptr addresses as a ::CUmemorytype enumerated value."] - #[doc = " The type of \\p data must be unsigned int."] - #[doc = ""] - #[doc = " If \\p ptr addresses device memory then \\p *data is set to"] - #[doc = " ::CU_MEMORYTYPE_DEVICE. The particular ::CUdevice on which the"] - #[doc = " memory resides is the ::CUdevice of the ::CUcontext returned by the"] - #[doc = " ::CU_POINTER_ATTRIBUTE_CONTEXT attribute of \\p ptr."] - #[doc = ""] - #[doc = " If \\p ptr addresses host memory then \\p *data is set to"] - #[doc = " ::CU_MEMORYTYPE_HOST."] - #[doc = ""] - #[doc = " If \\p ptr was not allocated by, mapped by, or registered with"] - #[doc = " a ::CUcontext which uses unified virtual addressing then"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE is returned."] - #[doc = ""] - #[doc = " If the current ::CUcontext does not support unified virtual"] - #[doc = " addressing then ::CUDA_ERROR_INVALID_CONTEXT is returned."] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER:"] - #[doc = ""] - #[doc = " Returns in \\p *data the device pointer value through which"] - #[doc = " \\p ptr may be accessed by kernels running in the current"] - #[doc = " ::CUcontext."] - #[doc = " The type of \\p data must be CUdeviceptr *."] - #[doc = ""] - #[doc = " If there exists no device pointer value through which"] - #[doc = " kernels running in the current ::CUcontext may access"] - #[doc = " \\p ptr then ::CUDA_ERROR_INVALID_VALUE is returned."] - #[doc = ""] - #[doc = " If there is no current ::CUcontext then"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT is returned."] - #[doc = ""] - #[doc = " Except in the exceptional disjoint addressing cases discussed"] - #[doc = " below, the value returned in \\p *data will equal the input"] - #[doc = " value \\p ptr."] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_HOST_POINTER:"] - #[doc = ""] - #[doc = " Returns in \\p *data the host pointer value through which"] - #[doc = " \\p ptr may be accessed by by the host program."] - #[doc = " The type of \\p data must be void **."] - #[doc = " If there exists no host pointer value through which"] - #[doc = " the host program may directly access \\p ptr then"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE is returned."] - #[doc = ""] - #[doc = " Except in the exceptional disjoint addressing cases discussed"] - #[doc = " below, the value returned in \\p *data will equal the input"] - #[doc = " value \\p ptr."] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_P2P_TOKENS:"] - #[doc = ""] - #[doc = " Returns in \\p *data two tokens for use with the nv-p2p.h Linux"] - #[doc = " kernel interface. \\p data must be a struct of type"] - #[doc = " CUDA_POINTER_ATTRIBUTE_P2P_TOKENS."] - #[doc = ""] - #[doc = " \\p ptr must be a pointer to memory obtained from :cuMemAlloc()."] - #[doc = " Note that p2pToken and vaSpaceToken are only valid for the"] - #[doc = " lifetime of the source allocation. 
A subsequent allocation at"] - #[doc = " the same address may return completely different tokens."] - #[doc = " Querying this attribute has a side effect of setting the attribute"] - #[doc = " ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS for the region of memory that"] - #[doc = " \\p ptr points to."] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS:"] - #[doc = ""] - #[doc = " A boolean attribute which when set, ensures that synchronous memory operations"] - #[doc = " initiated on the region of memory that \\p ptr points to will always synchronize."] - #[doc = " See further documentation in the section titled \"API synchronization behavior\""] - #[doc = " to learn more about cases when synchronous memory operations can"] - #[doc = " exhibit asynchronous behavior."] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_BUFFER_ID:"] - #[doc = ""] - #[doc = " Returns in \\p *data a buffer ID which is guaranteed to be unique within the process."] - #[doc = " \\p data must point to an unsigned long long."] - #[doc = ""] - #[doc = " \\p ptr must be a pointer to memory obtained from a CUDA memory allocation API."] - #[doc = " Every memory allocation from any of the CUDA memory allocation APIs will"] - #[doc = " have a unique ID over a process lifetime. Subsequent allocations do not reuse IDs"] - #[doc = " from previous freed allocations. IDs are only unique within a single process."] - #[doc = ""] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_IS_MANAGED:"] - #[doc = ""] - #[doc = " Returns in \\p *data a boolean that indicates whether the pointer points to"] - #[doc = " managed memory or not."] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL:"] - #[doc = ""] - #[doc = " Returns in \\p *data an integer representing a device ordinal of a device against"] - #[doc = " which the memory was allocated or registered."] - #[doc = ""] - #[doc = " \\par"] - #[doc = ""] - #[doc = " Note that for most allocations in the unified virtual address space"] - #[doc = " the host and device pointer for accessing the allocation will be the"] - #[doc = " same. The exceptions to this are"] - #[doc = " - user memory registered using ::cuMemHostRegister"] - #[doc = " - host memory allocated using ::cuMemHostAlloc with the"] - #[doc = " ::CU_MEMHOSTALLOC_WRITECOMBINED flag"] - #[doc = " For these types of allocation there will exist separate, disjoint host"] - #[doc = " and device addresses for accessing the allocation. 
In particular"] - #[doc = " - The host address will correspond to an invalid unmapped device address"] - #[doc = " (which will result in an exception if accessed from the device)"] - #[doc = " - The device address will correspond to an invalid unmapped host address"] - #[doc = " (which will result in an exception if accessed from the host)."] - #[doc = " For these types of allocations, querying ::CU_POINTER_ATTRIBUTE_HOST_POINTER"] - #[doc = " and ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER may be used to retrieve the host"] - #[doc = " and device addresses from either address."] - #[doc = ""] - #[doc = " \\param data - Returned pointer attribute value"] - #[doc = " \\param attribute - Pointer attribute to query"] - #[doc = " \\param ptr - Pointer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuPointerSetAttribute,"] - #[doc = " ::cuMemAlloc,"] - #[doc = " ::cuMemFree,"] - #[doc = " ::cuMemAllocHost,"] - #[doc = " ::cuMemFreeHost,"] - #[doc = " ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostRegister,"] - #[doc = " ::cuMemHostUnregister,"] - #[doc = " ::cudaPointerGetAttributes"] pub fn cuPointerGetAttribute( data: *mut ::std::os::raw::c_void, attribute: CUpointer_attribute, @@ -5561,70 +1909,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Prefetches memory to the specified destination device"] - #[doc = ""] - #[doc = " Prefetches memory to the specified destination device. \\p devPtr is the"] - #[doc = " base device pointer of the memory to be prefetched and \\p dstDevice is the"] - #[doc = " destination device. \\p count specifies the number of bytes to copy. \\p hStream"] - #[doc = " is the stream in which the operation is enqueued. The memory range must refer"] - #[doc = " to managed memory allocated via ::cuMemAllocManaged or declared via __managed__ variables."] - #[doc = ""] - #[doc = " Passing in CU_DEVICE_CPU for \\p dstDevice will prefetch the data to host memory. If"] - #[doc = " \\p dstDevice is a GPU, then the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS"] - #[doc = " must be non-zero. Additionally, \\p hStream must be associated with a device that has a"] - #[doc = " non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS."] - #[doc = ""] - #[doc = " The start address and end address of the memory range will be rounded down and rounded up"] - #[doc = " respectively to be aligned to CPU page size before the prefetch operation is enqueued"] - #[doc = " in the stream."] - #[doc = ""] - #[doc = " If no physical memory has been allocated for this region, then this memory region"] - #[doc = " will be populated and mapped on the destination device. If there's insufficient"] - #[doc = " memory to prefetch the desired region, the Unified Memory driver may evict pages from other"] - #[doc = " ::cuMemAllocManaged allocations to host memory in order to make room. Device memory"] - #[doc = " allocated using ::cuMemAlloc or ::cuArrayCreate will not be evicted."] - #[doc = ""] - #[doc = " By default, any mappings to the previous location of the migrated pages are removed and"] - #[doc = " mappings for the new location are only setup on \\p dstDevice. 
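With the cuPointerGetAttribute declaration now in view, a hedged sketch of the memory-type query described above; the enum constant name follows this file's bindgen naming convention and should be double-checked:

/// Return the CUmemorytype of `ptr` as an unsigned int
/// (CU_MEMORYTYPE_DEVICE or CU_MEMORYTYPE_HOST on success).
unsafe fn memory_type_of(ptr: CUdeviceptr) -> ::std::os::raw::c_uint {
    let mut mem_type: ::std::os::raw::c_uint = 0;
    let _status = cuPointerGetAttribute(
        &mut mem_type as *mut _ as *mut ::std::os::raw::c_void,
        CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_MEMORY_TYPE, // assumed constant name
        ptr,
    );
    mem_type
}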
The exact behavior however"] - #[doc = " also depends on the settings applied to this memory range via ::cuMemAdvise as described"] - #[doc = " below:"] - #[doc = ""] - #[doc = " If ::CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range,"] - #[doc = " then that subset will create a read-only copy of the pages on \\p dstDevice."] - #[doc = ""] - #[doc = " If ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory"] - #[doc = " range, then the pages will be migrated to \\p dstDevice even if \\p dstDevice is not the"] - #[doc = " preferred location of any pages in the memory range."] - #[doc = ""] - #[doc = " If ::CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range,"] - #[doc = " then mappings to those pages from all the appropriate processors are updated to"] - #[doc = " refer to the new location if establishing such a mapping is possible. Otherwise,"] - #[doc = " those mappings are cleared."] - #[doc = ""] - #[doc = " Note that this API is not required for functionality and only serves to improve performance"] - #[doc = " by allowing the application to migrate data to a suitable location before it is accessed."] - #[doc = " Memory accesses to this range are always coherent and are allowed even when the data is"] - #[doc = " actively being migrated."] - #[doc = ""] - #[doc = " Note that this function is asynchronous with respect to the host and all work"] - #[doc = " on other devices."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to be prefetched"] - #[doc = " \\param count - Size in bytes"] - #[doc = " \\param dstDevice - Destination device to prefetch to"] - #[doc = " \\param hStream - Stream to enqueue prefetch operation"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync,"] - #[doc = " ::cuMemcpy3DPeerAsync, ::cuMemAdvise,"] - #[doc = " ::cudaMemPrefetchAsync"] pub fn cuMemPrefetchAsync( devPtr: CUdeviceptr, count: usize, @@ -5633,116 +1917,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Advise about the usage of a given memory range"] - #[doc = ""] - #[doc = " Advise the Unified Memory subsystem about the usage pattern for the memory range"] - #[doc = " starting at \\p devPtr with a size of \\p count bytes. The start address and end address of the memory"] - #[doc = " range will be rounded down and rounded up respectively to be aligned to CPU page size before the"] - #[doc = " advice is applied. The memory range must refer to managed memory allocated via ::cuMemAllocManaged"] - #[doc = " or declared via __managed__ variables. The memory range could also refer to system-allocated pageable"] - #[doc = " memory provided it represents a valid, host-accessible region of memory and all additional constraints"] - #[doc = " imposed by \\p advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable"] - #[doc = " memory range results in an error being returned."] - #[doc = ""] - #[doc = " The \\p advice parameter can take the following values:"] - #[doc = " - ::CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data is mostly going to be read"] - #[doc = " from and only occasionally written to. 
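A small sketch of the prefetch call whose declaration appears in the hunk above; the helper is illustrative and assumes `managed` came from cuMemAllocManaged:

/// Enqueue a prefetch of `bytes` of managed memory to `device` on `stream`.
/// Pass CU_DEVICE_CPU instead of a GPU ordinal to bring the pages back to host memory.
unsafe fn prefetch_managed(managed: CUdeviceptr, bytes: usize, device: CUdevice, stream: CUstream) -> CUresult {
    cuMemPrefetchAsync(managed, bytes, device, stream)
}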
Any read accesses from any processor to this region will create a"] - #[doc = " read-only copy of at least the accessed pages in that processor's memory. Additionally, if ::cuMemPrefetchAsync"] - #[doc = " is called on this region, it will create a read-only copy of the data on the destination processor."] - #[doc = " If any processor writes to this region, all copies of the corresponding page will be invalidated"] - #[doc = " except for the one where the write occurred. The \\p device argument is ignored for this advice."] - #[doc = " Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU"] - #[doc = " that has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS."] - #[doc = " Also, if a context is created on a device that does not have the device attribute"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS set, then read-duplication will not occur until"] - #[doc = " all such contexts are destroyed."] - #[doc = " If the memory region refers to valid system-allocated pageable memory, then the accessing device must"] - #[doc = " have a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read-only"] - #[doc = " copy to be created on that device. Note however that if the accessing device also has a non-zero value for the"] - #[doc = " device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then setting this advice"] - #[doc = " will not create a read-only copy when that device accesses this memory region."] - #[doc = ""] - #[doc = " - ::CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the"] - #[doc = " Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated"] - #[doc = " copies of the data will be collapsed into a single copy. The location for the collapsed"] - #[doc = " copy will be the preferred location if the page has a preferred location and one of the read-duplicated"] - #[doc = " copies was resident at that location. Otherwise, the location chosen is arbitrary."] - #[doc = ""] - #[doc = " - ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets the preferred location for the"] - #[doc = " data to be the memory belonging to \\p device. Passing in CU_DEVICE_CPU for \\p device sets the"] - #[doc = " preferred location as host memory. If \\p device is a GPU, then it must have a non-zero value for the"] - #[doc = " device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Setting the preferred location"] - #[doc = " does not cause data to migrate to that location immediately. Instead, it guides the migration policy"] - #[doc = " when a fault occurs on that memory region. If the data is already in its preferred location and the"] - #[doc = " faulting processor can establish a mapping without requiring the data to be migrated, then"] - #[doc = " data migration will be avoided. On the other hand, if the data is not in its preferred location"] - #[doc = " or if a direct mapping cannot be established, then it will be migrated to the processor accessing"] - #[doc = " it. It is important to note that setting the preferred location does not prevent data prefetching"] - #[doc = " done using ::cuMemPrefetchAsync."] - #[doc = " Having a preferred location can override the page thrash detection and resolution logic in the Unified"] - #[doc = " Memory driver. 
Normally, if a page is detected to be constantly thrashing between for example host and device"] - #[doc = " memory, the page may eventually be pinned to host memory by the Unified Memory driver. But"] - #[doc = " if the preferred location is set as device memory, then the page will continue to thrash indefinitely."] - #[doc = " If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the"] - #[doc = " policies associated with that advice will override the policies of this advice, unless read accesses from"] - #[doc = " \\p device will not result in a read-only copy being created on that device as outlined in description for"] - #[doc = " the advice ::CU_MEM_ADVISE_SET_READ_MOSTLY."] - #[doc = " If the memory region refers to valid system-allocated pageable memory, then \\p device must have a non-zero"] - #[doc = " value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \\p device has"] - #[doc = " a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES,"] - #[doc = " then this call has no effect. Note however that this behavior may change in the future."] - #[doc = ""] - #[doc = " - ::CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect of ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION"] - #[doc = " and changes the preferred location to none."] - #[doc = ""] - #[doc = " - ::CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that the data will be accessed by \\p device."] - #[doc = " Passing in ::CU_DEVICE_CPU for \\p device will set the advice for the CPU. If \\p device is a GPU, then"] - #[doc = " the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero."] - #[doc = " This advice does not cause data migration and has no impact on the location of the data per se. Instead,"] - #[doc = " it causes the data to always be mapped in the specified processor's page tables, as long as the"] - #[doc = " location of the data permits a mapping to be established. If the data gets migrated for any reason,"] - #[doc = " the mappings are updated accordingly."] - #[doc = " This advice is recommended in scenarios where data locality is not important, but avoiding faults is."] - #[doc = " Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the"] - #[doc = " data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data"] - #[doc = " over to the other GPUs is not as important because the accesses are infrequent and the overhead of"] - #[doc = " migration may be too high. But preventing faults can still help improve performance, and so having"] - #[doc = " a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated"] - #[doc = " to host memory because the CPU typically cannot access device memory directly. Any GPU that had the"] - #[doc = " ::CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will now have its mapping updated to point to the"] - #[doc = " page in host memory."] - #[doc = " If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the"] - #[doc = " policies associated with that advice will override the policies of this advice. 
Additionally, if the"] - #[doc = " preferred location of this memory region or any subset of it is also \\p device, then the policies"] - #[doc = " associated with ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the policies of this advice."] - #[doc = " If the memory region refers to valid system-allocated pageable memory, then \\p device must have a non-zero"] - #[doc = " value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \\p device has"] - #[doc = " a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES,"] - #[doc = " then this call has no effect."] - #[doc = ""] - #[doc = " - ::CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of ::CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to"] - #[doc = " the data from \\p device may be removed at any time causing accesses to result in non-fatal page faults."] - #[doc = " If the memory region refers to valid system-allocated pageable memory, then \\p device must have a non-zero"] - #[doc = " value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \\p device has"] - #[doc = " a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES,"] - #[doc = " then this call has no effect."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to memory to set the advice for"] - #[doc = " \\param count - Size in bytes of the memory range"] - #[doc = " \\param advice - Advice to be applied for the specified memory range"] - #[doc = " \\param device - Device to apply the advice for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync,"] - #[doc = " ::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync,"] - #[doc = " ::cudaMemAdvise"] pub fn cuMemAdvise( devPtr: CUdeviceptr, count: usize, @@ -5751,60 +1925,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Query an attribute of a given memory range"] - #[doc = ""] - #[doc = " Query an attribute about the memory range starting at \\p devPtr with a size of \\p count bytes. The"] - #[doc = " memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via"] - #[doc = " __managed__ variables."] - #[doc = ""] - #[doc = " The \\p attribute parameter can take the following values:"] - #[doc = " - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: If this attribute is specified, \\p data will be interpreted"] - #[doc = " as a 32-bit integer, and \\p dataSize must be 4. The result returned will be 1 if all pages in the given"] - #[doc = " memory range have read-duplication enabled, or 0 otherwise."] - #[doc = " - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: If this attribute is specified, \\p data will be"] - #[doc = " interpreted as a 32-bit integer, and \\p dataSize must be 4. The result returned will be a GPU device"] - #[doc = " id if all pages in the memory range have that GPU as their preferred location, or it will be CU_DEVICE_CPU"] - #[doc = " if all pages in the memory range have the CPU as their preferred location, or it will be CU_DEVICE_INVALID"] - #[doc = " if either all the pages don't have the same preferred location or some of the pages don't have a"] - #[doc = " preferred location at all. 
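A hedged sketch of applying the read-mostly advice described above via the cuMemAdvise binding kept by this hunk; the advice constant name is assumed from the bindgen naming convention:

/// Mark a managed range as read-mostly so that readers get local read-only copies;
/// a write from any processor invalidates every copy except the writer's.
unsafe fn mark_read_mostly(managed: CUdeviceptr, bytes: usize) -> CUresult {
    // The device argument is ignored for this advice; 0 is only a placeholder ordinal.
    cuMemAdvise(managed, bytes, CUmem_advise_enum_CU_MEM_ADVISE_SET_READ_MOSTLY, 0)
}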
Note that the actual location of the pages in the memory range at the time of"] - #[doc = " the query may be different from the preferred location."] - #[doc = " - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: If this attribute is specified, \\p data will be interpreted"] - #[doc = " as an array of 32-bit integers, and \\p dataSize must be a non-zero multiple of 4. The result returned"] - #[doc = " will be a list of device ids that had ::CU_MEM_ADVISE_SET_ACCESSED_BY set for that entire memory range."] - #[doc = " If any device does not have that advice set for the entire memory range, that device will not be included."] - #[doc = " If \\p data is larger than the number of devices that have that advice set for that memory range,"] - #[doc = " CU_DEVICE_INVALID will be returned in all the extra space provided. For ex., if \\p dataSize is 12"] - #[doc = " (i.e. \\p data has 3 elements) and only device 0 has the advice set, then the result returned will be"] - #[doc = " { 0, CU_DEVICE_INVALID, CU_DEVICE_INVALID }. If \\p data is smaller than the number of devices that have"] - #[doc = " that advice set, then only as many devices will be returned as can fit in the array. There is no"] - #[doc = " guarantee on which specific devices will be returned, however."] - #[doc = " - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: If this attribute is specified, \\p data will be"] - #[doc = " interpreted as a 32-bit integer, and \\p dataSize must be 4. The result returned will be the last location"] - #[doc = " to which all pages in the memory range were prefetched explicitly via ::cuMemPrefetchAsync. This will either be"] - #[doc = " a GPU id or CU_DEVICE_CPU depending on whether the last location for prefetch was a GPU or the CPU"] - #[doc = " respectively. If any page in the memory range was never explicitly prefetched or if all pages were not"] - #[doc = " prefetched to the same location, CU_DEVICE_INVALID will be returned. Note that this simply returns the"] - #[doc = " last location that the applicaton requested to prefetch the memory range to. It gives no indication as to"] - #[doc = " whether the prefetch operation to that location has completed or even begun."] - #[doc = ""] - #[doc = " \\param data - A pointers to a memory location where the result"] - #[doc = " of each attribute query will be written to."] - #[doc = " \\param dataSize - Array containing the size of data"] - #[doc = " \\param attribute - The attribute to query"] - #[doc = " \\param devPtr - Start of the range to query"] - #[doc = " \\param count - Size of the range to query"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = ""] - #[doc = " \\sa ::cuMemRangeGetAttributes, ::cuMemPrefetchAsync,"] - #[doc = " ::cuMemAdvise,"] - #[doc = " ::cudaMemRangeGetAttribute"] pub fn cuMemRangeGetAttribute( data: *mut ::std::os::raw::c_void, dataSize: usize, @@ -5814,42 +1934,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Query attributes of a given memory range."] - #[doc = ""] - #[doc = " Query attributes of the memory range starting at \\p devPtr with a size of \\p count bytes. The"] - #[doc = " memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via"] - #[doc = " __managed__ variables. The \\p attributes array will be interpreted to have \\p numAttributes"] - #[doc = " entries. 
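The matching query, sketched against the cuMemRangeGetAttribute declaration above; the attribute constant name is an assumption in the same bindgen style:

/// True if every page in the range currently has read-duplication enabled.
unsafe fn is_read_mostly(managed: CUdeviceptr, bytes: usize) -> bool {
    let mut flag: i32 = 0; // READ_MOSTLY is reported as a 32-bit integer, so dataSize must be 4
    let _status = cuMemRangeGetAttribute(
        &mut flag as *mut _ as *mut ::std::os::raw::c_void,
        std::mem::size_of::<i32>(),
        CUmem_range_attribute_enum_CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY, // assumed constant name
        managed,
        bytes,
    );
    flag == 1
}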
The \\p dataSizes array will also be interpreted to have \\p numAttributes entries."] - #[doc = " The results of the query will be stored in \\p data."] - #[doc = ""] - #[doc = " The list of supported attributes are given below. Please refer to ::cuMemRangeGetAttribute for"] - #[doc = " attribute descriptions and restrictions."] - #[doc = ""] - #[doc = " - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY"] - #[doc = " - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION"] - #[doc = " - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY"] - #[doc = " - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION"] - #[doc = ""] - #[doc = " \\param data - A two-dimensional array containing pointers to memory"] - #[doc = " locations where the result of each attribute query will be written to."] - #[doc = " \\param dataSizes - Array containing the sizes of each result"] - #[doc = " \\param attributes - An array of attributes to query"] - #[doc = " (numAttributes and the number of attributes in this array should match)"] - #[doc = " \\param numAttributes - Number of attributes to query"] - #[doc = " \\param devPtr - Start of the range to query"] - #[doc = " \\param count - Size of the range to query"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuMemRangeGetAttribute, ::cuMemAdvise"] - #[doc = " ::cuMemPrefetchAsync,"] - #[doc = " ::cudaMemRangeGetAttributes"] pub fn cuMemRangeGetAttributes( data: *mut *mut ::std::os::raw::c_void, dataSizes: *mut usize, @@ -5860,44 +1944,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Set attributes on a previously allocated memory region"] - #[doc = ""] - #[doc = " The supported attributes are:"] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS:"] - #[doc = ""] - #[doc = " A boolean attribute that can either be set (1) or unset (0). When set,"] - #[doc = " the region of memory that \\p ptr points to is guaranteed to always synchronize"] - #[doc = " memory operations that are synchronous. 
If there are some previously initiated"] - #[doc = " synchronous memory operations that are pending when this attribute is set, the"] - #[doc = " function does not return until those memory operations are complete."] - #[doc = " See further documentation in the section titled \"API synchronization behavior\""] - #[doc = " to learn more about cases when synchronous memory operations can"] - #[doc = " exhibit asynchronous behavior."] - #[doc = " \\p value will be considered as a pointer to an unsigned integer to which this attribute is to be set."] - #[doc = ""] - #[doc = " \\param value - Pointer to memory containing the value to be set"] - #[doc = " \\param attribute - Pointer attribute to set"] - #[doc = " \\param ptr - Pointer to a memory region allocated using CUDA memory allocation APIs"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuPointerGetAttribute,"] - #[doc = " ::cuPointerGetAttributes,"] - #[doc = " ::cuMemAlloc,"] - #[doc = " ::cuMemFree,"] - #[doc = " ::cuMemAllocHost,"] - #[doc = " ::cuMemFreeHost,"] - #[doc = " ::cuMemHostAlloc,"] - #[doc = " ::cuMemHostRegister,"] - #[doc = " ::cuMemHostUnregister"] pub fn cuPointerSetAttribute( value: *const ::std::os::raw::c_void, attribute: CUpointer_attribute, @@ -5905,45 +1951,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns information about a pointer."] - #[doc = ""] - #[doc = " The supported attributes are (refer to ::cuPointerGetAttribute for attribute descriptions and restrictions):"] - #[doc = ""] - #[doc = " - ::CU_POINTER_ATTRIBUTE_CONTEXT"] - #[doc = " - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE"] - #[doc = " - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER"] - #[doc = " - ::CU_POINTER_ATTRIBUTE_HOST_POINTER"] - #[doc = " - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS"] - #[doc = " - ::CU_POINTER_ATTRIBUTE_BUFFER_ID"] - #[doc = " - ::CU_POINTER_ATTRIBUTE_IS_MANAGED"] - #[doc = " - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL"] - #[doc = ""] - #[doc = " \\param numAttributes - Number of attributes to query"] - #[doc = " \\param attributes - An array of attributes to query"] - #[doc = " (numAttributes and the number of attributes in this array should match)"] - #[doc = " \\param data - A two-dimensional array containing pointers to memory"] - #[doc = " locations where the result of each attribute query will be written to."] - #[doc = " \\param ptr - Pointer to query"] - #[doc = ""] - #[doc = " Unlike ::cuPointerGetAttribute, this function will not return an error when the \\p ptr"] - #[doc = " encountered is not a valid CUDA pointer. 
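A sketch of the one writable pointer attribute, using the cuPointerSetAttribute declaration kept by the hunk above; the constant name is assumed:

/// Force synchronous memory operations on the allocation behind `ptr` to really
/// synchronize, as the removed SYNC_MEMOPS description explains.
unsafe fn force_sync_memops(ptr: CUdeviceptr) -> CUresult {
    let value: ::std::os::raw::c_uint = 1;
    cuPointerSetAttribute(
        &value as *const _ as *const ::std::os::raw::c_void,
        CUpointer_attribute_enum_CU_POINTER_ATTRIBUTE_SYNC_MEMOPS, // assumed constant name
        ptr,
    )
}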
Instead, the attributes are assigned default NULL values"] - #[doc = " and CUDA_SUCCESS is returned."] - #[doc = ""] - #[doc = " If \\p ptr was not allocated by, mapped by, or registered with a ::CUcontext which uses UVA"] - #[doc = " (Unified Virtual Addressing), ::CUDA_ERROR_INVALID_CONTEXT is returned."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuPointerGetAttribute,"] - #[doc = " ::cuPointerSetAttribute,"] - #[doc = " ::cudaPointerGetAttributes"] pub fn cuPointerGetAttributes( numAttributes: ::std::os::raw::c_uint, attributes: *mut CUpointer_attribute, @@ -5952,88 +1959,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Create a stream"] - #[doc = ""] - #[doc = " Creates a stream and returns a handle in \\p phStream. The \\p Flags argument"] - #[doc = " determines behaviors of the stream. Valid values for \\p Flags are:"] - #[doc = " - ::CU_STREAM_DEFAULT: Default stream creation flag."] - #[doc = " - ::CU_STREAM_NON_BLOCKING: Specifies that work running in the created"] - #[doc = " stream may run concurrently with work in stream 0 (the NULL stream), and that"] - #[doc = " the created stream should perform no implicit synchronization with stream 0."] - #[doc = ""] - #[doc = " \\param phStream - Returned newly created stream"] - #[doc = " \\param Flags - Parameters for stream creation"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamDestroy,"] - #[doc = " ::cuStreamCreateWithPriority,"] - #[doc = " ::cuStreamGetPriority,"] - #[doc = " ::cuStreamGetFlags,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuStreamQuery,"] - #[doc = " ::cuStreamSynchronize,"] - #[doc = " ::cuStreamAddCallback,"] - #[doc = " ::cudaStreamCreate,"] - #[doc = " ::cudaStreamCreateWithFlags"] pub fn cuStreamCreate( phStream: *mut CUstream, Flags: ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Create a stream with the given priority"] - #[doc = ""] - #[doc = " Creates a stream with the specified priority and returns a handle in \\p phStream."] - #[doc = " This API alters the scheduler priority of work in the stream. Work in a higher"] - #[doc = " priority stream may preempt work already executing in a low priority stream."] - #[doc = ""] - #[doc = " \\p priority follows a convention where lower numbers represent higher priorities."] - #[doc = " '0' represents default priority. The range of meaningful numerical priorities can"] - #[doc = " be queried using ::cuCtxGetStreamPriorityRange. If the specified priority is"] - #[doc = " outside the numerical range returned by ::cuCtxGetStreamPriorityRange,"] - #[doc = " it will automatically be clamped to the lowest or the highest number in the range."] - #[doc = ""] - #[doc = " \\param phStream - Returned newly created stream"] - #[doc = " \\param flags - Flags for stream creation. See ::cuStreamCreate for a list of"] - #[doc = " valid flags"] - #[doc = " \\param priority - Stream priority. 
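Before the priority variant continues below, a minimal sketch of plain stream creation using the non-blocking flag defined earlier in this wrapper; the helper name is illustrative:

/// Create a stream that does not implicitly synchronize with the NULL stream.
unsafe fn create_non_blocking_stream() -> CUstream {
    let mut stream: CUstream = std::ptr::null_mut();
    let _status = cuStreamCreate(&mut stream, CUstream_flags_enum_CU_STREAM_NON_BLOCKING);
    stream
}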
Lower numbers represent higher priorities."] - #[doc = " See ::cuCtxGetStreamPriorityRange for more information about"] - #[doc = " meaningful stream priorities that can be passed."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\note Stream priorities are supported only on GPUs"] - #[doc = " with compute capability 3.5 or higher."] - #[doc = ""] - #[doc = " \\note In the current implementation, only compute kernels launched in"] - #[doc = " priority streams are affected by the stream's priority. Stream priorities have"] - #[doc = " no effect on host-to-device and device-to-host memory operations."] - #[doc = ""] - #[doc = " \\sa ::cuStreamDestroy,"] - #[doc = " ::cuStreamCreate,"] - #[doc = " ::cuStreamGetPriority,"] - #[doc = " ::cuCtxGetStreamPriorityRange,"] - #[doc = " ::cuStreamGetFlags,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuStreamQuery,"] - #[doc = " ::cuStreamSynchronize,"] - #[doc = " ::cuStreamAddCallback,"] - #[doc = " ::cudaStreamCreateWithPriority"] pub fn cuStreamCreateWithPriority( phStream: *mut CUstream, flags: ::std::os::raw::c_uint, @@ -6041,138 +1972,21 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Query the priority of a given stream"] - #[doc = ""] - #[doc = " Query the priority of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority"] - #[doc = " and return the priority in \\p priority. Note that if the stream was created with a"] - #[doc = " priority outside the numerical range returned by ::cuCtxGetStreamPriorityRange,"] - #[doc = " this function returns the clamped priority."] - #[doc = " See ::cuStreamCreateWithPriority for details about priority clamping."] - #[doc = ""] - #[doc = " \\param hStream - Handle to the stream to be queried"] - #[doc = " \\param priority - Pointer to a signed integer in which the stream's priority is returned"] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamDestroy,"] - #[doc = " ::cuStreamCreate,"] - #[doc = " ::cuStreamCreateWithPriority,"] - #[doc = " ::cuCtxGetStreamPriorityRange,"] - #[doc = " ::cuStreamGetFlags,"] - #[doc = " ::cudaStreamGetPriority"] pub fn cuStreamGetPriority( hStream: CUstream, priority: *mut ::std::os::raw::c_int, ) -> CUresult; } extern "C" { - #[doc = " \\brief Query the flags of a given stream"] - #[doc = ""] - #[doc = " Query the flags of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority"] - #[doc = " and return the flags in \\p flags."] - #[doc = ""] - #[doc = " \\param hStream - Handle to the stream to be queried"] - #[doc = " \\param flags - Pointer to an unsigned integer in which the stream's flags are returned"] - #[doc = " The value returned in \\p flags is a logical 'OR' of all flags that"] - #[doc = " were used while creating this stream. 
See ::cuStreamCreate for the list"] - #[doc = " of valid flags"] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamDestroy,"] - #[doc = " ::cuStreamCreate,"] - #[doc = " ::cuStreamGetPriority,"] - #[doc = " ::cudaStreamGetFlags"] pub fn cuStreamGetFlags( hStream: CUstream, flags: *mut ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Query the context associated with a stream"] - #[doc = ""] - #[doc = " Returns the CUDA context that the stream is associated with."] - #[doc = ""] - #[doc = " The stream handle \\p hStream can refer to any of the following:"] - #[doc = " "] - #[doc = ""] - #[doc = " \\param hStream - Handle to the stream to be queried"] - #[doc = " \\param pctx - Returned context associated with the stream"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamDestroy,"] - #[doc = " ::cuStreamCreateWithPriority,"] - #[doc = " ::cuStreamGetPriority,"] - #[doc = " ::cuStreamGetFlags,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuStreamQuery,"] - #[doc = " ::cuStreamSynchronize,"] - #[doc = " ::cuStreamAddCallback,"] - #[doc = " ::cudaStreamCreate,"] - #[doc = " ::cudaStreamCreateWithFlags"] pub fn cuStreamGetCtx(hStream: CUstream, pctx: *mut CUcontext) -> CUresult; } extern "C" { - #[doc = " \\brief Make a compute stream wait on an event"] - #[doc = ""] - #[doc = " Makes all future work submitted to \\p hStream wait for all work captured in"] - #[doc = " \\p hEvent. See ::cuEventRecord() for details on what is captured by an event."] - #[doc = " The synchronization will be performed efficiently on the device when applicable."] - #[doc = " \\p hEvent may be from a different context or device than \\p hStream."] - #[doc = ""] - #[doc = " \\param hStream - Stream to wait"] - #[doc = " \\param hEvent - Event to wait on (may not be NULL)"] - #[doc = " \\param Flags - Parameters for the operation (must be 0)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamCreate,"] - #[doc = " ::cuEventRecord,"] - #[doc = " ::cuStreamQuery,"] - #[doc = " ::cuStreamSynchronize,"] - #[doc = " ::cuStreamAddCallback,"] - #[doc = " ::cuStreamDestroy,"] - #[doc = " ::cudaStreamWaitEvent"] pub fn cuStreamWaitEvent( hStream: CUstream, hEvent: CUevent, @@ -6180,77 +1994,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Add a callback to a compute stream"] - #[doc = ""] - #[doc = " \\note This function is slated for eventual deprecation and removal. If"] - #[doc = " you do not require the callback to execute in case of a device error,"] - #[doc = " consider using ::cuLaunchHostFunc. 
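A hedged sketch of the cross-stream ordering pattern enabled by cuStreamWaitEvent above; cuEventCreate and cuEventRecord are declared elsewhere in this wrapper, and error handling is omitted for brevity:

/// Make `consumer` wait for all work currently captured on `producer`.
unsafe fn order_consumer_after_producer(producer: CUstream, consumer: CUstream) -> CUresult {
    let mut done: CUevent = std::ptr::null_mut();
    let _ = cuEventCreate(&mut done, CUevent_flags_enum_CU_EVENT_DISABLE_TIMING);
    let _ = cuEventRecord(done, producer);
    cuStreamWaitEvent(consumer, done, 0) // Flags must currently be 0
}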
Additionally, this function is not"] - #[doc = " supported with ::cuStreamBeginCapture and ::cuStreamEndCapture, unlike"] - #[doc = " ::cuLaunchHostFunc."] - #[doc = ""] - #[doc = " Adds a callback to be called on the host after all currently enqueued"] - #[doc = " items in the stream have completed. For each"] - #[doc = " cuStreamAddCallback call, the callback will be executed exactly once."] - #[doc = " The callback will block later work in the stream until it is finished."] - #[doc = ""] - #[doc = " The callback may be passed ::CUDA_SUCCESS or an error code. In the event"] - #[doc = " of a device error, all subsequently executed callbacks will receive an"] - #[doc = " appropriate ::CUresult."] - #[doc = ""] - #[doc = " Callbacks must not make any CUDA API calls. Attempting to use a CUDA API"] - #[doc = " will result in ::CUDA_ERROR_NOT_PERMITTED. Callbacks must not perform any"] - #[doc = " synchronization that may depend on outstanding device work or other callbacks"] - #[doc = " that are not mandated to run earlier. Callbacks without a mandated order"] - #[doc = " (in independent streams) execute in undefined order and may be serialized."] - #[doc = ""] - #[doc = " For the purposes of Unified Memory, callback execution makes a number of"] - #[doc = " guarantees:"] - #[doc = " "] - #[doc = ""] - #[doc = " \\param hStream - Stream to add callback to"] - #[doc = " \\param callback - The function to call once preceding stream operations are complete"] - #[doc = " \\param userData - User specified data to be passed to the callback function"] - #[doc = " \\param flags - Reserved for future use, must be 0"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_NOT_SUPPORTED"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamCreate,"] - #[doc = " ::cuStreamQuery,"] - #[doc = " ::cuStreamSynchronize,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuStreamDestroy,"] - #[doc = " ::cuMemAllocManaged,"] - #[doc = " ::cuStreamAttachMemAsync,"] - #[doc = " ::cuStreamLaunchHostFunc,"] - #[doc = " ::cudaStreamAddCallback"] pub fn cuStreamAddCallback( hStream: CUstream, callback: CUstreamCallback, @@ -6265,154 +2008,23 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Swaps the stream capture interaction mode for a thread"] - #[doc = ""] - #[doc = " Sets the calling thread's stream capture interaction mode to the value contained"] - #[doc = " in \\p *mode, and overwrites \\p *mode with the previous mode for the thread. To"] - #[doc = " facilitate deterministic behavior across function or module boundaries, callers"] - #[doc = " are encouraged to use this API in a push-pop fashion: \\code"] - #[doc = "CUstreamCaptureMode mode = desiredMode;"] - #[doc = "cuThreadExchangeStreamCaptureMode(&mode);"] - #[doc = "..."] - #[doc = "cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " During stream capture (see ::cuStreamBeginCapture), some actions, such as a call"] - #[doc = " to ::cudaMalloc, may be unsafe. 
In the case of ::cudaMalloc, the operation is"] - #[doc = " not enqueued asynchronously to a stream, and is not observed by stream capture."] - #[doc = " Therefore, if the sequence of operations captured via ::cuStreamBeginCapture"] - #[doc = " depended on the allocation being replayed whenever the graph is launched, the"] - #[doc = " captured graph would be invalid."] - #[doc = ""] - #[doc = " Therefore, stream capture places restrictions on API calls that can be made within"] - #[doc = " or concurrently to a ::cuStreamBeginCapture-::cuStreamEndCapture sequence. This"] - #[doc = " behavior can be controlled via this API and flags to ::cuStreamBeginCapture."] - #[doc = ""] - #[doc = " A thread's mode is one of the following:"] - #[doc = " - \\p CU_STREAM_CAPTURE_MODE_GLOBAL: This is the default mode. If the local thread has"] - #[doc = " an ongoing capture sequence that was not initiated with"] - #[doc = " \\p CU_STREAM_CAPTURE_MODE_RELAXED at \\p cuStreamBeginCapture, or if any other thread"] - #[doc = " has a concurrent capture sequence initiated with \\p CU_STREAM_CAPTURE_MODE_GLOBAL,"] - #[doc = " this thread is prohibited from potentially unsafe API calls."] - #[doc = " - \\p CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: If the local thread has an ongoing capture"] - #[doc = " sequence not initiated with \\p CU_STREAM_CAPTURE_MODE_RELAXED, it is prohibited"] - #[doc = " from potentially unsafe API calls. Concurrent capture sequences in other threads"] - #[doc = " are ignored."] - #[doc = " - \\p CU_STREAM_CAPTURE_MODE_RELAXED: The local thread is not prohibited from potentially"] - #[doc = " unsafe API calls. Note that the thread is still prohibited from API calls which"] - #[doc = " necessarily conflict with stream capture, for example, attempting ::cuEventQuery"] - #[doc = " on an event that was last recorded inside a capture sequence."] - #[doc = ""] - #[doc = " \\param mode - Pointer to mode value to swap with the current mode"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuStreamBeginCapture"] pub fn cuThreadExchangeStreamCaptureMode( mode: *mut CUstreamCaptureMode, ) -> CUresult; } extern "C" { - #[doc = " \\brief Ends capture on a stream, returning the captured graph"] - #[doc = ""] - #[doc = " End capture on \\p hStream, returning the captured graph via \\p phGraph."] - #[doc = " Capture must have been initiated on \\p hStream via a call to ::cuStreamBeginCapture."] - #[doc = " If capture was invalidated, due to a violation of the rules of stream capture, then"] - #[doc = " a NULL graph will be returned."] - #[doc = ""] - #[doc = " If the \\p mode argument to ::cuStreamBeginCapture was not"] - #[doc = " ::CU_STREAM_CAPTURE_MODE_RELAXED, this call must be from the same thread as"] - #[doc = " ::cuStreamBeginCapture."] - #[doc = ""] - #[doc = " \\param hStream - Stream to query"] - #[doc = " \\param phGraph - The captured graph"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuStreamCreate,"] - #[doc = " ::cuStreamBeginCapture,"] - #[doc = " ::cuStreamIsCapturing"] pub fn cuStreamEndCapture( 
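// Illustrative sketch (not part of the generated bindings or of this patch): a Rust
// rendering of the push/pop pattern described above for cuThreadExchangeStreamCaptureMode.
// The helper name and closure are hypothetical; only the two FFI calls come from the
// bindings, and their CUresult returns are ignored here for brevity.
unsafe fn with_capture_mode<R>(desired: CUstreamCaptureMode, work: impl FnOnce() -> R) -> R {
    let mut mode = desired;
    // After this call `mode` holds the thread's previous capture-interaction mode.
    let _ = cuThreadExchangeStreamCaptureMode(&mut mode);
    let result = work();
    // Swap the previous mode back in, restoring the thread's original state.
    let _ = cuThreadExchangeStreamCaptureMode(&mut mode);
    result
}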
hStream: CUstream, phGraph: *mut CUgraph, ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a stream's capture status"] - #[doc = ""] - #[doc = " Return the capture status of \\p hStream via \\p captureStatus. After a successful"] - #[doc = " call, \\p *captureStatus will contain one of the following:"] - #[doc = " - ::CU_STREAM_CAPTURE_STATUS_NONE: The stream is not capturing."] - #[doc = " - ::CU_STREAM_CAPTURE_STATUS_ACTIVE: The stream is capturing."] - #[doc = " - ::CU_STREAM_CAPTURE_STATUS_INVALIDATED: The stream was capturing but an error"] - #[doc = " has invalidated the capture sequence. The capture sequence must be terminated"] - #[doc = " with ::cuStreamEndCapture on the stream where it was initiated in order to"] - #[doc = " continue using \\p hStream."] - #[doc = ""] - #[doc = " Note that, if this is called on ::CU_STREAM_LEGACY (the \"null stream\") while"] - #[doc = " a blocking stream in the same context is capturing, it will return"] - #[doc = " ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT and \\p *captureStatus is unspecified"] - #[doc = " after the call. The blocking stream capture is not invalidated."] - #[doc = ""] - #[doc = " When a blocking stream is capturing, the legacy stream is in an"] - #[doc = " unusable state until the blocking stream capture is terminated. The legacy"] - #[doc = " stream is not supported for stream capture, but attempted use would have an"] - #[doc = " implicit dependency on the capturing stream(s)."] - #[doc = ""] - #[doc = " \\param hStream - Stream to query"] - #[doc = " \\param captureStatus - Returns the stream's capture status"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuStreamCreate,"] - #[doc = " ::cuStreamBeginCapture,"] - #[doc = " ::cuStreamEndCapture"] pub fn cuStreamIsCapturing( hStream: CUstream, captureStatus: *mut CUstreamCaptureStatus, ) -> CUresult; } extern "C" { - #[doc = " \\brief Query capture status of a stream"] - #[doc = ""] - #[doc = " Query the capture status of a stream and and get an id for"] - #[doc = " the capture sequence, which is unique over the lifetime of the process."] - #[doc = ""] - #[doc = " If called on ::CU_STREAM_LEGACY (the \"null stream\") while a stream not created"] - #[doc = " with ::CU_STREAM_NON_BLOCKING is capturing, returns ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT."] - #[doc = ""] - #[doc = " A valid id is returned only if both of the following are true:"] - #[doc = " - the call returns CUDA_SUCCESS"] - #[doc = " - captureStatus is set to ::CU_STREAM_CAPTURE_STATUS_ACTIVE"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuStreamBeginCapture,"] - #[doc = " ::cuStreamIsCapturing"] pub fn cuStreamGetCaptureInfo( hStream: CUstream, captureStatus: *mut CUstreamCaptureStatus, @@ -6420,90 +2032,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Attach memory to a stream asynchronously"] - #[doc = ""] - #[doc = " Enqueues an operation in \\p hStream to specify stream association of"] - #[doc = " \\p length bytes of memory starting from \\p dptr. 
This function is a"] - #[doc = " stream-ordered operation, meaning that it is dependent on, and will"] - #[doc = " only take effect when, previous work in stream has completed. Any"] - #[doc = " previous association is automatically replaced."] - #[doc = ""] - #[doc = " \\p dptr must point to one of the following types of memories:"] - #[doc = " - managed memory declared using the __managed__ keyword or allocated with"] - #[doc = " ::cuMemAllocManaged."] - #[doc = " - a valid host-accessible region of system-allocated pageable memory. This"] - #[doc = " type of memory may only be specified if the device associated with the"] - #[doc = " stream reports a non-zero value for the device attribute"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS."] - #[doc = ""] - #[doc = " For managed allocations, \\p length must be either zero or the entire"] - #[doc = " allocation's size. Both indicate that the entire allocation's stream"] - #[doc = " association is being changed. Currently, it is not possible to change stream"] - #[doc = " association for a portion of a managed allocation."] - #[doc = ""] - #[doc = " For pageable host allocations, \\p length must be non-zero."] - #[doc = ""] - #[doc = " The stream association is specified using \\p flags which must be"] - #[doc = " one of ::CUmemAttach_flags."] - #[doc = " If the ::CU_MEM_ATTACH_GLOBAL flag is specified, the memory can be accessed"] - #[doc = " by any stream on any device."] - #[doc = " If the ::CU_MEM_ATTACH_HOST flag is specified, the program makes a guarantee"] - #[doc = " that it won't access the memory on the device from any stream on a device that"] - #[doc = " has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS."] - #[doc = " If the ::CU_MEM_ATTACH_SINGLE flag is specified and \\p hStream is associated with"] - #[doc = " a device that has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS,"] - #[doc = " the program makes a guarantee that it will only access the memory on the device"] - #[doc = " from \\p hStream. It is illegal to attach singly to the NULL stream, because the"] - #[doc = " NULL stream is a virtual global stream and not a specific stream. An error will"] - #[doc = " be returned in this case."] - #[doc = ""] - #[doc = " When memory is associated with a single stream, the Unified Memory system will"] - #[doc = " allow CPU access to this memory region so long as all operations in \\p hStream"] - #[doc = " have completed, regardless of whether other streams are active. In effect,"] - #[doc = " this constrains exclusive ownership of the managed memory region by"] - #[doc = " an active GPU to per-stream activity instead of whole-GPU activity."] - #[doc = ""] - #[doc = " Accessing memory on the device from streams that are not associated with"] - #[doc = " it will produce undefined results. No error checking is performed by the"] - #[doc = " Unified Memory system to ensure that kernels launched into other streams"] - #[doc = " do not access this region."] - #[doc = ""] - #[doc = " It is a program's responsibility to order calls to ::cuStreamAttachMemAsync"] - #[doc = " via events, synchronization or other means to ensure legal access to memory"] - #[doc = " at all times. 
Data visibility and coherency will be changed appropriately"] - #[doc = " for all kernels which follow a stream-association change."] - #[doc = ""] - #[doc = " If \\p hStream is destroyed while data is associated with it, the association is"] - #[doc = " removed and the association reverts to the default visibility of the allocation"] - #[doc = " as specified at ::cuMemAllocManaged. For __managed__ variables, the default"] - #[doc = " association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a stream is an"] - #[doc = " asynchronous operation, and as a result, the change to default association won't"] - #[doc = " happen until all work in the stream has completed."] - #[doc = ""] - #[doc = " \\param hStream - Stream in which to enqueue the attach operation"] - #[doc = " \\param dptr - Pointer to memory (must be a pointer to managed memory or"] - #[doc = " to a valid host-accessible region of system-allocated"] - #[doc = " pageable memory)"] - #[doc = " \\param length - Length of memory"] - #[doc = " \\param flags - Must be one of ::CUmemAttach_flags"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_NOT_SUPPORTED"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamCreate,"] - #[doc = " ::cuStreamQuery,"] - #[doc = " ::cuStreamSynchronize,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuStreamDestroy,"] - #[doc = " ::cuMemAllocManaged,"] - #[doc = " ::cudaStreamAttachMemAsync"] pub fn cuStreamAttachMemAsync( hStream: CUstream, dptr: CUdeviceptr, @@ -6512,253 +2040,33 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Determine status of a compute stream"] - #[doc = ""] - #[doc = " Returns ::CUDA_SUCCESS if all operations in the stream specified by"] - #[doc = " \\p hStream have completed, or ::CUDA_ERROR_NOT_READY if not."] - #[doc = ""] - #[doc = " For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS"] - #[doc = " is equivalent to having called ::cuStreamSynchronize()."] - #[doc = ""] - #[doc = " \\param hStream - Stream to query status of"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_NOT_READY"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamCreate,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuStreamDestroy,"] - #[doc = " ::cuStreamSynchronize,"] - #[doc = " ::cuStreamAddCallback,"] - #[doc = " ::cudaStreamQuery"] pub fn cuStreamQuery(hStream: CUstream) -> CUresult; } extern "C" { - #[doc = " \\brief Wait until a stream's tasks are completed"] - #[doc = ""] - #[doc = " Waits until the device has completed all operations in the stream specified"] - #[doc = " by \\p hStream. 
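// Illustrative sketch (not part of the generated bindings or of this patch): attaching an
// entire managed allocation to a single stream as described above. `managed_dptr` is
// assumed to come from cuMemAllocManaged elsewhere; the raw flag values (1 = GLOBAL,
// 2 = HOST, 4 = SINGLE) are the CUmemAttach_flags constants from the driver API.
unsafe fn attach_to_single_stream(stream: CUstream, managed_dptr: CUdeviceptr) -> CUresult {
    // length == 0 selects the whole managed allocation; partial attachment of managed
    // memory is not supported.
    let res = cuStreamAttachMemAsync(stream, managed_dptr, 0, 4);
    if res != 0 {
        return res;
    }
    // The attach is stream-ordered, so wait for the stream before touching the memory
    // from the CPU.
    cuStreamSynchronize(stream)
}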
If the context was created with the"] - #[doc = " ::CU_CTX_SCHED_BLOCKING_SYNC flag, the CPU thread will block until the"] - #[doc = " stream is finished with all of its tasks."] - #[doc = ""] - #[doc = " \\param hStream - Stream to wait for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = ""] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamCreate,"] - #[doc = " ::cuStreamDestroy,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuStreamQuery,"] - #[doc = " ::cuStreamAddCallback,"] - #[doc = " ::cudaStreamSynchronize"] pub fn cuStreamSynchronize(hStream: CUstream) -> CUresult; } extern "C" { pub fn cuStreamDestroy_v2(hStream: CUstream) -> CUresult; } extern "C" { - #[doc = " \\brief Creates an event"] - #[doc = ""] - #[doc = " Creates an event *phEvent for the current context with the flags specified via"] - #[doc = " \\p Flags. Valid flags include:"] - #[doc = " - ::CU_EVENT_DEFAULT: Default event creation flag."] - #[doc = " - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking"] - #[doc = " synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on"] - #[doc = " an event created with this flag will block until the event has actually"] - #[doc = " been recorded."] - #[doc = " - ::CU_EVENT_DISABLE_TIMING: Specifies that the created event does not need"] - #[doc = " to record timing data. Events created with this flag specified and"] - #[doc = " the ::CU_EVENT_BLOCKING_SYNC flag not specified will provide the best"] - #[doc = " performance when used with ::cuStreamWaitEvent() and ::cuEventQuery()."] - #[doc = " - ::CU_EVENT_INTERPROCESS: Specifies that the created event may be used as an"] - #[doc = " interprocess event by ::cuIpcGetEventHandle(). ::CU_EVENT_INTERPROCESS must"] - #[doc = " be specified along with ::CU_EVENT_DISABLE_TIMING."] - #[doc = ""] - #[doc = " \\param phEvent - Returns newly created event"] - #[doc = " \\param Flags - Event creation flags"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuEventRecord,"] - #[doc = " ::cuEventQuery,"] - #[doc = " ::cuEventSynchronize,"] - #[doc = " ::cuEventDestroy,"] - #[doc = " ::cuEventElapsedTime,"] - #[doc = " ::cudaEventCreate,"] - #[doc = " ::cudaEventCreateWithFlags"] pub fn cuEventCreate( phEvent: *mut CUevent, Flags: ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Records an event"] - #[doc = ""] - #[doc = " Captures in \\p hEvent the contents of \\p hStream at the time of this call."] - #[doc = " \\p hEvent and \\p hStream must be from the same context."] - #[doc = " Calls such as ::cuEventQuery() or ::cuStreamWaitEvent() will then"] - #[doc = " examine or wait for completion of the work that was captured. Uses of"] - #[doc = " \\p hStream after this call do not modify \\p hEvent. 
See note on default"] - #[doc = " stream behavior for what is captured in the default case."] - #[doc = ""] - #[doc = " ::cuEventRecord() can be called multiple times on the same event and"] - #[doc = " will overwrite the previously captured state. Other APIs such as"] - #[doc = " ::cuStreamWaitEvent() use the most recently captured state at the time"] - #[doc = " of the API call, and are not affected by later calls to"] - #[doc = " ::cuEventRecord(). Before the first call to ::cuEventRecord(), an"] - #[doc = " event represents an empty set of work, so for example ::cuEventQuery()"] - #[doc = " would return ::CUDA_SUCCESS."] - #[doc = ""] - #[doc = " \\param hEvent - Event to record"] - #[doc = " \\param hStream - Stream to record event for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuEventCreate,"] - #[doc = " ::cuEventQuery,"] - #[doc = " ::cuEventSynchronize,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuEventDestroy,"] - #[doc = " ::cuEventElapsedTime,"] - #[doc = " ::cudaEventRecord"] pub fn cuEventRecord(hEvent: CUevent, hStream: CUstream) -> CUresult; } extern "C" { - #[doc = " \\brief Queries an event's status"] - #[doc = ""] - #[doc = " Queries the status of all work currently captured by \\p hEvent. See"] - #[doc = " ::cuEventRecord() for details on what is captured by an event."] - #[doc = ""] - #[doc = " Returns ::CUDA_SUCCESS if all captured work has been completed, or"] - #[doc = " ::CUDA_ERROR_NOT_READY if any captured work is incomplete."] - #[doc = ""] - #[doc = " For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS"] - #[doc = " is equivalent to having called ::cuEventSynchronize()."] - #[doc = ""] - #[doc = " \\param hEvent - Event to query"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_NOT_READY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuEventCreate,"] - #[doc = " ::cuEventRecord,"] - #[doc = " ::cuEventSynchronize,"] - #[doc = " ::cuEventDestroy,"] - #[doc = " ::cuEventElapsedTime,"] - #[doc = " ::cudaEventQuery"] pub fn cuEventQuery(hEvent: CUevent) -> CUresult; } extern "C" { - #[doc = " \\brief Waits for an event to complete"] - #[doc = ""] - #[doc = " Waits until the completion of all work currently captured in \\p hEvent."] - #[doc = " See ::cuEventRecord() for details on what is captured by an event."] - #[doc = ""] - #[doc = " Waiting for an event that was created with the ::CU_EVENT_BLOCKING_SYNC"] - #[doc = " flag will cause the calling CPU thread to block until the event has"] - #[doc = " been completed by the device. 
If the ::CU_EVENT_BLOCKING_SYNC flag has"] - #[doc = " not been set, then the CPU thread will busy-wait until the event has"] - #[doc = " been completed by the device."] - #[doc = ""] - #[doc = " \\param hEvent - Event to wait for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuEventCreate,"] - #[doc = " ::cuEventRecord,"] - #[doc = " ::cuEventQuery,"] - #[doc = " ::cuEventDestroy,"] - #[doc = " ::cuEventElapsedTime,"] - #[doc = " ::cudaEventSynchronize"] pub fn cuEventSynchronize(hEvent: CUevent) -> CUresult; } extern "C" { pub fn cuEventDestroy_v2(hEvent: CUevent) -> CUresult; } extern "C" { - #[doc = " \\brief Computes the elapsed time between two events"] - #[doc = ""] - #[doc = " Computes the elapsed time between two events (in milliseconds with a"] - #[doc = " resolution of around 0.5 microseconds)."] - #[doc = ""] - #[doc = " If either event was last recorded in a non-NULL stream, the resulting time"] - #[doc = " may be greater than expected (even if both used the same stream handle). This"] - #[doc = " happens because the ::cuEventRecord() operation takes place asynchronously"] - #[doc = " and there is no guarantee that the measured latency is actually just between"] - #[doc = " the two events. Any number of other different stream operations could execute"] - #[doc = " in between the two measured events, thus altering the timing in a significant"] - #[doc = " way."] - #[doc = ""] - #[doc = " If ::cuEventRecord() has not been called on either event then"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE is returned. If ::cuEventRecord() has been called"] - #[doc = " on both events but one or both of them has not yet been completed (that is,"] - #[doc = " ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY on at least one of the"] - #[doc = " events), ::CUDA_ERROR_NOT_READY is returned. If either event was created with"] - #[doc = " the ::CU_EVENT_DISABLE_TIMING flag, then this function will return"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE."] - #[doc = ""] - #[doc = " \\param pMilliseconds - Time between \\p hStart and \\p hEnd in ms"] - #[doc = " \\param hStart - Starting event"] - #[doc = " \\param hEnd - Ending event"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_NOT_READY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuEventCreate,"] - #[doc = " ::cuEventRecord,"] - #[doc = " ::cuEventQuery,"] - #[doc = " ::cuEventSynchronize,"] - #[doc = " ::cuEventDestroy,"] - #[doc = " ::cudaEventElapsedTime"] pub fn cuEventElapsedTime( pMilliseconds: *mut f32, hStart: CUevent, @@ -6766,181 +2074,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Imports an external memory object"] - #[doc = ""] - #[doc = " Imports an externally allocated memory object and returns"] - #[doc = " a handle to that in \\p extMem_out."] - #[doc = ""] - #[doc = " The properties of the handle being imported must be described in"] - #[doc = " \\p memHandleDesc. 
The ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC structure"] - #[doc = " is defined as follows:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st {"] - #[doc = "CUexternalMemoryHandleType type;"] - #[doc = "union {"] - #[doc = "int fd;"] - #[doc = "struct {"] - #[doc = "void *handle;"] - #[doc = "const void *name;"] - #[doc = "} win32;"] - #[doc = "} handle;"] - #[doc = "unsigned long long size;"] - #[doc = "unsigned int flags;"] - #[doc = "} CUDA_EXTERNAL_MEMORY_HANDLE_DESC;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " where ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type specifies the type"] - #[doc = " of handle being imported. ::CUexternalMemoryHandleType is"] - #[doc = " defined as:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef enum CUexternalMemoryHandleType_enum {"] - #[doc = "CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1,"] - #[doc = "CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2,"] - #[doc = "CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3,"] - #[doc = "CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4,"] - #[doc = "CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5"] - #[doc = "} CUexternalMemoryHandleType;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is"] - #[doc = " ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, then"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::fd must be a valid"] - #[doc = " file descriptor referencing a memory object. Ownership of"] - #[doc = " the file descriptor is transferred to the CUDA driver when the"] - #[doc = " handle is imported successfully. Performing any operations on the"] - #[doc = " file descriptor after it is imported results in undefined behavior."] - #[doc = ""] - #[doc = " If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is"] - #[doc = " ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32, then exactly one"] - #[doc = " of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be"] - #[doc = " NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle"] - #[doc = " is not NULL, then it must represent a valid shared NT handle that"] - #[doc = " references a memory object. Ownership of this handle is"] - #[doc = " not transferred to CUDA after the import operation, so the"] - #[doc = " application must release the handle using the appropriate system"] - #[doc = " call. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name"] - #[doc = " is not NULL, then it must point to a NULL-terminated array of"] - #[doc = " UTF-16 characters that refers to a memory object."] - #[doc = ""] - #[doc = " If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is"] - #[doc = " ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT, then"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must"] - #[doc = " be non-NULL and"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name"] - #[doc = " must be NULL. The handle specified must be a globally shared KMT"] - #[doc = " handle. 
This handle does not hold a reference to the underlying"] - #[doc = " object, and thus will be invalid when all references to the"] - #[doc = " memory object are destroyed."] - #[doc = ""] - #[doc = " If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is"] - #[doc = " ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP, then exactly one"] - #[doc = " of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be"] - #[doc = " NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle"] - #[doc = " is not NULL, then it must represent a valid shared NT handle that"] - #[doc = " is returned by ID3DDevice::CreateSharedHandle when referring to a"] - #[doc = " ID3D12Heap object. This handle holds a reference to the underlying"] - #[doc = " object. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name"] - #[doc = " is not NULL, then it must point to a NULL-terminated array of"] - #[doc = " UTF-16 characters that refers to a ID3D12Heap object."] - #[doc = ""] - #[doc = " If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is"] - #[doc = " ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE, then exactly one"] - #[doc = " of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be"] - #[doc = " NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle"] - #[doc = " is not NULL, then it must represent a valid shared NT handle that"] - #[doc = " is returned by ID3DDevice::CreateSharedHandle when referring to a"] - #[doc = " ID3D12Resource object. This handle holds a reference to the"] - #[doc = " underlying object. If"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name"] - #[doc = " is not NULL, then it must point to a NULL-terminated array of"] - #[doc = " UTF-16 characters that refers to a ID3D12Resource object."] - #[doc = ""] - #[doc = " The size of the memory object must be specified in"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::size."] - #[doc = ""] - #[doc = " Specifying the flag ::CUDA_EXTERNAL_MEMORY_DEDICATED in"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::flags indicates that the"] - #[doc = " resource is a dedicated resource. The definition of what a"] - #[doc = " dedicated resource is outside the scope of this extension."] - #[doc = ""] - #[doc = " \\param extMem_out - Returned handle to an external memory object"] - #[doc = " \\param memHandleDesc - Memory import handle descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\note If the Vulkan memory imported into CUDA is mapped on the CPU then the"] - #[doc = " application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges"] - #[doc = " as well as appropriate Vulkan pipeline barriers to maintain coherence between"] - #[doc = " CPU and GPU. 
For more information on these APIs, please refer to \"Synchronization"] - #[doc = " and Cache Control\" chapter from Vulkan specification."] - #[doc = ""] - #[doc = " \\sa ::cuDestroyExternalMemory,"] - #[doc = " ::cuExternalMemoryGetMappedBuffer,"] - #[doc = " ::cuExternalMemoryGetMappedMipmappedArray"] pub fn cuImportExternalMemory( extMem_out: *mut CUexternalMemory, memHandleDesc: *const CUDA_EXTERNAL_MEMORY_HANDLE_DESC, ) -> CUresult; } extern "C" { - #[doc = " \\brief Maps a buffer onto an imported memory object"] - #[doc = ""] - #[doc = " Maps a buffer onto an imported memory object and returns a device"] - #[doc = " pointer in \\p devPtr."] - #[doc = ""] - #[doc = " The properties of the buffer being mapped must be described in"] - #[doc = " \\p bufferDesc. The ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC structure is"] - #[doc = " defined as follows:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st {"] - #[doc = "unsigned long long offset;"] - #[doc = "unsigned long long size;"] - #[doc = "unsigned int flags;"] - #[doc = "} CUDA_EXTERNAL_MEMORY_BUFFER_DESC;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " where ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::offset is the offset in"] - #[doc = " the memory object where the buffer's base address is."] - #[doc = " ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::size is the size of the buffer."] - #[doc = " ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::flags must be zero."] - #[doc = ""] - #[doc = " The offset and size have to be suitably aligned to match the"] - #[doc = " requirements of the external API. Mapping two buffers whose ranges"] - #[doc = " overlap may or may not result in the same virtual address being"] - #[doc = " returned for the overlapped portion. In such cases, the application"] - #[doc = " must ensure that all accesses to that region from the GPU are"] - #[doc = " volatile. Otherwise writes made via one address are not guaranteed"] - #[doc = " to be visible via the other address, even if they're issued by the"] - #[doc = " same thread. It is recommended that applications map the combined"] - #[doc = " range instead of mapping separate buffers and then apply the"] - #[doc = " appropriate offsets to the returned pointer to derive the"] - #[doc = " individual buffers."] - #[doc = ""] - #[doc = " The returned pointer \\p devPtr must be freed using ::cuMemFree."] - #[doc = ""] - #[doc = " \\param devPtr - Returned device pointer to buffer"] - #[doc = " \\param extMem - Handle to external memory object"] - #[doc = " \\param bufferDesc - Buffer descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuImportExternalMemory"] - #[doc = " ::cuDestroyExternalMemory,"] - #[doc = " ::cuExternalMemoryGetMappedMipmappedArray"] pub fn cuExternalMemoryGetMappedBuffer( devPtr: *mut CUdeviceptr, extMem: CUexternalMemory, @@ -6948,51 +2087,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Maps a CUDA mipmapped array onto an external memory object"] - #[doc = ""] - #[doc = " Maps a CUDA mipmapped array onto an external object and returns a"] - #[doc = " handle to it in \\p mipmap."] - #[doc = ""] - #[doc = " The properties of the CUDA mipmapped array being mapped must be"] - #[doc = " described in \\p mipmapDesc. 
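// Illustrative sketch (not part of the generated bindings or of this patch): mapping a
// buffer out of an already-imported external memory object, mirroring the
// CUDA_EXTERNAL_MEMORY_BUFFER_DESC layout quoted above. The descriptor is zero-initialised
// and filled field by field; `flags` must be 0, and the returned pointer is later released
// with cuMemFree.
unsafe fn map_external_buffer(
    ext_mem: CUexternalMemory,
    offset: u64,
    size: u64,
) -> Result<CUdeviceptr, CUresult> {
    let mut desc: CUDA_EXTERNAL_MEMORY_BUFFER_DESC = std::mem::zeroed();
    desc.offset = offset;
    desc.size = size;
    desc.flags = 0;
    let mut dptr: CUdeviceptr = 0;
    let res = cuExternalMemoryGetMappedBuffer(&mut dptr, ext_mem, &desc);
    if res == 0 { Ok(dptr) } else { Err(res) }
}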
The structure"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC is defined as follows:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st {"] - #[doc = "unsigned long long offset;"] - #[doc = "CUDA_ARRAY3D_DESCRIPTOR arrayDesc;"] - #[doc = "unsigned int numLevels;"] - #[doc = "} CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " where ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::offset is the"] - #[doc = " offset in the memory object where the base level of the mipmap"] - #[doc = " chain is."] - #[doc = " ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc describes"] - #[doc = " the format, dimensions and type of the base level of the mipmap"] - #[doc = " chain. For further details on these parameters, please refer to the"] - #[doc = " documentation for ::cuMipmappedArrayCreate. Note that if the mipmapped"] - #[doc = " array is bound as a color target in the graphics API, then the flag"] - #[doc = " ::CUDA_ARRAY3D_COLOR_ATTACHMENT must be specified in"] - #[doc = " ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc::Flags."] - #[doc = " ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels specifies"] - #[doc = " the total number of levels in the mipmap chain."] - #[doc = ""] - #[doc = " The returned CUDA mipmapped array must be freed using ::cuMipmappedArrayDestroy."] - #[doc = ""] - #[doc = " \\param mipmap - Returned CUDA mipmapped array"] - #[doc = " \\param extMem - Handle to external memory object"] - #[doc = " \\param mipmapDesc - CUDA array descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuImportExternalMemory"] - #[doc = " ::cuDestroyExternalMemory,"] - #[doc = " ::cuExternalMemoryGetMappedBuffer"] pub fn cuExternalMemoryGetMappedMipmappedArray( mipmap: *mut CUmipmappedArray, extMem: CUexternalMemory, @@ -7000,160 +2094,15 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Destroys an external memory object."] - #[doc = ""] - #[doc = " Destroys the specified external memory object. Any existing buffers"] - #[doc = " and CUDA mipmapped arrays mapped onto this object must no longer be"] - #[doc = " used and must be explicitly freed using ::cuMemFree and"] - #[doc = " ::cuMipmappedArrayDestroy respectively."] - #[doc = ""] - #[doc = " \\param extMem - External memory object to be destroyed"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuImportExternalMemory"] - #[doc = " ::cuExternalMemoryGetMappedBuffer,"] - #[doc = " ::cuExternalMemoryGetMappedMipmappedArray"] pub fn cuDestroyExternalMemory(extMem: CUexternalMemory) -> CUresult; } extern "C" { - #[doc = " \\brief Imports an external semaphore"] - #[doc = ""] - #[doc = " Imports an externally allocated synchronization object and returns"] - #[doc = " a handle to that in \\p extSem_out."] - #[doc = ""] - #[doc = " The properties of the handle being imported must be described in"] - #[doc = " \\p semHandleDesc. 
The ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC is"] - #[doc = " defined as follows:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st {"] - #[doc = "CUexternalSemaphoreHandleType type;"] - #[doc = "union {"] - #[doc = "int fd;"] - #[doc = "struct {"] - #[doc = "void *handle;"] - #[doc = "const void *name;"] - #[doc = "} win32;"] - #[doc = "} handle;"] - #[doc = "unsigned int flags;"] - #[doc = "} CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " where ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type specifies the type of"] - #[doc = " handle being imported. ::CUexternalSemaphoreHandleType is defined"] - #[doc = " as:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef enum CUexternalSemaphoreHandleType_enum {"] - #[doc = "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1,"] - #[doc = "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2,"] - #[doc = "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3,"] - #[doc = "CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4"] - #[doc = "} CUexternalSemaphoreHandleType;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, then"] - #[doc = " ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid"] - #[doc = " file descriptor referencing a synchronization object. Ownership of"] - #[doc = " the file descriptor is transferred to the CUDA driver when the"] - #[doc = " handle is imported successfully. Performing any operations on the"] - #[doc = " file descriptor after it is imported results in undefined behavior."] - #[doc = ""] - #[doc = " If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, then exactly one"] - #[doc = " of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and"] - #[doc = " ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be"] - #[doc = " NULL. If"] - #[doc = " ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle"] - #[doc = " is not NULL, then it must represent a valid shared NT handle that"] - #[doc = " references a synchronization object. Ownership of this handle is"] - #[doc = " not transferred to CUDA after the import operation, so the"] - #[doc = " application must release the handle using the appropriate system"] - #[doc = " call. If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name"] - #[doc = " is not NULL, then it must name a valid synchronization object."] - #[doc = ""] - #[doc = " If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT, then"] - #[doc = " ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle must"] - #[doc = " be non-NULL and"] - #[doc = " ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name"] - #[doc = " must be NULL. The handle specified must be a globally shared KMT"] - #[doc = " handle. This handle does not hold a reference to the underlying"] - #[doc = " object, and thus will be invalid when all references to the"] - #[doc = " synchronization object are destroyed."] - #[doc = ""] - #[doc = " If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, then exactly one"] - #[doc = " of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and"] - #[doc = " ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be"] - #[doc = " NULL. 
If"] - #[doc = " ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle"] - #[doc = " is not NULL, then it must represent a valid shared NT handle that"] - #[doc = " is returned by ID3DDevice::CreateSharedHandle when referring to a"] - #[doc = " ID3D12Fence object. This handle holds a reference to the underlying"] - #[doc = " object. If"] - #[doc = " ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name"] - #[doc = " is not NULL, then it must name a valid synchronization object that"] - #[doc = " refers to a valid ID3D12Fence object."] - #[doc = ""] - #[doc = " \\param extSem_out - Returned handle to an external semaphore"] - #[doc = " \\param semHandleDesc - Semaphore import handle descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuDestroyExternalSemaphore,"] - #[doc = " ::cuSignalExternalSemaphoresAsync,"] - #[doc = " ::cuWaitExternalSemaphoresAsync"] pub fn cuImportExternalSemaphore( extSem_out: *mut CUexternalSemaphore, semHandleDesc: *const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC, ) -> CUresult; } extern "C" { - #[doc = " \\brief Signals a set of external semaphore objects"] - #[doc = ""] - #[doc = " Enqueues a signal operation on a set of externally allocated"] - #[doc = " semaphore object in the specified stream. The operations will be"] - #[doc = " executed when all prior operations in the stream complete."] - #[doc = ""] - #[doc = " The exact semantics of signaling a semaphore depends on the type of"] - #[doc = " the object."] - #[doc = ""] - #[doc = " If the semaphore object is any one of the following types:"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD,"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32,"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT"] - #[doc = " then signaling the semaphore will set it to the signaled state."] - #[doc = ""] - #[doc = " If the semaphore object is of the type"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, then the"] - #[doc = " semaphore will be set to the value specified in"] - #[doc = " ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::fence::value."] - #[doc = ""] - #[doc = " \\param extSemArray - Set of external semaphores to be signaled"] - #[doc = " \\param paramsArray - Array of semaphore parameters"] - #[doc = " \\param numExtSems - Number of semaphores to signal"] - #[doc = " \\param stream - Stream to enqueue the signal operations in"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuImportExternalSemaphore,"] - #[doc = " ::cuDestroyExternalSemaphore,"] - #[doc = " ::cuWaitExternalSemaphoresAsync"] pub fn cuSignalExternalSemaphoresAsync( extSemArray: *const CUexternalSemaphore, paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS, @@ -7162,44 +2111,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Waits on a set of external semaphore objects"] - #[doc = ""] - #[doc = " Enqueues a wait operation on a set of externally allocated"] - #[doc = " semaphore object in the specified stream. 
The operations will be"] - #[doc = " executed when all prior operations in the stream complete."] - #[doc = ""] - #[doc = " The exact semantics of waiting on a semaphore depends on the type"] - #[doc = " of the object."] - #[doc = ""] - #[doc = " If the semaphore object is any one of the following types:"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD,"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32,"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT"] - #[doc = " then waiting on the semaphore will wait until the semaphore reaches"] - #[doc = " the signaled state. The semaphore will then be reset to the"] - #[doc = " unsignaled state. Therefore for every signal operation, there can"] - #[doc = " only be one wait operation."] - #[doc = ""] - #[doc = " If the semaphore object is of the type"] - #[doc = " ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, then waiting on"] - #[doc = " the semaphore will wait until the value of the semaphore is"] - #[doc = " greater than or equal to"] - #[doc = " ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::fence::value."] - #[doc = ""] - #[doc = " \\param extSemArray - External semaphores to be waited on"] - #[doc = " \\param paramsArray - Array of semaphore parameters"] - #[doc = " \\param numExtSems - Number of semaphores to wait on"] - #[doc = " \\param stream - Stream to enqueue the wait operations in"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuImportExternalSemaphore,"] - #[doc = " ::cuDestroyExternalSemaphore,"] - #[doc = " ::cuSignalExternalSemaphoresAsync"] pub fn cuWaitExternalSemaphoresAsync( extSemArray: *const CUexternalSemaphore, paramsArray: *const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS, @@ -7208,61 +2119,9 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Destroys an external semaphore"] - #[doc = ""] - #[doc = " Destroys an external semaphore object and releases any references"] - #[doc = " to the underlying resource. Any outstanding signals or waits must"] - #[doc = " have completed before the semaphore is destroyed."] - #[doc = ""] - #[doc = " \\param extSem - External semaphore to be destroyed"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuImportExternalSemaphore,"] - #[doc = " ::cuSignalExternalSemaphoresAsync,"] - #[doc = " ::cuWaitExternalSemaphoresAsync"] pub fn cuDestroyExternalSemaphore(extSem: CUexternalSemaphore) -> CUresult; } extern "C" { - #[doc = " \\brief Wait on a memory location"] - #[doc = ""] - #[doc = " Enqueues a synchronization of the stream on the given memory location. Work"] - #[doc = " ordered after the operation will block until the given condition on the"] - #[doc = " memory is satisfied. By default, the condition is to wait for"] - #[doc = " (int32_t)(*addr - value) >= 0, a cyclic greater-or-equal."] - #[doc = " Other condition types can be specified via \\p flags."] - #[doc = ""] - #[doc = " If the memory was registered via ::cuMemHostRegister(), the device pointer"] - #[doc = " should be obtained with ::cuMemHostGetDevicePointer(). 
This function cannot"] - #[doc = " be used with managed memory (::cuMemAllocManaged)."] - #[doc = ""] - #[doc = " Support for this can be queried with ::cuDeviceGetAttribute() and"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS."] - #[doc = ""] - #[doc = " Support for CU_STREAM_WAIT_VALUE_NOR can be queried with ::cuDeviceGetAttribute() and"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR."] - #[doc = ""] - #[doc = " \\param stream The stream to synchronize on the memory location."] - #[doc = " \\param addr The memory location to wait on."] - #[doc = " \\param value The value to compare with the memory location."] - #[doc = " \\param flags See ::CUstreamWaitValue_flags."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_NOT_SUPPORTED"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamWaitValue64,"] - #[doc = " ::cuStreamWriteValue32,"] - #[doc = " ::cuStreamWriteValue64"] - #[doc = " ::cuStreamBatchMemOp,"] - #[doc = " ::cuMemHostRegister,"] - #[doc = " ::cuStreamWaitEvent"] pub fn cuStreamWaitValue32( stream: CUstream, addr: CUdeviceptr, @@ -7271,37 +2130,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Wait on a memory location"] - #[doc = ""] - #[doc = " Enqueues a synchronization of the stream on the given memory location. Work"] - #[doc = " ordered after the operation will block until the given condition on the"] - #[doc = " memory is satisfied. By default, the condition is to wait for"] - #[doc = " (int64_t)(*addr - value) >= 0, a cyclic greater-or-equal."] - #[doc = " Other condition types can be specified via \\p flags."] - #[doc = ""] - #[doc = " If the memory was registered via ::cuMemHostRegister(), the device pointer"] - #[doc = " should be obtained with ::cuMemHostGetDevicePointer()."] - #[doc = ""] - #[doc = " Support for this can be queried with ::cuDeviceGetAttribute() and"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS."] - #[doc = ""] - #[doc = " \\param stream The stream to synchronize on the memory location."] - #[doc = " \\param addr The memory location to wait on."] - #[doc = " \\param value The value to compare with the memory location."] - #[doc = " \\param flags See ::CUstreamWaitValue_flags."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_NOT_SUPPORTED"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamWaitValue32,"] - #[doc = " ::cuStreamWriteValue32,"] - #[doc = " ::cuStreamWriteValue64,"] - #[doc = " ::cuStreamBatchMemOp,"] - #[doc = " ::cuMemHostRegister,"] - #[doc = " ::cuStreamWaitEvent"] pub fn cuStreamWaitValue64( stream: CUstream, addr: CUdeviceptr, @@ -7310,37 +2138,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Write a value to memory"] - #[doc = ""] - #[doc = " Write a value to memory. Unless the ::CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER"] - #[doc = " flag is passed, the write is preceded by a system-wide memory fence,"] - #[doc = " equivalent to a __threadfence_system() but scoped to the stream"] - #[doc = " rather than a CUDA thread."] - #[doc = ""] - #[doc = " If the memory was registered via ::cuMemHostRegister(), the device pointer"] - #[doc = " should be obtained with ::cuMemHostGetDevicePointer(). 
This function cannot"] - #[doc = " be used with managed memory (::cuMemAllocManaged)."] - #[doc = ""] - #[doc = " Support for this can be queried with ::cuDeviceGetAttribute() and"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS."] - #[doc = ""] - #[doc = " \\param stream The stream to do the write in."] - #[doc = " \\param addr The device address to write to."] - #[doc = " \\param value The value to write."] - #[doc = " \\param flags See ::CUstreamWriteValue_flags."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_NOT_SUPPORTED"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamWriteValue64,"] - #[doc = " ::cuStreamWaitValue32,"] - #[doc = " ::cuStreamWaitValue64,"] - #[doc = " ::cuStreamBatchMemOp,"] - #[doc = " ::cuMemHostRegister,"] - #[doc = " ::cuEventRecord"] pub fn cuStreamWriteValue32( stream: CUstream, addr: CUdeviceptr, @@ -7349,36 +2146,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Write a value to memory"] - #[doc = ""] - #[doc = " Write a value to memory. Unless the ::CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER"] - #[doc = " flag is passed, the write is preceded by a system-wide memory fence,"] - #[doc = " equivalent to a __threadfence_system() but scoped to the stream"] - #[doc = " rather than a CUDA thread."] - #[doc = ""] - #[doc = " If the memory was registered via ::cuMemHostRegister(), the device pointer"] - #[doc = " should be obtained with ::cuMemHostGetDevicePointer()."] - #[doc = ""] - #[doc = " Support for this can be queried with ::cuDeviceGetAttribute() and"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS."] - #[doc = ""] - #[doc = " \\param stream The stream to do the write in."] - #[doc = " \\param addr The device address to write to."] - #[doc = " \\param value The value to write."] - #[doc = " \\param flags See ::CUstreamWriteValue_flags."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_NOT_SUPPORTED"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamWriteValue32,"] - #[doc = " ::cuStreamWaitValue32,"] - #[doc = " ::cuStreamWaitValue64,"] - #[doc = " ::cuStreamBatchMemOp,"] - #[doc = " ::cuMemHostRegister,"] - #[doc = " ::cuEventRecord"] pub fn cuStreamWriteValue64( stream: CUstream, addr: CUdeviceptr, @@ -7387,37 +2154,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Batch operations to synchronize the stream via memory operations"] - #[doc = ""] - #[doc = " This is a batch version of ::cuStreamWaitValue32() and ::cuStreamWriteValue32()."] - #[doc = " Batching operations may avoid some performance overhead in both the API call"] - #[doc = " and the device execution versus adding them to the stream in separate API"] - #[doc = " calls. The operations are enqueued in the order they appear in the array."] - #[doc = ""] - #[doc = " See ::CUstreamBatchMemOpType for the full set of supported operations, and"] - #[doc = " ::cuStreamWaitValue32(), ::cuStreamWaitValue64(), ::cuStreamWriteValue32(),"] - #[doc = " and ::cuStreamWriteValue64() for details of specific operations."] - #[doc = ""] - #[doc = " Basic support for this can be queried with ::cuDeviceGetAttribute() and"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS. 
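// Illustrative sketch (not part of the generated bindings or of this patch): a simple
// cross-stream handshake built from the stream memory operations declared above.
// `flag_dptr` is assumed to be the device pointer of a page-locked 32-bit word obtained
// via cuMemHostRegister/cuMemHostGetDevicePointer (these ops do not work on managed
// memory). The raw flag values are 0 = default write behaviour and 1 = WAIT_VALUE_EQ;
// CUresult returns are ignored for brevity, and support should first be checked through
// cuDeviceGetAttribute (CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS).
unsafe fn signal_and_wait(stream_a: CUstream, stream_b: CUstream, flag_dptr: CUdeviceptr) {
    // Producer: once everything previously queued on stream_a has run, set the flag to 1.
    let _ = cuStreamWriteValue32(stream_a, flag_dptr, 1, 0);
    // Consumer: work queued on stream_b after this point waits until *flag == 1.
    let _ = cuStreamWaitValue32(stream_b, flag_dptr, 1, 1);
}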
See related APIs for details"] - #[doc = " on querying support for specific operations."] - #[doc = ""] - #[doc = " \\param stream The stream to enqueue the operations in."] - #[doc = " \\param count The number of operations in the array. Must be less than 256."] - #[doc = " \\param paramArray The types and parameters of the individual operations."] - #[doc = " \\param flags Reserved for future expansion; must be 0."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_NOT_SUPPORTED"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamWaitValue32,"] - #[doc = " ::cuStreamWaitValue64,"] - #[doc = " ::cuStreamWriteValue32,"] - #[doc = " ::cuStreamWriteValue64,"] - #[doc = " ::cuMemHostRegister"] pub fn cuStreamBatchMemOp( stream: CUstream, count: ::std::os::raw::c_uint, @@ -7426,61 +2162,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns information about a function"] - #[doc = ""] - #[doc = " Returns in \\p *pi the integer value of the attribute \\p attrib on the kernel"] - #[doc = " given by \\p hfunc. The supported attributes are:"] - #[doc = " - ::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads"] - #[doc = " per block, beyond which a launch of the function would fail. This number"] - #[doc = " depends on both the function and the device on which the function is"] - #[doc = " currently loaded."] - #[doc = " - ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of"] - #[doc = " statically-allocated shared memory per block required by this function."] - #[doc = " This does not include dynamically-allocated shared memory requested by"] - #[doc = " the user at runtime."] - #[doc = " - ::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated"] - #[doc = " constant memory required by this function."] - #[doc = " - ::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory"] - #[doc = " used by each thread of this function."] - #[doc = " - ::CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread"] - #[doc = " of this function."] - #[doc = " - ::CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for"] - #[doc = " which the function was compiled. This value is the major PTX version * 10"] - #[doc = " + the minor PTX version, so a PTX version 1.3 function would return the"] - #[doc = " value 13. Note that this may return the undefined value of 0 for cubins"] - #[doc = " compiled prior to CUDA 3.0."] - #[doc = " - ::CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for"] - #[doc = " which the function was compiled. This value is the major binary"] - #[doc = " version * 10 + the minor binary version, so a binary version 1.3 function"] - #[doc = " would return the value 13. 
Note that this will return a value of 10 for"] - #[doc = " legacy cubins that do not have a properly-encoded binary architecture"] - #[doc = " version."] - #[doc = " - ::CU_FUNC_CACHE_MODE_CA: The attribute to indicate whether the function has"] - #[doc = " been compiled with user specified option \"-Xptxas --dlcm=ca\" set ."] - #[doc = " - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of"] - #[doc = " dynamically-allocated shared memory."] - #[doc = " - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1"] - #[doc = " cache split ratio in percent of total shared memory."] - #[doc = ""] - #[doc = " \\param pi - Returned attribute value"] - #[doc = " \\param attrib - Attribute requested"] - #[doc = " \\param hfunc - Function to query attribute of"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cuLaunchKernel,"] - #[doc = " ::cudaFuncGetAttributes"] - #[doc = " ::cudaFuncSetAttribute"] pub fn cuFuncGetAttribute( pi: *mut ::std::os::raw::c_int, attrib: CUfunction_attribute, @@ -7488,48 +2169,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets information about a function"] - #[doc = ""] - #[doc = " This call sets the value of a specified attribute \\p attrib on the kernel given"] - #[doc = " by \\p hfunc to an integer value specified by \\p val"] - #[doc = " This function returns CUDA_SUCCESS if the new value of the attribute could be"] - #[doc = " successfully set. If the set fails, this call will return an error."] - #[doc = " Not all attributes can have values set. Attempting to set a value on a read-only"] - #[doc = " attribute will result in an error (CUDA_ERROR_INVALID_VALUE)"] - #[doc = ""] - #[doc = " Supported attributes for the cuFuncSetAttribute call are:"] - #[doc = " - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: This maximum size in bytes of"] - #[doc = " dynamically-allocated shared memory. The value should contain the requested"] - #[doc = " maximum size of dynamically-allocated shared memory. 
The sum of this value and"] - #[doc = " the function attribute ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES cannot exceed the"] - #[doc = " device attribute ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN."] - #[doc = " The maximal size of requestable dynamic shared memory may differ by GPU"] - #[doc = " architecture."] - #[doc = " - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: On devices where the L1"] - #[doc = " cache and shared memory use the same hardware resources, this sets the shared memory"] - #[doc = " carveout preference, in percent of the total shared memory."] - #[doc = " See ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR"] - #[doc = " This is only a hint, and the driver can choose a different ratio if required to execute the function."] - #[doc = ""] - #[doc = " \\param hfunc - Function to query attribute of"] - #[doc = " \\param attrib - Attribute requested"] - #[doc = " \\param value - The value to set"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cuLaunchKernel,"] - #[doc = " ::cudaFuncGetAttributes"] - #[doc = " ::cudaFuncSetAttribute"] pub fn cuFuncSetAttribute( hfunc: CUfunction, attrib: CUfunction_attribute, @@ -7537,214 +2176,18 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the preferred cache configuration for a device function"] - #[doc = ""] - #[doc = " On devices where the L1 cache and shared memory use the same hardware"] - #[doc = " resources, this sets through \\p config the preferred cache configuration for"] - #[doc = " the device function \\p hfunc. This is only a preference. The driver will use"] - #[doc = " the requested configuration if possible, but it is free to choose a different"] - #[doc = " configuration if required to execute \\p hfunc. Any context-wide preference"] - #[doc = " set via ::cuCtxSetCacheConfig() will be overridden by this per-function"] - #[doc = " setting unless the per-function setting is ::CU_FUNC_CACHE_PREFER_NONE. 
In"] - #[doc = " that case, the current context-wide setting will be used."] - #[doc = ""] - #[doc = " This setting does nothing on devices where the size of the L1 cache and"] - #[doc = " shared memory are fixed."] - #[doc = ""] - #[doc = " Launching a kernel with a different preference than the most recent"] - #[doc = " preference setting may insert a device-side synchronization point."] - #[doc = ""] - #[doc = ""] - #[doc = " The supported cache configurations are:"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default)"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory"] - #[doc = " - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory"] - #[doc = ""] - #[doc = " \\param hfunc - Kernel to configure cache for"] - #[doc = " \\param config - Requested cache configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuLaunchKernel,"] - #[doc = " ::cudaFuncSetCacheConfig"] pub fn cuFuncSetCacheConfig( hfunc: CUfunction, config: CUfunc_cache, ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the shared memory configuration for a device function."] - #[doc = ""] - #[doc = " On devices with configurable shared memory banks, this function will"] - #[doc = " force all subsequent launches of the specified device function to have"] - #[doc = " the given shared memory bank size configuration. On any given launch of the"] - #[doc = " function, the shared memory configuration of the device will be temporarily"] - #[doc = " changed if needed to suit the function's preferred configuration. 
Changes in"] - #[doc = " shared memory configuration between subsequent launches of functions,"] - #[doc = " may introduce a device side synchronization point."] - #[doc = ""] - #[doc = " Any per-function setting of shared memory bank size set via"] - #[doc = " ::cuFuncSetSharedMemConfig will override the context wide setting set with"] - #[doc = " ::cuCtxSetSharedMemConfig."] - #[doc = ""] - #[doc = " Changing the shared memory bank size will not increase shared memory usage"] - #[doc = " or affect occupancy of kernels, but may have major effects on performance."] - #[doc = " Larger bank sizes will allow for greater potential bandwidth to shared memory,"] - #[doc = " but will change what kinds of accesses to shared memory will result in bank"] - #[doc = " conflicts."] - #[doc = ""] - #[doc = " This function will do nothing on devices with fixed shared memory bank size."] - #[doc = ""] - #[doc = " The supported bank configurations are:"] - #[doc = " - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: use the context's shared memory"] - #[doc = " configuration when launching this function."] - #[doc = " - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to"] - #[doc = " be natively four bytes when launching this function."] - #[doc = " - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to"] - #[doc = " be natively eight bytes when launching this function."] - #[doc = ""] - #[doc = " \\param hfunc - kernel to be given a shared memory config"] - #[doc = " \\param config - requested shared memory configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuCtxGetSharedMemConfig,"] - #[doc = " ::cuCtxSetSharedMemConfig,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuLaunchKernel,"] - #[doc = " ::cudaFuncSetSharedMemConfig"] pub fn cuFuncSetSharedMemConfig( hfunc: CUfunction, config: CUsharedconfig, ) -> CUresult; } extern "C" { - #[doc = " \\brief Launches a CUDA function"] - #[doc = ""] - #[doc = " Invokes the kernel \\p f on a \\p gridDimX x \\p gridDimY x \\p gridDimZ"] - #[doc = " grid of blocks. Each block contains \\p blockDimX x \\p blockDimY x"] - #[doc = " \\p blockDimZ threads."] - #[doc = ""] - #[doc = " \\p sharedMemBytes sets the amount of dynamic shared memory that will be"] - #[doc = " available to each thread block."] - #[doc = ""] - #[doc = " Kernel parameters to \\p f can be specified in one of two ways:"] - #[doc = ""] - #[doc = " 1) Kernel parameters can be specified via \\p kernelParams. If \\p f"] - #[doc = " has N parameters, then \\p kernelParams needs to be an array of N"] - #[doc = " pointers. Each of \\p kernelParams[0] through \\p kernelParams[N-1]"] - #[doc = " must point to a region of memory from which the actual kernel"] - #[doc = " parameter will be copied. 
The number of kernel parameters and their"] - #[doc = " offsets and sizes do not need to be specified as that information is"] - #[doc = " retrieved directly from the kernel's image."] - #[doc = ""] - #[doc = " 2) Kernel parameters can also be packaged by the application into"] - #[doc = " a single buffer that is passed in via the \\p extra parameter."] - #[doc = " This places the burden on the application of knowing each kernel"] - #[doc = " parameter's size and alignment/padding within the buffer. Here is"] - #[doc = " an example of using the \\p extra parameter in this manner:"] - #[doc = " \\code"] - #[doc = "size_t argBufferSize;"] - #[doc = "char argBuffer[256];"] - #[doc = ""] - #[doc = ""] - #[doc = "void *config[] = {"] - #[doc = "CU_LAUNCH_PARAM_BUFFER_POINTER, argBuffer,"] - #[doc = "CU_LAUNCH_PARAM_BUFFER_SIZE, &argBufferSize,"] - #[doc = "CU_LAUNCH_PARAM_END"] - #[doc = "};"] - #[doc = "status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config);"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " The \\p extra parameter exists to allow ::cuLaunchKernel to take"] - #[doc = " additional less commonly used arguments. \\p extra specifies a list of"] - #[doc = " names of extra settings and their corresponding values. Each extra"] - #[doc = " setting name is immediately followed by the corresponding value. The"] - #[doc = " list must be terminated with either NULL or ::CU_LAUNCH_PARAM_END."] - #[doc = ""] - #[doc = " - ::CU_LAUNCH_PARAM_END, which indicates the end of the \\p extra"] - #[doc = " array;"] - #[doc = " - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next"] - #[doc = " value in \\p extra will be a pointer to a buffer containing all"] - #[doc = " the kernel parameters for launching kernel \\p f;"] - #[doc = " - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next"] - #[doc = " value in \\p extra will be a pointer to a size_t containing the"] - #[doc = " size of the buffer specified with ::CU_LAUNCH_PARAM_BUFFER_POINTER;"] - #[doc = ""] - #[doc = " The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel"] - #[doc = " parameters are specified with both \\p kernelParams and \\p extra"] - #[doc = " (i.e. 
both \\p kernelParams and \\p extra are non-NULL)."] - #[doc = ""] - #[doc = " Calling ::cuLaunchKernel() sets persistent function state that is"] - #[doc = " the same as function state set through the following deprecated APIs:"] - #[doc = " ::cuFuncSetBlockShape(),"] - #[doc = " ::cuFuncSetSharedSize(),"] - #[doc = " ::cuParamSetSize(),"] - #[doc = " ::cuParamSeti(),"] - #[doc = " ::cuParamSetf(),"] - #[doc = " ::cuParamSetv()."] - #[doc = ""] - #[doc = " When the kernel \\p f is launched via ::cuLaunchKernel(), the previous"] - #[doc = " block shape, shared size and parameter info associated with \\p f"] - #[doc = " is overwritten."] - #[doc = ""] - #[doc = " Note that to use ::cuLaunchKernel(), the kernel \\p f must either have"] - #[doc = " been compiled with toolchain version 3.2 or later so that it will"] - #[doc = " contain kernel parameter information, or have no kernel parameters."] - #[doc = " If either of these conditions is not met, then ::cuLaunchKernel() will"] - #[doc = " return ::CUDA_ERROR_INVALID_IMAGE."] - #[doc = ""] - #[doc = " \\param f - Kernel to launch"] - #[doc = " \\param gridDimX - Width of grid in blocks"] - #[doc = " \\param gridDimY - Height of grid in blocks"] - #[doc = " \\param gridDimZ - Depth of grid in blocks"] - #[doc = " \\param blockDimX - X dimension of each thread block"] - #[doc = " \\param blockDimY - Y dimension of each thread block"] - #[doc = " \\param blockDimZ - Z dimension of each thread block"] - #[doc = " \\param sharedMemBytes - Dynamic shared-memory size per thread block in bytes"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = " \\param kernelParams - Array of pointers to kernel parameters"] - #[doc = " \\param extra - Extra options"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_IMAGE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_LAUNCH_FAILED,"] - #[doc = " ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,"] - #[doc = " ::CUDA_ERROR_LAUNCH_TIMEOUT,"] - #[doc = " ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cudaLaunchKernel"] pub fn cuLaunchKernel( f: CUfunction, gridDimX: ::std::os::raw::c_uint, @@ -7760,80 +2203,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Launches a CUDA function where thread blocks can cooperate and synchronize as they execute"] - #[doc = ""] - #[doc = " Invokes the kernel \\p f on a \\p gridDimX x \\p gridDimY x \\p gridDimZ"] - #[doc = " grid of blocks. 
Each block contains \\p blockDimX x \\p blockDimY x"] - #[doc = " \\p blockDimZ threads."] - #[doc = ""] - #[doc = " \\p sharedMemBytes sets the amount of dynamic shared memory that will be"] - #[doc = " available to each thread block."] - #[doc = ""] - #[doc = " The device on which this kernel is invoked must have a non-zero value for"] - #[doc = " the device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH."] - #[doc = ""] - #[doc = " The total number of blocks launched cannot exceed the maximum number of blocks per"] - #[doc = " multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or"] - #[doc = " ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors"] - #[doc = " as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT."] - #[doc = ""] - #[doc = " The kernel cannot make use of CUDA dynamic parallelism."] - #[doc = ""] - #[doc = " Kernel parameters must be specified via \\p kernelParams. If \\p f"] - #[doc = " has N parameters, then \\p kernelParams needs to be an array of N"] - #[doc = " pointers. Each of \\p kernelParams[0] through \\p kernelParams[N-1]"] - #[doc = " must point to a region of memory from which the actual kernel"] - #[doc = " parameter will be copied. The number of kernel parameters and their"] - #[doc = " offsets and sizes do not need to be specified as that information is"] - #[doc = " retrieved directly from the kernel's image."] - #[doc = ""] - #[doc = " Calling ::cuLaunchCooperativeKernel() sets persistent function state that is"] - #[doc = " the same as function state set through ::cuLaunchKernel API"] - #[doc = ""] - #[doc = " When the kernel \\p f is launched via ::cuLaunchCooperativeKernel(), the previous"] - #[doc = " block shape, shared size and parameter info associated with \\p f"] - #[doc = " is overwritten."] - #[doc = ""] - #[doc = " Note that to use ::cuLaunchCooperativeKernel(), the kernel \\p f must either have"] - #[doc = " been compiled with toolchain version 3.2 or later so that it will"] - #[doc = " contain kernel parameter information, or have no kernel parameters."] - #[doc = " If either of these conditions is not met, then ::cuLaunchCooperativeKernel() will"] - #[doc = " return ::CUDA_ERROR_INVALID_IMAGE."] - #[doc = ""] - #[doc = " \\param f - Kernel to launch"] - #[doc = " \\param gridDimX - Width of grid in blocks"] - #[doc = " \\param gridDimY - Height of grid in blocks"] - #[doc = " \\param gridDimZ - Depth of grid in blocks"] - #[doc = " \\param blockDimX - X dimension of each thread block"] - #[doc = " \\param blockDimY - Y dimension of each thread block"] - #[doc = " \\param blockDimZ - Z dimension of each thread block"] - #[doc = " \\param sharedMemBytes - Dynamic shared-memory size per thread block in bytes"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = " \\param kernelParams - Array of pointers to kernel parameters"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_IMAGE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_LAUNCH_FAILED,"] - #[doc = " ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,"] - #[doc = " ::CUDA_ERROR_LAUNCH_TIMEOUT,"] - #[doc = " ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,"] - #[doc = " ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE,"] - #[doc = " 
::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuLaunchCooperativeKernelMultiDevice,"] - #[doc = " ::cudaLaunchCooperativeKernel"] pub fn cuLaunchCooperativeKernel( f: CUfunction, gridDimX: ::std::os::raw::c_uint, @@ -7848,137 +2217,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Launches CUDA functions on multiple devices where thread blocks can cooperate and synchronize as they execute"] - #[doc = ""] - #[doc = " Invokes kernels as specified in the \\p launchParamsList array where each element"] - #[doc = " of the array specifies all the parameters required to perform a single kernel launch."] - #[doc = " These kernels can cooperate and synchronize as they execute. The size of the array is"] - #[doc = " specified by \\p numDevices."] - #[doc = ""] - #[doc = " No two kernels can be launched on the same device. All the devices targeted by this"] - #[doc = " multi-device launch must be identical. All devices must have a non-zero value for the"] - #[doc = " device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH."] - #[doc = ""] - #[doc = " All kernels launched must be identical with respect to the compiled code. Note that"] - #[doc = " any __device__, __constant__ or __managed__ variables present in the module that owns"] - #[doc = " the kernel launched on each device, are independently instantiated on every device."] - #[doc = " It is the application's responsiblity to ensure these variables are initialized and"] - #[doc = " used appropriately."] - #[doc = ""] - #[doc = " The size of the grids as specified in blocks, the size of the blocks themselves"] - #[doc = " and the amount of shared memory used by each thread block must also match across"] - #[doc = " all launched kernels."] - #[doc = ""] - #[doc = " The streams used to launch these kernels must have been created via either ::cuStreamCreate"] - #[doc = " or ::cuStreamCreateWithPriority. The NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD"] - #[doc = " cannot be used."] - #[doc = ""] - #[doc = " The total number of blocks launched per kernel cannot exceed the maximum number of blocks"] - #[doc = " per multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or"] - #[doc = " ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors"] - #[doc = " as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. 
Since the"] - #[doc = " total number of blocks launched per device has to match across all devices, the maximum"] - #[doc = " number of blocks that can be launched per device will be limited by the device with the"] - #[doc = " least number of multiprocessors."] - #[doc = ""] - #[doc = " The kernels cannot make use of CUDA dynamic parallelism."] - #[doc = ""] - #[doc = " The ::CUDA_LAUNCH_PARAMS structure is defined as:"] - #[doc = " \\code"] - #[doc = "typedef struct CUDA_LAUNCH_PARAMS_st"] - #[doc = "{"] - #[doc = "CUfunction function;"] - #[doc = "unsigned int gridDimX;"] - #[doc = "unsigned int gridDimY;"] - #[doc = "unsigned int gridDimZ;"] - #[doc = "unsigned int blockDimX;"] - #[doc = "unsigned int blockDimY;"] - #[doc = "unsigned int blockDimZ;"] - #[doc = "unsigned int sharedMemBytes;"] - #[doc = "CUstream hStream;"] - #[doc = "void **kernelParams;"] - #[doc = "} CUDA_LAUNCH_PARAMS;"] - #[doc = " \\endcode"] - #[doc = " where:"] - #[doc = " - ::CUDA_LAUNCH_PARAMS::function specifies the kernel to be launched. All functions must"] - #[doc = " be identical with respect to the compiled code."] - #[doc = " - ::CUDA_LAUNCH_PARAMS::gridDimX is the width of the grid in blocks. This must match across"] - #[doc = " all kernels launched."] - #[doc = " - ::CUDA_LAUNCH_PARAMS::gridDimY is the height of the grid in blocks. This must match across"] - #[doc = " all kernels launched."] - #[doc = " - ::CUDA_LAUNCH_PARAMS::gridDimZ is the depth of the grid in blocks. This must match across"] - #[doc = " all kernels launched."] - #[doc = " - ::CUDA_LAUNCH_PARAMS::blockDimX is the X dimension of each thread block. This must match across"] - #[doc = " all kernels launched."] - #[doc = " - ::CUDA_LAUNCH_PARAMS::blockDimX is the Y dimension of each thread block. This must match across"] - #[doc = " all kernels launched."] - #[doc = " - ::CUDA_LAUNCH_PARAMS::blockDimZ is the Z dimension of each thread block. This must match across"] - #[doc = " all kernels launched."] - #[doc = " - ::CUDA_LAUNCH_PARAMS::sharedMemBytes is the dynamic shared-memory size per thread block in bytes."] - #[doc = " This must match across all kernels launched."] - #[doc = " - ::CUDA_LAUNCH_PARAMS::hStream is the handle to the stream to perform the launch in. This cannot"] - #[doc = " be the NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD. The CUDA context associated"] - #[doc = " with this stream must match that associated with ::CUDA_LAUNCH_PARAMS::function."] - #[doc = " - ::CUDA_LAUNCH_PARAMS::kernelParams is an array of pointers to kernel parameters. If"] - #[doc = " ::CUDA_LAUNCH_PARAMS::function has N parameters, then ::CUDA_LAUNCH_PARAMS::kernelParams"] - #[doc = " needs to be an array of N pointers. Each of ::CUDA_LAUNCH_PARAMS::kernelParams[0] through"] - #[doc = " ::CUDA_LAUNCH_PARAMS::kernelParams[N-1] must point to a region of memory from which the actual"] - #[doc = " kernel parameter will be copied. The number of kernel parameters and their offsets and sizes"] - #[doc = " do not need to be specified as that information is retrieved directly from the kernel's image."] - #[doc = ""] - #[doc = " By default, the kernel won't begin execution on any GPU until all prior work in all the specified"] - #[doc = " streams has completed. This behavior can be overridden by specifying the flag"] - #[doc = " ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC. 
When this flag is specified, each kernel"] - #[doc = " will only wait for prior work in the stream corresponding to that GPU to complete before it begins"] - #[doc = " execution."] - #[doc = ""] - #[doc = " Similarly, by default, any subsequent work pushed in any of the specified streams will not begin"] - #[doc = " execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying"] - #[doc = " the flag ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC. When this flag is specified,"] - #[doc = " any subsequent work pushed in any of the specified streams will only wait for the kernel launched"] - #[doc = " on the GPU corresponding to that stream to complete before it begins execution."] - #[doc = ""] - #[doc = " Calling ::cuLaunchCooperativeKernelMultiDevice() sets persistent function state that is"] - #[doc = " the same as function state set through ::cuLaunchKernel API when called individually for each"] - #[doc = " element in \\p launchParamsList."] - #[doc = ""] - #[doc = " When kernels are launched via ::cuLaunchCooperativeKernelMultiDevice(), the previous"] - #[doc = " block shape, shared size and parameter info associated with each ::CUDA_LAUNCH_PARAMS::function"] - #[doc = " in \\p launchParamsList is overwritten."] - #[doc = ""] - #[doc = " Note that to use ::cuLaunchCooperativeKernelMultiDevice(), the kernels must either have"] - #[doc = " been compiled with toolchain version 3.2 or later so that it will"] - #[doc = " contain kernel parameter information, or have no kernel parameters."] - #[doc = " If either of these conditions is not met, then ::cuLaunchCooperativeKernelMultiDevice() will"] - #[doc = " return ::CUDA_ERROR_INVALID_IMAGE."] - #[doc = ""] - #[doc = " \\param launchParamsList - List of launch parameters, one per device"] - #[doc = " \\param numDevices - Size of the \\p launchParamsList array"] - #[doc = " \\param flags - Flags to control launch behavior"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_IMAGE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_LAUNCH_FAILED,"] - #[doc = " ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,"] - #[doc = " ::CUDA_ERROR_LAUNCH_TIMEOUT,"] - #[doc = " ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,"] - #[doc = " ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuCtxGetCacheConfig,"] - #[doc = " ::cuCtxSetCacheConfig,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuLaunchCooperativeKernel,"] - #[doc = " ::cudaLaunchCooperativeKernelMultiDevice"] pub fn cuLaunchCooperativeKernelMultiDevice( launchParamsList: *mut CUDA_LAUNCH_PARAMS, numDevices: ::std::os::raw::c_uint, @@ -7986,67 +2224,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Enqueues a host function call in a stream"] - #[doc = ""] - #[doc = " Enqueues a host function to run in a stream. The function will be called"] - #[doc = " after currently enqueued work and will block work added after it."] - #[doc = ""] - #[doc = " The host function must not make any CUDA API calls. 
Attempting to use a"] - #[doc = " CUDA API may result in ::CUDA_ERROR_NOT_PERMITTED, but this is not required."] - #[doc = " The host function must not perform any synchronization that may depend on"] - #[doc = " outstanding CUDA work not mandated to run earlier. Host functions without a"] - #[doc = " mandated order (such as in independent streams) execute in undefined order"] - #[doc = " and may be serialized."] - #[doc = ""] - #[doc = " For the purposes of Unified Memory, execution makes a number of guarantees:"] - #[doc = " "] - #[doc = ""] - #[doc = " Note that, in contrast to ::cuStreamAddCallback, the function will not be"] - #[doc = " called in the event of an error in the CUDA context."] - #[doc = ""] - #[doc = " \\param hStream - Stream to enqueue function call in"] - #[doc = " \\param fn - The function to call once preceding stream operations are complete"] - #[doc = " \\param userData - User-specified data to be passed to the function"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_NOT_SUPPORTED"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuStreamCreate,"] - #[doc = " ::cuStreamQuery,"] - #[doc = " ::cuStreamSynchronize,"] - #[doc = " ::cuStreamWaitEvent,"] - #[doc = " ::cuStreamDestroy,"] - #[doc = " ::cuMemAllocManaged,"] - #[doc = " ::cuStreamAttachMemAsync,"] - #[doc = " ::cuStreamAddCallback"] pub fn cuLaunchHostFunc( hStream: CUstream, fn_: CUhostFn, @@ -8054,38 +2231,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the block-dimensions for the function"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Specifies the \\p x, \\p y, and \\p z dimensions of the thread blocks that are"] - #[doc = " created when the kernel given by \\p hfunc is launched."] - #[doc = ""] - #[doc = " \\param hfunc - Kernel to specify dimensions of"] - #[doc = " \\param x - X dimension"] - #[doc = " \\param y - Y dimension"] - #[doc = " \\param z - Z dimension"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuFuncSetSharedSize,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuParamSetSize,"] - #[doc = " ::cuParamSeti,"] - #[doc = " ::cuParamSetf,"] - #[doc = " ::cuParamSetv,"] - #[doc = " ::cuLaunch,"] - #[doc = " ::cuLaunchGrid,"] - #[doc = " ::cuLaunchGridAsync,"] - #[doc = " ::cuLaunchKernel"] pub fn cuFuncSetBlockShape( hfunc: CUfunction, x: ::std::os::raw::c_int, @@ -8094,105 +2239,18 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the dynamic shared-memory size for the function"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Sets through \\p bytes the amount of dynamic shared memory that will be"] - #[doc = " available to each thread block when the kernel given by \\p hfunc is launched."] - #[doc = ""] - #[doc = " \\param hfunc - Kernel to specify dynamic shared-memory size for"] - #[doc = " \\param bytes - Dynamic shared-memory size per thread in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " 
::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuFuncSetBlockShape,"] - #[doc = " ::cuFuncSetCacheConfig,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuParamSetSize,"] - #[doc = " ::cuParamSeti,"] - #[doc = " ::cuParamSetf,"] - #[doc = " ::cuParamSetv,"] - #[doc = " ::cuLaunch,"] - #[doc = " ::cuLaunchGrid,"] - #[doc = " ::cuLaunchGridAsync,"] - #[doc = " ::cuLaunchKernel"] pub fn cuFuncSetSharedSize( hfunc: CUfunction, bytes: ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the parameter size for the function"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Sets through \\p numbytes the total size in bytes needed by the function"] - #[doc = " parameters of the kernel corresponding to \\p hfunc."] - #[doc = ""] - #[doc = " \\param hfunc - Kernel to set parameter size for"] - #[doc = " \\param numbytes - Size of parameter list in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuFuncSetBlockShape,"] - #[doc = " ::cuFuncSetSharedSize,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuParamSetf,"] - #[doc = " ::cuParamSeti,"] - #[doc = " ::cuParamSetv,"] - #[doc = " ::cuLaunch,"] - #[doc = " ::cuLaunchGrid,"] - #[doc = " ::cuLaunchGridAsync,"] - #[doc = " ::cuLaunchKernel"] pub fn cuParamSetSize( hfunc: CUfunction, numbytes: ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Adds an integer parameter to the function's argument list"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Sets an integer parameter that will be specified the next time the"] - #[doc = " kernel corresponding to \\p hfunc will be invoked. \\p offset is a byte offset."] - #[doc = ""] - #[doc = " \\param hfunc - Kernel to add parameter to"] - #[doc = " \\param offset - Offset to add parameter to argument list"] - #[doc = " \\param value - Value of parameter"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuFuncSetBlockShape,"] - #[doc = " ::cuFuncSetSharedSize,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuParamSetSize,"] - #[doc = " ::cuParamSetf,"] - #[doc = " ::cuParamSetv,"] - #[doc = " ::cuLaunch,"] - #[doc = " ::cuLaunchGrid,"] - #[doc = " ::cuLaunchGridAsync,"] - #[doc = " ::cuLaunchKernel"] pub fn cuParamSeti( hfunc: CUfunction, offset: ::std::os::raw::c_int, @@ -8200,35 +2258,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Adds a floating-point parameter to the function's argument list"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Sets a floating-point parameter that will be specified the next time the"] - #[doc = " kernel corresponding to \\p hfunc will be invoked. 
\\p offset is a byte offset."] - #[doc = ""] - #[doc = " \\param hfunc - Kernel to add parameter to"] - #[doc = " \\param offset - Offset to add parameter to argument list"] - #[doc = " \\param value - Value of parameter"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuFuncSetBlockShape,"] - #[doc = " ::cuFuncSetSharedSize,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuParamSetSize,"] - #[doc = " ::cuParamSeti,"] - #[doc = " ::cuParamSetv,"] - #[doc = " ::cuLaunch,"] - #[doc = " ::cuLaunchGrid,"] - #[doc = " ::cuLaunchGridAsync,"] - #[doc = " ::cuLaunchKernel"] pub fn cuParamSetf( hfunc: CUfunction, offset: ::std::os::raw::c_int, @@ -8236,37 +2265,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Adds arbitrary data to the function's argument list"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Copies an arbitrary amount of data (specified in \\p numbytes) from \\p ptr"] - #[doc = " into the parameter space of the kernel corresponding to \\p hfunc. \\p offset"] - #[doc = " is a byte offset."] - #[doc = ""] - #[doc = " \\param hfunc - Kernel to add data to"] - #[doc = " \\param offset - Offset to add data to argument list"] - #[doc = " \\param ptr - Pointer to arbitrary data"] - #[doc = " \\param numbytes - Size of data to copy in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuFuncSetBlockShape,"] - #[doc = " ::cuFuncSetSharedSize,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuParamSetSize,"] - #[doc = " ::cuParamSetf,"] - #[doc = " ::cuParamSeti,"] - #[doc = " ::cuLaunch,"] - #[doc = " ::cuLaunchGrid,"] - #[doc = " ::cuLaunchGridAsync,"] - #[doc = " ::cuLaunchKernel"] pub fn cuParamSetv( hfunc: CUfunction, offset: ::std::os::raw::c_int, @@ -8275,77 +2273,9 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Launches a CUDA function"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Invokes the kernel \\p f on a 1 x 1 x 1 grid of blocks. 
The block"] - #[doc = " contains the number of threads specified by a previous call to"] - #[doc = " ::cuFuncSetBlockShape()."] - #[doc = ""] - #[doc = " \\param f - Kernel to launch"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_LAUNCH_FAILED,"] - #[doc = " ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,"] - #[doc = " ::CUDA_ERROR_LAUNCH_TIMEOUT,"] - #[doc = " ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuFuncSetBlockShape,"] - #[doc = " ::cuFuncSetSharedSize,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuParamSetSize,"] - #[doc = " ::cuParamSetf,"] - #[doc = " ::cuParamSeti,"] - #[doc = " ::cuParamSetv,"] - #[doc = " ::cuLaunchGrid,"] - #[doc = " ::cuLaunchGridAsync,"] - #[doc = " ::cuLaunchKernel"] pub fn cuLaunch(f: CUfunction) -> CUresult; } extern "C" { - #[doc = " \\brief Launches a CUDA function"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Invokes the kernel \\p f on a \\p grid_width x \\p grid_height grid of"] - #[doc = " blocks. Each block contains the number of threads specified by a previous"] - #[doc = " call to ::cuFuncSetBlockShape()."] - #[doc = ""] - #[doc = " \\param f - Kernel to launch"] - #[doc = " \\param grid_width - Width of grid in blocks"] - #[doc = " \\param grid_height - Height of grid in blocks"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_LAUNCH_FAILED,"] - #[doc = " ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,"] - #[doc = " ::CUDA_ERROR_LAUNCH_TIMEOUT,"] - #[doc = " ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuFuncSetBlockShape,"] - #[doc = " ::cuFuncSetSharedSize,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuParamSetSize,"] - #[doc = " ::cuParamSetf,"] - #[doc = " ::cuParamSeti,"] - #[doc = " ::cuParamSetv,"] - #[doc = " ::cuLaunch,"] - #[doc = " ::cuLaunchGridAsync,"] - #[doc = " ::cuLaunchKernel"] pub fn cuLaunchGrid( f: CUfunction, grid_width: ::std::os::raw::c_int, @@ -8353,49 +2283,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Launches a CUDA function"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Invokes the kernel \\p f on a \\p grid_width x \\p grid_height grid of"] - #[doc = " blocks. 
Each block contains the number of threads specified by a previous"] - #[doc = " call to ::cuFuncSetBlockShape()."] - #[doc = ""] - #[doc = " \\param f - Kernel to launch"] - #[doc = " \\param grid_width - Width of grid in blocks"] - #[doc = " \\param grid_height - Height of grid in blocks"] - #[doc = " \\param hStream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_LAUNCH_FAILED,"] - #[doc = " ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES,"] - #[doc = " ::CUDA_ERROR_LAUNCH_TIMEOUT,"] - #[doc = " ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,"] - #[doc = " ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED"] - #[doc = ""] - #[doc = " \\note In certain cases where cubins are created with no ABI (i.e., using \\p ptxas \\p --abi-compile \\p no),"] - #[doc = " this function may serialize kernel launches. In order to force the CUDA driver to retain"] - #[doc = " asynchronous behavior, set the ::CU_CTX_LMEM_RESIZE_TO_MAX flag during context creation (see ::cuCtxCreate)."] - #[doc = ""] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa ::cuFuncSetBlockShape,"] - #[doc = " ::cuFuncSetSharedSize,"] - #[doc = " ::cuFuncGetAttribute,"] - #[doc = " ::cuParamSetSize,"] - #[doc = " ::cuParamSetf,"] - #[doc = " ::cuParamSeti,"] - #[doc = " ::cuParamSetv,"] - #[doc = " ::cuLaunch,"] - #[doc = " ::cuLaunchGrid,"] - #[doc = " ::cuLaunchKernel"] pub fn cuLaunchGridAsync( f: CUfunction, grid_width: ::std::os::raw::c_int, @@ -8404,26 +2291,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Adds a texture-reference to the function's argument list"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Makes the CUDA array or linear memory bound to the texture reference"] - #[doc = " \\p hTexRef available to a device program as a texture. 
In this version of"] - #[doc = " CUDA, the texture-reference must be obtained via ::cuModuleGetTexRef() and"] - #[doc = " the \\p texunit parameter must be set to ::CU_PARAM_TR_DEFAULT."] - #[doc = ""] - #[doc = " \\param hfunc - Kernel to add texture-reference to"] - #[doc = " \\param texunit - Texture unit (must be ::CU_PARAM_TR_DEFAULT)"] - #[doc = " \\param hTexRef - Texture-reference to add to argument list"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] pub fn cuParamSetTexRef( hfunc: CUfunction, texunit: ::std::os::raw::c_int, @@ -8431,136 +2298,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates a graph"] - #[doc = ""] - #[doc = " Creates an empty graph, which is returned via \\p phGraph."] - #[doc = ""] - #[doc = " \\param phGraph - Returns newly created graph"] - #[doc = " \\param flags - Graph creation flags, must be 0"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphAddChildGraphNode,"] - #[doc = " ::cuGraphAddEmptyNode,"] - #[doc = " ::cuGraphAddKernelNode,"] - #[doc = " ::cuGraphAddHostNode,"] - #[doc = " ::cuGraphAddMemcpyNode,"] - #[doc = " ::cuGraphAddMemsetNode,"] - #[doc = " ::cuGraphInstantiate,"] - #[doc = " ::cuGraphDestroy,"] - #[doc = " ::cuGraphGetNodes,"] - #[doc = " ::cuGraphGetRootNodes,"] - #[doc = " ::cuGraphGetEdges,"] - #[doc = " ::cuGraphClone"] pub fn cuGraphCreate( phGraph: *mut CUgraph, flags: ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates a kernel execution node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new kernel execution node and adds it to \\p hGraph with \\p numDependencies"] - #[doc = " dependencies specified via \\p dependencies and arguments specified in \\p nodeParams."] - #[doc = " It is possible for \\p numDependencies to be 0, in which case the node will be placed"] - #[doc = " at the root of the graph. \\p dependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p phGraphNode."] - #[doc = ""] - #[doc = " The CUDA_KERNEL_NODE_PARAMS structure is defined as:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = " typedef struct CUDA_KERNEL_NODE_PARAMS_st {"] - #[doc = " CUfunction func;"] - #[doc = " unsigned int gridDimX;"] - #[doc = " unsigned int gridDimY;"] - #[doc = " unsigned int gridDimZ;"] - #[doc = " unsigned int blockDimX;"] - #[doc = " unsigned int blockDimY;"] - #[doc = " unsigned int blockDimZ;"] - #[doc = " unsigned int sharedMemBytes;"] - #[doc = " void **kernelParams;"] - #[doc = " void **extra;"] - #[doc = " } CUDA_KERNEL_NODE_PARAMS;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " When the graph is launched, the node will invoke kernel \\p func on a (\\p gridDimX x"] - #[doc = " \\p gridDimY x \\p gridDimZ) grid of blocks. 
Each block contains"] - #[doc = " (\\p blockDimX x \\p blockDimY x \\p blockDimZ) threads."] - #[doc = ""] - #[doc = " \\p sharedMemBytes sets the amount of dynamic shared memory that will be"] - #[doc = " available to each thread block."] - #[doc = ""] - #[doc = " Kernel parameters to \\p func can be specified in one of two ways:"] - #[doc = ""] - #[doc = " 1) Kernel parameters can be specified via \\p kernelParams. If the kernel has N"] - #[doc = " parameters, then \\p kernelParams needs to be an array of N pointers. Each pointer,"] - #[doc = " from \\p kernelParams[0] to \\p kernelParams[N-1], points to the region of memory from which the actual"] - #[doc = " parameter will be copied. The number of kernel parameters and their offsets and sizes do not need"] - #[doc = " to be specified as that information is retrieved directly from the kernel's image."] - #[doc = ""] - #[doc = " 2) Kernel parameters can also be packaged by the application into a single buffer that is passed in"] - #[doc = " via \\p extra. This places the burden on the application of knowing each kernel"] - #[doc = " parameter's size and alignment/padding within the buffer. The \\p extra parameter exists"] - #[doc = " to allow this function to take additional less commonly used arguments. \\p extra specifies"] - #[doc = " a list of names of extra settings and their corresponding values. Each extra setting name is"] - #[doc = " immediately followed by the corresponding value. The list must be terminated with either NULL or"] - #[doc = " CU_LAUNCH_PARAM_END."] - #[doc = ""] - #[doc = " - ::CU_LAUNCH_PARAM_END, which indicates the end of the \\p extra"] - #[doc = " array;"] - #[doc = " - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next"] - #[doc = " value in \\p extra will be a pointer to a buffer"] - #[doc = " containing all the kernel parameters for launching kernel"] - #[doc = " \\p func;"] - #[doc = " - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next"] - #[doc = " value in \\p extra will be a pointer to a size_t"] - #[doc = " containing the size of the buffer specified with"] - #[doc = " ::CU_LAUNCH_PARAM_BUFFER_POINTER;"] - #[doc = ""] - #[doc = " The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel parameters are specified with both"] - #[doc = " \\p kernelParams and \\p extra (i.e. both \\p kernelParams and"] - #[doc = " \\p extra are non-NULL)."] - #[doc = ""] - #[doc = " The \\p kernelParams or \\p extra array, as well as the argument values it points to,"] - #[doc = " are copied during this call."] - #[doc = ""] - #[doc = " \\note Kernels launched using graphs must not use texture and surface references. 
Reading or"] - #[doc = " writing through any texture or surface reference is undefined behavior."] - #[doc = " This restriction does not apply to texture and surface objects."] - #[doc = ""] - #[doc = " \\param phGraphNode - Returns newly created node"] - #[doc = " \\param hGraph - Graph to which to add the node"] - #[doc = " \\param dependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = " \\param nodeParams - Parameters for the GPU execution node"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuLaunchKernel,"] - #[doc = " ::cuGraphKernelNodeGetParams,"] - #[doc = " ::cuGraphKernelNodeSetParams,"] - #[doc = " ::cuGraphCreate,"] - #[doc = " ::cuGraphDestroyNode,"] - #[doc = " ::cuGraphAddChildGraphNode,"] - #[doc = " ::cuGraphAddEmptyNode,"] - #[doc = " ::cuGraphAddHostNode,"] - #[doc = " ::cuGraphAddMemcpyNode,"] - #[doc = " ::cuGraphAddMemsetNode"] pub fn cuGraphAddKernelNode( phGraphNode: *mut CUgraphNode, hGraph: CUgraph, @@ -8570,109 +2313,18 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a kernel node's parameters"] - #[doc = ""] - #[doc = " Returns the parameters of kernel node \\p hNode in \\p nodeParams."] - #[doc = " The \\p kernelParams or \\p extra array returned in \\p nodeParams,"] - #[doc = " as well as the argument values it points to, are owned by the node."] - #[doc = " This memory remains valid until the node is destroyed or its"] - #[doc = " parameters are modified, and should not be modified"] - #[doc = " directly. 
Use ::cuGraphKernelNodeSetParams to update the"] - #[doc = " parameters of this node."] - #[doc = ""] - #[doc = " The params will contain either \\p kernelParams or \\p extra,"] - #[doc = " according to which of these was most recently set on the node."] - #[doc = ""] - #[doc = " \\param hNode - Node to get the parameters for"] - #[doc = " \\param nodeParams - Pointer to return the parameters"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuLaunchKernel,"] - #[doc = " ::cuGraphAddKernelNode,"] - #[doc = " ::cuGraphKernelNodeSetParams"] pub fn cuGraphKernelNodeGetParams( hNode: CUgraphNode, nodeParams: *mut CUDA_KERNEL_NODE_PARAMS, ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets a kernel node's parameters"] - #[doc = ""] - #[doc = " Sets the parameters of kernel node \\p hNode to \\p nodeParams."] - #[doc = ""] - #[doc = " \\param hNode - Node to set the parameters for"] - #[doc = " \\param nodeParams - Parameters to copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuLaunchKernel,"] - #[doc = " ::cuGraphAddKernelNode,"] - #[doc = " ::cuGraphKernelNodeGetParams"] pub fn cuGraphKernelNodeSetParams( hNode: CUgraphNode, nodeParams: *const CUDA_KERNEL_NODE_PARAMS, ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates a memcpy node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new memcpy node and adds it to \\p hGraph with \\p numDependencies"] - #[doc = " dependencies specified via \\p dependencies."] - #[doc = " It is possible for \\p numDependencies to be 0, in which case the node will be placed"] - #[doc = " at the root of the graph. \\p dependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p phGraphNode."] - #[doc = ""] - #[doc = " When the graph is launched, the node will perform the memcpy described by \\p copyParams."] - #[doc = " See ::cuMemcpy3D() for a description of the structure and its restrictions."] - #[doc = ""] - #[doc = " Memcpy nodes have some additional restrictions with regards to managed memory, if the"] - #[doc = " system contains at least one device which has a zero value for the device attribute"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If one or more of the operands refer"] - #[doc = " to managed memory, then using the memory type ::CU_MEMORYTYPE_UNIFIED is disallowed"] - #[doc = " for those operand(s). 
The managed memory will be treated as residing on either the"] - #[doc = " host or the device, depending on which memory type is specified."] - #[doc = ""] - #[doc = " \\param phGraphNode - Returns newly created node"] - #[doc = " \\param hGraph - Graph to which to add the node"] - #[doc = " \\param dependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = " \\param copyParams - Parameters for the memory copy"] - #[doc = " \\param ctx - Context on which to run the node"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemcpy3D,"] - #[doc = " ::cuGraphMemcpyNodeGetParams,"] - #[doc = " ::cuGraphMemcpyNodeSetParams,"] - #[doc = " ::cuGraphCreate,"] - #[doc = " ::cuGraphDestroyNode,"] - #[doc = " ::cuGraphAddChildGraphNode,"] - #[doc = " ::cuGraphAddEmptyNode,"] - #[doc = " ::cuGraphAddKernelNode,"] - #[doc = " ::cuGraphAddHostNode,"] - #[doc = " ::cuGraphAddMemsetNode"] pub fn cuGraphAddMemcpyNode( phGraphNode: *mut CUgraphNode, hGraph: CUgraph, @@ -8683,94 +2335,18 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a memcpy node's parameters"] - #[doc = ""] - #[doc = " Returns the parameters of memcpy node \\p hNode in \\p nodeParams."] - #[doc = ""] - #[doc = " \\param hNode - Node to get the parameters for"] - #[doc = " \\param nodeParams - Pointer to return the parameters"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemcpy3D,"] - #[doc = " ::cuGraphAddMemcpyNode,"] - #[doc = " ::cuGraphMemcpyNodeSetParams"] pub fn cuGraphMemcpyNodeGetParams( hNode: CUgraphNode, nodeParams: *mut CUDA_MEMCPY3D, ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets a memcpy node's parameters"] - #[doc = ""] - #[doc = " Sets the parameters of memcpy node \\p hNode to \\p nodeParams."] - #[doc = ""] - #[doc = " \\param hNode - Node to set the parameters for"] - #[doc = " \\param nodeParams - Parameters to copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemcpy3D,"] - #[doc = " ::cuGraphAddMemcpyNode,"] - #[doc = " ::cuGraphMemcpyNodeGetParams"] pub fn cuGraphMemcpyNodeSetParams( hNode: CUgraphNode, nodeParams: *const CUDA_MEMCPY3D, ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates a memset node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new memset node and adds it to \\p hGraph with \\p numDependencies"] - #[doc = " dependencies specified via \\p dependencies."] - #[doc = " It is possible for \\p numDependencies to be 0, in which case the node will be placed"] - #[doc = " at the root of the graph. 
\\p dependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p phGraphNode."] - #[doc = ""] - #[doc = " The element size must be 1, 2, or 4 bytes."] - #[doc = " When the graph is launched, the node will perform the memset described by \\p memsetParams."] - #[doc = ""] - #[doc = " \\param phGraphNode - Returns newly created node"] - #[doc = " \\param hGraph - Graph to which to add the node"] - #[doc = " \\param dependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = " \\param memsetParams - Parameters for the memory set"] - #[doc = " \\param ctx - Context on which to run the node"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemsetD2D32,"] - #[doc = " ::cuGraphMemsetNodeGetParams,"] - #[doc = " ::cuGraphMemsetNodeSetParams,"] - #[doc = " ::cuGraphCreate,"] - #[doc = " ::cuGraphDestroyNode,"] - #[doc = " ::cuGraphAddChildGraphNode,"] - #[doc = " ::cuGraphAddEmptyNode,"] - #[doc = " ::cuGraphAddKernelNode,"] - #[doc = " ::cuGraphAddHostNode,"] - #[doc = " ::cuGraphAddMemcpyNode"] pub fn cuGraphAddMemsetNode( phGraphNode: *mut CUgraphNode, hGraph: CUgraph, @@ -8781,93 +2357,18 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a memset node's parameters"] - #[doc = ""] - #[doc = " Returns the parameters of memset node \\p hNode in \\p nodeParams."] - #[doc = ""] - #[doc = " \\param hNode - Node to get the parameters for"] - #[doc = " \\param nodeParams - Pointer to return the parameters"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemsetD2D32,"] - #[doc = " ::cuGraphAddMemsetNode,"] - #[doc = " ::cuGraphMemsetNodeSetParams"] pub fn cuGraphMemsetNodeGetParams( hNode: CUgraphNode, nodeParams: *mut CUDA_MEMSET_NODE_PARAMS, ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets a memset node's parameters"] - #[doc = ""] - #[doc = " Sets the parameters of memset node \\p hNode to \\p nodeParams."] - #[doc = ""] - #[doc = " \\param hNode - Node to set the parameters for"] - #[doc = " \\param nodeParams - Parameters to copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemsetD2D32,"] - #[doc = " ::cuGraphAddMemsetNode,"] - #[doc = " ::cuGraphMemsetNodeGetParams"] pub fn cuGraphMemsetNodeSetParams( hNode: CUgraphNode, nodeParams: *const CUDA_MEMSET_NODE_PARAMS, ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates a host execution node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new CPU execution node and adds it to \\p hGraph with \\p numDependencies"] - #[doc = " dependencies specified via \\p dependencies and arguments specified in \\p nodeParams."] - #[doc = " It is possible for \\p numDependencies to be 0, in which 
case the node will be placed"] - #[doc = " at the root of the graph. \\p dependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p phGraphNode."] - #[doc = ""] - #[doc = " When the graph is launched, the node will invoke the specified CPU function."] - #[doc = " Host nodes are not supported under MPS with pre-Volta GPUs."] - #[doc = ""] - #[doc = " \\param phGraphNode - Returns newly created node"] - #[doc = " \\param hGraph - Graph to which to add the node"] - #[doc = " \\param dependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = " \\param nodeParams - Parameters for the host node"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_SUPPORTED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuLaunchHostFunc,"] - #[doc = " ::cuGraphHostNodeGetParams,"] - #[doc = " ::cuGraphHostNodeSetParams,"] - #[doc = " ::cuGraphCreate,"] - #[doc = " ::cuGraphDestroyNode,"] - #[doc = " ::cuGraphAddChildGraphNode,"] - #[doc = " ::cuGraphAddEmptyNode,"] - #[doc = " ::cuGraphAddKernelNode,"] - #[doc = " ::cuGraphAddMemcpyNode,"] - #[doc = " ::cuGraphAddMemsetNode"] pub fn cuGraphAddHostNode( phGraphNode: *mut CUgraphNode, hGraph: CUgraph, @@ -8877,90 +2378,18 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a host node's parameters"] - #[doc = ""] - #[doc = " Returns the parameters of host node \\p hNode in \\p nodeParams."] - #[doc = ""] - #[doc = " \\param hNode - Node to get the parameters for"] - #[doc = " \\param nodeParams - Pointer to return the parameters"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuLaunchHostFunc,"] - #[doc = " ::cuGraphAddHostNode,"] - #[doc = " ::cuGraphHostNodeSetParams"] pub fn cuGraphHostNodeGetParams( hNode: CUgraphNode, nodeParams: *mut CUDA_HOST_NODE_PARAMS, ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets a host node's parameters"] - #[doc = ""] - #[doc = " Sets the parameters of host node \\p hNode to \\p nodeParams."] - #[doc = ""] - #[doc = " \\param hNode - Node to set the parameters for"] - #[doc = " \\param nodeParams - Parameters to copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuLaunchHostFunc,"] - #[doc = " ::cuGraphAddHostNode,"] - #[doc = " ::cuGraphHostNodeGetParams"] pub fn cuGraphHostNodeSetParams( hNode: CUgraphNode, nodeParams: *const CUDA_HOST_NODE_PARAMS, ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates a child graph node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new node which executes an embedded graph, and adds it to \\p hGraph with"] - #[doc = " \\p numDependencies dependencies specified via \\p dependencies."] - #[doc = " It is possible for \\p numDependencies to be 0, in which case the node will be placed"] - #[doc = 
" at the root of the graph. \\p dependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p phGraphNode."] - #[doc = ""] - #[doc = " The node executes an embedded child graph. The child graph is cloned in this call."] - #[doc = ""] - #[doc = " \\param phGraphNode - Returns newly created node"] - #[doc = " \\param hGraph - Graph to which to add the node"] - #[doc = " \\param dependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = " \\param childGraph - The graph to clone into this node"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphChildGraphNodeGetGraph,"] - #[doc = " ::cuGraphCreate,"] - #[doc = " ::cuGraphDestroyNode,"] - #[doc = " ::cuGraphAddEmptyNode,"] - #[doc = " ::cuGraphAddKernelNode,"] - #[doc = " ::cuGraphAddHostNode,"] - #[doc = " ::cuGraphAddMemcpyNode,"] - #[doc = " ::cuGraphAddMemsetNode,"] - #[doc = " ::cuGraphClone"] pub fn cuGraphAddChildGraphNode( phGraphNode: *mut CUgraphNode, hGraph: CUgraph, @@ -8970,66 +2399,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets a handle to the embedded graph of a child graph node"] - #[doc = ""] - #[doc = " Gets a handle to the embedded graph in a child graph node. This call"] - #[doc = " does not clone the graph. Changes to the graph will be reflected in"] - #[doc = " the node, and the node retains ownership of the graph."] - #[doc = ""] - #[doc = " \\param hNode - Node to get the embedded graph for"] - #[doc = " \\param phGraph - Location to store a handle to the graph"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphAddChildGraphNode,"] - #[doc = " ::cuGraphNodeFindInClone"] pub fn cuGraphChildGraphNodeGetGraph( hNode: CUgraphNode, phGraph: *mut CUgraph, ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates an empty node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new node which performs no operation, and adds it to \\p hGraph with"] - #[doc = " \\p numDependencies dependencies specified via \\p dependencies."] - #[doc = " It is possible for \\p numDependencies to be 0, in which case the node will be placed"] - #[doc = " at the root of the graph. \\p dependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p phGraphNode."] - #[doc = ""] - #[doc = " An empty node performs no operation during execution, but can be used for"] - #[doc = " transitive ordering. 
For example, a phased execution graph with 2 groups of n"] - #[doc = " nodes with a barrier between them can be represented using an empty node and"] - #[doc = " 2*n dependency edges, rather than no empty node and n^2 dependency edges."] - #[doc = ""] - #[doc = " \\param phGraphNode - Returns newly created node"] - #[doc = " \\param hGraph - Graph to which to add the node"] - #[doc = " \\param dependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphCreate,"] - #[doc = " ::cuGraphDestroyNode,"] - #[doc = " ::cuGraphAddChildGraphNode,"] - #[doc = " ::cuGraphAddKernelNode,"] - #[doc = " ::cuGraphAddHostNode,"] - #[doc = " ::cuGraphAddMemcpyNode,"] - #[doc = " ::cuGraphAddMemsetNode"] pub fn cuGraphAddEmptyNode( phGraphNode: *mut CUgraphNode, hGraph: CUgraph, @@ -9038,55 +2413,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Clones a graph"] - #[doc = ""] - #[doc = " This function creates a copy of \\p originalGraph and returns it in \\p * phGraphClone."] - #[doc = " All parameters are copied into the cloned graph. The original graph may be modified"] - #[doc = " after this call without affecting the clone."] - #[doc = ""] - #[doc = " Child graph nodes in the original graph are recursively copied into the clone."] - #[doc = ""] - #[doc = " \\param phGraphClone - Returns newly created cloned graph"] - #[doc = " \\param originalGraph - Graph to clone"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_OUT_OF_MEMORY"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphCreate,"] - #[doc = " ::cuGraphNodeFindInClone"] pub fn cuGraphClone( phGraphClone: *mut CUgraph, originalGraph: CUgraph, ) -> CUresult; } extern "C" { - #[doc = " \\brief Finds a cloned version of a node"] - #[doc = ""] - #[doc = " This function returns the node in \\p hClonedGraph corresponding to \\p hOriginalNode"] - #[doc = " in the original graph."] - #[doc = ""] - #[doc = " \\p hClonedGraph must have been cloned from \\p hOriginalGraph via ::cuGraphClone."] - #[doc = " \\p hOriginalNode must have been in \\p hOriginalGraph at the time of the call to"] - #[doc = " ::cuGraphClone, and the corresponding cloned node in \\p hClonedGraph must not have"] - #[doc = " been removed. 
The cloned node is then returned via \\p phClonedNode."] - #[doc = ""] - #[doc = " \\param phNode - Returns handle to the cloned node"] - #[doc = " \\param hOriginalNode - Handle to the original node"] - #[doc = " \\param hClonedGraph - Cloned graph to query"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphClone"] pub fn cuGraphNodeFindInClone( phNode: *mut CUgraphNode, hOriginalNode: CUgraphNode, @@ -9094,66 +2426,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a node's type"] - #[doc = ""] - #[doc = " Returns the node type of \\p hNode in \\p type."] - #[doc = ""] - #[doc = " \\param hNode - Node to query"] - #[doc = " \\param type - Pointer to return the node type"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphGetNodes,"] - #[doc = " ::cuGraphGetRootNodes,"] - #[doc = " ::cuGraphChildGraphNodeGetGraph,"] - #[doc = " ::cuGraphKernelNodeGetParams,"] - #[doc = " ::cuGraphKernelNodeSetParams,"] - #[doc = " ::cuGraphHostNodeGetParams,"] - #[doc = " ::cuGraphHostNodeSetParams,"] - #[doc = " ::cuGraphMemcpyNodeGetParams,"] - #[doc = " ::cuGraphMemcpyNodeSetParams,"] - #[doc = " ::cuGraphMemsetNodeGetParams,"] - #[doc = " ::cuGraphMemsetNodeSetParams"] pub fn cuGraphNodeGetType( hNode: CUgraphNode, type_: *mut CUgraphNodeType, ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a graph's nodes"] - #[doc = ""] - #[doc = " Returns a list of \\p hGraph's nodes. \\p nodes may be NULL, in which case this"] - #[doc = " function will return the number of nodes in \\p numNodes. Otherwise,"] - #[doc = " \\p numNodes entries will be filled in. If \\p numNodes is higher than the actual"] - #[doc = " number of nodes, the remaining entries in \\p nodes will be set to NULL, and the"] - #[doc = " number of nodes actually obtained will be returned in \\p numNodes."] - #[doc = ""] - #[doc = " \\param hGraph - Graph to query"] - #[doc = " \\param nodes - Pointer to return the nodes"] - #[doc = " \\param numNodes - See description"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphCreate,"] - #[doc = " ::cuGraphGetRootNodes,"] - #[doc = " ::cuGraphGetEdges,"] - #[doc = " ::cuGraphNodeGetType,"] - #[doc = " ::cuGraphNodeGetDependencies,"] - #[doc = " ::cuGraphNodeGetDependentNodes"] pub fn cuGraphGetNodes( hGraph: CUgraph, nodes: *mut CUgraphNode, @@ -9161,33 +2439,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a graph's root nodes"] - #[doc = ""] - #[doc = " Returns a list of \\p hGraph's root nodes. \\p rootNodes may be NULL, in which case this"] - #[doc = " function will return the number of root nodes in \\p numRootNodes. Otherwise,"] - #[doc = " \\p numRootNodes entries will be filled in. 
If \\p numRootNodes is higher than the actual"] - #[doc = " number of root nodes, the remaining entries in \\p rootNodes will be set to NULL, and the"] - #[doc = " number of nodes actually obtained will be returned in \\p numRootNodes."] - #[doc = ""] - #[doc = " \\param hGraph - Graph to query"] - #[doc = " \\param rootNodes - Pointer to return the root nodes"] - #[doc = " \\param numRootNodes - See description"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphCreate,"] - #[doc = " ::cuGraphGetNodes,"] - #[doc = " ::cuGraphGetEdges,"] - #[doc = " ::cuGraphNodeGetType,"] - #[doc = " ::cuGraphNodeGetDependencies,"] - #[doc = " ::cuGraphNodeGetDependentNodes"] pub fn cuGraphGetRootNodes( hGraph: CUgraph, rootNodes: *mut CUgraphNode, @@ -9195,36 +2446,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a graph's dependency edges"] - #[doc = ""] - #[doc = " Returns a list of \\p hGraph's dependency edges. Edges are returned via corresponding"] - #[doc = " indices in \\p from and \\p to; that is, the node in \\p to[i] has a dependency on the"] - #[doc = " node in \\p from[i]. \\p from and \\p to may both be NULL, in which"] - #[doc = " case this function only returns the number of edges in \\p numEdges. Otherwise,"] - #[doc = " \\p numEdges entries will be filled in. If \\p numEdges is higher than the actual"] - #[doc = " number of edges, the remaining entries in \\p from and \\p to will be set to NULL, and"] - #[doc = " the number of edges actually returned will be written to \\p numEdges."] - #[doc = ""] - #[doc = " \\param hGraph - Graph to get the edges from"] - #[doc = " \\param from - Location to return edge endpoints"] - #[doc = " \\param to - Location to return edge endpoints"] - #[doc = " \\param numEdges - See description"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphGetNodes,"] - #[doc = " ::cuGraphGetRootNodes,"] - #[doc = " ::cuGraphAddDependencies,"] - #[doc = " ::cuGraphRemoveDependencies,"] - #[doc = " ::cuGraphNodeGetDependencies,"] - #[doc = " ::cuGraphNodeGetDependentNodes"] pub fn cuGraphGetEdges( hGraph: CUgraph, from: *mut CUgraphNode, @@ -9233,33 +2454,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a node's dependencies"] - #[doc = ""] - #[doc = " Returns a list of \\p node's dependencies. \\p dependencies may be NULL, in which case this"] - #[doc = " function will return the number of dependencies in \\p numDependencies. Otherwise,"] - #[doc = " \\p numDependencies entries will be filled in. 
If \\p numDependencies is higher than the actual"] - #[doc = " number of dependencies, the remaining entries in \\p dependencies will be set to NULL, and the"] - #[doc = " number of nodes actually obtained will be returned in \\p numDependencies."] - #[doc = ""] - #[doc = " \\param hNode - Node to query"] - #[doc = " \\param dependencies - Pointer to return the dependencies"] - #[doc = " \\param numDependencies - See description"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphNodeGetDependentNodes,"] - #[doc = " ::cuGraphGetNodes,"] - #[doc = " ::cuGraphGetRootNodes,"] - #[doc = " ::cuGraphGetEdges,"] - #[doc = " ::cuGraphAddDependencies,"] - #[doc = " ::cuGraphRemoveDependencies"] pub fn cuGraphNodeGetDependencies( hNode: CUgraphNode, dependencies: *mut CUgraphNode, @@ -9267,34 +2461,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a node's dependent nodes"] - #[doc = ""] - #[doc = " Returns a list of \\p node's dependent nodes. \\p dependentNodes may be NULL, in which"] - #[doc = " case this function will return the number of dependent nodes in \\p numDependentNodes."] - #[doc = " Otherwise, \\p numDependentNodes entries will be filled in. If \\p numDependentNodes is"] - #[doc = " higher than the actual number of dependent nodes, the remaining entries in"] - #[doc = " \\p dependentNodes will be set to NULL, and the number of nodes actually obtained will"] - #[doc = " be returned in \\p numDependentNodes."] - #[doc = ""] - #[doc = " \\param hNode - Node to query"] - #[doc = " \\param dependentNodes - Pointer to return the dependent nodes"] - #[doc = " \\param numDependentNodes - See description"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphNodeGetDependencies,"] - #[doc = " ::cuGraphGetNodes,"] - #[doc = " ::cuGraphGetRootNodes,"] - #[doc = " ::cuGraphGetEdges,"] - #[doc = " ::cuGraphAddDependencies,"] - #[doc = " ::cuGraphRemoveDependencies"] pub fn cuGraphNodeGetDependentNodes( hNode: CUgraphNode, dependentNodes: *mut CUgraphNode, @@ -9302,31 +2468,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Adds dependency edges to a graph"] - #[doc = ""] - #[doc = " The number of dependencies to be added is defined by \\p numDependencies"] - #[doc = " Elements in \\p from and \\p to at corresponding indices define a dependency."] - #[doc = " Each node in \\p from and \\p to must belong to \\p hGraph."] - #[doc = ""] - #[doc = " If \\p numDependencies is 0, elements in \\p from and \\p to will be ignored."] - #[doc = " Specifying an existing dependency will return an error."] - #[doc = ""] - #[doc = " \\param hGraph - Graph to which dependencies are added"] - #[doc = " \\param from - Array of nodes that provide the dependencies"] - #[doc = " \\param to - Array of dependent nodes"] - #[doc = " \\param numDependencies - Number of dependencies to be added"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " 
\\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphRemoveDependencies,"] - #[doc = " ::cuGraphGetEdges,"] - #[doc = " ::cuGraphNodeGetDependencies,"] - #[doc = " ::cuGraphNodeGetDependentNodes"] pub fn cuGraphAddDependencies( hGraph: CUgraph, from: *const CUgraphNode, @@ -9335,31 +2476,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Removes dependency edges from a graph"] - #[doc = ""] - #[doc = " The number of \\p dependencies to be removed is defined by \\p numDependencies."] - #[doc = " Elements in \\p from and \\p to at corresponding indices define a dependency."] - #[doc = " Each node in \\p from and \\p to must belong to \\p hGraph."] - #[doc = ""] - #[doc = " If \\p numDependencies is 0, elements in \\p from and \\p to will be ignored."] - #[doc = " Specifying a non-existing dependency will return an error."] - #[doc = ""] - #[doc = " \\param hGraph - Graph from which to remove dependencies"] - #[doc = " \\param from - Array of nodes that provide the dependencies"] - #[doc = " \\param to - Array of dependent nodes"] - #[doc = " \\param numDependencies - Number of dependencies to be removed"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphAddDependencies,"] - #[doc = " ::cuGraphGetEdges,"] - #[doc = " ::cuGraphNodeGetDependencies,"] - #[doc = " ::cuGraphNodeGetDependentNodes"] pub fn cuGraphRemoveDependencies( hGraph: CUgraph, from: *const CUgraphNode, @@ -9368,61 +2484,9 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Remove a node from the graph"] - #[doc = ""] - #[doc = " Removes \\p hNode from its graph. This operation also severs any dependencies of other nodes"] - #[doc = " on \\p hNode and vice versa."] - #[doc = ""] - #[doc = " \\param hNode - Node to remove"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphAddChildGraphNode,"] - #[doc = " ::cuGraphAddEmptyNode,"] - #[doc = " ::cuGraphAddKernelNode,"] - #[doc = " ::cuGraphAddHostNode,"] - #[doc = " ::cuGraphAddMemcpyNode,"] - #[doc = " ::cuGraphAddMemsetNode"] pub fn cuGraphDestroyNode(hNode: CUgraphNode) -> CUresult; } extern "C" { - #[doc = " \\brief Creates an executable graph from a graph"] - #[doc = ""] - #[doc = " Instantiates \\p hGraph as an executable graph. The graph is validated for any"] - #[doc = " structural constraints or intra-node constraints which were not previously"] - #[doc = " validated. If instantiation is successful, a handle to the instantiated graph"] - #[doc = " is returned in \\p graphExec."] - #[doc = ""] - #[doc = " If there are any errors, diagnostic information may be returned in \\p errorNode and"] - #[doc = " \\p logBuffer. This is the primary way to inspect instantiation errors. The output"] - #[doc = " will be null terminated unless the diagnostics overflow"] - #[doc = " the buffer. 
In this case, they will be truncated, and the last byte can be"] - #[doc = " inspected to determine if truncation occurred."] - #[doc = ""] - #[doc = " \\param phGraphExec - Returns instantiated graph"] - #[doc = " \\param hGraph - Graph to instantiate"] - #[doc = " \\param phErrorNode - In case of an instantiation error, this may be modified to"] - #[doc = " indicate a node contributing to the error"] - #[doc = " \\param logBuffer - A character buffer to store diagnostic messages"] - #[doc = " \\param bufferSize - Size of the log buffer in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphCreate,"] - #[doc = " ::cuGraphLaunch,"] - #[doc = " ::cuGraphExecDestroy"] pub fn cuGraphInstantiate( phGraphExec: *mut CUgraphExec, hGraph: CUgraph, @@ -9432,34 +2496,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the parameters for a kernel node in the given graphExec"] - #[doc = ""] - #[doc = " Sets the parameters of a kernel node in an executable graph \\p hGraphExec."] - #[doc = " The node is identified by the corresponding node \\p hNode in the"] - #[doc = " non-executable graph, from which the executable graph was instantiated."] - #[doc = ""] - #[doc = " \\p hNode must not have been removed from the original graph. The \\p func field"] - #[doc = " of \\p nodeParams cannot be modified and must match the original value."] - #[doc = " All other values can be modified."] - #[doc = ""] - #[doc = " The modifications take effect at the next launch of \\p hGraphExec. Already"] - #[doc = " enqueued or running launches of \\p hGraphExec are not affected by this call."] - #[doc = " \\p hNode is also not modified by this call."] - #[doc = ""] - #[doc = " \\param hGraphExec - The executable graph in which to set the specified node"] - #[doc = " \\param hNode - kernel node from the graph from which graphExec was instantiated"] - #[doc = " \\param nodeParams - Updated Parameters to set"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphAddKernelNode,"] - #[doc = " ::cuGraphKernelNodeSetParams,"] - #[doc = " ::cuGraphInstantiate"] pub fn cuGraphExecKernelNodeSetParams( hGraphExec: CUgraphExec, hNode: CUgraphNode, @@ -9467,96 +2503,18 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Launches an executable graph in a stream"] - #[doc = ""] - #[doc = " Executes \\p hGraphExec in \\p hStream. Only one instance of \\p hGraphExec may be executing"] - #[doc = " at a time. Each launch is ordered behind both any previous work in \\p hStream"] - #[doc = " and any previous launches of \\p hGraphExec. 
To execute a graph concurrently, it must be"] - #[doc = " instantiated multiple times into multiple executable graphs."] - #[doc = ""] - #[doc = " \\param hGraphExec - Executable graph to launch"] - #[doc = " \\param hStream - Stream in which to launch the graph"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphInstantiate,"] - #[doc = " ::cuGraphExecDestroy"] pub fn cuGraphLaunch( hGraphExec: CUgraphExec, hStream: CUstream, ) -> CUresult; } extern "C" { - #[doc = " \\brief Destroys an executable graph"] - #[doc = ""] - #[doc = " Destroys the executable graph specified by \\p hGraphExec, as well"] - #[doc = " as all of its executable nodes. If the executable graph is"] - #[doc = " in-flight, it will not be terminated, but rather freed"] - #[doc = " asynchronously on completion."] - #[doc = ""] - #[doc = " \\param hGraphExec - Executable graph to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphInstantiate,"] - #[doc = " ::cuGraphLaunch"] pub fn cuGraphExecDestroy(hGraphExec: CUgraphExec) -> CUresult; } extern "C" { - #[doc = " \\brief Destroys a graph"] - #[doc = ""] - #[doc = " Destroys the graph specified by \\p hGraph, as well as all of its nodes."] - #[doc = ""] - #[doc = " \\param hGraph - Graph to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphCreate"] pub fn cuGraphDestroy(hGraph: CUgraph) -> CUresult; } extern "C" { - #[doc = " \\brief Returns occupancy of a function"] - #[doc = ""] - #[doc = " Returns in \\p *numBlocks the number of the maximum active blocks per"] - #[doc = " streaming multiprocessor."] - #[doc = ""] - #[doc = " \\param numBlocks - Returned occupancy"] - #[doc = " \\param func - Kernel for which occupancy is calculated"] - #[doc = " \\param blockSize - Block size the kernel is intended to be launched with"] - #[doc = " \\param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_UNKNOWN"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaOccupancyMaxActiveBlocksPerMultiprocessor"] pub fn cuOccupancyMaxActiveBlocksPerMultiprocessor( numBlocks: *mut ::std::os::raw::c_int, func: CUfunction, @@ -9565,44 +2523,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns occupancy of a function"] - #[doc = ""] - #[doc = " Returns in \\p *numBlocks the number of the maximum active blocks per"] - #[doc = " streaming multiprocessor."] - #[doc = ""] - #[doc = " The \\p Flags parameter controls how special cases are handled. 
The"] - #[doc = " valid flags are:"] - #[doc = ""] - #[doc = " - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as"] - #[doc = " ::cuOccupancyMaxActiveBlocksPerMultiprocessor;"] - #[doc = ""] - #[doc = " - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the"] - #[doc = " default behavior on platform where global caching affects"] - #[doc = " occupancy. On such platforms, if caching is enabled, but"] - #[doc = " per-block SM resource usage would result in zero occupancy, the"] - #[doc = " occupancy calculator will calculate the occupancy as if caching"] - #[doc = " is disabled. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE makes"] - #[doc = " the occupancy calculator to return 0 in such cases. More information"] - #[doc = " can be found about this feature in the \"Unified L1/Texture Cache\""] - #[doc = " section of the Maxwell tuning guide."] - #[doc = ""] - #[doc = " \\param numBlocks - Returned occupancy"] - #[doc = " \\param func - Kernel for which occupancy is calculated"] - #[doc = " \\param blockSize - Block size the kernel is intended to be launched with"] - #[doc = " \\param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes"] - #[doc = " \\param flags - Requested behavior for the occupancy calculator"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_UNKNOWN"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags"] pub fn cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( numBlocks: *mut ::std::os::raw::c_int, func: CUfunction, @@ -9612,54 +2532,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Suggest a launch configuration with reasonable occupancy"] - #[doc = ""] - #[doc = " Returns in \\p *blockSize a reasonable block size that can achieve"] - #[doc = " the maximum occupancy (or, the maximum number of active warps with"] - #[doc = " the fewest blocks per multiprocessor), and in \\p *minGridSize the"] - #[doc = " minimum grid size to achieve the maximum occupancy."] - #[doc = ""] - #[doc = " If \\p blockSizeLimit is 0, the configurator will use the maximum"] - #[doc = " block size permitted by the device / function instead."] - #[doc = ""] - #[doc = " If per-block dynamic shared memory allocation is not needed, the"] - #[doc = " user should leave both \\p blockSizeToDynamicSMemSize and \\p"] - #[doc = " dynamicSMemSize as 0."] - #[doc = ""] - #[doc = " If per-block dynamic shared memory allocation is needed, then if"] - #[doc = " the dynamic shared memory size is constant regardless of block"] - #[doc = " size, the size should be passed through \\p dynamicSMemSize, and \\p"] - #[doc = " blockSizeToDynamicSMemSize should be NULL."] - #[doc = ""] - #[doc = " Otherwise, if the per-block dynamic shared memory size varies with"] - #[doc = " different block sizes, the user needs to provide a unary function"] - #[doc = " through \\p blockSizeToDynamicSMemSize that computes the dynamic"] - #[doc = " shared memory needed by \\p func for any given block size. \\p"] - #[doc = " dynamicSMemSize is ignored. 
An example signature is:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = " // Take block size, returns dynamic shared memory needed"] - #[doc = " size_t blockToSmem(int blockSize);"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " \\param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy"] - #[doc = " \\param blockSize - Returned maximum block size that can achieve the maximum occupancy"] - #[doc = " \\param func - Kernel for which launch configuration is calculated"] - #[doc = " \\param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \\p func uses based on the block size"] - #[doc = " \\param dynamicSMemSize - Dynamic shared memory usage intended, in bytes"] - #[doc = " \\param blockSizeLimit - The maximum block size \\p func is designed to handle"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_UNKNOWN"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaOccupancyMaxPotentialBlockSize"] pub fn cuOccupancyMaxPotentialBlockSize( minGridSize: *mut ::std::os::raw::c_int, blockSize: *mut ::std::os::raw::c_int, @@ -9670,48 +2542,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Suggest a launch configuration with reasonable occupancy"] - #[doc = ""] - #[doc = " An extended version of ::cuOccupancyMaxPotentialBlockSize. In"] - #[doc = " addition to arguments passed to ::cuOccupancyMaxPotentialBlockSize,"] - #[doc = " ::cuOccupancyMaxPotentialBlockSizeWithFlags also takes a \\p Flags"] - #[doc = " parameter."] - #[doc = ""] - #[doc = " The \\p Flags parameter controls how special cases are handled. The"] - #[doc = " valid flags are:"] - #[doc = ""] - #[doc = " - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as"] - #[doc = " ::cuOccupancyMaxPotentialBlockSize;"] - #[doc = ""] - #[doc = " - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the"] - #[doc = " default behavior on platform where global caching affects"] - #[doc = " occupancy. On such platforms, the launch configurations that"] - #[doc = " produces maximal occupancy might not support global"] - #[doc = " caching. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE"] - #[doc = " guarantees that the the produced launch configuration is global"] - #[doc = " caching compatible at a potential cost of occupancy. 
More information"] - #[doc = " can be found about this feature in the \"Unified L1/Texture Cache\""] - #[doc = " section of the Maxwell tuning guide."] - #[doc = ""] - #[doc = " \\param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy"] - #[doc = " \\param blockSize - Returned maximum block size that can achieve the maximum occupancy"] - #[doc = " \\param func - Kernel for which launch configuration is calculated"] - #[doc = " \\param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \\p func uses based on the block size"] - #[doc = " \\param dynamicSMemSize - Dynamic shared memory usage intended, in bytes"] - #[doc = " \\param blockSizeLimit - The maximum block size \\p func is designed to handle"] - #[doc = " \\param flags - Options"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_UNKNOWN"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaOccupancyMaxPotentialBlockSizeWithFlags"] pub fn cuOccupancyMaxPotentialBlockSizeWithFlags( minGridSize: *mut ::std::os::raw::c_int, blockSize: *mut ::std::os::raw::c_int, @@ -9723,33 +2553,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Binds an array as a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Binds the CUDA array \\p hArray to the texture reference \\p hTexRef. Any"] - #[doc = " previous address or CUDA array state associated with the texture reference"] - #[doc = " is superseded by this function. \\p Flags must be set to"] - #[doc = " ::CU_TRSA_OVERRIDE_FORMAT. Any CUDA array previously bound to \\p hTexRef is"] - #[doc = " unbound."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference to bind"] - #[doc = " \\param hArray - Array to bind"] - #[doc = " \\param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,"] - #[doc = " ::cudaBindTextureToArray"] pub fn cuTexRefSetArray( hTexRef: CUtexref, hArray: CUarray, @@ -9757,32 +2560,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Binds a mipmapped array to a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Binds the CUDA mipmapped array \\p hMipmappedArray to the texture reference \\p hTexRef."] - #[doc = " Any previous address or CUDA array state associated with the texture reference"] - #[doc = " is superseded by this function. 
\\p Flags must be set to ::CU_TRSA_OVERRIDE_FORMAT."] - #[doc = " Any CUDA array previously bound to \\p hTexRef is unbound."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference to bind"] - #[doc = " \\param hMipmappedArray - Mipmapped array to bind"] - #[doc = " \\param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,"] - #[doc = " ::cudaBindTextureToMipmappedArray"] pub fn cuTexRefSetMipmappedArray( hTexRef: CUtexref, hMipmappedArray: CUmipmappedArray, @@ -9806,37 +2583,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the format for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Specifies the format of the data to be read by the texture reference"] - #[doc = " \\p hTexRef. \\p fmt and \\p NumPackedComponents are exactly analogous to the"] - #[doc = " ::Format and ::NumChannels members of the ::CUDA_ARRAY_DESCRIPTOR structure:"] - #[doc = " They specify the format of each component and the number of components per"] - #[doc = " array element."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param fmt - Format to set"] - #[doc = " \\param NumPackedComponents - Number of components per array element"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFlags,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,"] - #[doc = " ::cudaCreateChannelDesc,"] - #[doc = " ::cudaBindTexture,"] - #[doc = " ::cudaBindTexture2D,"] - #[doc = " ::cudaBindTextureToArray,"] - #[doc = " ::cudaBindTextureToMipmappedArray"] pub fn cuTexRefSetFormat( hTexRef: CUtexref, fmt: CUarray_format, @@ -9844,48 +2590,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the addressing mode for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Specifies the addressing mode \\p am for the given dimension \\p dim of the"] - #[doc = " texture reference \\p hTexRef. If \\p dim is zero, the addressing mode is"] - #[doc = " applied to the first parameter of the functions used to fetch from the"] - #[doc = " texture; if \\p dim is 1, the second, and so on. 
::CUaddress_mode is defined"] - #[doc = " as:"] - #[doc = " \\code"] - #[doc = "typedef enum CUaddress_mode_enum {"] - #[doc = "CU_TR_ADDRESS_MODE_WRAP = 0,"] - #[doc = "CU_TR_ADDRESS_MODE_CLAMP = 1,"] - #[doc = "CU_TR_ADDRESS_MODE_MIRROR = 2,"] - #[doc = "CU_TR_ADDRESS_MODE_BORDER = 3"] - #[doc = "} CUaddress_mode;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " Note that this call has no effect if \\p hTexRef is bound to linear memory."] - #[doc = " Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES, is not set, the only"] - #[doc = " supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param dim - Dimension"] - #[doc = " \\param am - Addressing mode to set"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,"] - #[doc = " ::cudaBindTexture,"] - #[doc = " ::cudaBindTexture2D,"] - #[doc = " ::cudaBindTextureToArray,"] - #[doc = " ::cudaBindTextureToMipmappedArray"] pub fn cuTexRefSetAddressMode( hTexRef: CUtexref, dim: ::std::os::raw::c_int, @@ -9893,138 +2597,22 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the filtering mode for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Specifies the filtering mode \\p fm to be used when reading memory through"] - #[doc = " the texture reference \\p hTexRef. ::CUfilter_mode_enum is defined as:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef enum CUfilter_mode_enum {"] - #[doc = "CU_TR_FILTER_MODE_POINT = 0,"] - #[doc = "CU_TR_FILTER_MODE_LINEAR = 1"] - #[doc = "} CUfilter_mode;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " Note that this call has no effect if \\p hTexRef is bound to linear memory."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param fm - Filtering mode to set"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,"] - #[doc = " ::cudaBindTextureToArray"] pub fn cuTexRefSetFilterMode( hTexRef: CUtexref, fm: CUfilter_mode, ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the mipmap filtering mode for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Specifies the mipmap filtering mode \\p fm to be used when reading memory through"] - #[doc = " the texture reference \\p hTexRef. 
::CUfilter_mode_enum is defined as:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef enum CUfilter_mode_enum {"] - #[doc = "CU_TR_FILTER_MODE_POINT = 0,"] - #[doc = "CU_TR_FILTER_MODE_LINEAR = 1"] - #[doc = "} CUfilter_mode;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " Note that this call has no effect if \\p hTexRef is not bound to a mipmapped array."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param fm - Filtering mode to set"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,"] - #[doc = " ::cudaBindTextureToMipmappedArray"] pub fn cuTexRefSetMipmapFilterMode( hTexRef: CUtexref, fm: CUfilter_mode, ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the mipmap level bias for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Specifies the mipmap level bias \\p bias to be added to the specified mipmap level when"] - #[doc = " reading memory through the texture reference \\p hTexRef."] - #[doc = ""] - #[doc = " Note that this call has no effect if \\p hTexRef is not bound to a mipmapped array."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param bias - Mipmap level bias"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,"] - #[doc = " ::cudaBindTextureToMipmappedArray"] pub fn cuTexRefSetMipmapLevelBias(hTexRef: CUtexref, bias: f32) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the mipmap min/max mipmap level clamps for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Specifies the min/max mipmap level clamps, \\p minMipmapLevelClamp and \\p maxMipmapLevelClamp"] - #[doc = " respectively, to be used when reading memory through the texture reference"] - #[doc = " \\p hTexRef."] - #[doc = ""] - #[doc = " Note that this call has no effect if \\p hTexRef is not bound to a mipmapped array."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param minMipmapLevelClamp - Mipmap min level clamp"] - #[doc = " \\param maxMipmapLevelClamp - Mipmap max level clamp"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = 
" ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,"] - #[doc = " ::cudaBindTextureToMipmappedArray"] pub fn cuTexRefSetMipmapLevelClamp( hTexRef: CUtexref, minMipmapLevelClamp: f32, @@ -10032,113 +2620,18 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the maximum anisotropy for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Specifies the maximum anisotropy \\p maxAniso to be used when reading memory through"] - #[doc = " the texture reference \\p hTexRef."] - #[doc = ""] - #[doc = " Note that this call has no effect if \\p hTexRef is bound to linear memory."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param maxAniso - Maximum anisotropy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,"] - #[doc = " ::cudaBindTextureToArray,"] - #[doc = " ::cudaBindTextureToMipmappedArray"] pub fn cuTexRefSetMaxAnisotropy( hTexRef: CUtexref, maxAniso: ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the border color for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Specifies the value of the RGBA color via the \\p pBorderColor to the texture reference"] - #[doc = " \\p hTexRef. The color value supports only float type and holds color components in"] - #[doc = " the following sequence:"] - #[doc = " pBorderColor[0] holds 'R' component"] - #[doc = " pBorderColor[1] holds 'G' component"] - #[doc = " pBorderColor[2] holds 'B' component"] - #[doc = " pBorderColor[3] holds 'A' component"] - #[doc = ""] - #[doc = " Note that the color values can be set only when the Address mode is set to"] - #[doc = " CU_TR_ADDRESS_MODE_BORDER using ::cuTexRefSetAddressMode."] - #[doc = " Applications using integer border color values have to \"reinterpret_cast\" their values to float."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param pBorderColor - RGBA color"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddressMode,"] - #[doc = " ::cuTexRefGetAddressMode, ::cuTexRefGetBorderColor,"] - #[doc = " ::cudaBindTexture,"] - #[doc = " ::cudaBindTexture2D,"] - #[doc = " ::cudaBindTextureToArray,"] - #[doc = " ::cudaBindTextureToMipmappedArray"] pub fn cuTexRefSetBorderColor( hTexRef: CUtexref, pBorderColor: *mut f32, ) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the flags for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Specifies optional flags via \\p Flags to specify the behavior of data"] - #[doc = " returned through the texture reference \\p hTexRef. 
The valid flags are:"] - #[doc = ""] - #[doc = " - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of"] - #[doc = " having the texture promote integer data to floating point data in the"] - #[doc = " range [0, 1]. Note that texture with 32-bit integer format"] - #[doc = " would not be promoted, regardless of whether or not this"] - #[doc = " flag is specified;"] - #[doc = " - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the"] - #[doc = " default behavior of having the texture coordinates range"] - #[doc = " from [0, Dim) where Dim is the width or height of the CUDA"] - #[doc = " array. Instead, the texture coordinates [0, 1.0) reference"] - #[doc = " the entire breadth of the array dimension;"] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param Flags - Optional flags to set"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat,"] - #[doc = " ::cudaBindTexture,"] - #[doc = " ::cudaBindTexture2D,"] - #[doc = " ::cudaBindTextureToArray,"] - #[doc = " ::cudaBindTextureToMipmappedArray"] pub fn cuTexRefSetFlags( hTexRef: CUtexref, Flags: ::std::os::raw::c_uint, @@ -10151,88 +2644,18 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the array bound to a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p *phArray the CUDA array bound to the texture reference"] - #[doc = " \\p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference"] - #[doc = " is not bound to any CUDA array."] - #[doc = ""] - #[doc = " \\param phArray - Returned array"] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat"] pub fn cuTexRefGetArray( phArray: *mut CUarray, hTexRef: CUtexref, ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the mipmapped array bound to a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p *phMipmappedArray the CUDA mipmapped array bound to the texture"] - #[doc = " reference \\p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference"] - #[doc = " is not bound to any CUDA mipmapped array."] - #[doc = ""] - #[doc = " \\param phMipmappedArray - Returned mipmapped array"] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - 
#[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat"] pub fn cuTexRefGetMipmappedArray( phMipmappedArray: *mut CUmipmappedArray, hTexRef: CUtexref, ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the addressing mode used by a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p *pam the addressing mode corresponding to the"] - #[doc = " dimension \\p dim of the texture reference \\p hTexRef. Currently, the only"] - #[doc = " valid value for \\p dim are 0 and 1."] - #[doc = ""] - #[doc = " \\param pam - Returned addressing mode"] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param dim - Dimension"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat"] pub fn cuTexRefGetAddressMode( pam: *mut CUaddress_mode, hTexRef: CUtexref, @@ -10240,58 +2663,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the filter-mode used by a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p *pfm the filtering mode of the texture reference"] - #[doc = " \\p hTexRef."] - #[doc = ""] - #[doc = " \\param pfm - Returned filtering mode"] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFlags, ::cuTexRefGetFormat"] pub fn cuTexRefGetFilterMode( pfm: *mut CUfilter_mode, hTexRef: CUtexref, ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the format used by a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p *pFormat and \\p *pNumChannels the format and number"] - #[doc = " of components of the CUDA array bound to the texture reference \\p hTexRef."] - #[doc = " If \\p pFormat or \\p pNumChannels is NULL, it will be ignored."] - #[doc = ""] - #[doc = " \\param pFormat - Returned format"] - #[doc = " \\param pNumChannels - Returned number of components"] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - 
#[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags"] pub fn cuTexRefGetFormat( pFormat: *mut CUarray_format, pNumChannels: *mut ::std::os::raw::c_int, @@ -10299,85 +2676,18 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the mipmap filtering mode for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns the mipmap filtering mode in \\p pfm that's used when reading memory through"] - #[doc = " the texture reference \\p hTexRef."] - #[doc = ""] - #[doc = " \\param pfm - Returned mipmap filtering mode"] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat"] pub fn cuTexRefGetMipmapFilterMode( pfm: *mut CUfilter_mode, hTexRef: CUtexref, ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the mipmap level bias for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns the mipmap level bias in \\p pBias that's added to the specified mipmap"] - #[doc = " level when reading memory through the texture reference \\p hTexRef."] - #[doc = ""] - #[doc = " \\param pbias - Returned mipmap level bias"] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat"] pub fn cuTexRefGetMipmapLevelBias( pbias: *mut f32, hTexRef: CUtexref, ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the min/max mipmap level clamps for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns the min/max mipmap level clamps in \\p pminMipmapLevelClamp and \\p pmaxMipmapLevelClamp"] - #[doc = " that's used when reading memory through the texture reference \\p hTexRef."] - #[doc = ""] - #[doc = " \\param pminMipmapLevelClamp - Returned mipmap min level clamp"] - #[doc = " \\param pmaxMipmapLevelClamp - Returned mipmap max level clamp"] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " 
::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat"] pub fn cuTexRefGetMipmapLevelClamp( pminMipmapLevelClamp: *mut f32, pmaxMipmapLevelClamp: *mut f32, @@ -10385,160 +2695,30 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the maximum anisotropy for a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns the maximum anisotropy in \\p pmaxAniso that's used when reading memory through"] - #[doc = " the texture reference \\p hTexRef."] - #[doc = ""] - #[doc = " \\param pmaxAniso - Returned maximum anisotropy"] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat"] pub fn cuTexRefGetMaxAnisotropy( pmaxAniso: *mut ::std::os::raw::c_int, hTexRef: CUtexref, ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the border color used by a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p pBorderColor, values of the RGBA color used by"] - #[doc = " the texture reference \\p hTexRef."] - #[doc = " The color value is of type float and holds color components in"] - #[doc = " the following sequence:"] - #[doc = " pBorderColor[0] holds 'R' component"] - #[doc = " pBorderColor[1] holds 'G' component"] - #[doc = " pBorderColor[2] holds 'B' component"] - #[doc = " pBorderColor[3] holds 'A' component"] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = " \\param pBorderColor - Returned Type and Value of RGBA color"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddressMode,"] - #[doc = " ::cuTexRefSetAddressMode, ::cuTexRefSetBorderColor"] pub fn cuTexRefGetBorderColor( pBorderColor: *mut f32, hTexRef: CUtexref, ) -> CUresult; } extern "C" { - #[doc = " \\brief Gets the flags used by a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p *pFlags the flags of the texture reference \\p hTexRef."] - #[doc = ""] - #[doc = " \\param pFlags - Returned flags"] - #[doc = " \\param hTexRef - Texture reference"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddress2D, 
::cuTexRefSetAddressMode, ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray,"] - #[doc = " ::cuTexRefGetFilterMode, ::cuTexRefGetFormat"] pub fn cuTexRefGetFlags( pFlags: *mut ::std::os::raw::c_uint, hTexRef: CUtexref, ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Creates a texture reference and returns its handle in \\p *pTexRef. Once"] - #[doc = " created, the application must call ::cuTexRefSetArray() or"] - #[doc = " ::cuTexRefSetAddress() to associate the reference with allocated memory."] - #[doc = " Other texture reference functions are used to specify the format and"] - #[doc = " interpretation (addressing, filtering, etc.) to be used when the memory is"] - #[doc = " read through this texture reference."] - #[doc = ""] - #[doc = " \\param pTexRef - Returned texture reference"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefDestroy"] pub fn cuTexRefCreate(pTexRef: *mut CUtexref) -> CUresult; } extern "C" { - #[doc = " \\brief Destroys a texture reference"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Destroys the texture reference specified by \\p hTexRef."] - #[doc = ""] - #[doc = " \\param hTexRef - Texture reference to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuTexRefCreate"] pub fn cuTexRefDestroy(hTexRef: CUtexref) -> CUresult; } extern "C" { - #[doc = " \\brief Sets the CUDA array for a surface reference."] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Sets the CUDA array \\p hArray to be read and written by the surface reference"] - #[doc = " \\p hSurfRef. Any previous CUDA array state associated with the surface"] - #[doc = " reference is superseded by this function. 
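A minimal sketch of driving the deprecated texture-reference getters above from Rust, assuming the bindings are reachable as optix_sys::*, that a valid CUtexref and a current CUDA context already exist, and that CUDA_SUCCESS is 0 as elsewhere in this file; the helper name texref_info is illustrative, and the hTexRef parameter of cuTexRefGetFormat (elided by the hunk above) follows the standard driver-API signature.

use optix_sys::*;                        // assumed re-export path for these bindings
use std::os::raw::{c_int, c_uint};

/// Illustrative only: read back the flags, format and channel count of an
/// already-configured (deprecated) texture reference.
unsafe fn texref_info(tex: CUtexref) -> Result<(c_uint, CUarray_format, c_int), CUresult> {
    let mut flags: c_uint = 0;
    let r = cuTexRefGetFlags(&mut flags, tex);
    if r != 0 { return Err(r); }                          // CUDA_SUCCESS == 0

    let mut format: CUarray_format = std::mem::zeroed();  // enum aliases in this file are plain integers
    let mut channels: c_int = 0;
    let r = cuTexRefGetFormat(&mut format, &mut channels, tex);
    if r != 0 { return Err(r); }

    Ok((flags, format, channels))
}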
\\p Flags must be set to 0."] - #[doc = " The ::CUDA_ARRAY3D_SURFACE_LDST flag must have been set for the CUDA array."] - #[doc = " Any CUDA array previously bound to \\p hSurfRef is unbound."] - #[doc = ""] - #[doc = " \\param hSurfRef - Surface reference handle"] - #[doc = " \\param hArray - CUDA array handle"] - #[doc = " \\param Flags - set to 0"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuModuleGetSurfRef,"] - #[doc = " ::cuSurfRefGetArray,"] - #[doc = " ::cudaBindSurfaceToArray"] pub fn cuSurfRefSetArray( hSurfRef: CUsurfref, hArray: CUarray, @@ -10546,235 +2726,12 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Passes back the CUDA array bound to a surface reference."] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p *phArray the CUDA array bound to the surface reference"] - #[doc = " \\p hSurfRef, or returns ::CUDA_ERROR_INVALID_VALUE if the surface reference"] - #[doc = " is not bound to any CUDA array."] - #[doc = ""] - #[doc = " \\param phArray - Surface reference handle"] - #[doc = " \\param hSurfRef - Surface reference handle"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa ::cuModuleGetSurfRef, ::cuSurfRefSetArray"] pub fn cuSurfRefGetArray( phArray: *mut CUarray, hSurfRef: CUsurfref, ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates a texture object"] - #[doc = ""] - #[doc = " Creates a texture object and returns it in \\p pTexObject. \\p pResDesc describes"] - #[doc = " the data to texture from. \\p pTexDesc describes how the data should be sampled."] - #[doc = " \\p pResViewDesc is an optional argument that specifies an alternate format for"] - #[doc = " the data described by \\p pResDesc, and also describes the subresource region"] - #[doc = " to restrict access to when texturing. 
\\p pResViewDesc can only be specified if"] - #[doc = " the type of resource is a CUDA array or a CUDA mipmapped array."] - #[doc = ""] - #[doc = " Texture objects are only supported on devices of compute capability 3.0 or higher."] - #[doc = " Additionally, a texture object is an opaque value, and, as such, should only be"] - #[doc = " accessed through CUDA API calls."] - #[doc = ""] - #[doc = " The ::CUDA_RESOURCE_DESC structure is defined as:"] - #[doc = " \\code"] - #[doc = "typedef struct CUDA_RESOURCE_DESC_st"] - #[doc = "{"] - #[doc = "CUresourcetype resType;"] - #[doc = ""] - #[doc = "union {"] - #[doc = "struct {"] - #[doc = "CUarray hArray;"] - #[doc = "} array;"] - #[doc = "struct {"] - #[doc = "CUmipmappedArray hMipmappedArray;"] - #[doc = "} mipmap;"] - #[doc = "struct {"] - #[doc = "CUdeviceptr devPtr;"] - #[doc = "CUarray_format format;"] - #[doc = "unsigned int numChannels;"] - #[doc = "size_t sizeInBytes;"] - #[doc = "} linear;"] - #[doc = "struct {"] - #[doc = "CUdeviceptr devPtr;"] - #[doc = "CUarray_format format;"] - #[doc = "unsigned int numChannels;"] - #[doc = "size_t width;"] - #[doc = "size_t height;"] - #[doc = "size_t pitchInBytes;"] - #[doc = "} pitch2D;"] - #[doc = "} res;"] - #[doc = ""] - #[doc = "unsigned int flags;"] - #[doc = "} CUDA_RESOURCE_DESC;"] - #[doc = ""] - #[doc = " \\endcode"] - #[doc = " where:"] - #[doc = " - ::CUDA_RESOURCE_DESC::resType specifies the type of resource to texture from."] - #[doc = " CUresourceType is defined as:"] - #[doc = " \\code"] - #[doc = "typedef enum CUresourcetype_enum {"] - #[doc = "CU_RESOURCE_TYPE_ARRAY = 0x00,"] - #[doc = "CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 0x01,"] - #[doc = "CU_RESOURCE_TYPE_LINEAR = 0x02,"] - #[doc = "CU_RESOURCE_TYPE_PITCH2D = 0x03"] - #[doc = "} CUresourcetype;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " \\par"] - #[doc = " If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_ARRAY, ::CUDA_RESOURCE_DESC::res::array::hArray"] - #[doc = " must be set to a valid CUDA array handle."] - #[doc = ""] - #[doc = " \\par"] - #[doc = " If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY, ::CUDA_RESOURCE_DESC::res::mipmap::hMipmappedArray"] - #[doc = " must be set to a valid CUDA mipmapped array handle."] - #[doc = ""] - #[doc = " \\par"] - #[doc = " If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_LINEAR, ::CUDA_RESOURCE_DESC::res::linear::devPtr"] - #[doc = " must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT."] - #[doc = " ::CUDA_RESOURCE_DESC::res::linear::format and ::CUDA_RESOURCE_DESC::res::linear::numChannels"] - #[doc = " describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::linear::sizeInBytes"] - #[doc = " specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. 
The number of elements is computed as (sizeInBytes / (sizeof(format) * numChannels))."] - #[doc = ""] - #[doc = " \\par"] - #[doc = " If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_PITCH2D, ::CUDA_RESOURCE_DESC::res::pitch2D::devPtr"] - #[doc = " must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT."] - #[doc = " ::CUDA_RESOURCE_DESC::res::pitch2D::format and ::CUDA_RESOURCE_DESC::res::pitch2D::numChannels"] - #[doc = " describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::pitch2D::width"] - #[doc = " and ::CUDA_RESOURCE_DESC::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively."] - #[doc = " ::CUDA_RESOURCE_DESC::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to"] - #[doc = " ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. Pitch cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH."] - #[doc = ""] - #[doc = " - ::flags must be set to zero."] - #[doc = ""] - #[doc = ""] - #[doc = " The ::CUDA_TEXTURE_DESC struct is defined as"] - #[doc = " \\code"] - #[doc = "typedef struct CUDA_TEXTURE_DESC_st {"] - #[doc = "CUaddress_mode addressMode[3];"] - #[doc = "CUfilter_mode filterMode;"] - #[doc = "unsigned int flags;"] - #[doc = "unsigned int maxAnisotropy;"] - #[doc = "CUfilter_mode mipmapFilterMode;"] - #[doc = "float mipmapLevelBias;"] - #[doc = "float minMipmapLevelClamp;"] - #[doc = "float maxMipmapLevelClamp;"] - #[doc = "} CUDA_TEXTURE_DESC;"] - #[doc = " \\endcode"] - #[doc = " where"] - #[doc = " - ::CUDA_TEXTURE_DESC::addressMode specifies the addressing mode for each dimension of the texture data. ::CUaddress_mode is defined as:"] - #[doc = " \\code"] - #[doc = "typedef enum CUaddress_mode_enum {"] - #[doc = "CU_TR_ADDRESS_MODE_WRAP = 0,"] - #[doc = "CU_TR_ADDRESS_MODE_CLAMP = 1,"] - #[doc = "CU_TR_ADDRESS_MODE_MIRROR = 2,"] - #[doc = "CU_TR_ADDRESS_MODE_BORDER = 3"] - #[doc = "} CUaddress_mode;"] - #[doc = " \\endcode"] - #[doc = " This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR. Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES"] - #[doc = " is not set, the only supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP."] - #[doc = ""] - #[doc = " - ::CUDA_TEXTURE_DESC::filterMode specifies the filtering mode to be used when fetching from the texture. CUfilter_mode is defined as:"] - #[doc = " \\code"] - #[doc = "typedef enum CUfilter_mode_enum {"] - #[doc = "CU_TR_FILTER_MODE_POINT = 0,"] - #[doc = "CU_TR_FILTER_MODE_LINEAR = 1"] - #[doc = "} CUfilter_mode;"] - #[doc = " \\endcode"] - #[doc = " This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR."] - #[doc = ""] - #[doc = " - ::CUDA_TEXTURE_DESC::flags can be any combination of the following:"] - #[doc = " - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of having the texture promote integer data to floating point data in the"] - #[doc = " range [0, 1]. Note that texture with 32-bit integer format would not be promoted, regardless of whether or not this flag is specified."] - #[doc = " - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the default behavior of having the texture coordinates range from [0, Dim) where Dim is"] - #[doc = " the width or height of the CUDA array. 
Instead, the texture coordinates [0, 1.0) reference the entire breadth of the array dimension; Note"] - #[doc = " that for CUDA mipmapped arrays, this flag has to be set."] - #[doc = ""] - #[doc = " - ::CUDA_TEXTURE_DESC::maxAnisotropy specifies the maximum anisotropy ratio to be used when doing anisotropic filtering. This value will be"] - #[doc = " clamped to the range [1,16]."] - #[doc = ""] - #[doc = " - ::CUDA_TEXTURE_DESC::mipmapFilterMode specifies the filter mode when the calculated mipmap level lies between two defined mipmap levels."] - #[doc = ""] - #[doc = " - ::CUDA_TEXTURE_DESC::mipmapLevelBias specifies the offset to be applied to the calculated mipmap level."] - #[doc = ""] - #[doc = " - ::CUDA_TEXTURE_DESC::minMipmapLevelClamp specifies the lower end of the mipmap level range to clamp access to."] - #[doc = ""] - #[doc = " - ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp specifies the upper end of the mipmap level range to clamp access to."] - #[doc = ""] - #[doc = ""] - #[doc = " The ::CUDA_RESOURCE_VIEW_DESC struct is defined as"] - #[doc = " \\code"] - #[doc = "typedef struct CUDA_RESOURCE_VIEW_DESC_st"] - #[doc = "{"] - #[doc = "CUresourceViewFormat format;"] - #[doc = "size_t width;"] - #[doc = "size_t height;"] - #[doc = "size_t depth;"] - #[doc = "unsigned int firstMipmapLevel;"] - #[doc = "unsigned int lastMipmapLevel;"] - #[doc = "unsigned int firstLayer;"] - #[doc = "unsigned int lastLayer;"] - #[doc = "} CUDA_RESOURCE_VIEW_DESC;"] - #[doc = " \\endcode"] - #[doc = " where:"] - #[doc = " - ::CUDA_RESOURCE_VIEW_DESC::format specifies how the data contained in the CUDA array or CUDA mipmapped array should"] - #[doc = " be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block"] - #[doc = " compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a base of format ::CU_AD_FORMAT_UNSIGNED_INT32."] - #[doc = " with 2 or 4 channels, depending on the block compressed format. For ex., BC1 and BC4 require the underlying CUDA array to have"] - #[doc = " a format of ::CU_AD_FORMAT_UNSIGNED_INT32 with 2 channels. The other BC formats require the underlying resource to have the same base"] - #[doc = " format but with 4 channels."] - #[doc = ""] - #[doc = " - ::CUDA_RESOURCE_VIEW_DESC::width specifies the new width of the texture data. If the resource view format is a block"] - #[doc = " compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats,"] - #[doc = " this value has to be equal to that of the original resource."] - #[doc = ""] - #[doc = " - ::CUDA_RESOURCE_VIEW_DESC::height specifies the new height of the texture data. If the resource view format is a block"] - #[doc = " compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats,"] - #[doc = " this value has to be equal to that of the original resource."] - #[doc = ""] - #[doc = " - ::CUDA_RESOURCE_VIEW_DESC::depth specifies the new depth of the texture data. This value has to be equal to that of the"] - #[doc = " original resource."] - #[doc = ""] - #[doc = " - ::CUDA_RESOURCE_VIEW_DESC::firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero."] - #[doc = " For non-mipmapped resources, this value has to be zero.::CUDA_TEXTURE_DESC::minMipmapLevelClamp and ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp"] - #[doc = " will be relative to this value. 
For ex., if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified,"] - #[doc = " then the actual minimum mipmap level clamp will be 3.2."] - #[doc = ""] - #[doc = " - ::CUDA_RESOURCE_VIEW_DESC::lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value"] - #[doc = " has to be zero."] - #[doc = ""] - #[doc = " - ::CUDA_RESOURCE_VIEW_DESC::firstLayer specifies the first layer index for layered textures. This will be the new layer zero."] - #[doc = " For non-layered resources, this value has to be zero."] - #[doc = ""] - #[doc = " - ::CUDA_RESOURCE_VIEW_DESC::lastLayer specifies the last layer index for layered textures. For non-layered resources,"] - #[doc = " this value has to be zero."] - #[doc = ""] - #[doc = ""] - #[doc = " \\param pTexObject - Texture object to create"] - #[doc = " \\param pResDesc - Resource descriptor"] - #[doc = " \\param pTexDesc - Texture descriptor"] - #[doc = " \\param pResViewDesc - Resource view descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuTexObjectDestroy,"] - #[doc = " ::cudaCreateTextureObject"] pub fn cuTexObjectCreate( pTexObject: *mut CUtexObject, pResDesc: *const CUDA_RESOURCE_DESC, @@ -10783,191 +2740,42 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Destroys a texture object"] - #[doc = ""] - #[doc = " Destroys the texture object specified by \\p texObject."] - #[doc = ""] - #[doc = " \\param texObject - Texture object to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuTexObjectCreate,"] - #[doc = " ::cudaDestroyTextureObject"] pub fn cuTexObjectDestroy(texObject: CUtexObject) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a texture object's resource descriptor"] - #[doc = ""] - #[doc = " Returns the resource descriptor for the texture object specified by \\p texObject."] - #[doc = ""] - #[doc = " \\param pResDesc - Resource descriptor"] - #[doc = " \\param texObject - Texture object"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuTexObjectCreate,"] - #[doc = " ::cudaGetTextureObjectResourceDesc,"] pub fn cuTexObjectGetResourceDesc( pResDesc: *mut CUDA_RESOURCE_DESC, texObject: CUtexObject, ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a texture object's texture descriptor"] - #[doc = ""] - #[doc = " Returns the texture descriptor for the texture object specified by \\p texObject."] - #[doc = ""] - #[doc = " \\param pTexDesc - Texture descriptor"] - #[doc = " \\param texObject - Texture object"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuTexObjectCreate,"] - #[doc = " 
::cudaGetTextureObjectTextureDesc"] pub fn cuTexObjectGetTextureDesc( pTexDesc: *mut CUDA_TEXTURE_DESC, texObject: CUtexObject, ) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a texture object's resource view descriptor"] - #[doc = ""] - #[doc = " Returns the resource view descriptor for the texture object specified by \\p texObject."] - #[doc = " If no resource view was set for \\p texObject, the ::CUDA_ERROR_INVALID_VALUE is returned."] - #[doc = ""] - #[doc = " \\param pResViewDesc - Resource view descriptor"] - #[doc = " \\param texObject - Texture object"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuTexObjectCreate,"] - #[doc = " ::cudaGetTextureObjectResourceViewDesc"] pub fn cuTexObjectGetResourceViewDesc( pResViewDesc: *mut CUDA_RESOURCE_VIEW_DESC, texObject: CUtexObject, ) -> CUresult; } extern "C" { - #[doc = " \\brief Creates a surface object"] - #[doc = ""] - #[doc = " Creates a surface object and returns it in \\p pSurfObject. \\p pResDesc describes"] - #[doc = " the data to perform surface load/stores on. ::CUDA_RESOURCE_DESC::resType must be"] - #[doc = " ::CU_RESOURCE_TYPE_ARRAY and ::CUDA_RESOURCE_DESC::res::array::hArray"] - #[doc = " must be set to a valid CUDA array handle. ::CUDA_RESOURCE_DESC::flags must be set to zero."] - #[doc = ""] - #[doc = " Surface objects are only supported on devices of compute capability 3.0 or higher."] - #[doc = " Additionally, a surface object is an opaque value, and, as such, should only be"] - #[doc = " accessed through CUDA API calls."] - #[doc = ""] - #[doc = " \\param pSurfObject - Surface object to create"] - #[doc = " \\param pResDesc - Resource descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuSurfObjectDestroy,"] - #[doc = " ::cudaCreateSurfaceObject"] pub fn cuSurfObjectCreate( pSurfObject: *mut CUsurfObject, pResDesc: *const CUDA_RESOURCE_DESC, ) -> CUresult; } extern "C" { - #[doc = " \\brief Destroys a surface object"] - #[doc = ""] - #[doc = " Destroys the surface object specified by \\p surfObject."] - #[doc = ""] - #[doc = " \\param surfObject - Surface object to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuSurfObjectCreate,"] - #[doc = " ::cudaDestroySurfaceObject"] pub fn cuSurfObjectDestroy(surfObject: CUsurfObject) -> CUresult; } extern "C" { - #[doc = " \\brief Returns a surface object's resource descriptor"] - #[doc = ""] - #[doc = " Returns the resource descriptor for the surface object specified by \\p surfObject."] - #[doc = ""] - #[doc = " \\param pResDesc - Resource descriptor"] - #[doc = " \\param surfObject - Surface object"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = 
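A hedged sketch of the texture-object query and teardown path kept above: MaybeUninit is used so the example never has to name the bindgen-generated fields of CUDA_RESOURCE_DESC or CUDA_TEXTURE_DESC, and the helper name inspect_and_release plus the optix_sys::* path are assumptions, as is the pre-existing CUtexObject handle.

use optix_sys::*;              // assumed re-export path
use std::mem::MaybeUninit;

/// Illustrative only: read back both descriptors of an existing texture object,
/// then destroy it. Returns the first failing CUresult, if any.
unsafe fn inspect_and_release(
    tex: CUtexObject,
) -> Result<(CUDA_RESOURCE_DESC, CUDA_TEXTURE_DESC), CUresult> {
    let mut res_desc = MaybeUninit::<CUDA_RESOURCE_DESC>::uninit();
    let r = cuTexObjectGetResourceDesc(res_desc.as_mut_ptr(), tex);
    if r != 0 { return Err(r); }                 // CUDA_SUCCESS == 0

    let mut tex_desc = MaybeUninit::<CUDA_TEXTURE_DESC>::uninit();
    let r = cuTexObjectGetTextureDesc(tex_desc.as_mut_ptr(), tex);
    if r != 0 { return Err(r); }

    let r = cuTexObjectDestroy(tex);
    if r != 0 { return Err(r); }

    Ok((res_desc.assume_init(), tex_desc.assume_init()))
}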
""] - #[doc = " \\sa"] - #[doc = " ::cuSurfObjectCreate,"] - #[doc = " ::cudaGetSurfaceObjectResourceDesc"] pub fn cuSurfObjectGetResourceDesc( pResDesc: *mut CUDA_RESOURCE_DESC, surfObject: CUsurfObject, ) -> CUresult; } extern "C" { - #[doc = " \\brief Queries if a device may directly access a peer device's memory."] - #[doc = ""] - #[doc = " Returns in \\p *canAccessPeer a value of 1 if contexts on \\p dev are capable of"] - #[doc = " directly accessing memory from contexts on \\p peerDev and 0 otherwise."] - #[doc = " If direct access of \\p peerDev from \\p dev is possible, then access may be"] - #[doc = " enabled on two specific contexts by calling ::cuCtxEnablePeerAccess()."] - #[doc = ""] - #[doc = " \\param canAccessPeer - Returned access capability"] - #[doc = " \\param dev - Device from which allocations on \\p peerDev are to"] - #[doc = " be directly accessed."] - #[doc = " \\param peerDev - Device on which the allocations to be directly accessed"] - #[doc = " by \\p dev reside."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuCtxEnablePeerAccess,"] - #[doc = " ::cuCtxDisablePeerAccess,"] - #[doc = " ::cudaDeviceCanAccessPeer"] pub fn cuDeviceCanAccessPeer( canAccessPeer: *mut ::std::os::raw::c_int, dev: CUdevice, @@ -10975,121 +2783,15 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Enables direct access to memory allocations in a peer context."] - #[doc = ""] - #[doc = " If both the current context and \\p peerContext are on devices which support unified"] - #[doc = " addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING) and same"] - #[doc = " major compute capability, then on success all allocations from \\p peerContext will"] - #[doc = " immediately be accessible by the current context. 
See \\ref CUDA_UNIFIED for additional"] - #[doc = " details."] - #[doc = ""] - #[doc = " Note that access granted by this call is unidirectional and that in order to access"] - #[doc = " memory from the current context in \\p peerContext, a separate symmetric call"] - #[doc = " to ::cuCtxEnablePeerAccess() is required."] - #[doc = ""] - #[doc = " There is a system-wide maximum of eight peer connections per device."] - #[doc = ""] - #[doc = " Returns ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED if ::cuDeviceCanAccessPeer() indicates"] - #[doc = " that the ::CUdevice of the current context cannot directly access memory"] - #[doc = " from the ::CUdevice of \\p peerContext."] - #[doc = ""] - #[doc = " Returns ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED if direct access of"] - #[doc = " \\p peerContext from the current context has already been enabled."] - #[doc = ""] - #[doc = " Returns ::CUDA_ERROR_TOO_MANY_PEERS if direct peer access is not possible"] - #[doc = " because hardware resources required for peer access have been exhausted."] - #[doc = ""] - #[doc = " Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, \\p peerContext"] - #[doc = " is not a valid context, or if the current context is \\p peerContext."] - #[doc = ""] - #[doc = " Returns ::CUDA_ERROR_INVALID_VALUE if \\p Flags is not 0."] - #[doc = ""] - #[doc = " \\param peerContext - Peer context to enable direct access to from the current context"] - #[doc = " \\param Flags - Reserved for future use and must be set to 0"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED,"] - #[doc = " ::CUDA_ERROR_TOO_MANY_PEERS,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceCanAccessPeer,"] - #[doc = " ::cuCtxDisablePeerAccess,"] - #[doc = " ::cudaDeviceEnablePeerAccess"] pub fn cuCtxEnablePeerAccess( peerContext: CUcontext, Flags: ::std::os::raw::c_uint, ) -> CUresult; } extern "C" { - #[doc = " \\brief Disables direct access to memory allocations in a peer context and"] - #[doc = " unregisters any registered allocations."] - #[doc = ""] - #[doc = "Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has"] - #[doc = " not yet been enabled from \\p peerContext to the current context."] - #[doc = ""] - #[doc = " Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, or if"] - #[doc = " \\p peerContext is not a valid context."] - #[doc = ""] - #[doc = " \\param peerContext - Peer context to disable direct access to"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuDeviceCanAccessPeer,"] - #[doc = " ::cuCtxEnablePeerAccess,"] - #[doc = " ::cudaDeviceDisablePeerAccess"] pub fn cuCtxDisablePeerAccess(peerContext: CUcontext) -> CUresult; } extern "C" { - #[doc = " \\brief Queries attributes of the link between two devices."] - #[doc = ""] - #[doc = " Returns in \\p *value the value of the requested attribute \\p attrib of the"] - #[doc = " link between \\p srcDevice and \\p dstDevice. 
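The peer-access entry points retained here compose into one short, hedged sketch: try_enable_p2p is an illustrative name, the peerDev argument of cuDeviceCanAccessPeer sits outside the visible hunk and follows the standard driver-API signature, and a context created on the peer device is assumed to exist already.

use optix_sys::*;              // assumed re-export path
use std::os::raw::c_int;

/// Illustrative only: enable one-directional peer access from the current
/// context to peer_ctx, if the hardware link supports it.
unsafe fn try_enable_p2p(
    dev: CUdevice,
    peer_dev: CUdevice,
    peer_ctx: CUcontext,
) -> Result<bool, CUresult> {
    let mut can_access: c_int = 0;
    let r = cuDeviceCanAccessPeer(&mut can_access, dev, peer_dev);
    if r != 0 { return Err(r); }                 // CUDA_SUCCESS == 0
    if can_access == 0 {
        return Ok(false);                        // link not supported; nothing to undo
    }
    let r = cuCtxEnablePeerAccess(peer_ctx, 0);  // Flags is reserved and must be 0
    if r != 0 { return Err(r); }
    Ok(true)                                     // undo later with cuCtxDisablePeerAccess(peer_ctx)
}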
The supported attributes are:"] - #[doc = " - ::CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: A relative value indicating the"] - #[doc = " performance of the link between two devices."] - #[doc = " - ::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED P2P: 1 if P2P Access is enable."] - #[doc = " - ::CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: 1 if Atomic operations over"] - #[doc = " the link are supported."] - #[doc = " - ::CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: 1 if cudaArray can"] - #[doc = " be accessed over the link."] - #[doc = ""] - #[doc = " Returns ::CUDA_ERROR_INVALID_DEVICE if \\p srcDevice or \\p dstDevice are not valid"] - #[doc = " or if they represent the same device."] - #[doc = ""] - #[doc = " Returns ::CUDA_ERROR_INVALID_VALUE if \\p attrib is not valid or if \\p value is"] - #[doc = " a null pointer."] - #[doc = ""] - #[doc = " \\param value - Returned value of the requested attribute"] - #[doc = " \\param attrib - The requested attribute of the link between \\p srcDevice and \\p dstDevice."] - #[doc = " \\param srcDevice - The source device of the target link."] - #[doc = " \\param dstDevice - The destination device of the target link."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_DEVICE,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuCtxEnablePeerAccess,"] - #[doc = " ::cuCtxDisablePeerAccess,"] - #[doc = " ::cuDeviceCanAccessPeer,"] - #[doc = " ::cudaDeviceGetP2PAttribute"] pub fn cuDeviceGetP2PAttribute( value: *mut ::std::os::raw::c_int, attrib: CUdevice_P2PAttribute, @@ -11098,73 +2800,11 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Unregisters a graphics resource for access by CUDA"] - #[doc = ""] - #[doc = " Unregisters the graphics resource \\p resource so it is not accessible by"] - #[doc = " CUDA unless registered again."] - #[doc = ""] - #[doc = " If \\p resource is invalid then ::CUDA_ERROR_INVALID_HANDLE is"] - #[doc = " returned."] - #[doc = ""] - #[doc = " \\param resource - Resource to unregister"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_UNKNOWN"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphicsD3D9RegisterResource,"] - #[doc = " ::cuGraphicsD3D10RegisterResource,"] - #[doc = " ::cuGraphicsD3D11RegisterResource,"] - #[doc = " ::cuGraphicsGLRegisterBuffer,"] - #[doc = " ::cuGraphicsGLRegisterImage,"] - #[doc = " ::cudaGraphicsUnregisterResource"] pub fn cuGraphicsUnregisterResource( resource: CUgraphicsResource, ) -> CUresult; } extern "C" { - #[doc = " \\brief Get an array through which to access a subresource of a mapped graphics resource."] - #[doc = ""] - #[doc = " Returns in \\p *pArray an array through which the subresource of the mapped"] - #[doc = " graphics resource \\p resource which corresponds to array index \\p arrayIndex"] - #[doc = " and mipmap level \\p mipLevel may be accessed. 
The value set in \\p *pArray may"] - #[doc = " change every time that \\p resource is mapped."] - #[doc = ""] - #[doc = " If \\p resource is not a texture then it cannot be accessed via an array and"] - #[doc = " ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned."] - #[doc = " If \\p arrayIndex is not a valid array index for \\p resource then"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE is returned."] - #[doc = " If \\p mipLevel is not a valid mipmap level for \\p resource then"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE is returned."] - #[doc = " If \\p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned."] - #[doc = ""] - #[doc = " \\param pArray - Returned array through which a subresource of \\p resource may be accessed"] - #[doc = " \\param resource - Mapped resource to access"] - #[doc = " \\param arrayIndex - Array index for array textures or cubemap face"] - #[doc = " index as defined by ::CUarray_cubemap_face for"] - #[doc = " cubemap textures for the subresource to access"] - #[doc = " \\param mipLevel - Mipmap level for the subresource to access"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_NOT_MAPPED,"] - #[doc = " ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphicsResourceGetMappedPointer,"] - #[doc = " ::cudaGraphicsSubResourceGetMappedArray"] pub fn cuGraphicsSubResourceGetMappedArray( pArray: *mut CUarray, resource: CUgraphicsResource, @@ -11173,33 +2813,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Get a mipmapped array through which to access a mapped graphics resource."] - #[doc = ""] - #[doc = " Returns in \\p *pMipmappedArray a mipmapped array through which the mapped graphics"] - #[doc = " resource \\p resource. 
The value set in \\p *pMipmappedArray may change every time"] - #[doc = " that \\p resource is mapped."] - #[doc = ""] - #[doc = " If \\p resource is not a texture then it cannot be accessed via a mipmapped array and"] - #[doc = " ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned."] - #[doc = " If \\p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned."] - #[doc = ""] - #[doc = " \\param pMipmappedArray - Returned mipmapped array through which \\p resource may be accessed"] - #[doc = " \\param resource - Mapped resource to access"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_VALUE,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_NOT_MAPPED,"] - #[doc = " ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphicsResourceGetMappedPointer,"] - #[doc = " ::cudaGraphicsResourceGetMappedMipmappedArray"] pub fn cuGraphicsResourceGetMappedMipmappedArray( pMipmappedArray: *mut CUmipmappedArray, resource: CUgraphicsResource, @@ -11219,42 +2832,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Map graphics resources for access by CUDA"] - #[doc = ""] - #[doc = " Maps the \\p count graphics resources in \\p resources for access by CUDA."] - #[doc = ""] - #[doc = " The resources in \\p resources may be accessed by CUDA until they"] - #[doc = " are unmapped. The graphics API from which \\p resources were registered"] - #[doc = " should not access any resources while they are mapped by CUDA. If an"] - #[doc = " application does so, the results are undefined."] - #[doc = ""] - #[doc = " This function provides the synchronization guarantee that any graphics calls"] - #[doc = " issued before ::cuGraphicsMapResources() will complete before any subsequent CUDA"] - #[doc = " work issued in \\p stream begins."] - #[doc = ""] - #[doc = " If \\p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned."] - #[doc = " If any of \\p resources are presently mapped for access by CUDA then ::CUDA_ERROR_ALREADY_MAPPED is returned."] - #[doc = ""] - #[doc = " \\param count - Number of resources to map"] - #[doc = " \\param resources - Resources to map for CUDA usage"] - #[doc = " \\param hStream - Stream with which to synchronize"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_ALREADY_MAPPED,"] - #[doc = " ::CUDA_ERROR_UNKNOWN"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphicsResourceGetMappedPointer,"] - #[doc = " ::cuGraphicsSubResourceGetMappedArray,"] - #[doc = " ::cuGraphicsUnmapResources,"] - #[doc = " ::cudaGraphicsMapResources"] pub fn cuGraphicsMapResources( count: ::std::os::raw::c_uint, resources: *mut CUgraphicsResource, @@ -11262,39 +2839,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " \\brief Unmap graphics resources."] - #[doc = ""] - #[doc = " Unmaps the \\p count graphics resources in \\p resources."] - #[doc = ""] - #[doc = " Once unmapped, the resources in \\p resources may not be accessed by CUDA"] - #[doc = " until they are mapped again."] - #[doc = ""] - #[doc = " This function provides 
the synchronization guarantee that any CUDA work issued"] - #[doc = " in \\p stream before ::cuGraphicsUnmapResources() will complete before any"] - #[doc = " subsequently issued graphics work begins."] - #[doc = ""] - #[doc = ""] - #[doc = " If \\p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned."] - #[doc = " If any of \\p resources are not presently mapped for access by CUDA then ::CUDA_ERROR_NOT_MAPPED is returned."] - #[doc = ""] - #[doc = " \\param count - Number of resources to unmap"] - #[doc = " \\param resources - Resources to unmap"] - #[doc = " \\param hStream - Stream with which to synchronize"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::CUDA_SUCCESS,"] - #[doc = " ::CUDA_ERROR_DEINITIALIZED,"] - #[doc = " ::CUDA_ERROR_NOT_INITIALIZED,"] - #[doc = " ::CUDA_ERROR_INVALID_CONTEXT,"] - #[doc = " ::CUDA_ERROR_INVALID_HANDLE,"] - #[doc = " ::CUDA_ERROR_NOT_MAPPED,"] - #[doc = " ::CUDA_ERROR_UNKNOWN"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuGraphicsMapResources,"] - #[doc = " ::cudaGraphicsUnmapResources"] pub fn cuGraphicsUnmapResources( count: ::std::os::raw::c_uint, resources: *mut CUgraphicsResource, @@ -11302,7 +2846,6 @@ extern "C" { ) -> CUresult; } extern "C" { - #[doc = " @}"] pub fn cuGetExportTable( ppExportTable: *mut *const ::std::os::raw::c_void, pExportTableId: *const CUuuid, @@ -11312,13 +2855,7 @@ pub const cudaRoundMode_cudaRoundNearest: cudaRoundMode = 0; pub const cudaRoundMode_cudaRoundZero: cudaRoundMode = 1; pub const cudaRoundMode_cudaRoundPosInf: cudaRoundMode = 2; pub const cudaRoundMode_cudaRoundMinInf: cudaRoundMode = 3; -#[doc = " *"] -#[doc = " *"] -#[doc = " *"] pub type cudaRoundMode = u32; -#[doc = " *"] -#[doc = " *"] -#[doc = " *"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct dim3 { @@ -11327,479 +2864,132 @@ pub struct dim3 { pub z: ::std::os::raw::c_uint, } pub mod cudaError { - #[doc = " CUDA error types"] pub type Type = u32; - #[doc = " The API call returned with no errors. In the case of query calls, this"] - #[doc = " also means that the operation being queried is complete (see"] - #[doc = " ::cudaEventQuery() and ::cudaStreamQuery())."] pub const cudaSuccess: Type = 0; - #[doc = " This indicates that one or more of the parameters passed to the API call"] - #[doc = " is not within an acceptable range of values."] pub const cudaErrorInvalidValue: Type = 1; - #[doc = " The API call failed because it was unable to allocate enough memory to"] - #[doc = " perform the requested operation."] pub const cudaErrorMemoryAllocation: Type = 2; - #[doc = " The API call failed because the CUDA driver and runtime could not be"] - #[doc = " initialized."] pub const cudaErrorInitializationError: Type = 3; - #[doc = " This indicates that a CUDA Runtime API call cannot be executed because"] - #[doc = " it is being called during process shut down, at a point in time after"] - #[doc = " CUDA driver has been unloaded."] pub const cudaErrorCudartUnloading: Type = 4; - #[doc = " This indicates profiler is not initialized for this run. This can"] - #[doc = " happen when the application is running with external profiling tools"] - #[doc = " like visual profiler."] pub const cudaErrorProfilerDisabled: Type = 5; - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 5.0. 
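A hedged sketch of the map / query / unmap sequence for the graphics-interop functions kept above: with_mapped_array is an illustrative name, the resource is assumed to have been registered through one of the interop registration calls earlier in this file, the hStream and arrayIndex/mipLevel parameters hidden by the hunks follow the standard driver-API signatures, and CUstream, CUarray and CUgraphicsResource are assumed to be the usual opaque pointer aliases, so null_mut() stands for the default stream.

use optix_sys::*;              // assumed re-export path
use std::ptr;

/// Illustrative only: map a single registered resource on the default stream,
/// hand the mip-level-0 CUarray to a closure, then unmap again. The array
/// handle is only valid while the resource stays mapped.
unsafe fn with_mapped_array<F: FnOnce(CUarray)>(
    mut resource: CUgraphicsResource,
    f: F,
) -> Result<(), CUresult> {
    let r = cuGraphicsMapResources(1, &mut resource, ptr::null_mut());
    if r != 0 { return Err(r); }                 // CUDA_SUCCESS == 0

    let mut array: CUarray = ptr::null_mut();
    let r = cuGraphicsSubResourceGetMappedArray(&mut array, resource, 0, 0);
    if r == 0 {
        f(array);
    }

    let unmap = cuGraphicsUnmapResources(1, &mut resource, ptr::null_mut());
    if r != 0 { Err(r) } else if unmap != 0 { Err(unmap) } else { Ok(()) }
}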
It is no longer an error"] - #[doc = " to attempt to enable/disable the profiling via ::cudaProfilerStart or"] - #[doc = " ::cudaProfilerStop without initialization."] pub const cudaErrorProfilerNotInitialized: Type = 6; - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 5.0. It is no longer an error"] - #[doc = " to call cudaProfilerStart() when profiling is already enabled."] pub const cudaErrorProfilerAlreadyStarted: Type = 7; - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 5.0. It is no longer an error"] - #[doc = " to call cudaProfilerStop() when profiling is already disabled."] pub const cudaErrorProfilerAlreadyStopped: Type = 8; - #[doc = " This indicates that a kernel launch is requesting resources that can"] - #[doc = " never be satisfied by the current device. Requesting more shared memory"] - #[doc = " per block than the device supports will trigger this error, as will"] - #[doc = " requesting too many threads or blocks. See ::cudaDeviceProp for more"] - #[doc = " device limitations."] pub const cudaErrorInvalidConfiguration: Type = 9; - #[doc = " This indicates that one or more of the pitch-related parameters passed"] - #[doc = " to the API call is not within the acceptable range for pitch."] pub const cudaErrorInvalidPitchValue: Type = 12; - #[doc = " This indicates that the symbol name/identifier passed to the API call"] - #[doc = " is not a valid name or identifier."] pub const cudaErrorInvalidSymbol: Type = 13; - #[doc = " This indicates that at least one host pointer passed to the API call is"] - #[doc = " not a valid host pointer."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 10.1."] pub const cudaErrorInvalidHostPointer: Type = 16; - #[doc = " This indicates that at least one device pointer passed to the API call is"] - #[doc = " not a valid device pointer."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 10.1."] pub const cudaErrorInvalidDevicePointer: Type = 17; - #[doc = " This indicates that the texture passed to the API call is not a valid"] - #[doc = " texture."] pub const cudaErrorInvalidTexture: Type = 18; - #[doc = " This indicates that the texture binding is not valid. This occurs if you"] - #[doc = " call ::cudaGetTextureAlignmentOffset() with an unbound texture."] pub const cudaErrorInvalidTextureBinding: Type = 19; - #[doc = " This indicates that the channel descriptor passed to the API call is not"] - #[doc = " valid. This occurs if the format is not one of the formats specified by"] - #[doc = " ::cudaChannelFormatKind, or if one of the dimensions is invalid."] pub const cudaErrorInvalidChannelDescriptor: Type = 20; - #[doc = " This indicates that the direction of the memcpy passed to the API call is"] - #[doc = " not one of the types specified by ::cudaMemcpyKind."] pub const cudaErrorInvalidMemcpyDirection: Type = 21; - #[doc = " This indicated that the user has taken the address of a constant variable,"] - #[doc = " which was forbidden up until the CUDA 3.1 release."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 3.1. 
Variables in constant"] - #[doc = " memory may now have their address taken by the runtime via"] - #[doc = " ::cudaGetSymbolAddress()."] pub const cudaErrorAddressOfConstant: Type = 22; - #[doc = " This indicated that a texture fetch was not able to be performed."] - #[doc = " This was previously used for device emulation of texture operations."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 3.1. Device emulation mode was"] - #[doc = " removed with the CUDA 3.1 release."] pub const cudaErrorTextureFetchFailed: Type = 23; - #[doc = " This indicated that a texture was not bound for access."] - #[doc = " This was previously used for device emulation of texture operations."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 3.1. Device emulation mode was"] - #[doc = " removed with the CUDA 3.1 release."] pub const cudaErrorTextureNotBound: Type = 24; - #[doc = " This indicated that a synchronization operation had failed."] - #[doc = " This was previously used for some device emulation functions."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 3.1. Device emulation mode was"] - #[doc = " removed with the CUDA 3.1 release."] pub const cudaErrorSynchronizationError: Type = 25; - #[doc = " This indicates that a non-float texture was being accessed with linear"] - #[doc = " filtering. This is not supported by CUDA."] pub const cudaErrorInvalidFilterSetting: Type = 26; - #[doc = " This indicates that an attempt was made to read a non-float texture as a"] - #[doc = " normalized float. This is not supported by CUDA."] pub const cudaErrorInvalidNormSetting: Type = 27; - #[doc = " Mixing of device and device emulation code was not allowed."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 3.1. Device emulation mode was"] - #[doc = " removed with the CUDA 3.1 release."] pub const cudaErrorMixedDeviceExecution: Type = 28; - #[doc = " This indicates that the API call is not yet implemented. Production"] - #[doc = " releases of CUDA will never return this error."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 4.1."] pub const cudaErrorNotYetImplemented: Type = 31; - #[doc = " This indicated that an emulated device pointer exceeded the 32-bit address"] - #[doc = " range."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 3.1. Device emulation mode was"] - #[doc = " removed with the CUDA 3.1 release."] pub const cudaErrorMemoryValueTooLarge: Type = 32; - #[doc = " This indicates that the installed NVIDIA CUDA driver is older than the"] - #[doc = " CUDA runtime library. This is not a supported configuration. 
Users should"] - #[doc = " install an updated NVIDIA display driver to allow the application to run."] pub const cudaErrorInsufficientDriver: Type = 35; - #[doc = " This indicates that the surface passed to the API call is not a valid"] - #[doc = " surface."] pub const cudaErrorInvalidSurface: Type = 37; - #[doc = " This indicates that multiple global or constant variables (across separate"] - #[doc = " CUDA source files in the application) share the same string name."] pub const cudaErrorDuplicateVariableName: Type = 43; - #[doc = " This indicates that multiple textures (across separate CUDA source"] - #[doc = " files in the application) share the same string name."] pub const cudaErrorDuplicateTextureName: Type = 44; - #[doc = " This indicates that multiple surfaces (across separate CUDA source"] - #[doc = " files in the application) share the same string name."] pub const cudaErrorDuplicateSurfaceName: Type = 45; - #[doc = " This indicates that all CUDA devices are busy or unavailable at the current"] - #[doc = " time. Devices are often busy/unavailable due to use of"] - #[doc = " ::cudaComputeModeExclusive, ::cudaComputeModeProhibited or when long"] - #[doc = " running CUDA kernels have filled up the GPU and are blocking new work"] - #[doc = " from starting. They can also be unavailable due to memory constraints"] - #[doc = " on a device that already has active CUDA work being performed."] pub const cudaErrorDevicesUnavailable: Type = 46; - #[doc = " This indicates that the current context is not compatible with this"] - #[doc = " the CUDA Runtime. This can only occur if you are using CUDA"] - #[doc = " Runtime/Driver interoperability and have created an existing Driver"] - #[doc = " context using the driver API. The Driver context may be incompatible"] - #[doc = " either because the Driver context was created using an older version"] - #[doc = " of the API, because the Runtime API call expects a primary driver"] - #[doc = " context and the Driver context is not primary, or because the Driver"] - #[doc = " context has been destroyed. Please see \\ref CUDART_DRIVER \"Interactions"] - #[doc = " with the CUDA Driver API\" for more information."] pub const cudaErrorIncompatibleDriverContext: Type = 49; - #[doc = " The device function being invoked (usually via ::cudaLaunchKernel()) was not"] - #[doc = " previously configured via the ::cudaConfigureCall() function."] pub const cudaErrorMissingConfiguration: Type = 52; - #[doc = " This indicated that a previous kernel launch failed. This was previously"] - #[doc = " used for device emulation of kernel launches."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 3.1. 
Device emulation mode was"] - #[doc = " removed with the CUDA 3.1 release."] pub const cudaErrorPriorLaunchFailure: Type = 53; - #[doc = " This error indicates that a device runtime grid launch did not occur"] - #[doc = " because the depth of the child grid would exceed the maximum supported"] - #[doc = " number of nested grid launches."] pub const cudaErrorLaunchMaxDepthExceeded: Type = 65; - #[doc = " This error indicates that a grid launch did not occur because the kernel"] - #[doc = " uses file-scoped textures which are unsupported by the device runtime."] - #[doc = " Kernels launched via the device runtime only support textures created with"] - #[doc = " the Texture Object API's."] pub const cudaErrorLaunchFileScopedTex: Type = 66; - #[doc = " This error indicates that a grid launch did not occur because the kernel"] - #[doc = " uses file-scoped surfaces which are unsupported by the device runtime."] - #[doc = " Kernels launched via the device runtime only support surfaces created with"] - #[doc = " the Surface Object API's."] pub const cudaErrorLaunchFileScopedSurf: Type = 67; - #[doc = " This error indicates that a call to ::cudaDeviceSynchronize made from"] - #[doc = " the device runtime failed because the call was made at grid depth greater"] - #[doc = " than than either the default (2 levels of grids) or user specified device"] - #[doc = " limit ::cudaLimitDevRuntimeSyncDepth. To be able to synchronize on"] - #[doc = " launched grids at a greater depth successfully, the maximum nested"] - #[doc = " depth at which ::cudaDeviceSynchronize will be called must be specified"] - #[doc = " with the ::cudaLimitDevRuntimeSyncDepth limit to the ::cudaDeviceSetLimit"] - #[doc = " api before the host-side launch of a kernel using the device runtime."] - #[doc = " Keep in mind that additional levels of sync depth require the runtime"] - #[doc = " to reserve large amounts of device memory that cannot be used for"] - #[doc = " user allocations."] pub const cudaErrorSyncDepthExceeded: Type = 68; - #[doc = " This error indicates that a device runtime grid launch failed because"] - #[doc = " the launch would exceed the limit ::cudaLimitDevRuntimePendingLaunchCount."] - #[doc = " For this launch to proceed successfully, ::cudaDeviceSetLimit must be"] - #[doc = " called to set the ::cudaLimitDevRuntimePendingLaunchCount to be higher"] - #[doc = " than the upper bound of outstanding launches that can be issued to the"] - #[doc = " device runtime. 
Keep in mind that raising the limit of pending device"] - #[doc = " runtime launches will require the runtime to reserve device memory that"] - #[doc = " cannot be used for user allocations."] pub const cudaErrorLaunchPendingCountExceeded: Type = 69; - #[doc = " The requested device function does not exist or is not compiled for the"] - #[doc = " proper device architecture."] pub const cudaErrorInvalidDeviceFunction: Type = 98; - #[doc = " This indicates that no CUDA-capable devices were detected by the installed"] - #[doc = " CUDA driver."] pub const cudaErrorNoDevice: Type = 100; - #[doc = " This indicates that the device ordinal supplied by the user does not"] - #[doc = " correspond to a valid CUDA device."] pub const cudaErrorInvalidDevice: Type = 101; - #[doc = " This indicates an internal startup failure in the CUDA runtime."] pub const cudaErrorStartupFailure: Type = 127; - #[doc = " This indicates that the device kernel image is invalid."] pub const cudaErrorInvalidKernelImage: Type = 200; - #[doc = " This most frequently indicates that there is no context bound to the"] - #[doc = " current thread. This can also be returned if the context passed to an"] - #[doc = " API call is not a valid handle (such as a context that has had"] - #[doc = " ::cuCtxDestroy() invoked on it). This can also be returned if a user"] - #[doc = " mixes different API versions (i.e. 3010 context with 3020 API calls)."] - #[doc = " See ::cuCtxGetApiVersion() for more details."] pub const cudaErrorDeviceUninitilialized: Type = 201; - #[doc = " This indicates that the buffer object could not be mapped."] pub const cudaErrorMapBufferObjectFailed: Type = 205; - #[doc = " This indicates that the buffer object could not be unmapped."] pub const cudaErrorUnmapBufferObjectFailed: Type = 206; - #[doc = " This indicates that the specified array is currently mapped and thus"] - #[doc = " cannot be destroyed."] pub const cudaErrorArrayIsMapped: Type = 207; - #[doc = " This indicates that the resource is already mapped."] pub const cudaErrorAlreadyMapped: Type = 208; - #[doc = " This indicates that there is no kernel image available that is suitable"] - #[doc = " for the device. 
This can occur when a user specifies code generation"] - #[doc = " options for a particular CUDA source file that do not include the"] - #[doc = " corresponding device configuration."] pub const cudaErrorNoKernelImageForDevice: Type = 209; - #[doc = " This indicates that a resource has already been acquired."] pub const cudaErrorAlreadyAcquired: Type = 210; - #[doc = " This indicates that a resource is not mapped."] pub const cudaErrorNotMapped: Type = 211; - #[doc = " This indicates that a mapped resource is not available for access as an"] - #[doc = " array."] pub const cudaErrorNotMappedAsArray: Type = 212; - #[doc = " This indicates that a mapped resource is not available for access as a"] - #[doc = " pointer."] pub const cudaErrorNotMappedAsPointer: Type = 213; - #[doc = " This indicates that an uncorrectable ECC error was detected during"] - #[doc = " execution."] pub const cudaErrorECCUncorrectable: Type = 214; - #[doc = " This indicates that the ::cudaLimit passed to the API call is not"] - #[doc = " supported by the active device."] pub const cudaErrorUnsupportedLimit: Type = 215; - #[doc = " This indicates that a call tried to access an exclusive-thread device that"] - #[doc = " is already in use by a different thread."] pub const cudaErrorDeviceAlreadyInUse: Type = 216; - #[doc = " This error indicates that P2P access is not supported across the given"] - #[doc = " devices."] pub const cudaErrorPeerAccessUnsupported: Type = 217; - #[doc = " A PTX compilation failed. The runtime may fall back to compiling PTX if"] - #[doc = " an application does not contain a suitable binary for the current device."] pub const cudaErrorInvalidPtx: Type = 218; - #[doc = " This indicates an error with the OpenGL or DirectX context."] pub const cudaErrorInvalidGraphicsContext: Type = 219; - #[doc = " This indicates that an uncorrectable NVLink error was detected during the"] - #[doc = " execution."] pub const cudaErrorNvlinkUncorrectable: Type = 220; - #[doc = " This indicates that the PTX JIT compiler library was not found. The JIT Compiler"] - #[doc = " library is used for PTX compilation. The runtime may fall back to compiling PTX"] - #[doc = " if an application does not contain a suitable binary for the current device."] pub const cudaErrorJitCompilerNotFound: Type = 221; - #[doc = " This indicates that the device kernel source is invalid."] pub const cudaErrorInvalidSource: Type = 300; - #[doc = " This indicates that the file specified was not found."] pub const cudaErrorFileNotFound: Type = 301; - #[doc = " This indicates that a link to a shared object failed to resolve."] pub const cudaErrorSharedObjectSymbolNotFound: Type = 302; - #[doc = " This indicates that initialization of a shared object failed."] pub const cudaErrorSharedObjectInitFailed: Type = 303; - #[doc = " This error indicates that an OS call failed."] pub const cudaErrorOperatingSystem: Type = 304; - #[doc = " This indicates that a resource handle passed to the API call was not"] - #[doc = " valid. Resource handles are opaque types like ::cudaStream_t and"] - #[doc = " ::cudaEvent_t."] pub const cudaErrorInvalidResourceHandle: Type = 400; - #[doc = " This indicates that a resource required by the API call is not in a"] - #[doc = " valid state to perform the requested operation."] pub const cudaErrorIllegalState: Type = 401; - #[doc = " This indicates that a named symbol was not found. 
Examples of symbols"] - #[doc = " are global/constant variable names, texture names, and surface names."] pub const cudaErrorSymbolNotFound: Type = 500; - #[doc = " This indicates that asynchronous operations issued previously have not"] - #[doc = " completed yet. This result is not actually an error, but must be indicated"] - #[doc = " differently than ::cudaSuccess (which indicates completion). Calls that"] - #[doc = " may return this value include ::cudaEventQuery() and ::cudaStreamQuery()."] pub const cudaErrorNotReady: Type = 600; - #[doc = " The device encountered a load or store instruction on an invalid memory address."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const cudaErrorIllegalAddress: Type = 700; - #[doc = " This indicates that a launch did not occur because it did not have"] - #[doc = " appropriate resources. Although this error is similar to"] - #[doc = " ::cudaErrorInvalidConfiguration, this error usually indicates that the"] - #[doc = " user has attempted to pass too many arguments to the device kernel, or the"] - #[doc = " kernel launch specifies too many threads for the kernel's register count."] pub const cudaErrorLaunchOutOfResources: Type = 701; - #[doc = " This indicates that the device kernel took too long to execute. This can"] - #[doc = " only occur if timeouts are enabled - see the device property"] - #[doc = " \\ref ::cudaDeviceProp::kernelExecTimeoutEnabled \"kernelExecTimeoutEnabled\""] - #[doc = " for more information."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. 
To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const cudaErrorLaunchTimeout: Type = 702; - #[doc = " This error indicates a kernel launch that uses an incompatible texturing"] - #[doc = " mode."] pub const cudaErrorLaunchIncompatibleTexturing: Type = 703; - #[doc = " This error indicates that a call to ::cudaDeviceEnablePeerAccess() is"] - #[doc = " trying to re-enable peer addressing on from a context which has already"] - #[doc = " had peer addressing enabled."] pub const cudaErrorPeerAccessAlreadyEnabled: Type = 704; - #[doc = " This error indicates that ::cudaDeviceDisablePeerAccess() is trying to"] - #[doc = " disable peer addressing which has not been enabled yet via"] - #[doc = " ::cudaDeviceEnablePeerAccess()."] pub const cudaErrorPeerAccessNotEnabled: Type = 705; - #[doc = " This indicates that the user has called ::cudaSetValidDevices(),"] - #[doc = " ::cudaSetDeviceFlags(), ::cudaD3D9SetDirect3DDevice(),"] - #[doc = " ::cudaD3D10SetDirect3DDevice, ::cudaD3D11SetDirect3DDevice(), or"] - #[doc = " ::cudaVDPAUSetVDPAUDevice() after initializing the CUDA runtime by"] - #[doc = " calling non-device management operations (allocating memory and"] - #[doc = " launching kernels are examples of non-device management operations)."] - #[doc = " This error can also be returned if using runtime/driver"] - #[doc = " interoperability and there is an existing ::CUcontext active on the"] - #[doc = " host thread."] pub const cudaErrorSetOnActiveProcess: Type = 708; - #[doc = " This error indicates that the context current to the calling thread"] - #[doc = " has been destroyed using ::cuCtxDestroy, or is a primary context which"] - #[doc = " has not yet been initialized."] pub const cudaErrorContextIsDestroyed: Type = 709; - #[doc = " An assert triggered in device code during kernel execution. The device"] - #[doc = " cannot be used again. All existing allocations are invalid. To continue"] - #[doc = " using CUDA, the process must be terminated and relaunched."] pub const cudaErrorAssert: Type = 710; - #[doc = " This error indicates that the hardware resources required to enable"] - #[doc = " peer access have been exhausted for one or more of the devices"] - #[doc = " passed to ::cudaEnablePeerAccess()."] pub const cudaErrorTooManyPeers: Type = 711; - #[doc = " This error indicates that the memory range passed to ::cudaHostRegister()"] - #[doc = " has already been registered."] pub const cudaErrorHostMemoryAlreadyRegistered: Type = 712; - #[doc = " This error indicates that the pointer passed to ::cudaHostUnregister()"] - #[doc = " does not correspond to any currently registered memory region."] pub const cudaErrorHostMemoryNotRegistered: Type = 713; - #[doc = " Device encountered an error in the call stack during kernel execution,"] - #[doc = " possibly due to stack corruption or exceeding the stack size limit."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const cudaErrorHardwareStackError: Type = 714; - #[doc = " The device encountered an illegal instruction during kernel execution"] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. 
To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const cudaErrorIllegalInstruction: Type = 715; - #[doc = " The device encountered a load or store instruction"] - #[doc = " on a memory address which is not aligned."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const cudaErrorMisalignedAddress: Type = 716; - #[doc = " While executing a kernel, the device encountered an instruction"] - #[doc = " which can only operate on memory locations in certain address spaces"] - #[doc = " (global, shared, or local), but was supplied a memory address not"] - #[doc = " belonging to an allowed address space."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const cudaErrorInvalidAddressSpace: Type = 717; - #[doc = " The device encountered an invalid program counter."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const cudaErrorInvalidPc: Type = 718; - #[doc = " An exception occurred on the device while executing a kernel. Common"] - #[doc = " causes include dereferencing an invalid device pointer and accessing"] - #[doc = " out of bounds shared memory. Less common cases can be system specific - more"] - #[doc = " information about these cases can be found in the system specific user guide."] - #[doc = " This leaves the process in an inconsistent state and any further CUDA work"] - #[doc = " will return the same error. To continue using CUDA, the process must be terminated"] - #[doc = " and relaunched."] pub const cudaErrorLaunchFailure: Type = 719; - #[doc = " This error indicates that the number of blocks launched per grid for a kernel that was"] - #[doc = " launched via either ::cudaLaunchCooperativeKernel or ::cudaLaunchCooperativeKernelMultiDevice"] - #[doc = " exceeds the maximum number of blocks as allowed by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor"] - #[doc = " or ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors"] - #[doc = " as specified by the device attribute ::cudaDevAttrMultiProcessorCount."] pub const cudaErrorCooperativeLaunchTooLarge: Type = 720; - #[doc = " This error indicates the attempted operation is not permitted."] pub const cudaErrorNotPermitted: Type = 800; - #[doc = " This error indicates the attempted operation is not supported"] - #[doc = " on the current system or device."] pub const cudaErrorNotSupported: Type = 801; - #[doc = " This error indicates that the system is not yet ready to start any CUDA"] - #[doc = " work. To continue using CUDA, verify the system configuration is in a"] - #[doc = " valid state and all required driver daemons are actively running."] - #[doc = " More information about this error can be found in the system specific"] - #[doc = " user guide."] pub const cudaErrorSystemNotReady: Type = 802; - #[doc = " This error indicates that there is a mismatch between the versions of"] - #[doc = " the display driver and the CUDA driver. 
Refer to the compatibility documentation"] - #[doc = " for supported versions."] pub const cudaErrorSystemDriverMismatch: Type = 803; - #[doc = " This error indicates that the system was upgraded to run with forward compatibility"] - #[doc = " but the visible hardware detected by CUDA does not support this configuration."] - #[doc = " Refer to the compatibility documentation for the supported hardware matrix or ensure"] - #[doc = " that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES"] - #[doc = " environment variable."] pub const cudaErrorCompatNotSupportedOnDevice: Type = 804; - #[doc = " The operation is not permitted when the stream is capturing."] pub const cudaErrorStreamCaptureUnsupported: Type = 900; - #[doc = " The current capture sequence on the stream has been invalidated due to"] - #[doc = " a previous error."] pub const cudaErrorStreamCaptureInvalidated: Type = 901; - #[doc = " The operation would have resulted in a merge of two independent capture"] - #[doc = " sequences."] pub const cudaErrorStreamCaptureMerge: Type = 902; - #[doc = " The capture was not initiated in this stream."] pub const cudaErrorStreamCaptureUnmatched: Type = 903; - #[doc = " The capture sequence contains a fork that was not joined to the primary"] - #[doc = " stream."] pub const cudaErrorStreamCaptureUnjoined: Type = 904; - #[doc = " A dependency would have been created which crosses the capture sequence"] - #[doc = " boundary. Only implicit in-stream ordering dependencies are allowed to"] - #[doc = " cross the boundary."] pub const cudaErrorStreamCaptureIsolation: Type = 905; - #[doc = " The operation would have resulted in a disallowed implicit dependency on"] - #[doc = " a current capture sequence from cudaStreamLegacy."] pub const cudaErrorStreamCaptureImplicit: Type = 906; - #[doc = " The operation is not permitted on an event which was last recorded in a"] - #[doc = " capturing stream."] pub const cudaErrorCapturedEvent: Type = 907; - #[doc = " A stream capture sequence not initiated with the ::cudaStreamCaptureModeRelaxed"] - #[doc = " argument to ::cudaStreamBeginCapture was passed to ::cudaStreamEndCapture in a"] - #[doc = " different thread."] pub const cudaErrorStreamCaptureWrongThread: Type = 908; - #[doc = " This indicates that an unknown internal error has occurred."] pub const cudaErrorUnknown: Type = 999; - #[doc = " Any unhandled CUDA driver error is added to this value and returned via"] - #[doc = " the runtime. 
Production releases of CUDA should not return such errors."] - #[doc = " \\deprecated"] - #[doc = " This error return is deprecated as of CUDA 4.1."] pub const cudaErrorApiFailureBase: Type = 10000; } -#[doc = "< Signed channel format"] pub const cudaChannelFormatKind_cudaChannelFormatKindSigned: cudaChannelFormatKind = 0; -#[doc = "< Unsigned channel format"] pub const cudaChannelFormatKind_cudaChannelFormatKindUnsigned: cudaChannelFormatKind = 1; -#[doc = "< Float channel format"] pub const cudaChannelFormatKind_cudaChannelFormatKindFloat: cudaChannelFormatKind = 2; -#[doc = "< No channel format"] pub const cudaChannelFormatKind_cudaChannelFormatKindNone: cudaChannelFormatKind = 3; -#[doc = " Channel format kind"] pub type cudaChannelFormatKind = u32; -#[doc = " CUDA Channel format descriptor"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaChannelFormatDesc { - #[doc = "< x"] pub x: ::std::os::raw::c_int, - #[doc = "< y"] pub y: ::std::os::raw::c_int, - #[doc = "< z"] pub z: ::std::os::raw::c_int, - #[doc = "< w"] pub w: ::std::os::raw::c_int, - #[doc = "< Channel format kind"] pub f: cudaChannelFormatKind, } #[repr(C)] @@ -11807,164 +2997,94 @@ pub struct cudaChannelFormatDesc { pub struct cudaArray { _unused: [u8; 0], } -#[doc = " CUDA array"] pub type cudaArray_t = *mut cudaArray; -#[doc = " CUDA array (as source copy argument)"] pub type cudaArray_const_t = *const cudaArray; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaMipmappedArray { _unused: [u8; 0], } -#[doc = " CUDA mipmapped array"] pub type cudaMipmappedArray_t = *mut cudaMipmappedArray; -#[doc = " CUDA mipmapped array (as source argument)"] pub type cudaMipmappedArray_const_t = *const cudaMipmappedArray; -#[doc = "< Unregistered memory"] pub const cudaMemoryType_cudaMemoryTypeUnregistered: cudaMemoryType = 0; -#[doc = "< Host memory"] pub const cudaMemoryType_cudaMemoryTypeHost: cudaMemoryType = 1; -#[doc = "< Device memory"] pub const cudaMemoryType_cudaMemoryTypeDevice: cudaMemoryType = 2; -#[doc = "< Managed memory"] pub const cudaMemoryType_cudaMemoryTypeManaged: cudaMemoryType = 3; -#[doc = " CUDA memory types"] pub type cudaMemoryType = u32; pub mod cudaMemcpyKind { - #[doc = " CUDA memory copy types"] pub type Type = u32; - #[doc = "< Host -> Host"] pub const cudaMemcpyHostToHost: Type = 0; - #[doc = "< Host -> Device"] pub const cudaMemcpyHostToDevice: Type = 1; - #[doc = "< Device -> Host"] pub const cudaMemcpyDeviceToHost: Type = 2; - #[doc = "< Device -> Device"] pub const cudaMemcpyDeviceToDevice: Type = 3; - #[doc = "< Direction of the transfer is inferred from the pointer values. 
Requires unified virtual addressing"] pub const cudaMemcpyDefault: Type = 4; } -#[doc = " CUDA Pitched memory pointer"] -#[doc = ""] -#[doc = " \\sa ::make_cudaPitchedPtr"] #[repr(C)] pub struct cudaPitchedPtr { - #[doc = "< Pointer to allocated memory"] pub ptr: *mut ::std::os::raw::c_void, - #[doc = "< Pitch of allocated memory in bytes"] pub pitch: usize, - #[doc = "< Logical width of allocation in elements"] pub xsize: usize, - #[doc = "< Logical height of allocation in elements"] pub ysize: usize, } -#[doc = " CUDA extent"] -#[doc = ""] -#[doc = " \\sa ::make_cudaExtent"] #[repr(C)] pub struct cudaExtent { - #[doc = "< Width in elements when referring to array memory, in bytes when referring to linear memory"] pub width: usize, - #[doc = "< Height in elements"] pub height: usize, - #[doc = "< Depth in elements"] pub depth: usize, } -#[doc = " CUDA 3D position"] -#[doc = ""] -#[doc = " \\sa ::make_cudaPos"] #[repr(C)] pub struct cudaPos { - #[doc = "< x"] pub x: usize, - #[doc = "< y"] pub y: usize, - #[doc = "< z"] pub z: usize, } -#[doc = " CUDA 3D memory copying parameters"] #[repr(C)] pub struct cudaMemcpy3DParms { - #[doc = "< Source memory address"] pub srcArray: cudaArray_t, - #[doc = "< Source position offset"] pub srcPos: cudaPos, - #[doc = "< Pitched source memory address"] pub srcPtr: cudaPitchedPtr, - #[doc = "< Destination memory address"] pub dstArray: cudaArray_t, - #[doc = "< Destination position offset"] pub dstPos: cudaPos, - #[doc = "< Pitched destination memory address"] pub dstPtr: cudaPitchedPtr, - #[doc = "< Requested memory copy size"] pub extent: cudaExtent, - #[doc = "< Type of transfer"] pub kind: cudaMemcpyKind::Type, } -#[doc = " CUDA 3D cross-device memory copying parameters"] #[repr(C)] pub struct cudaMemcpy3DPeerParms { - #[doc = "< Source memory address"] pub srcArray: cudaArray_t, - #[doc = "< Source position offset"] pub srcPos: cudaPos, - #[doc = "< Pitched source memory address"] pub srcPtr: cudaPitchedPtr, - #[doc = "< Source device"] pub srcDevice: ::std::os::raw::c_int, - #[doc = "< Destination memory address"] pub dstArray: cudaArray_t, - #[doc = "< Destination position offset"] pub dstPos: cudaPos, - #[doc = "< Pitched destination memory address"] pub dstPtr: cudaPitchedPtr, - #[doc = "< Destination device"] pub dstDevice: ::std::os::raw::c_int, - #[doc = "< Requested memory copy size"] pub extent: cudaExtent, } -#[doc = " CUDA Memset node parameters"] #[repr(C)] pub struct cudaMemsetParams { - #[doc = "< Destination device pointer"] pub dst: *mut ::std::os::raw::c_void, - #[doc = "< Pitch of destination device pointer. Unused if height is 1"] pub pitch: usize, - #[doc = "< Value to be set"] pub value: ::std::os::raw::c_uint, - #[doc = "< Size of each element in bytes. 
Must be 1, 2, or 4."] pub elementSize: ::std::os::raw::c_uint, - #[doc = "< Width in bytes, of the row"] pub width: usize, - #[doc = "< Number of rows"] pub height: usize, } -#[doc = " CUDA host function"] -#[doc = " \\param userData Argument value passed to the function"] pub type cudaHostFn_t = ::std::option::Option< unsafe extern "C" fn(userData: *mut ::std::os::raw::c_void), >; -#[doc = " CUDA host node parameters"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaHostNodeParams { - #[doc = "< The function to call when the node executes"] pub fn_: cudaHostFn_t, - #[doc = "< Argument to pass to the function"] pub userData: *mut ::std::os::raw::c_void, } -#[doc = "< Stream is not capturing"] pub const cudaStreamCaptureStatus_cudaStreamCaptureStatusNone: cudaStreamCaptureStatus = 0; -#[doc = "< Stream is actively capturing"] pub const cudaStreamCaptureStatus_cudaStreamCaptureStatusActive: cudaStreamCaptureStatus = 1; -#[doc = "< Stream is part of a capture sequence that"] -#[doc = "has been invalidated, but not terminated"] pub const cudaStreamCaptureStatus_cudaStreamCaptureStatusInvalidated: cudaStreamCaptureStatus = 2; -#[doc = " Possible stream capture statuses returned by ::cudaStreamIsCapturing"] pub type cudaStreamCaptureStatus = u32; pub const cudaStreamCaptureMode_cudaStreamCaptureModeGlobal: cudaStreamCaptureMode = 0; @@ -11972,178 +3092,117 @@ pub const cudaStreamCaptureMode_cudaStreamCaptureModeThreadLocal: cudaStreamCaptureMode = 1; pub const cudaStreamCaptureMode_cudaStreamCaptureModeRelaxed: cudaStreamCaptureMode = 2; -#[doc = " Possible modes for stream capture thread interactions. For more details see"] -#[doc = " ::cudaStreamBeginCapture and ::cudaThreadExchangeStreamCaptureMode"] pub type cudaStreamCaptureMode = u32; -#[doc = " CUDA graphics interop resource"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaGraphicsResource { _unused: [u8; 0], } -#[doc = "< Default"] pub const cudaGraphicsRegisterFlags_cudaGraphicsRegisterFlagsNone: cudaGraphicsRegisterFlags = 0; -#[doc = "< CUDA will not write to this resource"] pub const cudaGraphicsRegisterFlags_cudaGraphicsRegisterFlagsReadOnly: cudaGraphicsRegisterFlags = 1; -#[doc = "< CUDA will only write to and will not read from this resource"] pub const cudaGraphicsRegisterFlags_cudaGraphicsRegisterFlagsWriteDiscard: cudaGraphicsRegisterFlags = 2; -#[doc = "< CUDA will bind this resource to a surface reference"] pub const cudaGraphicsRegisterFlags_cudaGraphicsRegisterFlagsSurfaceLoadStore : cudaGraphicsRegisterFlags = 4 ; -#[doc = "< CUDA will perform texture gather operations on this resource"] pub const cudaGraphicsRegisterFlags_cudaGraphicsRegisterFlagsTextureGather: cudaGraphicsRegisterFlags = 8; -#[doc = " CUDA graphics interop register flags"] pub type cudaGraphicsRegisterFlags = u32; -#[doc = "< Default; Assume resource can be read/written"] pub const cudaGraphicsMapFlags_cudaGraphicsMapFlagsNone: cudaGraphicsMapFlags = 0; -#[doc = "< CUDA will not write to this resource"] pub const cudaGraphicsMapFlags_cudaGraphicsMapFlagsReadOnly: cudaGraphicsMapFlags = 1; -#[doc = "< CUDA will only write to and will not read from this resource"] pub const cudaGraphicsMapFlags_cudaGraphicsMapFlagsWriteDiscard: cudaGraphicsMapFlags = 2; -#[doc = " CUDA graphics interop map flags"] pub type cudaGraphicsMapFlags = u32; -#[doc = "< Positive X face of cubemap"] pub const cudaGraphicsCubeFace_cudaGraphicsCubeFacePositiveX: cudaGraphicsCubeFace = 0; -#[doc = "< Negative X face of cubemap"] pub const 
cudaGraphicsCubeFace_cudaGraphicsCubeFaceNegativeX: cudaGraphicsCubeFace = 1; -#[doc = "< Positive Y face of cubemap"] pub const cudaGraphicsCubeFace_cudaGraphicsCubeFacePositiveY: cudaGraphicsCubeFace = 2; -#[doc = "< Negative Y face of cubemap"] pub const cudaGraphicsCubeFace_cudaGraphicsCubeFaceNegativeY: cudaGraphicsCubeFace = 3; -#[doc = "< Positive Z face of cubemap"] pub const cudaGraphicsCubeFace_cudaGraphicsCubeFacePositiveZ: cudaGraphicsCubeFace = 4; -#[doc = "< Negative Z face of cubemap"] pub const cudaGraphicsCubeFace_cudaGraphicsCubeFaceNegativeZ: cudaGraphicsCubeFace = 5; -#[doc = " CUDA graphics interop array indices for cube maps"] pub type cudaGraphicsCubeFace = u32; -#[doc = "< Array resource"] pub const cudaResourceType_cudaResourceTypeArray: cudaResourceType = 0; -#[doc = "< Mipmapped array resource"] pub const cudaResourceType_cudaResourceTypeMipmappedArray: cudaResourceType = 1; -#[doc = "< Linear resource"] pub const cudaResourceType_cudaResourceTypeLinear: cudaResourceType = 2; -#[doc = "< Pitch 2D resource"] pub const cudaResourceType_cudaResourceTypePitch2D: cudaResourceType = 3; -#[doc = " CUDA resource types"] pub type cudaResourceType = u32; -#[doc = "< No resource view format (use underlying resource format)"] pub const cudaResourceViewFormat_cudaResViewFormatNone: cudaResourceViewFormat = 0; -#[doc = "< 1 channel unsigned 8-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedChar1: cudaResourceViewFormat = 1; -#[doc = "< 2 channel unsigned 8-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedChar2: cudaResourceViewFormat = 2; -#[doc = "< 4 channel unsigned 8-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedChar4: cudaResourceViewFormat = 3; -#[doc = "< 1 channel signed 8-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatSignedChar1: cudaResourceViewFormat = 4; -#[doc = "< 2 channel signed 8-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatSignedChar2: cudaResourceViewFormat = 5; -#[doc = "< 4 channel signed 8-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatSignedChar4: cudaResourceViewFormat = 6; -#[doc = "< 1 channel unsigned 16-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedShort1: cudaResourceViewFormat = 7; -#[doc = "< 2 channel unsigned 16-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedShort2: cudaResourceViewFormat = 8; -#[doc = "< 4 channel unsigned 16-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedShort4: cudaResourceViewFormat = 9; -#[doc = "< 1 channel signed 16-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatSignedShort1: cudaResourceViewFormat = 10; -#[doc = "< 2 channel signed 16-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatSignedShort2: cudaResourceViewFormat = 11; -#[doc = "< 4 channel signed 16-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatSignedShort4: cudaResourceViewFormat = 12; -#[doc = "< 1 channel unsigned 32-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedInt1: cudaResourceViewFormat = 13; -#[doc = "< 2 channel unsigned 32-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedInt2: cudaResourceViewFormat = 14; -#[doc = "< 4 channel unsigned 32-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedInt4: cudaResourceViewFormat = 15; -#[doc = "< 1 channel signed 32-bit integers"] pub const 
cudaResourceViewFormat_cudaResViewFormatSignedInt1: cudaResourceViewFormat = 16; -#[doc = "< 2 channel signed 32-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatSignedInt2: cudaResourceViewFormat = 17; -#[doc = "< 4 channel signed 32-bit integers"] pub const cudaResourceViewFormat_cudaResViewFormatSignedInt4: cudaResourceViewFormat = 18; -#[doc = "< 1 channel 16-bit floating point"] pub const cudaResourceViewFormat_cudaResViewFormatHalf1: cudaResourceViewFormat = 19; -#[doc = "< 2 channel 16-bit floating point"] pub const cudaResourceViewFormat_cudaResViewFormatHalf2: cudaResourceViewFormat = 20; -#[doc = "< 4 channel 16-bit floating point"] pub const cudaResourceViewFormat_cudaResViewFormatHalf4: cudaResourceViewFormat = 21; -#[doc = "< 1 channel 32-bit floating point"] pub const cudaResourceViewFormat_cudaResViewFormatFloat1: cudaResourceViewFormat = 22; -#[doc = "< 2 channel 32-bit floating point"] pub const cudaResourceViewFormat_cudaResViewFormatFloat2: cudaResourceViewFormat = 23; -#[doc = "< 4 channel 32-bit floating point"] pub const cudaResourceViewFormat_cudaResViewFormatFloat4: cudaResourceViewFormat = 24; -#[doc = "< Block compressed 1"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedBlockCompressed1: cudaResourceViewFormat = 25; -#[doc = "< Block compressed 2"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedBlockCompressed2: cudaResourceViewFormat = 26; -#[doc = "< Block compressed 3"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedBlockCompressed3: cudaResourceViewFormat = 27; -#[doc = "< Block compressed 4 unsigned"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedBlockCompressed4: cudaResourceViewFormat = 28; -#[doc = "< Block compressed 4 signed"] pub const cudaResourceViewFormat_cudaResViewFormatSignedBlockCompressed4: cudaResourceViewFormat = 29; -#[doc = "< Block compressed 5 unsigned"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedBlockCompressed5: cudaResourceViewFormat = 30; -#[doc = "< Block compressed 5 signed"] pub const cudaResourceViewFormat_cudaResViewFormatSignedBlockCompressed5: cudaResourceViewFormat = 31; -#[doc = "< Block compressed 6 unsigned half-float"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedBlockCompressed6H: cudaResourceViewFormat = 32; -#[doc = "< Block compressed 6 signed half-float"] pub const cudaResourceViewFormat_cudaResViewFormatSignedBlockCompressed6H: cudaResourceViewFormat = 33; -#[doc = "< Block compressed 7"] pub const cudaResourceViewFormat_cudaResViewFormatUnsignedBlockCompressed7: cudaResourceViewFormat = 34; -#[doc = " CUDA texture resource view formats"] pub type cudaResourceViewFormat = u32; #[repr(C)] pub struct cudaResourceDesc__bindgen_ty_1 { @@ -12160,151 +3219,71 @@ pub struct cudaResourceDesc__bindgen_ty_1 { #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaResourceDesc__bindgen_ty_1__bindgen_ty_1 { - #[doc = "< CUDA array"] pub array: cudaArray_t, } #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaResourceDesc__bindgen_ty_1__bindgen_ty_2 { - #[doc = "< CUDA mipmapped array"] pub mipmap: cudaMipmappedArray_t, } #[repr(C)] pub struct cudaResourceDesc__bindgen_ty_1__bindgen_ty_3 { - #[doc = "< Device pointer"] pub devPtr: *mut ::std::os::raw::c_void, - #[doc = "< Channel descriptor"] pub desc: cudaChannelFormatDesc, - #[doc = "< Size in bytes"] pub sizeInBytes: usize, } #[repr(C)] pub struct cudaResourceDesc__bindgen_ty_1__bindgen_ty_4 { - #[doc = "< Device pointer"] pub devPtr: *mut ::std::os::raw::c_void, - #[doc = "< Channel 
descriptor"] pub desc: cudaChannelFormatDesc, - #[doc = "< Width of the array in elements"] pub width: usize, - #[doc = "< Height of the array in elements"] pub height: usize, - #[doc = "< Pitch between two rows in bytes"] pub pitchInBytes: usize, } -#[doc = " CUDA resource view descriptor"] #[repr(C)] pub struct cudaResourceViewDesc { - #[doc = "< Resource view format"] pub format: cudaResourceViewFormat, - #[doc = "< Width of the resource view"] pub width: usize, - #[doc = "< Height of the resource view"] pub height: usize, - #[doc = "< Depth of the resource view"] pub depth: usize, - #[doc = "< First defined mipmap level"] pub firstMipmapLevel: ::std::os::raw::c_uint, - #[doc = "< Last defined mipmap level"] pub lastMipmapLevel: ::std::os::raw::c_uint, - #[doc = "< First layer index"] pub firstLayer: ::std::os::raw::c_uint, - #[doc = "< Last layer index"] pub lastLayer: ::std::os::raw::c_uint, } -#[doc = " CUDA pointer attributes"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaPointerAttributes { - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " The physical location of the memory, ::cudaMemoryTypeHost or"] - #[doc = " ::cudaMemoryTypeDevice. Note that managed memory can return either"] - #[doc = " ::cudaMemoryTypeDevice or ::cudaMemoryTypeHost regardless of it's"] - #[doc = " physical location."] pub memoryType: cudaMemoryType, - #[doc = " The type of memory - ::cudaMemoryTypeUnregistered, ::cudaMemoryTypeHost,"] - #[doc = " ::cudaMemoryTypeDevice or ::cudaMemoryTypeManaged."] pub type_: cudaMemoryType, - #[doc = " The device against which the memory was allocated or registered."] - #[doc = " If the memory type is ::cudaMemoryTypeDevice then this identifies"] - #[doc = " the device on which the memory referred physically resides. If"] - #[doc = " the memory type is ::cudaMemoryTypeHost or::cudaMemoryTypeManaged then"] - #[doc = " this identifies the device which was current when the memory was allocated"] - #[doc = " or registered (and if that device is deinitialized then this allocation"] - #[doc = " will vanish with that device's state)."] pub device: ::std::os::raw::c_int, - #[doc = " The address which may be dereferenced on the current device to access"] - #[doc = " the memory or NULL if no such address exists."] pub devicePointer: *mut ::std::os::raw::c_void, - #[doc = " The address which may be dereferenced on the host to access the"] - #[doc = " memory or NULL if no such address exists."] - #[doc = ""] - #[doc = " \\note CUDA doesn't check if unregistered memory is allocated so this field"] - #[doc = " may contain invalid pointer if an invalid pointer has been passed to CUDA."] pub hostPointer: *mut ::std::os::raw::c_void, - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Indicates if this pointer points to managed memory"] pub isManaged: ::std::os::raw::c_int, } -#[doc = " CUDA function attributes"] #[repr(C)] pub struct cudaFuncAttributes { - #[doc = " The size in bytes of statically-allocated shared memory per block"] - #[doc = " required by this function. This does not include dynamically-allocated"] - #[doc = " shared memory requested by the user at runtime."] pub sharedSizeBytes: usize, - #[doc = " The size in bytes of user-allocated constant memory required by this"] - #[doc = " function."] pub constSizeBytes: usize, - #[doc = " The size in bytes of local memory used by each thread of this function."] pub localSizeBytes: usize, - #[doc = " The maximum number of threads per block, beyond which a launch of the"] - #[doc = " function would fail. 
This number depends on both the function and the"] - #[doc = " device on which the function is currently loaded."] pub maxThreadsPerBlock: ::std::os::raw::c_int, - #[doc = " The number of registers used by each thread of this function."] pub numRegs: ::std::os::raw::c_int, - #[doc = " The PTX virtual architecture version for which the function was"] - #[doc = " compiled. This value is the major PTX version * 10 + the minor PTX"] - #[doc = " version, so a PTX version 1.3 function would return the value 13."] pub ptxVersion: ::std::os::raw::c_int, - #[doc = " The binary architecture version for which the function was compiled."] - #[doc = " This value is the major binary version * 10 + the minor binary version,"] - #[doc = " so a binary version 1.3 function would return the value 13."] pub binaryVersion: ::std::os::raw::c_int, - #[doc = " The attribute to indicate whether the function has been compiled with"] - #[doc = " user specified option \"-Xptxas --dlcm=ca\" set."] pub cacheModeCA: ::std::os::raw::c_int, - #[doc = " The maximum size in bytes of dynamic shared memory per block for"] - #[doc = " this function. Any launch must have a dynamic shared memory size"] - #[doc = " smaller than this value."] pub maxDynamicSharedSizeBytes: ::std::os::raw::c_int, - #[doc = " On devices where the L1 cache and shared memory use the same hardware resources,"] - #[doc = " this sets the shared memory carveout preference, in percent of the maximum shared memory."] - #[doc = " Refer to ::cudaDevAttrMaxSharedMemoryPerMultiprocessor."] - #[doc = " This is only a hint, and the driver can choose a different ratio if required to execute the function."] - #[doc = " See ::cudaFuncSetAttribute"] pub preferredShmemCarveout: ::std::os::raw::c_int, } -#[doc = "< Maximum dynamic shared memory size"] pub const cudaFuncAttribute_cudaFuncAttributeMaxDynamicSharedMemorySize: cudaFuncAttribute = 8; -#[doc = "< Preferred shared memory-L1 cache split"] pub const cudaFuncAttribute_cudaFuncAttributePreferredSharedMemoryCarveout: cudaFuncAttribute = 9; pub const cudaFuncAttribute_cudaFuncAttributeMax: cudaFuncAttribute = 10; -#[doc = " CUDA function attributes that can be set using ::cudaFuncSetAttribute"] pub type cudaFuncAttribute = u32; -#[doc = "< Default function cache configuration, no preference"] pub const cudaFuncCache_cudaFuncCachePreferNone: cudaFuncCache = 0; -#[doc = "< Prefer larger shared memory and smaller L1 cache"] pub const cudaFuncCache_cudaFuncCachePreferShared: cudaFuncCache = 1; -#[doc = "< Prefer larger L1 cache and smaller shared memory"] pub const cudaFuncCache_cudaFuncCachePreferL1: cudaFuncCache = 2; -#[doc = "< Prefer equal size L1 cache and shared memory"] pub const cudaFuncCache_cudaFuncCachePreferEqual: cudaFuncCache = 3; -#[doc = " CUDA function cache configurations"] pub type cudaFuncCache = u32; pub const cudaSharedMemConfig_cudaSharedMemBankSizeDefault: cudaSharedMemConfig = 0; @@ -12312,631 +3291,357 @@ pub const cudaSharedMemConfig_cudaSharedMemBankSizeFourByte: cudaSharedMemConfig = 1; pub const cudaSharedMemConfig_cudaSharedMemBankSizeEightByte: cudaSharedMemConfig = 2; -#[doc = " CUDA shared memory configuration"] pub type cudaSharedMemConfig = u32; -#[doc = "< No preference for shared memory or L1 (default)"] pub const cudaSharedCarveout_cudaSharedmemCarveoutDefault: cudaSharedCarveout = -1; -#[doc = "< Prefer maximum available shared memory, minimum L1 cache"] pub const cudaSharedCarveout_cudaSharedmemCarveoutMaxShared: cudaSharedCarveout = 100; -#[doc = "< Prefer maximum available L1 
cache, minimum shared memory"] pub const cudaSharedCarveout_cudaSharedmemCarveoutMaxL1: cudaSharedCarveout = 0; -#[doc = " Shared memory carveout configurations. These may be passed to cudaFuncSetAttribute"] pub type cudaSharedCarveout = i32; -#[doc = "< Default compute mode (Multiple threads can use ::cudaSetDevice() with this device)"] pub const cudaComputeMode_cudaComputeModeDefault: cudaComputeMode = 0; -#[doc = "< Compute-exclusive-thread mode (Only one thread in one process will be able to use ::cudaSetDevice() with this device)"] pub const cudaComputeMode_cudaComputeModeExclusive: cudaComputeMode = 1; -#[doc = "< Compute-prohibited mode (No threads can use ::cudaSetDevice() with this device)"] pub const cudaComputeMode_cudaComputeModeProhibited: cudaComputeMode = 2; -#[doc = "< Compute-exclusive-process mode (Many threads in one process will be able to use ::cudaSetDevice() with this device)"] pub const cudaComputeMode_cudaComputeModeExclusiveProcess: cudaComputeMode = 3; -#[doc = " CUDA device compute modes"] pub type cudaComputeMode = u32; -#[doc = "< GPU thread stack size"] pub const cudaLimit_cudaLimitStackSize: cudaLimit = 0; -#[doc = "< GPU printf FIFO size"] pub const cudaLimit_cudaLimitPrintfFifoSize: cudaLimit = 1; -#[doc = "< GPU malloc heap size"] pub const cudaLimit_cudaLimitMallocHeapSize: cudaLimit = 2; -#[doc = "< GPU device runtime synchronize depth"] pub const cudaLimit_cudaLimitDevRuntimeSyncDepth: cudaLimit = 3; -#[doc = "< GPU device runtime pending launch count"] pub const cudaLimit_cudaLimitDevRuntimePendingLaunchCount: cudaLimit = 4; -#[doc = "< A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint"] pub const cudaLimit_cudaLimitMaxL2FetchGranularity: cudaLimit = 5; -#[doc = " CUDA Limits"] pub type cudaLimit = u32; -#[doc = "< Data will mostly be read and only occassionally be written to"] pub const cudaMemoryAdvise_cudaMemAdviseSetReadMostly: cudaMemoryAdvise = 1; -#[doc = "< Undo the effect of ::cudaMemAdviseSetReadMostly"] pub const cudaMemoryAdvise_cudaMemAdviseUnsetReadMostly: cudaMemoryAdvise = 2; -#[doc = "< Set the preferred location for the data as the specified device"] pub const cudaMemoryAdvise_cudaMemAdviseSetPreferredLocation: cudaMemoryAdvise = 3; -#[doc = "< Clear the preferred location for the data"] pub const cudaMemoryAdvise_cudaMemAdviseUnsetPreferredLocation: cudaMemoryAdvise = 4; -#[doc = "< Data will be accessed by the specified device, so prevent page faults as much as possible"] pub const cudaMemoryAdvise_cudaMemAdviseSetAccessedBy: cudaMemoryAdvise = 5; -#[doc = "< Let the Unified Memory subsystem decide on the page faulting policy for the specified device"] pub const cudaMemoryAdvise_cudaMemAdviseUnsetAccessedBy: cudaMemoryAdvise = 6; -#[doc = " CUDA Memory Advise values"] pub type cudaMemoryAdvise = u32; -#[doc = "< Whether the range will mostly be read and only occassionally be written to"] pub const cudaMemRangeAttribute_cudaMemRangeAttributeReadMostly: cudaMemRangeAttribute = 1; -#[doc = "< The preferred location of the range"] pub const cudaMemRangeAttribute_cudaMemRangeAttributePreferredLocation: cudaMemRangeAttribute = 2; -#[doc = "< Memory range has ::cudaMemAdviseSetAccessedBy set for specified device"] pub const cudaMemRangeAttribute_cudaMemRangeAttributeAccessedBy: cudaMemRangeAttribute = 3; -#[doc = "< The last location to which the range was prefetched"] pub const cudaMemRangeAttribute_cudaMemRangeAttributeLastPrefetchLocation: cudaMemRangeAttribute = 4; -#[doc = " CUDA 
range attributes"] pub type cudaMemRangeAttribute = u32; -#[doc = "< Output mode Key-Value pair format."] pub const cudaOutputMode_cudaKeyValuePair: cudaOutputMode = 0; -#[doc = "< Output mode Comma separated values format."] pub const cudaOutputMode_cudaCSV: cudaOutputMode = 1; -#[doc = " CUDA Profiler Output modes"] pub type cudaOutputMode = u32; -#[doc = "< Maximum number of threads per block"] pub const cudaDeviceAttr_cudaDevAttrMaxThreadsPerBlock: cudaDeviceAttr = 1; -#[doc = "< Maximum block dimension X"] pub const cudaDeviceAttr_cudaDevAttrMaxBlockDimX: cudaDeviceAttr = 2; -#[doc = "< Maximum block dimension Y"] pub const cudaDeviceAttr_cudaDevAttrMaxBlockDimY: cudaDeviceAttr = 3; -#[doc = "< Maximum block dimension Z"] pub const cudaDeviceAttr_cudaDevAttrMaxBlockDimZ: cudaDeviceAttr = 4; -#[doc = "< Maximum grid dimension X"] pub const cudaDeviceAttr_cudaDevAttrMaxGridDimX: cudaDeviceAttr = 5; -#[doc = "< Maximum grid dimension Y"] pub const cudaDeviceAttr_cudaDevAttrMaxGridDimY: cudaDeviceAttr = 6; -#[doc = "< Maximum grid dimension Z"] pub const cudaDeviceAttr_cudaDevAttrMaxGridDimZ: cudaDeviceAttr = 7; -#[doc = "< Maximum shared memory available per block in bytes"] pub const cudaDeviceAttr_cudaDevAttrMaxSharedMemoryPerBlock: cudaDeviceAttr = 8; -#[doc = "< Memory available on device for __constant__ variables in a CUDA C kernel in bytes"] pub const cudaDeviceAttr_cudaDevAttrTotalConstantMemory: cudaDeviceAttr = 9; -#[doc = "< Warp size in threads"] pub const cudaDeviceAttr_cudaDevAttrWarpSize: cudaDeviceAttr = 10; -#[doc = "< Maximum pitch in bytes allowed by memory copies"] pub const cudaDeviceAttr_cudaDevAttrMaxPitch: cudaDeviceAttr = 11; -#[doc = "< Maximum number of 32-bit registers available per block"] pub const cudaDeviceAttr_cudaDevAttrMaxRegistersPerBlock: cudaDeviceAttr = 12; -#[doc = "< Peak clock frequency in kilohertz"] pub const cudaDeviceAttr_cudaDevAttrClockRate: cudaDeviceAttr = 13; -#[doc = "< Alignment requirement for textures"] pub const cudaDeviceAttr_cudaDevAttrTextureAlignment: cudaDeviceAttr = 14; -#[doc = "< Device can possibly copy memory and execute a kernel concurrently"] pub const cudaDeviceAttr_cudaDevAttrGpuOverlap: cudaDeviceAttr = 15; -#[doc = "< Number of multiprocessors on device"] pub const cudaDeviceAttr_cudaDevAttrMultiProcessorCount: cudaDeviceAttr = 16; -#[doc = "< Specifies whether there is a run time limit on kernels"] pub const cudaDeviceAttr_cudaDevAttrKernelExecTimeout: cudaDeviceAttr = 17; -#[doc = "< Device is integrated with host memory"] pub const cudaDeviceAttr_cudaDevAttrIntegrated: cudaDeviceAttr = 18; -#[doc = "< Device can map host memory into CUDA address space"] pub const cudaDeviceAttr_cudaDevAttrCanMapHostMemory: cudaDeviceAttr = 19; -#[doc = "< Compute mode (See ::cudaComputeMode for details)"] pub const cudaDeviceAttr_cudaDevAttrComputeMode: cudaDeviceAttr = 20; -#[doc = "< Maximum 1D texture width"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture1DWidth: cudaDeviceAttr = 21; -#[doc = "< Maximum 2D texture width"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DWidth: cudaDeviceAttr = 22; -#[doc = "< Maximum 2D texture height"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DHeight: cudaDeviceAttr = 23; -#[doc = "< Maximum 3D texture width"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture3DWidth: cudaDeviceAttr = 24; -#[doc = "< Maximum 3D texture height"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture3DHeight: cudaDeviceAttr = 25; -#[doc = "< Maximum 3D texture depth"] pub const 
cudaDeviceAttr_cudaDevAttrMaxTexture3DDepth: cudaDeviceAttr = 26; -#[doc = "< Maximum 2D layered texture width"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DLayeredWidth: cudaDeviceAttr = 27; -#[doc = "< Maximum 2D layered texture height"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DLayeredHeight: cudaDeviceAttr = 28; -#[doc = "< Maximum layers in a 2D layered texture"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DLayeredLayers: cudaDeviceAttr = 29; -#[doc = "< Alignment requirement for surfaces"] pub const cudaDeviceAttr_cudaDevAttrSurfaceAlignment: cudaDeviceAttr = 30; -#[doc = "< Device can possibly execute multiple kernels concurrently"] pub const cudaDeviceAttr_cudaDevAttrConcurrentKernels: cudaDeviceAttr = 31; -#[doc = "< Device has ECC support enabled"] pub const cudaDeviceAttr_cudaDevAttrEccEnabled: cudaDeviceAttr = 32; -#[doc = "< PCI bus ID of the device"] pub const cudaDeviceAttr_cudaDevAttrPciBusId: cudaDeviceAttr = 33; -#[doc = "< PCI device ID of the device"] pub const cudaDeviceAttr_cudaDevAttrPciDeviceId: cudaDeviceAttr = 34; -#[doc = "< Device is using TCC driver model"] pub const cudaDeviceAttr_cudaDevAttrTccDriver: cudaDeviceAttr = 35; -#[doc = "< Peak memory clock frequency in kilohertz"] pub const cudaDeviceAttr_cudaDevAttrMemoryClockRate: cudaDeviceAttr = 36; -#[doc = "< Global memory bus width in bits"] pub const cudaDeviceAttr_cudaDevAttrGlobalMemoryBusWidth: cudaDeviceAttr = 37; -#[doc = "< Size of L2 cache in bytes"] pub const cudaDeviceAttr_cudaDevAttrL2CacheSize: cudaDeviceAttr = 38; -#[doc = "< Maximum resident threads per multiprocessor"] pub const cudaDeviceAttr_cudaDevAttrMaxThreadsPerMultiProcessor: cudaDeviceAttr = 39; -#[doc = "< Number of asynchronous engines"] pub const cudaDeviceAttr_cudaDevAttrAsyncEngineCount: cudaDeviceAttr = 40; -#[doc = "< Device shares a unified address space with the host"] pub const cudaDeviceAttr_cudaDevAttrUnifiedAddressing: cudaDeviceAttr = 41; -#[doc = "< Maximum 1D layered texture width"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture1DLayeredWidth: cudaDeviceAttr = 42; -#[doc = "< Maximum layers in a 1D layered texture"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture1DLayeredLayers: cudaDeviceAttr = 43; -#[doc = "< Maximum 2D texture width if cudaArrayTextureGather is set"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DGatherWidth: cudaDeviceAttr = 45; -#[doc = "< Maximum 2D texture height if cudaArrayTextureGather is set"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DGatherHeight: cudaDeviceAttr = 46; -#[doc = "< Alternate maximum 3D texture width"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture3DWidthAlt: cudaDeviceAttr = 47; -#[doc = "< Alternate maximum 3D texture height"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture3DHeightAlt: cudaDeviceAttr = 48; -#[doc = "< Alternate maximum 3D texture depth"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture3DDepthAlt: cudaDeviceAttr = 49; -#[doc = "< PCI domain ID of the device"] pub const cudaDeviceAttr_cudaDevAttrPciDomainId: cudaDeviceAttr = 50; -#[doc = "< Pitch alignment requirement for textures"] pub const cudaDeviceAttr_cudaDevAttrTexturePitchAlignment: cudaDeviceAttr = 51; -#[doc = "< Maximum cubemap texture width/height"] pub const cudaDeviceAttr_cudaDevAttrMaxTextureCubemapWidth: cudaDeviceAttr = 52; -#[doc = "< Maximum cubemap layered texture width/height"] pub const cudaDeviceAttr_cudaDevAttrMaxTextureCubemapLayeredWidth: cudaDeviceAttr = 53; -#[doc = "< Maximum layers in a cubemap layered texture"] pub const 
cudaDeviceAttr_cudaDevAttrMaxTextureCubemapLayeredLayers: cudaDeviceAttr = 54; -#[doc = "< Maximum 1D surface width"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface1DWidth: cudaDeviceAttr = 55; -#[doc = "< Maximum 2D surface width"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface2DWidth: cudaDeviceAttr = 56; -#[doc = "< Maximum 2D surface height"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface2DHeight: cudaDeviceAttr = 57; -#[doc = "< Maximum 3D surface width"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface3DWidth: cudaDeviceAttr = 58; -#[doc = "< Maximum 3D surface height"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface3DHeight: cudaDeviceAttr = 59; -#[doc = "< Maximum 3D surface depth"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface3DDepth: cudaDeviceAttr = 60; -#[doc = "< Maximum 1D layered surface width"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface1DLayeredWidth: cudaDeviceAttr = 61; -#[doc = "< Maximum layers in a 1D layered surface"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface1DLayeredLayers: cudaDeviceAttr = 62; -#[doc = "< Maximum 2D layered surface width"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface2DLayeredWidth: cudaDeviceAttr = 63; -#[doc = "< Maximum 2D layered surface height"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface2DLayeredHeight: cudaDeviceAttr = 64; -#[doc = "< Maximum layers in a 2D layered surface"] pub const cudaDeviceAttr_cudaDevAttrMaxSurface2DLayeredLayers: cudaDeviceAttr = 65; -#[doc = "< Maximum cubemap surface width"] pub const cudaDeviceAttr_cudaDevAttrMaxSurfaceCubemapWidth: cudaDeviceAttr = 66; -#[doc = "< Maximum cubemap layered surface width"] pub const cudaDeviceAttr_cudaDevAttrMaxSurfaceCubemapLayeredWidth: cudaDeviceAttr = 67; -#[doc = "< Maximum layers in a cubemap layered surface"] pub const cudaDeviceAttr_cudaDevAttrMaxSurfaceCubemapLayeredLayers: cudaDeviceAttr = 68; -#[doc = "< Maximum 1D linear texture width"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture1DLinearWidth: cudaDeviceAttr = 69; -#[doc = "< Maximum 2D linear texture width"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DLinearWidth: cudaDeviceAttr = 70; -#[doc = "< Maximum 2D linear texture height"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DLinearHeight: cudaDeviceAttr = 71; -#[doc = "< Maximum 2D linear texture pitch in bytes"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DLinearPitch: cudaDeviceAttr = 72; -#[doc = "< Maximum mipmapped 2D texture width"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DMipmappedWidth: cudaDeviceAttr = 73; -#[doc = "< Maximum mipmapped 2D texture height"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture2DMipmappedHeight: cudaDeviceAttr = 74; -#[doc = "< Major compute capability version number"] pub const cudaDeviceAttr_cudaDevAttrComputeCapabilityMajor: cudaDeviceAttr = 75; -#[doc = "< Minor compute capability version number"] pub const cudaDeviceAttr_cudaDevAttrComputeCapabilityMinor: cudaDeviceAttr = 76; -#[doc = "< Maximum mipmapped 1D texture width"] pub const cudaDeviceAttr_cudaDevAttrMaxTexture1DMipmappedWidth: cudaDeviceAttr = 77; -#[doc = "< Device supports stream priorities"] pub const cudaDeviceAttr_cudaDevAttrStreamPrioritiesSupported: cudaDeviceAttr = 78; -#[doc = "< Device supports caching globals in L1"] pub const cudaDeviceAttr_cudaDevAttrGlobalL1CacheSupported: cudaDeviceAttr = 79; -#[doc = "< Device supports caching locals in L1"] pub const cudaDeviceAttr_cudaDevAttrLocalL1CacheSupported: cudaDeviceAttr = 80; -#[doc = "< Maximum shared memory available per multiprocessor in bytes"] pub const 
cudaDeviceAttr_cudaDevAttrMaxSharedMemoryPerMultiprocessor: cudaDeviceAttr = 81; -#[doc = "< Maximum number of 32-bit registers available per multiprocessor"] pub const cudaDeviceAttr_cudaDevAttrMaxRegistersPerMultiprocessor: cudaDeviceAttr = 82; -#[doc = "< Device can allocate managed memory on this system"] pub const cudaDeviceAttr_cudaDevAttrManagedMemory: cudaDeviceAttr = 83; -#[doc = "< Device is on a multi-GPU board"] pub const cudaDeviceAttr_cudaDevAttrIsMultiGpuBoard: cudaDeviceAttr = 84; -#[doc = "< Unique identifier for a group of devices on the same multi-GPU board"] pub const cudaDeviceAttr_cudaDevAttrMultiGpuBoardGroupID: cudaDeviceAttr = 85; -#[doc = "< Link between the device and the host supports native atomic operations"] pub const cudaDeviceAttr_cudaDevAttrHostNativeAtomicSupported: cudaDeviceAttr = 86; -#[doc = "< Ratio of single precision performance (in floating-point operations per second) to double precision performance"] pub const cudaDeviceAttr_cudaDevAttrSingleToDoublePrecisionPerfRatio: cudaDeviceAttr = 87; -#[doc = "< Device supports coherently accessing pageable memory without calling cudaHostRegister on it"] pub const cudaDeviceAttr_cudaDevAttrPageableMemoryAccess: cudaDeviceAttr = 88; -#[doc = "< Device can coherently access managed memory concurrently with the CPU"] pub const cudaDeviceAttr_cudaDevAttrConcurrentManagedAccess: cudaDeviceAttr = 89; -#[doc = "< Device supports Compute Preemption"] pub const cudaDeviceAttr_cudaDevAttrComputePreemptionSupported: cudaDeviceAttr = 90; -#[doc = "< Device can access host registered memory at the same virtual address as the CPU"] pub const cudaDeviceAttr_cudaDevAttrCanUseHostPointerForRegisteredMem: cudaDeviceAttr = 91; pub const cudaDeviceAttr_cudaDevAttrReserved92: cudaDeviceAttr = 92; pub const cudaDeviceAttr_cudaDevAttrReserved93: cudaDeviceAttr = 93; pub const cudaDeviceAttr_cudaDevAttrReserved94: cudaDeviceAttr = 94; -#[doc = "< Device supports launching cooperative kernels via ::cudaLaunchCooperativeKernel"] pub const cudaDeviceAttr_cudaDevAttrCooperativeLaunch: cudaDeviceAttr = 95; -#[doc = "< Device can participate in cooperative kernels launched via ::cudaLaunchCooperativeKernelMultiDevice"] pub const cudaDeviceAttr_cudaDevAttrCooperativeMultiDeviceLaunch: cudaDeviceAttr = 96; -#[doc = "< The maximum optin shared memory per block. This value may vary by chip. 
See ::cudaFuncSetAttribute"] pub const cudaDeviceAttr_cudaDevAttrMaxSharedMemoryPerBlockOptin: cudaDeviceAttr = 97; -#[doc = "< Device supports flushing of outstanding remote writes."] pub const cudaDeviceAttr_cudaDevAttrCanFlushRemoteWrites: cudaDeviceAttr = 98; -#[doc = "< Device supports host memory registration via ::cudaHostRegister."] pub const cudaDeviceAttr_cudaDevAttrHostRegisterSupported: cudaDeviceAttr = 99; -#[doc = "< Device accesses pageable memory via the host's page tables."] pub const cudaDeviceAttr_cudaDevAttrPageableMemoryAccessUsesHostPageTables: cudaDeviceAttr = 100; -#[doc = "< Host can directly access managed memory on the device without migration."] pub const cudaDeviceAttr_cudaDevAttrDirectManagedMemAccessFromHost: cudaDeviceAttr = 101; -#[doc = " CUDA device attributes"] pub type cudaDeviceAttr = u32; -#[doc = "< A relative value indicating the performance of the link between two devices"] pub const cudaDeviceP2PAttr_cudaDevP2PAttrPerformanceRank: cudaDeviceP2PAttr = 1; -#[doc = "< Peer access is enabled"] pub const cudaDeviceP2PAttr_cudaDevP2PAttrAccessSupported: cudaDeviceP2PAttr = 2; -#[doc = "< Native atomic operation over the link supported"] pub const cudaDeviceP2PAttr_cudaDevP2PAttrNativeAtomicSupported: cudaDeviceP2PAttr = 3; -#[doc = "< Accessing CUDA arrays over the link supported"] pub const cudaDeviceP2PAttr_cudaDevP2PAttrCudaArrayAccessSupported: cudaDeviceP2PAttr = 4; -#[doc = " CUDA device P2P attributes"] pub type cudaDeviceP2PAttr = u32; pub type cudaUUID_t = CUuuid_st; -#[doc = " CUDA device properties"] #[repr(C)] pub struct cudaDeviceProp { - #[doc = "< ASCII string identifying device"] pub name: [::std::os::raw::c_char; 256usize], - #[doc = "< 16-byte unique identifier"] pub uuid: cudaUUID_t, - #[doc = "< 8-byte locally unique identifier. Value is undefined on TCC and non-Windows platforms"] pub luid: [::std::os::raw::c_char; 8usize], - #[doc = "< LUID device node mask. Value is undefined on TCC and non-Windows platforms"] pub luidDeviceNodeMask: ::std::os::raw::c_uint, - #[doc = "< Global memory available on device in bytes"] pub totalGlobalMem: usize, - #[doc = "< Shared memory available per block in bytes"] pub sharedMemPerBlock: usize, - #[doc = "< 32-bit registers available per block"] pub regsPerBlock: ::std::os::raw::c_int, - #[doc = "< Warp size in threads"] pub warpSize: ::std::os::raw::c_int, - #[doc = "< Maximum pitch in bytes allowed by memory copies"] pub memPitch: usize, - #[doc = "< Maximum number of threads per block"] pub maxThreadsPerBlock: ::std::os::raw::c_int, - #[doc = "< Maximum size of each dimension of a block"] pub maxThreadsDim: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum size of each dimension of a grid"] pub maxGridSize: [::std::os::raw::c_int; 3usize], - #[doc = "< Clock frequency in kilohertz"] pub clockRate: ::std::os::raw::c_int, - #[doc = "< Constant memory available on device in bytes"] pub totalConstMem: usize, - #[doc = "< Major compute capability"] pub major: ::std::os::raw::c_int, - #[doc = "< Minor compute capability"] pub minor: ::std::os::raw::c_int, - #[doc = "< Alignment requirement for textures"] pub textureAlignment: usize, - #[doc = "< Pitch alignment requirement for texture references bound to pitched memory"] pub texturePitchAlignment: usize, - #[doc = "< Device can concurrently copy memory and execute a kernel. Deprecated. 
Use instead asyncEngineCount."] pub deviceOverlap: ::std::os::raw::c_int, - #[doc = "< Number of multiprocessors on device"] pub multiProcessorCount: ::std::os::raw::c_int, - #[doc = "< Specified whether there is a run time limit on kernels"] pub kernelExecTimeoutEnabled: ::std::os::raw::c_int, - #[doc = "< Device is integrated as opposed to discrete"] pub integrated: ::std::os::raw::c_int, - #[doc = "< Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer"] pub canMapHostMemory: ::std::os::raw::c_int, - #[doc = "< Compute mode (See ::cudaComputeMode)"] pub computeMode: ::std::os::raw::c_int, - #[doc = "< Maximum 1D texture size"] pub maxTexture1D: ::std::os::raw::c_int, - #[doc = "< Maximum 1D mipmapped texture size"] pub maxTexture1DMipmap: ::std::os::raw::c_int, - #[doc = "< Maximum size for 1D textures bound to linear memory"] pub maxTexture1DLinear: ::std::os::raw::c_int, - #[doc = "< Maximum 2D texture dimensions"] pub maxTexture2D: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum 2D mipmapped texture dimensions"] pub maxTexture2DMipmap: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory"] pub maxTexture2DLinear: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum 2D texture dimensions if texture gather operations have to be performed"] pub maxTexture2DGather: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum 3D texture dimensions"] pub maxTexture3D: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum alternate 3D texture dimensions"] pub maxTexture3DAlt: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum Cubemap texture dimensions"] pub maxTextureCubemap: ::std::os::raw::c_int, - #[doc = "< Maximum 1D layered texture dimensions"] pub maxTexture1DLayered: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum 2D layered texture dimensions"] pub maxTexture2DLayered: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum Cubemap layered texture dimensions"] pub maxTextureCubemapLayered: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum 1D surface size"] pub maxSurface1D: ::std::os::raw::c_int, - #[doc = "< Maximum 2D surface dimensions"] pub maxSurface2D: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum 3D surface dimensions"] pub maxSurface3D: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum 1D layered surface dimensions"] pub maxSurface1DLayered: [::std::os::raw::c_int; 2usize], - #[doc = "< Maximum 2D layered surface dimensions"] pub maxSurface2DLayered: [::std::os::raw::c_int; 3usize], - #[doc = "< Maximum Cubemap surface dimensions"] pub maxSurfaceCubemap: ::std::os::raw::c_int, - #[doc = "< Maximum Cubemap layered surface dimensions"] pub maxSurfaceCubemapLayered: [::std::os::raw::c_int; 2usize], - #[doc = "< Alignment requirements for surfaces"] pub surfaceAlignment: usize, - #[doc = "< Device can possibly execute multiple kernels concurrently"] pub concurrentKernels: ::std::os::raw::c_int, - #[doc = "< Device has ECC support enabled"] pub ECCEnabled: ::std::os::raw::c_int, - #[doc = "< PCI bus ID of the device"] pub pciBusID: ::std::os::raw::c_int, - #[doc = "< PCI device ID of the device"] pub pciDeviceID: ::std::os::raw::c_int, - #[doc = "< PCI domain ID of the device"] pub pciDomainID: ::std::os::raw::c_int, - #[doc = "< 1 if device is a Tesla device using TCC driver, 0 otherwise"] pub tccDriver: ::std::os::raw::c_int, - #[doc = "< Number of asynchronous engines"] pub asyncEngineCount: ::std::os::raw::c_int, - #[doc = "< Device shares a unified address 
space with the host"] pub unifiedAddressing: ::std::os::raw::c_int, - #[doc = "< Peak memory clock frequency in kilohertz"] pub memoryClockRate: ::std::os::raw::c_int, - #[doc = "< Global memory bus width in bits"] pub memoryBusWidth: ::std::os::raw::c_int, - #[doc = "< Size of L2 cache in bytes"] pub l2CacheSize: ::std::os::raw::c_int, - #[doc = "< Maximum resident threads per multiprocessor"] pub maxThreadsPerMultiProcessor: ::std::os::raw::c_int, - #[doc = "< Device supports stream priorities"] pub streamPrioritiesSupported: ::std::os::raw::c_int, - #[doc = "< Device supports caching globals in L1"] pub globalL1CacheSupported: ::std::os::raw::c_int, - #[doc = "< Device supports caching locals in L1"] pub localL1CacheSupported: ::std::os::raw::c_int, - #[doc = "< Shared memory available per multiprocessor in bytes"] pub sharedMemPerMultiprocessor: usize, - #[doc = "< 32-bit registers available per multiprocessor"] pub regsPerMultiprocessor: ::std::os::raw::c_int, - #[doc = "< Device supports allocating managed memory on this system"] pub managedMemory: ::std::os::raw::c_int, - #[doc = "< Device is on a multi-GPU board"] pub isMultiGpuBoard: ::std::os::raw::c_int, - #[doc = "< Unique identifier for a group of devices on the same multi-GPU board"] pub multiGpuBoardGroupID: ::std::os::raw::c_int, - #[doc = "< Link between the device and the host supports native atomic operations"] pub hostNativeAtomicSupported: ::std::os::raw::c_int, - #[doc = "< Ratio of single precision performance (in floating-point operations per second) to double precision performance"] pub singleToDoublePrecisionPerfRatio: ::std::os::raw::c_int, - #[doc = "< Device supports coherently accessing pageable memory without calling cudaHostRegister on it"] pub pageableMemoryAccess: ::std::os::raw::c_int, - #[doc = "< Device can coherently access managed memory concurrently with the CPU"] pub concurrentManagedAccess: ::std::os::raw::c_int, - #[doc = "< Device supports Compute Preemption"] pub computePreemptionSupported: ::std::os::raw::c_int, - #[doc = "< Device can access host registered memory at the same virtual address as the CPU"] pub canUseHostPointerForRegisteredMem: ::std::os::raw::c_int, - #[doc = "< Device supports launching cooperative kernels via ::cudaLaunchCooperativeKernel"] pub cooperativeLaunch: ::std::os::raw::c_int, - #[doc = "< Device can participate in cooperative kernels launched via ::cudaLaunchCooperativeKernelMultiDevice"] pub cooperativeMultiDeviceLaunch: ::std::os::raw::c_int, - #[doc = "< Per device maximum shared memory per block usable by special opt in"] pub sharedMemPerBlockOptin: usize, - #[doc = "< Device accesses pageable memory via the host's page tables"] pub pageableMemoryAccessUsesHostPageTables: ::std::os::raw::c_int, - #[doc = "< Host can directly access managed memory on the device without migration."] pub directManagedMemAccessFromHost: ::std::os::raw::c_int, } -#[doc = " CUDA IPC event handle"] #[repr(C)] #[derive(Copy, Clone)] pub struct cudaIpcEventHandle_st { pub reserved: [::std::os::raw::c_char; 64usize], } pub type cudaIpcEventHandle_t = cudaIpcEventHandle_st; -#[doc = " CUDA IPC memory handle"] #[repr(C)] #[derive(Copy, Clone)] pub struct cudaIpcMemHandle_st { pub reserved: [::std::os::raw::c_char; 64usize], } pub type cudaIpcMemHandle_t = cudaIpcMemHandle_st; -#[doc = " Handle is an opaque file descriptor"] pub const cudaExternalMemoryHandleType_cudaExternalMemoryHandleTypeOpaqueFd: cudaExternalMemoryHandleType = 1; -#[doc = " Handle is an opaque shared NT handle"] pub 
const cudaExternalMemoryHandleType_cudaExternalMemoryHandleTypeOpaqueWin32 : cudaExternalMemoryHandleType = 2 ; -#[doc = " Handle is an opaque, globally shared handle"] pub const cudaExternalMemoryHandleType_cudaExternalMemoryHandleTypeOpaqueWin32Kmt : cudaExternalMemoryHandleType = 3 ; -#[doc = " Handle is a D3D12 heap object"] pub const cudaExternalMemoryHandleType_cudaExternalMemoryHandleTypeD3D12Heap: cudaExternalMemoryHandleType = 4; -#[doc = " Handle is a D3D12 committed resource"] pub const cudaExternalMemoryHandleType_cudaExternalMemoryHandleTypeD3D12Resource : cudaExternalMemoryHandleType = 5 ; -#[doc = " External memory handle types"] pub type cudaExternalMemoryHandleType = u32; -#[doc = " External memory handle descriptor"] #[repr(C)] #[derive(Copy, Clone)] pub struct cudaExternalMemoryHandleDesc { - #[doc = " Type of the handle"] pub type_: cudaExternalMemoryHandleType, pub handle: cudaExternalMemoryHandleDesc__bindgen_ty_1, - #[doc = " Size of the memory allocation"] pub size: ::std::os::raw::c_ulonglong, - #[doc = " Flags must either be zero or ::cudaExternalMemoryDedicated"] pub flags: ::std::os::raw::c_uint, } #[repr(C)] #[derive(Copy, Clone)] pub union cudaExternalMemoryHandleDesc__bindgen_ty_1 { - #[doc = " File descriptor referencing the memory object. Valid"] - #[doc = " when type is"] - #[doc = " ::cudaExternalMemoryHandleTypeOpaqueFd"] pub fd: ::std::os::raw::c_int, pub win32: cudaExternalMemoryHandleDesc__bindgen_ty_1__bindgen_ty_1, _bindgen_union_align: [u64; 2usize], } -#[doc = " Win32 handle referencing the semaphore object. Valid when"] -#[doc = " type is one of the following:"] -#[doc = " - ::cudaExternalMemoryHandleTypeOpaqueWin32"] -#[doc = " - ::cudaExternalMemoryHandleTypeOpaqueWin32Kmt"] -#[doc = " - ::cudaExternalMemoryHandleTypeD3D12Heap"] -#[doc = " - ::cudaExternalMemoryHandleTypeD3D12Resource"] -#[doc = " Exactly one of 'handle' and 'name' must be non-NULL. If"] -#[doc = " type is ::cudaExternalMemoryHandleTypeOpaqueWin32Kmt"] -#[doc = " then 'name' must be NULL."] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaExternalMemoryHandleDesc__bindgen_ty_1__bindgen_ty_1 { - #[doc = " Valid NT handle. Must be NULL if 'name' is non-NULL"] pub handle: *mut ::std::os::raw::c_void, - #[doc = " Name of a valid memory object."] - #[doc = " Must be NULL if 'handle' is non-NULL."] pub name: *const ::std::os::raw::c_void, } -#[doc = " External memory buffer descriptor"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaExternalMemoryBufferDesc { - #[doc = " Offset into the memory object where the buffer's base is"] pub offset: ::std::os::raw::c_ulonglong, - #[doc = " Size of the buffer"] pub size: ::std::os::raw::c_ulonglong, - #[doc = " Flags reserved for future use. 
Must be zero."] pub flags: ::std::os::raw::c_uint, } -#[doc = " External memory mipmap descriptor"] #[repr(C)] pub struct cudaExternalMemoryMipmappedArrayDesc { - #[doc = " Offset into the memory object where the base level of the"] - #[doc = " mipmap chain is."] pub offset: ::std::os::raw::c_ulonglong, - #[doc = " Format of base level of the mipmap chain"] pub formatDesc: cudaChannelFormatDesc, - #[doc = " Dimensions of base level of the mipmap chain"] pub extent: cudaExtent, - #[doc = " Flags associated with CUDA mipmapped arrays."] - #[doc = " See ::cudaMallocMipmappedArray"] pub flags: ::std::os::raw::c_uint, - #[doc = " Total number of levels in the mipmap chain"] pub numLevels: ::std::os::raw::c_uint, } -#[doc = " Handle is an opaque file descriptor"] pub const cudaExternalSemaphoreHandleType_cudaExternalSemaphoreHandleTypeOpaqueFd : cudaExternalSemaphoreHandleType = 1 ; -#[doc = " Handle is an opaque shared NT handle"] pub const cudaExternalSemaphoreHandleType_cudaExternalSemaphoreHandleTypeOpaqueWin32 : cudaExternalSemaphoreHandleType = 2 ; -#[doc = " Handle is an opaque, globally shared handle"] pub const cudaExternalSemaphoreHandleType_cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt : cudaExternalSemaphoreHandleType = 3 ; -#[doc = " Handle is a shared NT handle referencing a D3D12 fence object"] pub const cudaExternalSemaphoreHandleType_cudaExternalSemaphoreHandleTypeD3D12Fence : cudaExternalSemaphoreHandleType = 4 ; -#[doc = " External semaphore handle types"] pub type cudaExternalSemaphoreHandleType = u32; -#[doc = " External semaphore handle descriptor"] #[repr(C)] #[derive(Copy, Clone)] pub struct cudaExternalSemaphoreHandleDesc { - #[doc = " Type of the handle"] pub type_: cudaExternalSemaphoreHandleType, pub handle: cudaExternalSemaphoreHandleDesc__bindgen_ty_1, - #[doc = " Flags reserved for the future. Must be zero."] pub flags: ::std::os::raw::c_uint, } #[repr(C)] #[derive(Copy, Clone)] pub union cudaExternalSemaphoreHandleDesc__bindgen_ty_1 { - #[doc = " File descriptor referencing the semaphore object. Valid"] - #[doc = " when type is ::cudaExternalSemaphoreHandleTypeOpaqueFd"] pub fd: ::std::os::raw::c_int, pub win32: cudaExternalSemaphoreHandleDesc__bindgen_ty_1__bindgen_ty_1, _bindgen_union_align: [u64; 2usize], } -#[doc = " Win32 handle referencing the semaphore object. Valid when"] -#[doc = " type is one of the following:"] -#[doc = " - ::cudaExternalSemaphoreHandleTypeOpaqueWin32"] -#[doc = " - ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt"] -#[doc = " - ::cudaExternalSemaphoreHandleTypeD3D12Fence"] -#[doc = " Exactly one of 'handle' and 'name' must be non-NULL. If"] -#[doc = " type is ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt"] -#[doc = " then 'name' must be NULL."] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaExternalSemaphoreHandleDesc__bindgen_ty_1__bindgen_ty_1 { - #[doc = " Valid NT handle. Must be NULL if 'name' is non-NULL"] pub handle: *mut ::std::os::raw::c_void, - #[doc = " Name of a valid synchronization primitive."] - #[doc = " Must be NULL if 'handle' is non-NULL."] pub name: *const ::std::os::raw::c_void, } -#[doc = " External semaphore signal parameters"] #[repr(C)] #[derive(Copy, Clone)] pub struct cudaExternalSemaphoreSignalParams { pub params: cudaExternalSemaphoreSignalParams__bindgen_ty_1, - #[doc = " Flags reserved for the future. 
Must be zero."] pub flags: ::std::os::raw::c_uint, } #[repr(C)] @@ -12945,19 +3650,15 @@ pub union cudaExternalSemaphoreSignalParams__bindgen_ty_1 { pub fence: cudaExternalSemaphoreSignalParams__bindgen_ty_1__bindgen_ty_1, _bindgen_union_align: u64, } -#[doc = " Parameters for fence objects"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaExternalSemaphoreSignalParams__bindgen_ty_1__bindgen_ty_1 { - #[doc = " Value of fence to be signaled"] pub value: ::std::os::raw::c_ulonglong, } -#[doc = " External semaphore wait parameters"] #[repr(C)] #[derive(Copy, Clone)] pub struct cudaExternalSemaphoreWaitParams { pub params: cudaExternalSemaphoreWaitParams__bindgen_ty_1, - #[doc = " Flags reserved for the future. Must be zero."] pub flags: ::std::os::raw::c_uint, } #[repr(C)] @@ -12966,617 +3667,171 @@ pub union cudaExternalSemaphoreWaitParams__bindgen_ty_1 { pub fence: cudaExternalSemaphoreWaitParams__bindgen_ty_1__bindgen_ty_1, _bindgen_union_align: u64, } -#[doc = " Parameters for fence objects"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaExternalSemaphoreWaitParams__bindgen_ty_1__bindgen_ty_1 { - #[doc = " Value of fence to be waited on"] pub value: ::std::os::raw::c_ulonglong, } -#[doc = " CUDA Error types"] pub use self::cudaError::Type as cudaError_t; -#[doc = " CUDA stream"] pub type cudaStream_t = *mut CUstream_st; -#[doc = " CUDA event types"] pub type cudaEvent_t = *mut CUevent_st; -#[doc = " CUDA graphics resource types"] pub type cudaGraphicsResource_t = *mut cudaGraphicsResource; -#[doc = " CUDA output file modes"] pub use self::cudaOutputMode as cudaOutputMode_t; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUexternalMemory_st { _unused: [u8; 0], } -#[doc = " CUDA external memory"] pub type cudaExternalMemory_t = *mut CUexternalMemory_st; #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct CUexternalSemaphore_st { _unused: [u8; 0], } -#[doc = " CUDA external semaphore"] pub type cudaExternalSemaphore_t = *mut CUexternalSemaphore_st; -#[doc = " CUDA graph"] pub type cudaGraph_t = *mut CUgraph_st; -#[doc = " CUDA graph node."] pub type cudaGraphNode_t = *mut CUgraphNode_st; -#[doc = "< Invalid cooperative group scope"] pub const cudaCGScope_cudaCGScopeInvalid: cudaCGScope = 0; -#[doc = "< Scope represented by a grid_group"] pub const cudaCGScope_cudaCGScopeGrid: cudaCGScope = 1; -#[doc = "< Scope represented by a multi_grid_group"] pub const cudaCGScope_cudaCGScopeMultiGrid: cudaCGScope = 2; -#[doc = " CUDA cooperative group scope"] pub type cudaCGScope = u32; -#[doc = " CUDA launch parameters"] #[repr(C)] pub struct cudaLaunchParams { - #[doc = "< Device function symbol"] pub func: *mut ::std::os::raw::c_void, - #[doc = "< Grid dimentions"] pub gridDim: dim3, - #[doc = "< Block dimentions"] pub blockDim: dim3, - #[doc = "< Arguments"] pub args: *mut *mut ::std::os::raw::c_void, - #[doc = "< Shared memory"] pub sharedMem: usize, - #[doc = "< Stream identifier"] pub stream: cudaStream_t, } -#[doc = " CUDA GPU kernel node parameters"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaKernelNodeParams { - #[doc = "< Kernel to launch"] pub func: *mut ::std::os::raw::c_void, - #[doc = "< Grid dimensions"] pub gridDim: dim3, - #[doc = "< Block dimensions"] pub blockDim: dim3, - #[doc = "< Dynamic shared-memory size per thread block in bytes"] pub sharedMemBytes: ::std::os::raw::c_uint, - #[doc = "< Array of pointers to individual kernel arguments"] pub kernelParams: *mut *mut ::std::os::raw::c_void, - #[doc = "< Pointer to kernel arguments in the 
\"extra\" format"] pub extra: *mut *mut ::std::os::raw::c_void, } -#[doc = "< GPU kernel node"] pub const cudaGraphNodeType_cudaGraphNodeTypeKernel: cudaGraphNodeType = 0; -#[doc = "< Memcpy node"] pub const cudaGraphNodeType_cudaGraphNodeTypeMemcpy: cudaGraphNodeType = 1; -#[doc = "< Memset node"] pub const cudaGraphNodeType_cudaGraphNodeTypeMemset: cudaGraphNodeType = 2; -#[doc = "< Host (executable) node"] pub const cudaGraphNodeType_cudaGraphNodeTypeHost: cudaGraphNodeType = 3; -#[doc = "< Node which executes an embedded graph"] pub const cudaGraphNodeType_cudaGraphNodeTypeGraph: cudaGraphNodeType = 4; -#[doc = "< Empty (no-op) node"] pub const cudaGraphNodeType_cudaGraphNodeTypeEmpty: cudaGraphNodeType = 5; pub const cudaGraphNodeType_cudaGraphNodeTypeCount: cudaGraphNodeType = 6; -#[doc = " CUDA Graph node types"] pub type cudaGraphNodeType = u32; -#[doc = " CUDA executable (launchable) graph"] pub type cudaGraphExec_t = *mut CUgraphExec_st; -#[doc = "< Zero boundary mode"] pub const cudaSurfaceBoundaryMode_cudaBoundaryModeZero: cudaSurfaceBoundaryMode = 0; -#[doc = "< Clamp boundary mode"] pub const cudaSurfaceBoundaryMode_cudaBoundaryModeClamp: cudaSurfaceBoundaryMode = 1; -#[doc = "< Trap boundary mode"] pub const cudaSurfaceBoundaryMode_cudaBoundaryModeTrap: cudaSurfaceBoundaryMode = 2; -#[doc = " CUDA Surface boundary modes"] pub type cudaSurfaceBoundaryMode = u32; -#[doc = "< Forced format mode"] pub const cudaSurfaceFormatMode_cudaFormatModeForced: cudaSurfaceFormatMode = 0; -#[doc = "< Auto format mode"] pub const cudaSurfaceFormatMode_cudaFormatModeAuto: cudaSurfaceFormatMode = 1; -#[doc = " CUDA Surface format modes"] pub type cudaSurfaceFormatMode = u32; -#[doc = " CUDA Surface reference"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct surfaceReference { - #[doc = " Channel descriptor for surface reference"] pub channelDesc: cudaChannelFormatDesc, } -#[doc = " An opaque value that represents a CUDA Surface object"] pub type cudaSurfaceObject_t = ::std::os::raw::c_ulonglong; -#[doc = "< Wrapping address mode"] pub const cudaTextureAddressMode_cudaAddressModeWrap: cudaTextureAddressMode = 0; -#[doc = "< Clamp to edge address mode"] pub const cudaTextureAddressMode_cudaAddressModeClamp: cudaTextureAddressMode = 1; -#[doc = "< Mirror address mode"] pub const cudaTextureAddressMode_cudaAddressModeMirror: cudaTextureAddressMode = 2; -#[doc = "< Border address mode"] pub const cudaTextureAddressMode_cudaAddressModeBorder: cudaTextureAddressMode = 3; -#[doc = " CUDA texture address modes"] pub type cudaTextureAddressMode = u32; -#[doc = "< Point filter mode"] pub const cudaTextureFilterMode_cudaFilterModePoint: cudaTextureFilterMode = 0; -#[doc = "< Linear filter mode"] pub const cudaTextureFilterMode_cudaFilterModeLinear: cudaTextureFilterMode = 1; -#[doc = " CUDA texture filter modes"] pub type cudaTextureFilterMode = u32; -#[doc = "< Read texture as specified element type"] pub const cudaTextureReadMode_cudaReadModeElementType: cudaTextureReadMode = 0; -#[doc = "< Read texture as normalized float"] pub const cudaTextureReadMode_cudaReadModeNormalizedFloat: cudaTextureReadMode = 1; -#[doc = " CUDA texture read modes"] pub type cudaTextureReadMode = u32; -#[doc = " CUDA texture reference"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct textureReference { - #[doc = " Indicates whether texture reads are normalized or not"] pub normalized: ::std::os::raw::c_int, - #[doc = " Texture filter mode"] pub filterMode: cudaTextureFilterMode, - #[doc = " Texture address mode for up 
to 3 dimensions"] pub addressMode: [cudaTextureAddressMode; 3usize], - #[doc = " Channel descriptor for the texture reference"] pub channelDesc: cudaChannelFormatDesc, - #[doc = " Perform sRGB->linear conversion during texture read"] pub sRGB: ::std::os::raw::c_int, - #[doc = " Limit to the anisotropy ratio"] pub maxAnisotropy: ::std::os::raw::c_uint, - #[doc = " Mipmap filter mode"] pub mipmapFilterMode: cudaTextureFilterMode, - #[doc = " Offset applied to the supplied mipmap level"] pub mipmapLevelBias: f32, - #[doc = " Lower end of the mipmap level range to clamp access to"] pub minMipmapLevelClamp: f32, - #[doc = " Upper end of the mipmap level range to clamp access to"] pub maxMipmapLevelClamp: f32, pub __cudaReserved: [::std::os::raw::c_int; 15usize], } -#[doc = " CUDA texture descriptor"] #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct cudaTextureDesc { - #[doc = " Texture address mode for up to 3 dimensions"] pub addressMode: [cudaTextureAddressMode; 3usize], - #[doc = " Texture filter mode"] pub filterMode: cudaTextureFilterMode, - #[doc = " Texture read mode"] pub readMode: cudaTextureReadMode, - #[doc = " Perform sRGB->linear conversion during texture read"] pub sRGB: ::std::os::raw::c_int, - #[doc = " Texture Border Color"] pub borderColor: [f32; 4usize], - #[doc = " Indicates whether texture reads are normalized or not"] pub normalizedCoords: ::std::os::raw::c_int, - #[doc = " Limit to the anisotropy ratio"] pub maxAnisotropy: ::std::os::raw::c_uint, - #[doc = " Mipmap filter mode"] pub mipmapFilterMode: cudaTextureFilterMode, - #[doc = " Offset applied to the supplied mipmap level"] pub mipmapLevelBias: f32, - #[doc = " Lower end of the mipmap level range to clamp access to"] pub minMipmapLevelClamp: f32, - #[doc = " Upper end of the mipmap level range to clamp access to"] pub maxMipmapLevelClamp: f32, } -#[doc = " An opaque value that represents a CUDA texture object"] pub type cudaTextureObject_t = ::std::os::raw::c_ulonglong; extern "C" { - #[doc = " \\brief Destroy all allocations and reset all state on the current device"] - #[doc = " in the current process."] - #[doc = ""] - #[doc = " Explicitly destroys and cleans up all resources associated with the current"] - #[doc = " device in the current process. Any subsequent API call to this device will"] - #[doc = " reinitialize the device."] - #[doc = ""] - #[doc = " Note that this function will reset the device immediately. It is the caller's"] - #[doc = " responsibility to ensure that the device is not being accessed by any"] - #[doc = " other host threads from the process when this function is called."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceSynchronize"] pub fn cudaDeviceReset() -> cudaError_t; } extern "C" { - #[doc = " \\brief Wait for compute device to finish"] - #[doc = ""] - #[doc = " Blocks until the device has completed all preceding requested tasks."] - #[doc = " ::cudaDeviceSynchronize() returns an error if one of the preceding tasks"] - #[doc = " has failed. 
If the ::cudaDeviceScheduleBlockingSync flag was set for"] - #[doc = " this device, the host thread will block until the device has finished"] - #[doc = " its work."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaDeviceReset,"] - #[doc = " ::cuCtxSynchronize"] pub fn cudaDeviceSynchronize() -> cudaError_t; } extern "C" { - #[doc = " \\brief Set resource limits"] - #[doc = ""] - #[doc = " Setting \\p limit to \\p value is a request by the application to update"] - #[doc = " the current limit maintained by the device. The driver is free to"] - #[doc = " modify the requested value to meet h/w requirements (this could be"] - #[doc = " clamping to minimum or maximum values, rounding up to nearest element"] - #[doc = " size, etc). The application can use ::cudaDeviceGetLimit() to find out"] - #[doc = " exactly what the limit has been set to."] - #[doc = ""] - #[doc = " Setting each ::cudaLimit has its own specific restrictions, so each is"] - #[doc = " discussed here."] - #[doc = ""] - #[doc = " - ::cudaLimitStackSize controls the stack size in bytes of each GPU thread."] - #[doc = " Note that the CUDA driver will set the \\p limit to the maximum of \\p value"] - #[doc = " and what the kernel function requires."] - #[doc = ""] - #[doc = " - ::cudaLimitPrintfFifoSize controls the size in bytes of the shared FIFO"] - #[doc = " used by the ::printf() device system call. Setting"] - #[doc = " ::cudaLimitPrintfFifoSize must not be performed after launching any kernel"] - #[doc = " that uses the ::printf() device system call - in such case"] - #[doc = " ::cudaErrorInvalidValue will be returned."] - #[doc = ""] - #[doc = " - ::cudaLimitMallocHeapSize controls the size in bytes of the heap used by"] - #[doc = " the ::malloc() and ::free() device system calls. Setting"] - #[doc = " ::cudaLimitMallocHeapSize must not be performed after launching any kernel"] - #[doc = " that uses the ::malloc() or ::free() device system calls - in such case"] - #[doc = " ::cudaErrorInvalidValue will be returned."] - #[doc = ""] - #[doc = " - ::cudaLimitDevRuntimeSyncDepth controls the maximum nesting depth of a"] - #[doc = " grid at which a thread can safely call ::cudaDeviceSynchronize(). Setting"] - #[doc = " this limit must be performed before any launch of a kernel that uses the"] - #[doc = " device runtime and calls ::cudaDeviceSynchronize() above the default sync"] - #[doc = " depth, two levels of grids. Calls to ::cudaDeviceSynchronize() will fail"] - #[doc = " with error code ::cudaErrorSyncDepthExceeded if the limitation is"] - #[doc = " violated. This limit can be set smaller than the default or up the maximum"] - #[doc = " launch depth of 24. When setting this limit, keep in mind that additional"] - #[doc = " levels of sync depth require the runtime to reserve large amounts of"] - #[doc = " device memory which can no longer be used for user allocations. If these"] - #[doc = " reservations of device memory fail, ::cudaDeviceSetLimit will return"] - #[doc = " ::cudaErrorMemoryAllocation, and the limit can be reset to a lower value."] - #[doc = " This limit is only applicable to devices of compute capability 3.5 and"] - #[doc = " higher. 
Attempting to set this limit on devices of compute capability less"] - #[doc = " than 3.5 will result in the error ::cudaErrorUnsupportedLimit being"] - #[doc = " returned."] - #[doc = ""] - #[doc = " - ::cudaLimitDevRuntimePendingLaunchCount controls the maximum number of"] - #[doc = " outstanding device runtime launches that can be made from the current"] - #[doc = " device. A grid is outstanding from the point of launch up until the grid"] - #[doc = " is known to have been completed. Device runtime launches which violate"] - #[doc = " this limitation fail and return ::cudaErrorLaunchPendingCountExceeded when"] - #[doc = " ::cudaGetLastError() is called after launch. If more pending launches than"] - #[doc = " the default (2048 launches) are needed for a module using the device"] - #[doc = " runtime, this limit can be increased. Keep in mind that being able to"] - #[doc = " sustain additional pending launches will require the runtime to reserve"] - #[doc = " larger amounts of device memory upfront which can no longer be used for"] - #[doc = " allocations. If these reservations fail, ::cudaDeviceSetLimit will return"] - #[doc = " ::cudaErrorMemoryAllocation, and the limit can be reset to a lower value."] - #[doc = " This limit is only applicable to devices of compute capability 3.5 and"] - #[doc = " higher. Attempting to set this limit on devices of compute capability less"] - #[doc = " than 3.5 will result in the error ::cudaErrorUnsupportedLimit being"] - #[doc = " returned."] - #[doc = ""] - #[doc = " - ::cudaLimitMaxL2FetchGranularity controls the L2 cache fetch granularity."] - #[doc = " Values can range from 0B to 128B. This is purely a performance hint and"] - #[doc = " it can be ignored or clamped depending on the platform."] - #[doc = ""] - #[doc = " \\param limit - Limit to set"] - #[doc = " \\param value - Size of limit"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorUnsupportedLimit,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaDeviceGetLimit,"] - #[doc = " ::cuCtxSetLimit"] pub fn cudaDeviceSetLimit(limit: cudaLimit, value: usize) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns resource limits"] - #[doc = ""] - #[doc = " Returns in \\p *pValue the current size of \\p limit. 
The supported"] - #[doc = " ::cudaLimit values are:"] - #[doc = " - ::cudaLimitStackSize: stack size in bytes of each GPU thread;"] - #[doc = " - ::cudaLimitPrintfFifoSize: size in bytes of the shared FIFO used by the"] - #[doc = " ::printf() device system call."] - #[doc = " - ::cudaLimitMallocHeapSize: size in bytes of the heap used by the"] - #[doc = " ::malloc() and ::free() device system calls;"] - #[doc = " - ::cudaLimitDevRuntimeSyncDepth: maximum grid depth at which a"] - #[doc = " thread can isssue the device runtime call ::cudaDeviceSynchronize()"] - #[doc = " to wait on child grid launches to complete."] - #[doc = " - ::cudaLimitDevRuntimePendingLaunchCount: maximum number of outstanding"] - #[doc = " device runtime launches."] - #[doc = " - ::cudaLimitMaxL2FetchGranularity: L2 cache fetch granularity."] - #[doc = ""] - #[doc = " \\param limit - Limit to query"] - #[doc = " \\param pValue - Returned size of the limit"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorUnsupportedLimit,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaDeviceSetLimit,"] - #[doc = " ::cuCtxGetLimit"] pub fn cudaDeviceGetLimit( pValue: *mut usize, limit: cudaLimit, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns the preferred cache configuration for the current device."] - #[doc = ""] - #[doc = " On devices where the L1 cache and shared memory use the same hardware"] - #[doc = " resources, this returns through \\p pCacheConfig the preferred cache"] - #[doc = " configuration for the current device. This is only a preference. The"] - #[doc = " runtime will use the requested configuration if possible, but it is free to"] - #[doc = " choose a different configuration if required to execute functions."] - #[doc = ""] - #[doc = " This will return a \\p pCacheConfig of ::cudaFuncCachePreferNone on devices"] - #[doc = " where the size of the L1 cache and shared memory are fixed."] - #[doc = ""] - #[doc = " The supported cache configurations are:"] - #[doc = " - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default)"] - #[doc = " - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache"] - #[doc = " - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory"] - #[doc = " - ::cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory"] - #[doc = ""] - #[doc = " \\param pCacheConfig - Returned cache configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa cudaDeviceSetCacheConfig,"] - #[doc = " \\ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C API)\","] - #[doc = " \\ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C++ API)\","] - #[doc = " ::cuCtxGetCacheConfig"] pub fn cudaDeviceGetCacheConfig( pCacheConfig: *mut cudaFuncCache, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns numerical values that correspond to the least and"] - #[doc = " greatest stream priorities."] - #[doc = ""] - #[doc = " Returns in \\p *leastPriority and \\p *greatestPriority the numerical values that correspond"] - #[doc = " to the least and greatest stream priorities respectively. 
Stream priorities"] - #[doc = " follow a convention where lower numbers imply greater priorities. The range of"] - #[doc = " meaningful stream priorities is given by [\\p *greatestPriority, \\p *leastPriority]."] - #[doc = " If the user attempts to create a stream with a priority value that is"] - #[doc = " outside the the meaningful range as specified by this API, the priority is"] - #[doc = " automatically clamped down or up to either \\p *leastPriority or \\p *greatestPriority"] - #[doc = " respectively. See ::cudaStreamCreateWithPriority for details on creating a"] - #[doc = " priority stream."] - #[doc = " A NULL may be passed in for \\p *leastPriority or \\p *greatestPriority if the value"] - #[doc = " is not desired."] - #[doc = ""] - #[doc = " This function will return '0' in both \\p *leastPriority and \\p *greatestPriority if"] - #[doc = " the current context's device does not support stream priorities"] - #[doc = " (see ::cudaDeviceGetAttribute)."] - #[doc = ""] - #[doc = " \\param leastPriority - Pointer to an int in which the numerical value for least"] - #[doc = " stream priority is returned"] - #[doc = " \\param greatestPriority - Pointer to an int in which the numerical value for greatest"] - #[doc = " stream priority is returned"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreateWithPriority,"] - #[doc = " ::cudaStreamGetPriority,"] - #[doc = " ::cuCtxGetStreamPriorityRange"] pub fn cudaDeviceGetStreamPriorityRange( leastPriority: *mut ::std::os::raw::c_int, greatestPriority: *mut ::std::os::raw::c_int, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets the preferred cache configuration for the current device."] - #[doc = ""] - #[doc = " On devices where the L1 cache and shared memory use the same hardware"] - #[doc = " resources, this sets through \\p cacheConfig the preferred cache"] - #[doc = " configuration for the current device. This is only a preference. The"] - #[doc = " runtime will use the requested configuration if possible, but it is free to"] - #[doc = " choose a different configuration if required to execute the function. Any"] - #[doc = " function preference set via"] - #[doc = " \\ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C API)\""] - #[doc = " or"] - #[doc = " \\ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C++ API)\""] - #[doc = " will be preferred over this device-wide setting. 
Setting the device-wide"] - #[doc = " cache configuration to ::cudaFuncCachePreferNone will cause subsequent"] - #[doc = " kernel launches to prefer to not change the cache configuration unless"] - #[doc = " required to launch the kernel."] - #[doc = ""] - #[doc = " This setting does nothing on devices where the size of the L1 cache and"] - #[doc = " shared memory are fixed."] - #[doc = ""] - #[doc = " Launching a kernel with a different preference than the most recent"] - #[doc = " preference setting may insert a device-side synchronization point."] - #[doc = ""] - #[doc = " The supported cache configurations are:"] - #[doc = " - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default)"] - #[doc = " - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache"] - #[doc = " - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory"] - #[doc = " - ::cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory"] - #[doc = ""] - #[doc = " \\param cacheConfig - Requested cache configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceGetCacheConfig,"] - #[doc = " \\ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C API)\","] - #[doc = " \\ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C++ API)\","] - #[doc = " ::cuCtxSetCacheConfig"] pub fn cudaDeviceSetCacheConfig(cacheConfig: cudaFuncCache) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns the shared memory configuration for the current device."] - #[doc = ""] - #[doc = " This function will return in \\p pConfig the current size of shared memory banks"] - #[doc = " on the current device. On devices with configurable shared memory banks,"] - #[doc = " ::cudaDeviceSetSharedMemConfig can be used to change this setting, so that all"] - #[doc = " subsequent kernel launches will by default use the new bank size. 
When"] - #[doc = " ::cudaDeviceGetSharedMemConfig is called on devices without configurable shared"] - #[doc = " memory, it will return the fixed bank size of the hardware."] - #[doc = ""] - #[doc = " The returned bank configurations can be either:"] - #[doc = " - ::cudaSharedMemBankSizeFourByte - shared memory bank width is four bytes."] - #[doc = " - ::cudaSharedMemBankSizeEightByte - shared memory bank width is eight bytes."] - #[doc = ""] - #[doc = " \\param pConfig - Returned cache configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceSetCacheConfig,"] - #[doc = " ::cudaDeviceGetCacheConfig,"] - #[doc = " ::cudaDeviceSetSharedMemConfig,"] - #[doc = " ::cudaFuncSetCacheConfig,"] - #[doc = " ::cuCtxGetSharedMemConfig"] pub fn cudaDeviceGetSharedMemConfig( pConfig: *mut cudaSharedMemConfig, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets the shared memory configuration for the current device."] - #[doc = ""] - #[doc = " On devices with configurable shared memory banks, this function will set"] - #[doc = " the shared memory bank size which is used for all subsequent kernel launches."] - #[doc = " Any per-function setting of shared memory set via ::cudaFuncSetSharedMemConfig"] - #[doc = " will override the device wide setting."] - #[doc = ""] - #[doc = " Changing the shared memory configuration between launches may introduce"] - #[doc = " a device side synchronization point."] - #[doc = ""] - #[doc = " Changing the shared memory bank size will not increase shared memory usage"] - #[doc = " or affect occupancy of kernels, but may have major effects on performance."] - #[doc = " Larger bank sizes will allow for greater potential bandwidth to shared memory,"] - #[doc = " but will change what kinds of accesses to shared memory will result in bank"] - #[doc = " conflicts."] - #[doc = ""] - #[doc = " This function will do nothing on devices with fixed shared memory bank size."] - #[doc = ""] - #[doc = " The supported bank configurations are:"] - #[doc = " - ::cudaSharedMemBankSizeDefault: set bank width the device default (currently,"] - #[doc = " four bytes)"] - #[doc = " - ::cudaSharedMemBankSizeFourByte: set shared memory bank width to be four bytes"] - #[doc = " natively."] - #[doc = " - ::cudaSharedMemBankSizeEightByte: set shared memory bank width to be eight"] - #[doc = " bytes natively."] - #[doc = ""] - #[doc = " \\param config - Requested cache configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceSetCacheConfig,"] - #[doc = " ::cudaDeviceGetCacheConfig,"] - #[doc = " ::cudaDeviceGetSharedMemConfig,"] - #[doc = " ::cudaFuncSetCacheConfig,"] - #[doc = " ::cuCtxSetSharedMemConfig"] pub fn cudaDeviceSetSharedMemConfig( config: cudaSharedMemConfig, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a handle to a compute device"] - #[doc = ""] - #[doc = " Returns in \\p *device a device ordinal given a PCI bus ID string."] - #[doc = ""] - #[doc = " \\param device - Returned device ordinal"] - #[doc = ""] - #[doc = " \\param pciBusId - String in one of the following forms:"] - #[doc = " [domain]:[bus]:[device].[function]"] - #[doc = " [domain]:[bus]:[device]"] - #[doc = " 
[bus]:[device].[function]"] - #[doc = " where \\p domain, \\p bus, \\p device, and \\p function are all hexadecimal values"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaDeviceGetPCIBusId,"] - #[doc = " ::cuDeviceGetByPCIBusId"] pub fn cudaDeviceGetByPCIBusId( device: *mut ::std::os::raw::c_int, pciBusId: *const ::std::os::raw::c_char, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a PCI Bus Id string for the device"] - #[doc = ""] - #[doc = " Returns an ASCII string identifying the device \\p dev in the NULL-terminated"] - #[doc = " string pointed to by \\p pciBusId. \\p len specifies the maximum length of the"] - #[doc = " string that may be returned."] - #[doc = ""] - #[doc = " \\param pciBusId - Returned identifier string for the device in the following format"] - #[doc = " [domain]:[bus]:[device].[function]"] - #[doc = " where \\p domain, \\p bus, \\p device, and \\p function are all hexadecimal values."] - #[doc = " pciBusId should be large enough to store 13 characters including the NULL-terminator."] - #[doc = ""] - #[doc = " \\param len - Maximum length of string to store in \\p name"] - #[doc = ""] - #[doc = " \\param device - Device to get identifier string for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaDeviceGetByPCIBusId,"] - #[doc = " ::cuDeviceGetPCIBusId"] pub fn cudaDeviceGetPCIBusId( pciBusId: *mut ::std::os::raw::c_char, len: ::std::os::raw::c_int, @@ -13584,195 +3839,24 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Gets an interprocess handle for a previously allocated event"] - #[doc = ""] - #[doc = " Takes as input a previously allocated event. This event must have been"] - #[doc = " created with the ::cudaEventInterprocess and ::cudaEventDisableTiming"] - #[doc = " flags set. This opaque handle may be copied into other processes and"] - #[doc = " opened with ::cudaIpcOpenEventHandle to allow efficient hardware"] - #[doc = " synchronization between GPU work in different processes."] - #[doc = ""] - #[doc = " After the event has been been opened in the importing process,"] - #[doc = " ::cudaEventRecord, ::cudaEventSynchronize, ::cudaStreamWaitEvent and"] - #[doc = " ::cudaEventQuery may be used in either process. Performing operations"] - #[doc = " on the imported event after the exported event has been freed"] - #[doc = " with ::cudaEventDestroy will result in undefined behavior."] - #[doc = ""] - #[doc = " IPC functionality is restricted to devices with support for unified"] - #[doc = " addressing on Linux operating systems. 
IPC functionality is not supported"] - #[doc = " on Tegra platforms."] - #[doc = ""] - #[doc = " \\param handle - Pointer to a user allocated cudaIpcEventHandle"] - #[doc = " in which to return the opaque event handle"] - #[doc = " \\param event - Event allocated with ::cudaEventInterprocess and"] - #[doc = " ::cudaEventDisableTiming flags."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorMemoryAllocation,"] - #[doc = " ::cudaErrorMapBufferObjectFailed,"] - #[doc = " ::cudaErrorNotSupported"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaEventCreate,"] - #[doc = " ::cudaEventDestroy,"] - #[doc = " ::cudaEventSynchronize,"] - #[doc = " ::cudaEventQuery,"] - #[doc = " ::cudaStreamWaitEvent,"] - #[doc = " ::cudaIpcOpenEventHandle,"] - #[doc = " ::cudaIpcGetMemHandle,"] - #[doc = " ::cudaIpcOpenMemHandle,"] - #[doc = " ::cudaIpcCloseMemHandle,"] - #[doc = " ::cuIpcGetEventHandle"] pub fn cudaIpcGetEventHandle( handle: *mut cudaIpcEventHandle_t, event: cudaEvent_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Opens an interprocess event handle for use in the current process"] - #[doc = ""] - #[doc = " Opens an interprocess event handle exported from another process with"] - #[doc = " ::cudaIpcGetEventHandle. This function returns a ::cudaEvent_t that behaves like"] - #[doc = " a locally created event with the ::cudaEventDisableTiming flag specified."] - #[doc = " This event must be freed with ::cudaEventDestroy."] - #[doc = ""] - #[doc = " Performing operations on the imported event after the exported event has"] - #[doc = " been freed with ::cudaEventDestroy will result in undefined behavior."] - #[doc = ""] - #[doc = " IPC functionality is restricted to devices with support for unified"] - #[doc = " addressing on Linux operating systems. IPC functionality is not supported"] - #[doc = " on Tegra platforms."] - #[doc = ""] - #[doc = " \\param event - Returns the imported event"] - #[doc = " \\param handle - Interprocess handle to open"] - #[doc = ""] - #[doc = " \\returns"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorMapBufferObjectFailed,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorNotSupported"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaEventCreate,"] - #[doc = " ::cudaEventDestroy,"] - #[doc = " ::cudaEventSynchronize,"] - #[doc = " ::cudaEventQuery,"] - #[doc = " ::cudaStreamWaitEvent,"] - #[doc = " ::cudaIpcGetEventHandle,"] - #[doc = " ::cudaIpcGetMemHandle,"] - #[doc = " ::cudaIpcOpenMemHandle,"] - #[doc = " ::cudaIpcCloseMemHandle,"] - #[doc = " ::cuIpcOpenEventHandle"] pub fn cudaIpcOpenEventHandle( event: *mut cudaEvent_t, handle: cudaIpcEventHandle_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Gets an interprocess memory handle for an existing device memory"] - #[doc = " allocation"] - #[doc = ""] - #[doc = " Takes a pointer to the base of an existing device memory allocation created"] - #[doc = " with ::cudaMalloc and exports it for use in another process. 
This is a"] - #[doc = " lightweight operation and may be called multiple times on an allocation"] - #[doc = " without adverse effects."] - #[doc = ""] - #[doc = " If a region of memory is freed with ::cudaFree and a subsequent call"] - #[doc = " to ::cudaMalloc returns memory with the same device address,"] - #[doc = " ::cudaIpcGetMemHandle will return a unique handle for the"] - #[doc = " new memory."] - #[doc = ""] - #[doc = " IPC functionality is restricted to devices with support for unified"] - #[doc = " addressing on Linux operating systems. IPC functionality is not supported"] - #[doc = " on Tegra platforms."] - #[doc = ""] - #[doc = " \\param handle - Pointer to user allocated ::cudaIpcMemHandle to return"] - #[doc = " the handle in."] - #[doc = " \\param devPtr - Base pointer to previously allocated device memory"] - #[doc = ""] - #[doc = " \\returns"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorMemoryAllocation,"] - #[doc = " ::cudaErrorMapBufferObjectFailed,"] - #[doc = " ::cudaErrorNotSupported"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaMalloc,"] - #[doc = " ::cudaFree,"] - #[doc = " ::cudaIpcGetEventHandle,"] - #[doc = " ::cudaIpcOpenEventHandle,"] - #[doc = " ::cudaIpcOpenMemHandle,"] - #[doc = " ::cudaIpcCloseMemHandle,"] - #[doc = " ::cuIpcGetMemHandle"] pub fn cudaIpcGetMemHandle( handle: *mut cudaIpcMemHandle_t, devPtr: *mut ::std::os::raw::c_void, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Opens an interprocess memory handle exported from another process"] - #[doc = " and returns a device pointer usable in the local process."] - #[doc = ""] - #[doc = " Maps memory exported from another process with ::cudaIpcGetMemHandle into"] - #[doc = " the current device address space. For contexts on different devices"] - #[doc = " ::cudaIpcOpenMemHandle can attempt to enable peer access between the"] - #[doc = " devices as if the user called ::cudaDeviceEnablePeerAccess. This behavior is"] - #[doc = " controlled by the ::cudaIpcMemLazyEnablePeerAccess flag."] - #[doc = " ::cudaDeviceCanAccessPeer can determine if a mapping is possible."] - #[doc = ""] - #[doc = " ::cudaIpcOpenMemHandle can open handles to devices that may not be visible"] - #[doc = " in the process calling the API."] - #[doc = ""] - #[doc = " Contexts that may open ::cudaIpcMemHandles are restricted in the following way."] - #[doc = " ::cudaIpcMemHandles from each device in a given process may only be opened"] - #[doc = " by one context per device per other process."] - #[doc = ""] - #[doc = " Memory returned from ::cudaIpcOpenMemHandle must be freed with"] - #[doc = " ::cudaIpcCloseMemHandle."] - #[doc = ""] - #[doc = " Calling ::cudaFree on an exported memory region before calling"] - #[doc = " ::cudaIpcCloseMemHandle in the importing context will result in undefined"] - #[doc = " behavior."] - #[doc = ""] - #[doc = " IPC functionality is restricted to devices with support for unified"] - #[doc = " addressing on Linux operating systems. IPC functionality is not supported"] - #[doc = " on Tegra platforms."] - #[doc = ""] - #[doc = " \\param devPtr - Returned device pointer"] - #[doc = " \\param handle - ::cudaIpcMemHandle to open"] - #[doc = " \\param flags - Flags for this operation. 
Must be specified as ::cudaIpcMemLazyEnablePeerAccess"] - #[doc = ""] - #[doc = " \\returns"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorMapBufferObjectFailed,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorTooManyPeers,"] - #[doc = " ::cudaErrorNotSupported"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\note No guarantees are made about the address returned in \\p *devPtr."] - #[doc = " In particular, multiple processes may not receive the same address for the same \\p handle."] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaMalloc,"] - #[doc = " ::cudaFree,"] - #[doc = " ::cudaIpcGetEventHandle,"] - #[doc = " ::cudaIpcOpenEventHandle,"] - #[doc = " ::cudaIpcGetMemHandle,"] - #[doc = " ::cudaIpcCloseMemHandle,"] - #[doc = " ::cudaDeviceEnablePeerAccess,"] - #[doc = " ::cudaDeviceCanAccessPeer,"] - #[doc = " ::cuIpcOpenMemHandle"] pub fn cudaIpcOpenMemHandle( devPtr: *mut *mut ::std::os::raw::c_void, handle: cudaIpcMemHandle_t, @@ -13780,863 +3864,60 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Close memory mapped with cudaIpcOpenMemHandle"] - #[doc = ""] - #[doc = " Unmaps memory returnd by ::cudaIpcOpenMemHandle. The original allocation"] - #[doc = " in the exporting process as well as imported mappings in other processes"] - #[doc = " will be unaffected."] - #[doc = ""] - #[doc = " Any resources used to enable peer access will be freed if this is the"] - #[doc = " last mapping using them."] - #[doc = ""] - #[doc = " IPC functionality is restricted to devices with support for unified"] - #[doc = " addressing on Linux operating systems. IPC functionality is not supported"] - #[doc = " on Tegra platforms."] - #[doc = ""] - #[doc = " \\param devPtr - Device pointer returned by ::cudaIpcOpenMemHandle"] - #[doc = ""] - #[doc = " \\returns"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorMapBufferObjectFailed,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorNotSupported"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaMalloc,"] - #[doc = " ::cudaFree,"] - #[doc = " ::cudaIpcGetEventHandle,"] - #[doc = " ::cudaIpcOpenEventHandle,"] - #[doc = " ::cudaIpcGetMemHandle,"] - #[doc = " ::cudaIpcOpenMemHandle,"] - #[doc = " ::cuIpcCloseMemHandle"] pub fn cudaIpcCloseMemHandle( devPtr: *mut ::std::os::raw::c_void, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Exit and clean up from CUDA launches"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Note that this function is deprecated because its name does not"] - #[doc = " reflect its behavior. Its functionality is identical to the"] - #[doc = " non-deprecated function ::cudaDeviceReset(), which should be used"] - #[doc = " instead."] - #[doc = ""] - #[doc = " Explicitly destroys all cleans up all resources associated with the current"] - #[doc = " device in the current process. Any subsequent API call to this device will"] - #[doc = " reinitialize the device."] - #[doc = ""] - #[doc = " Note that this function will reset the device immediately. 
It is the caller's"] - #[doc = " responsibility to ensure that the device is not being accessed by any"] - #[doc = " other host threads from the process when this function is called."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceReset"] pub fn cudaThreadExit() -> cudaError_t; } extern "C" { - #[doc = " \\brief Wait for compute device to finish"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Note that this function is deprecated because its name does not"] - #[doc = " reflect its behavior. Its functionality is similar to the"] - #[doc = " non-deprecated function ::cudaDeviceSynchronize(), which should be used"] - #[doc = " instead."] - #[doc = ""] - #[doc = " Blocks until the device has completed all preceding requested tasks."] - #[doc = " ::cudaThreadSynchronize() returns an error if one of the preceding tasks"] - #[doc = " has failed. If the ::cudaDeviceScheduleBlockingSync flag was set for"] - #[doc = " this device, the host thread will block until the device has finished"] - #[doc = " its work."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceSynchronize"] pub fn cudaThreadSynchronize() -> cudaError_t; } extern "C" { - #[doc = " \\brief Set resource limits"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Note that this function is deprecated because its name does not"] - #[doc = " reflect its behavior. Its functionality is identical to the"] - #[doc = " non-deprecated function ::cudaDeviceSetLimit(), which should be used"] - #[doc = " instead."] - #[doc = ""] - #[doc = " Setting \\p limit to \\p value is a request by the application to update"] - #[doc = " the current limit maintained by the device. The driver is free to"] - #[doc = " modify the requested value to meet h/w requirements (this could be"] - #[doc = " clamping to minimum or maximum values, rounding up to nearest element"] - #[doc = " size, etc). The application can use ::cudaThreadGetLimit() to find out"] - #[doc = " exactly what the limit has been set to."] - #[doc = ""] - #[doc = " Setting each ::cudaLimit has its own specific restrictions, so each is"] - #[doc = " discussed here."] - #[doc = ""] - #[doc = " - ::cudaLimitStackSize controls the stack size of each GPU thread."] - #[doc = ""] - #[doc = " - ::cudaLimitPrintfFifoSize controls the size of the shared FIFO"] - #[doc = " used by the ::printf() device system call."] - #[doc = " Setting ::cudaLimitPrintfFifoSize must be performed before"] - #[doc = " launching any kernel that uses the ::printf() device"] - #[doc = " system call, otherwise ::cudaErrorInvalidValue will be returned."] - #[doc = ""] - #[doc = " - ::cudaLimitMallocHeapSize controls the size of the heap used"] - #[doc = " by the ::malloc() and ::free() device system calls. 
Setting"] - #[doc = " ::cudaLimitMallocHeapSize must be performed before launching"] - #[doc = " any kernel that uses the ::malloc() or ::free() device system calls,"] - #[doc = " otherwise ::cudaErrorInvalidValue will be returned."] - #[doc = ""] - #[doc = " \\param limit - Limit to set"] - #[doc = " \\param value - Size in bytes of limit"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorUnsupportedLimit,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceSetLimit"] pub fn cudaThreadSetLimit(limit: cudaLimit, value: usize) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns resource limits"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Note that this function is deprecated because its name does not"] - #[doc = " reflect its behavior. Its functionality is identical to the"] - #[doc = " non-deprecated function ::cudaDeviceGetLimit(), which should be used"] - #[doc = " instead."] - #[doc = ""] - #[doc = " Returns in \\p *pValue the current size of \\p limit. The supported"] - #[doc = " ::cudaLimit values are:"] - #[doc = " - ::cudaLimitStackSize: stack size of each GPU thread;"] - #[doc = " - ::cudaLimitPrintfFifoSize: size of the shared FIFO used by the"] - #[doc = " ::printf() device system call."] - #[doc = " - ::cudaLimitMallocHeapSize: size of the heap used by the"] - #[doc = " ::malloc() and ::free() device system calls;"] - #[doc = ""] - #[doc = " \\param limit - Limit to query"] - #[doc = " \\param pValue - Returned size in bytes of limit"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorUnsupportedLimit,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceGetLimit"] pub fn cudaThreadGetLimit( pValue: *mut usize, limit: cudaLimit, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns the preferred cache configuration for the current device."] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Note that this function is deprecated because its name does not"] - #[doc = " reflect its behavior. Its functionality is identical to the"] - #[doc = " non-deprecated function ::cudaDeviceGetCacheConfig(), which should be"] - #[doc = " used instead."] - #[doc = ""] - #[doc = " On devices where the L1 cache and shared memory use the same hardware"] - #[doc = " resources, this returns through \\p pCacheConfig the preferred cache"] - #[doc = " configuration for the current device. This is only a preference. 
The"] - #[doc = " runtime will use the requested configuration if possible, but it is free to"] - #[doc = " choose a different configuration if required to execute functions."] - #[doc = ""] - #[doc = " This will return a \\p pCacheConfig of ::cudaFuncCachePreferNone on devices"] - #[doc = " where the size of the L1 cache and shared memory are fixed."] - #[doc = ""] - #[doc = " The supported cache configurations are:"] - #[doc = " - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default)"] - #[doc = " - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache"] - #[doc = " - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory"] - #[doc = ""] - #[doc = " \\param pCacheConfig - Returned cache configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceGetCacheConfig"] pub fn cudaThreadGetCacheConfig( pCacheConfig: *mut cudaFuncCache, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets the preferred cache configuration for the current device."] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Note that this function is deprecated because its name does not"] - #[doc = " reflect its behavior. Its functionality is identical to the"] - #[doc = " non-deprecated function ::cudaDeviceSetCacheConfig(), which should be"] - #[doc = " used instead."] - #[doc = ""] - #[doc = " On devices where the L1 cache and shared memory use the same hardware"] - #[doc = " resources, this sets through \\p cacheConfig the preferred cache"] - #[doc = " configuration for the current device. This is only a preference. The"] - #[doc = " runtime will use the requested configuration if possible, but it is free to"] - #[doc = " choose a different configuration if required to execute the function. Any"] - #[doc = " function preference set via"] - #[doc = " \\ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C API)\""] - #[doc = " or"] - #[doc = " \\ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C++ API)\""] - #[doc = " will be preferred over this device-wide setting. 
Setting the device-wide"] - #[doc = " cache configuration to ::cudaFuncCachePreferNone will cause subsequent"] - #[doc = " kernel launches to prefer to not change the cache configuration unless"] - #[doc = " required to launch the kernel."] - #[doc = ""] - #[doc = " This setting does nothing on devices where the size of the L1 cache and"] - #[doc = " shared memory are fixed."] - #[doc = ""] - #[doc = " Launching a kernel with a different preference than the most recent"] - #[doc = " preference setting may insert a device-side synchronization point."] - #[doc = ""] - #[doc = " The supported cache configurations are:"] - #[doc = " - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default)"] - #[doc = " - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache"] - #[doc = " - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory"] - #[doc = ""] - #[doc = " \\param cacheConfig - Requested cache configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceSetCacheConfig"] pub fn cudaThreadSetCacheConfig(cacheConfig: cudaFuncCache) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns the last error from a runtime call"] - #[doc = ""] - #[doc = " Returns the last error that has been produced by any of the runtime calls"] - #[doc = " in the same host thread and resets it to ::cudaSuccess."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorMissingConfiguration,"] - #[doc = " ::cudaErrorMemoryAllocation,"] - #[doc = " ::cudaErrorInitializationError,"] - #[doc = " ::cudaErrorLaunchFailure,"] - #[doc = " ::cudaErrorLaunchTimeout,"] - #[doc = " ::cudaErrorLaunchOutOfResources,"] - #[doc = " ::cudaErrorInvalidDeviceFunction,"] - #[doc = " ::cudaErrorInvalidConfiguration,"] - #[doc = " ::cudaErrorInvalidDevice,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidPitchValue,"] - #[doc = " ::cudaErrorInvalidSymbol,"] - #[doc = " ::cudaErrorUnmapBufferObjectFailed,"] - #[doc = " ::cudaErrorInvalidDevicePointer,"] - #[doc = " ::cudaErrorInvalidTexture,"] - #[doc = " ::cudaErrorInvalidTextureBinding,"] - #[doc = " ::cudaErrorInvalidChannelDescriptor,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection,"] - #[doc = " ::cudaErrorInvalidFilterSetting,"] - #[doc = " ::cudaErrorInvalidNormSetting,"] - #[doc = " ::cudaErrorUnknown,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorInsufficientDriver,"] - #[doc = " ::cudaErrorNoDevice,"] - #[doc = " ::cudaErrorSetOnActiveProcess,"] - #[doc = " ::cudaErrorStartupFailure,"] - #[doc = " ::cudaErrorInvalidPtx,"] - #[doc = " ::cudaErrorNoKernelImageForDevice,"] - #[doc = " ::cudaErrorJitCompilerNotFound"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaPeekAtLastError, ::cudaGetErrorName, ::cudaGetErrorString, ::cudaError"] pub fn cudaGetLastError() -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns the last error from a runtime call"] - #[doc = ""] - #[doc = " Returns the last error that has been produced by any of the runtime calls"] - #[doc = " in the same host thread. 
Note that this call does not reset the error to"] - #[doc = " ::cudaSuccess like ::cudaGetLastError()."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorMissingConfiguration,"] - #[doc = " ::cudaErrorMemoryAllocation,"] - #[doc = " ::cudaErrorInitializationError,"] - #[doc = " ::cudaErrorLaunchFailure,"] - #[doc = " ::cudaErrorLaunchTimeout,"] - #[doc = " ::cudaErrorLaunchOutOfResources,"] - #[doc = " ::cudaErrorInvalidDeviceFunction,"] - #[doc = " ::cudaErrorInvalidConfiguration,"] - #[doc = " ::cudaErrorInvalidDevice,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidPitchValue,"] - #[doc = " ::cudaErrorInvalidSymbol,"] - #[doc = " ::cudaErrorUnmapBufferObjectFailed,"] - #[doc = " ::cudaErrorInvalidDevicePointer,"] - #[doc = " ::cudaErrorInvalidTexture,"] - #[doc = " ::cudaErrorInvalidTextureBinding,"] - #[doc = " ::cudaErrorInvalidChannelDescriptor,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection,"] - #[doc = " ::cudaErrorInvalidFilterSetting,"] - #[doc = " ::cudaErrorInvalidNormSetting,"] - #[doc = " ::cudaErrorUnknown,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorInsufficientDriver,"] - #[doc = " ::cudaErrorNoDevice,"] - #[doc = " ::cudaErrorSetOnActiveProcess,"] - #[doc = " ::cudaErrorStartupFailure,"] - #[doc = " ::cudaErrorInvalidPtx,"] - #[doc = " ::cudaErrorNoKernelImageForDevice,"] - #[doc = " ::cudaErrorJitCompilerNotFound"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetLastError, ::cudaGetErrorName, ::cudaGetErrorString, ::cudaError"] pub fn cudaPeekAtLastError() -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns the string representation of an error code enum name"] - #[doc = ""] - #[doc = " Returns a string containing the name of an error code in the enum. If the error"] - #[doc = " code is not recognized, \"unrecognized error code\" is returned."] - #[doc = ""] - #[doc = " \\param error - Error code to convert to string"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " \\p char* pointer to a NULL-terminated string"] - #[doc = ""] - #[doc = " \\sa ::cudaGetErrorString, ::cudaGetLastError, ::cudaPeekAtLastError, ::cudaError,"] - #[doc = " ::cuGetErrorName"] pub fn cudaGetErrorName( error: cudaError_t, ) -> *const ::std::os::raw::c_char; } extern "C" { - #[doc = " \\brief Returns the description string for an error code"] - #[doc = ""] - #[doc = " Returns the description string for an error code. 
If the error"] - #[doc = " code is not recognized, \"unrecognized error code\" is returned."] - #[doc = ""] - #[doc = " \\param error - Error code to convert to string"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " \\p char* pointer to a NULL-terminated string"] - #[doc = ""] - #[doc = " \\sa ::cudaGetErrorName, ::cudaGetLastError, ::cudaPeekAtLastError, ::cudaError,"] - #[doc = " ::cuGetErrorString"] pub fn cudaGetErrorString( error: cudaError_t, ) -> *const ::std::os::raw::c_char; } extern "C" { - #[doc = " \\brief Returns the number of compute-capable devices"] - #[doc = ""] - #[doc = " Returns in \\p *count the number of devices with compute capability greater"] - #[doc = " or equal to 2.0 that are available for execution."] - #[doc = ""] - #[doc = " \\param count - Returns the number of devices with compute capability"] - #[doc = " greater or equal to 2.0"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaErrorInvalidValue (if a NULL device pointer is assigned), ::cudaSuccess"] - #[doc = ""] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetDevice, ::cudaSetDevice, ::cudaGetDeviceProperties,"] - #[doc = " ::cudaChooseDevice,"] - #[doc = " ::cuDeviceGetCount"] pub fn cudaGetDeviceCount(count: *mut ::std::os::raw::c_int) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns information about the compute-device"] - #[doc = ""] - #[doc = " Returns in \\p *prop the properties of device \\p dev. The ::cudaDeviceProp"] - #[doc = " structure is defined as:"] - #[doc = " \\code"] - #[doc = "struct cudaDeviceProp {"] - #[doc = "char name[256];"] - #[doc = "cudaUUID_t uuid;"] - #[doc = "size_t totalGlobalMem;"] - #[doc = "size_t sharedMemPerBlock;"] - #[doc = "int regsPerBlock;"] - #[doc = "int warpSize;"] - #[doc = "size_t memPitch;"] - #[doc = "int maxThreadsPerBlock;"] - #[doc = "int maxThreadsDim[3];"] - #[doc = "int maxGridSize[3];"] - #[doc = "int clockRate;"] - #[doc = "size_t totalConstMem;"] - #[doc = "int major;"] - #[doc = "int minor;"] - #[doc = "size_t textureAlignment;"] - #[doc = "size_t texturePitchAlignment;"] - #[doc = "int deviceOverlap;"] - #[doc = "int multiProcessorCount;"] - #[doc = "int kernelExecTimeoutEnabled;"] - #[doc = "int integrated;"] - #[doc = "int canMapHostMemory;"] - #[doc = "int computeMode;"] - #[doc = "int maxTexture1D;"] - #[doc = "int maxTexture1DMipmap;"] - #[doc = "int maxTexture1DLinear;"] - #[doc = "int maxTexture2D[2];"] - #[doc = "int maxTexture2DMipmap[2];"] - #[doc = "int maxTexture2DLinear[3];"] - #[doc = "int maxTexture2DGather[2];"] - #[doc = "int maxTexture3D[3];"] - #[doc = "int maxTexture3DAlt[3];"] - #[doc = "int maxTextureCubemap;"] - #[doc = "int maxTexture1DLayered[2];"] - #[doc = "int maxTexture2DLayered[3];"] - #[doc = "int maxTextureCubemapLayered[2];"] - #[doc = "int maxSurface1D;"] - #[doc = "int maxSurface2D[2];"] - #[doc = "int maxSurface3D[3];"] - #[doc = "int maxSurface1DLayered[2];"] - #[doc = "int maxSurface2DLayered[3];"] - #[doc = "int maxSurfaceCubemap;"] - #[doc = "int maxSurfaceCubemapLayered[2];"] - #[doc = "size_t surfaceAlignment;"] - #[doc = "int concurrentKernels;"] - #[doc = "int ECCEnabled;"] - #[doc = "int pciBusID;"] - #[doc = "int pciDeviceID;"] - #[doc = "int pciDomainID;"] - #[doc = "int tccDriver;"] - #[doc = "int asyncEngineCount;"] - #[doc = "int unifiedAddressing;"] - #[doc = "int memoryClockRate;"] - #[doc = "int memoryBusWidth;"] - #[doc = "int l2CacheSize;"] - #[doc = "int maxThreadsPerMultiProcessor;"] - #[doc = 
"int streamPrioritiesSupported;"] - #[doc = "int globalL1CacheSupported;"] - #[doc = "int localL1CacheSupported;"] - #[doc = "size_t sharedMemPerMultiprocessor;"] - #[doc = "int regsPerMultiprocessor;"] - #[doc = "int managedMemory;"] - #[doc = "int isMultiGpuBoard;"] - #[doc = "int multiGpuBoardGroupID;"] - #[doc = "int singleToDoublePrecisionPerfRatio;"] - #[doc = "int pageableMemoryAccess;"] - #[doc = "int concurrentManagedAccess;"] - #[doc = "int computePreemptionSupported;"] - #[doc = "int canUseHostPointerForRegisteredMem;"] - #[doc = "int cooperativeLaunch;"] - #[doc = "int cooperativeMultiDeviceLaunch;"] - #[doc = "int pageableMemoryAccessUsesHostPageTables;"] - #[doc = "int directManagedMemAccessFromHost;"] - #[doc = "}"] - #[doc = "\\endcode"] - #[doc = " where:"] - #[doc = " - \\ref ::cudaDeviceProp::name \"name[256]\" is an ASCII string identifying"] - #[doc = " the device;"] - #[doc = " - \\ref ::cudaDeviceProp::uuid \"uuid\" is a 16-byte unique identifier."] - #[doc = " - \\ref ::cudaDeviceProp::totalGlobalMem \"totalGlobalMem\" is the total"] - #[doc = " amount of global memory available on the device in bytes;"] - #[doc = " - \\ref ::cudaDeviceProp::sharedMemPerBlock \"sharedMemPerBlock\" is the"] - #[doc = " maximum amount of shared memory available to a thread block in bytes;"] - #[doc = " - \\ref ::cudaDeviceProp::regsPerBlock \"regsPerBlock\" is the maximum number"] - #[doc = " of 32-bit registers available to a thread block;"] - #[doc = " - \\ref ::cudaDeviceProp::warpSize \"warpSize\" is the warp size in threads;"] - #[doc = " - \\ref ::cudaDeviceProp::memPitch \"memPitch\" is the maximum pitch in"] - #[doc = " bytes allowed by the memory copy functions that involve memory regions"] - #[doc = " allocated through ::cudaMallocPitch();"] - #[doc = " - \\ref ::cudaDeviceProp::maxThreadsPerBlock \"maxThreadsPerBlock\" is the"] - #[doc = " maximum number of threads per block;"] - #[doc = " - \\ref ::cudaDeviceProp::maxThreadsDim \"maxThreadsDim[3]\" contains the"] - #[doc = " maximum size of each dimension of a block;"] - #[doc = " - \\ref ::cudaDeviceProp::maxGridSize \"maxGridSize[3]\" contains the"] - #[doc = " maximum size of each dimension of a grid;"] - #[doc = " - \\ref ::cudaDeviceProp::clockRate \"clockRate\" is the clock frequency in"] - #[doc = " kilohertz;"] - #[doc = " - \\ref ::cudaDeviceProp::totalConstMem \"totalConstMem\" is the total amount"] - #[doc = " of constant memory available on the device in bytes;"] - #[doc = " - \\ref ::cudaDeviceProp::major \"major\","] - #[doc = " \\ref ::cudaDeviceProp::minor \"minor\" are the major and minor revision"] - #[doc = " numbers defining the device's compute capability;"] - #[doc = " - \\ref ::cudaDeviceProp::textureAlignment \"textureAlignment\" is the"] - #[doc = " alignment requirement; texture base addresses that are aligned to"] - #[doc = " \\ref ::cudaDeviceProp::textureAlignment \"textureAlignment\" bytes do not"] - #[doc = " need an offset applied to texture fetches;"] - #[doc = " - \\ref ::cudaDeviceProp::texturePitchAlignment \"texturePitchAlignment\" is the"] - #[doc = " pitch alignment requirement for 2D texture references that are bound to"] - #[doc = " pitched memory;"] - #[doc = " - \\ref ::cudaDeviceProp::deviceOverlap \"deviceOverlap\" is 1 if the device"] - #[doc = " can concurrently copy memory between host and device while executing a"] - #[doc = " kernel, or 0 if not. 
Deprecated, use instead asyncEngineCount."] - #[doc = " - \\ref ::cudaDeviceProp::multiProcessorCount \"multiProcessorCount\" is the"] - #[doc = " number of multiprocessors on the device;"] - #[doc = " - \\ref ::cudaDeviceProp::kernelExecTimeoutEnabled \"kernelExecTimeoutEnabled\""] - #[doc = " is 1 if there is a run time limit for kernels executed on the device, or"] - #[doc = " 0 if not."] - #[doc = " - \\ref ::cudaDeviceProp::integrated \"integrated\" is 1 if the device is an"] - #[doc = " integrated (motherboard) GPU and 0 if it is a discrete (card) component."] - #[doc = " - \\ref ::cudaDeviceProp::canMapHostMemory \"canMapHostMemory\" is 1 if the"] - #[doc = " device can map host memory into the CUDA address space for use with"] - #[doc = " ::cudaHostAlloc()/::cudaHostGetDevicePointer(), or 0 if not;"] - #[doc = " - \\ref ::cudaDeviceProp::computeMode \"computeMode\" is the compute mode"] - #[doc = " that the device is currently in. Available modes are as follows:"] - #[doc = " - cudaComputeModeDefault: Default mode - Device is not restricted and"] - #[doc = " multiple threads can use ::cudaSetDevice() with this device."] - #[doc = " - cudaComputeModeExclusive: Compute-exclusive mode - Only one thread will"] - #[doc = " be able to use ::cudaSetDevice() with this device."] - #[doc = " - cudaComputeModeProhibited: Compute-prohibited mode - No threads can use"] - #[doc = " ::cudaSetDevice() with this device."] - #[doc = " - cudaComputeModeExclusiveProcess: Compute-exclusive-process mode - Many"] - #[doc = " threads in one process will be able to use ::cudaSetDevice() with this device."] - #[doc = "
If ::cudaSetDevice() is called on an already occupied \\p device with"] - #[doc = " computeMode ::cudaComputeModeExclusive, ::cudaErrorDeviceAlreadyInUse"] - #[doc = " will be immediately returned indicating the device cannot be used."] - #[doc = " When an occupied exclusive mode device is chosen with ::cudaSetDevice,"] - #[doc = " all subsequent non-device management runtime functions will return"] - #[doc = " ::cudaErrorDevicesUnavailable."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture1D \"maxTexture1D\" is the maximum 1D"] - #[doc = " texture size."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture1DMipmap \"maxTexture1DMipmap\" is the maximum"] - #[doc = " 1D mipmapped texture texture size."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture1DLinear \"maxTexture1DLinear\" is the maximum"] - #[doc = " 1D texture size for textures bound to linear memory."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture2D \"maxTexture2D[2]\" contains the maximum"] - #[doc = " 2D texture dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture2DMipmap \"maxTexture2DMipmap[2]\" contains the"] - #[doc = " maximum 2D mipmapped texture dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture2DLinear \"maxTexture2DLinear[3]\" contains the"] - #[doc = " maximum 2D texture dimensions for 2D textures bound to pitch linear memory."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture2DGather \"maxTexture2DGather[2]\" contains the"] - #[doc = " maximum 2D texture dimensions if texture gather operations have to be performed."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture3D \"maxTexture3D[3]\" contains the maximum"] - #[doc = " 3D texture dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture3DAlt \"maxTexture3DAlt[3]\""] - #[doc = " contains the maximum alternate 3D texture dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxTextureCubemap \"maxTextureCubemap\" is the"] - #[doc = " maximum cubemap texture width or height."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture1DLayered \"maxTexture1DLayered[2]\" contains"] - #[doc = " the maximum 1D layered texture dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxTexture2DLayered \"maxTexture2DLayered[3]\" contains"] - #[doc = " the maximum 2D layered texture dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxTextureCubemapLayered \"maxTextureCubemapLayered[2]\""] - #[doc = " contains the maximum cubemap layered texture dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxSurface1D \"maxSurface1D\" is the maximum 1D"] - #[doc = " surface size."] - #[doc = " - \\ref ::cudaDeviceProp::maxSurface2D \"maxSurface2D[2]\" contains the maximum"] - #[doc = " 2D surface dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxSurface3D \"maxSurface3D[3]\" contains the maximum"] - #[doc = " 3D surface dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxSurface1DLayered \"maxSurface1DLayered[2]\" contains"] - #[doc = " the maximum 1D layered surface dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxSurface2DLayered \"maxSurface2DLayered[3]\" contains"] - #[doc = " the maximum 2D layered surface dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::maxSurfaceCubemap \"maxSurfaceCubemap\" is the maximum"] - #[doc = " cubemap surface width or height."] - #[doc = " - \\ref ::cudaDeviceProp::maxSurfaceCubemapLayered \"maxSurfaceCubemapLayered[2]\""] - #[doc = " contains the maximum cubemap layered surface dimensions."] - #[doc = " - \\ref ::cudaDeviceProp::surfaceAlignment \"surfaceAlignment\" specifies the"] - #[doc = " alignment requirements 
for surfaces."] - #[doc = " - \\ref ::cudaDeviceProp::concurrentKernels \"concurrentKernels\" is 1 if the"] - #[doc = " device supports executing multiple kernels within the same context"] - #[doc = " simultaneously, or 0 if not. It is not guaranteed that multiple kernels"] - #[doc = " will be resident on the device concurrently so this feature should not be"] - #[doc = " relied upon for correctness;"] - #[doc = " - \\ref ::cudaDeviceProp::ECCEnabled \"ECCEnabled\" is 1 if the device has ECC"] - #[doc = " support turned on, or 0 if not."] - #[doc = " - \\ref ::cudaDeviceProp::pciBusID \"pciBusID\" is the PCI bus identifier of"] - #[doc = " the device."] - #[doc = " - \\ref ::cudaDeviceProp::pciDeviceID \"pciDeviceID\" is the PCI device"] - #[doc = " (sometimes called slot) identifier of the device."] - #[doc = " - \\ref ::cudaDeviceProp::pciDomainID \"pciDomainID\" is the PCI domain identifier"] - #[doc = " of the device."] - #[doc = " - \\ref ::cudaDeviceProp::tccDriver \"tccDriver\" is 1 if the device is using a"] - #[doc = " TCC driver or 0 if not."] - #[doc = " - \\ref ::cudaDeviceProp::asyncEngineCount \"asyncEngineCount\" is 1 when the"] - #[doc = " device can concurrently copy memory between host and device while executing"] - #[doc = " a kernel. It is 2 when the device can concurrently copy memory between host"] - #[doc = " and device in both directions and execute a kernel at the same time. It is"] - #[doc = " 0 if neither of these is supported."] - #[doc = " - \\ref ::cudaDeviceProp::unifiedAddressing \"unifiedAddressing\" is 1 if the device"] - #[doc = " shares a unified address space with the host and 0 otherwise."] - #[doc = " - \\ref ::cudaDeviceProp::memoryClockRate \"memoryClockRate\" is the peak memory"] - #[doc = " clock frequency in kilohertz."] - #[doc = " - \\ref ::cudaDeviceProp::memoryBusWidth \"memoryBusWidth\" is the memory bus width"] - #[doc = " in bits."] - #[doc = " - \\ref ::cudaDeviceProp::l2CacheSize \"l2CacheSize\" is L2 cache size in bytes."] - #[doc = " - \\ref ::cudaDeviceProp::maxThreadsPerMultiProcessor \"maxThreadsPerMultiProcessor\""] - #[doc = " is the number of maximum resident threads per multiprocessor."] - #[doc = " - \\ref ::cudaDeviceProp::streamPrioritiesSupported \"streamPrioritiesSupported\""] - #[doc = " is 1 if the device supports stream priorities, or 0 if it is not supported."] - #[doc = " - \\ref ::cudaDeviceProp::globalL1CacheSupported \"globalL1CacheSupported\""] - #[doc = " is 1 if the device supports caching of globals in L1 cache, or 0 if it is not supported."] - #[doc = " - \\ref ::cudaDeviceProp::localL1CacheSupported \"localL1CacheSupported\""] - #[doc = " is 1 if the device supports caching of locals in L1 cache, or 0 if it is not supported."] - #[doc = " - \\ref ::cudaDeviceProp::sharedMemPerMultiprocessor \"sharedMemPerMultiprocessor\" is the"] - #[doc = " maximum amount of shared memory available to a multiprocessor in bytes; this amount is"] - #[doc = " shared by all thread blocks simultaneously resident on a multiprocessor;"] - #[doc = " - \\ref ::cudaDeviceProp::regsPerMultiprocessor \"regsPerMultiprocessor\" is the maximum number"] - #[doc = " of 32-bit registers available to a multiprocessor; this number is shared"] - #[doc = " by all thread blocks simultaneously resident on a multiprocessor;"] - #[doc = " - \\ref ::cudaDeviceProp::managedMemory \"managedMemory\""] - #[doc = " is 1 if the device supports allocating managed memory on this system, or 0 if it is not supported."] - #[doc = " - \\ref 
::cudaDeviceProp::isMultiGpuBoard \"isMultiGpuBoard\""] - #[doc = " is 1 if the device is on a multi-GPU board (e.g. Gemini cards), and 0 if not;"] - #[doc = " - \\ref ::cudaDeviceProp::multiGpuBoardGroupID \"multiGpuBoardGroupID\" is a unique identifier"] - #[doc = " for a group of devices associated with the same board."] - #[doc = " Devices on the same multi-GPU board will share the same identifier;"] - #[doc = " - \\ref ::cudaDeviceProp::singleToDoublePrecisionPerfRatio \"singleToDoublePrecisionPerfRatio\""] - #[doc = " is the ratio of single precision performance (in floating-point operations per second)"] - #[doc = " to double precision performance."] - #[doc = " - \\ref ::cudaDeviceProp::pageableMemoryAccess \"pageableMemoryAccess\" is 1 if the device supports"] - #[doc = " coherently accessing pageable memory without calling cudaHostRegister on it, and 0 otherwise."] - #[doc = " - \\ref ::cudaDeviceProp::concurrentManagedAccess \"concurrentManagedAccess\" is 1 if the device can"] - #[doc = " coherently access managed memory concurrently with the CPU, and 0 otherwise."] - #[doc = " - \\ref ::cudaDeviceProp::computePreemptionSupported \"computePreemptionSupported\" is 1 if the device"] - #[doc = " supports Compute Preemption, and 0 otherwise."] - #[doc = " - \\ref ::cudaDeviceProp::canUseHostPointerForRegisteredMem \"canUseHostPointerForRegisteredMem\" is 1 if"] - #[doc = " the device can access host registered memory at the same virtual address as the CPU, and 0 otherwise."] - #[doc = " - \\ref ::cudaDeviceProp::cooperativeLaunch \"cooperativeLaunch\" is 1 if the device supports launching"] - #[doc = " cooperative kernels via ::cudaLaunchCooperativeKernel, and 0 otherwise."] - #[doc = " - \\ref ::cudaDeviceProp::cooperativeMultiDeviceLaunch \"cooperativeMultiDeviceLaunch\" is 1 if the device"] - #[doc = " supports launching cooperative kernels via ::cudaLaunchCooperativeKernelMultiDevice, and 0 otherwise."] - #[doc = " - \\ref ::cudaDeviceProp::pageableMemoryAccessUsesHostPageTables \"pageableMemoryAccessUsesHostPageTables\" is 1 if the device accesses"] - #[doc = " pageable memory via the host's page tables, and 0 otherwise."] - #[doc = " - \\ref ::cudaDeviceProp::directManagedMemAccessFromHost \"directManagedMemAccessFromHost\" is 1 if the host can directly access managed"] - #[doc = " memory on the device without migration, and 0 otherwise."] - #[doc = ""] - #[doc = " \\param prop - Properties for the specified device"] - #[doc = " \\param device - Device number to get properties for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice, ::cudaChooseDevice,"] - #[doc = " ::cudaDeviceGetAttribute,"] - #[doc = " ::cuDeviceGetAttribute,"] - #[doc = " ::cuDeviceGetName"] pub fn cudaGetDeviceProperties( prop: *mut cudaDeviceProp, device: ::std::os::raw::c_int, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns information about the device"] - #[doc = ""] - #[doc = " Returns in \\p *value the integer value of the attribute \\p attr on device"] - #[doc = " \\p device. 
The supported attributes are:"] - #[doc = " - ::cudaDevAttrMaxThreadsPerBlock: Maximum number of threads per block;"] - #[doc = " - ::cudaDevAttrMaxBlockDimX: Maximum x-dimension of a block;"] - #[doc = " - ::cudaDevAttrMaxBlockDimY: Maximum y-dimension of a block;"] - #[doc = " - ::cudaDevAttrMaxBlockDimZ: Maximum z-dimension of a block;"] - #[doc = " - ::cudaDevAttrMaxGridDimX: Maximum x-dimension of a grid;"] - #[doc = " - ::cudaDevAttrMaxGridDimY: Maximum y-dimension of a grid;"] - #[doc = " - ::cudaDevAttrMaxGridDimZ: Maximum z-dimension of a grid;"] - #[doc = " - ::cudaDevAttrMaxSharedMemoryPerBlock: Maximum amount of shared memory"] - #[doc = " available to a thread block in bytes;"] - #[doc = " - ::cudaDevAttrTotalConstantMemory: Memory available on device for"] - #[doc = " __constant__ variables in a CUDA C kernel in bytes;"] - #[doc = " - ::cudaDevAttrWarpSize: Warp size in threads;"] - #[doc = " - ::cudaDevAttrMaxPitch: Maximum pitch in bytes allowed by the memory copy"] - #[doc = " functions that involve memory regions allocated through ::cudaMallocPitch();"] - #[doc = " - ::cudaDevAttrMaxTexture1DWidth: Maximum 1D texture width;"] - #[doc = " - ::cudaDevAttrMaxTexture1DLinearWidth: Maximum width for a 1D texture bound"] - #[doc = " to linear memory;"] - #[doc = " - ::cudaDevAttrMaxTexture1DMipmappedWidth: Maximum mipmapped 1D texture width;"] - #[doc = " - ::cudaDevAttrMaxTexture2DWidth: Maximum 2D texture width;"] - #[doc = " - ::cudaDevAttrMaxTexture2DHeight: Maximum 2D texture height;"] - #[doc = " - ::cudaDevAttrMaxTexture2DLinearWidth: Maximum width for a 2D texture"] - #[doc = " bound to linear memory;"] - #[doc = " - ::cudaDevAttrMaxTexture2DLinearHeight: Maximum height for a 2D texture"] - #[doc = " bound to linear memory;"] - #[doc = " - ::cudaDevAttrMaxTexture2DLinearPitch: Maximum pitch in bytes for a 2D"] - #[doc = " texture bound to linear memory;"] - #[doc = " - ::cudaDevAttrMaxTexture2DMipmappedWidth: Maximum mipmapped 2D texture"] - #[doc = " width;"] - #[doc = " - ::cudaDevAttrMaxTexture2DMipmappedHeight: Maximum mipmapped 2D texture"] - #[doc = " height;"] - #[doc = " - ::cudaDevAttrMaxTexture3DWidth: Maximum 3D texture width;"] - #[doc = " - ::cudaDevAttrMaxTexture3DHeight: Maximum 3D texture height;"] - #[doc = " - ::cudaDevAttrMaxTexture3DDepth: Maximum 3D texture depth;"] - #[doc = " - ::cudaDevAttrMaxTexture3DWidthAlt: Alternate maximum 3D texture width,"] - #[doc = " 0 if no alternate maximum 3D texture size is supported;"] - #[doc = " - ::cudaDevAttrMaxTexture3DHeightAlt: Alternate maximum 3D texture height,"] - #[doc = " 0 if no alternate maximum 3D texture size is supported;"] - #[doc = " - ::cudaDevAttrMaxTexture3DDepthAlt: Alternate maximum 3D texture depth,"] - #[doc = " 0 if no alternate maximum 3D texture size is supported;"] - #[doc = " - ::cudaDevAttrMaxTextureCubemapWidth: Maximum cubemap texture width or"] - #[doc = " height;"] - #[doc = " - ::cudaDevAttrMaxTexture1DLayeredWidth: Maximum 1D layered texture width;"] - #[doc = " - ::cudaDevAttrMaxTexture1DLayeredLayers: Maximum layers in a 1D layered"] - #[doc = " texture;"] - #[doc = " - ::cudaDevAttrMaxTexture2DLayeredWidth: Maximum 2D layered texture width;"] - #[doc = " - ::cudaDevAttrMaxTexture2DLayeredHeight: Maximum 2D layered texture height;"] - #[doc = " - ::cudaDevAttrMaxTexture2DLayeredLayers: Maximum layers in a 2D layered"] - #[doc = " texture;"] - #[doc = " - ::cudaDevAttrMaxTextureCubemapLayeredWidth: Maximum cubemap layered"] - #[doc = " texture width or height;"] - #[doc = " - 
::cudaDevAttrMaxTextureCubemapLayeredLayers: Maximum layers in a cubemap"] - #[doc = " layered texture;"] - #[doc = " - ::cudaDevAttrMaxSurface1DWidth: Maximum 1D surface width;"] - #[doc = " - ::cudaDevAttrMaxSurface2DWidth: Maximum 2D surface width;"] - #[doc = " - ::cudaDevAttrMaxSurface2DHeight: Maximum 2D surface height;"] - #[doc = " - ::cudaDevAttrMaxSurface3DWidth: Maximum 3D surface width;"] - #[doc = " - ::cudaDevAttrMaxSurface3DHeight: Maximum 3D surface height;"] - #[doc = " - ::cudaDevAttrMaxSurface3DDepth: Maximum 3D surface depth;"] - #[doc = " - ::cudaDevAttrMaxSurface1DLayeredWidth: Maximum 1D layered surface width;"] - #[doc = " - ::cudaDevAttrMaxSurface1DLayeredLayers: Maximum layers in a 1D layered"] - #[doc = " surface;"] - #[doc = " - ::cudaDevAttrMaxSurface2DLayeredWidth: Maximum 2D layered surface width;"] - #[doc = " - ::cudaDevAttrMaxSurface2DLayeredHeight: Maximum 2D layered surface height;"] - #[doc = " - ::cudaDevAttrMaxSurface2DLayeredLayers: Maximum layers in a 2D layered"] - #[doc = " surface;"] - #[doc = " - ::cudaDevAttrMaxSurfaceCubemapWidth: Maximum cubemap surface width;"] - #[doc = " - ::cudaDevAttrMaxSurfaceCubemapLayeredWidth: Maximum cubemap layered"] - #[doc = " surface width;"] - #[doc = " - ::cudaDevAttrMaxSurfaceCubemapLayeredLayers: Maximum layers in a cubemap"] - #[doc = " layered surface;"] - #[doc = " - ::cudaDevAttrMaxRegistersPerBlock: Maximum number of 32-bit registers"] - #[doc = " available to a thread block;"] - #[doc = " - ::cudaDevAttrClockRate: Peak clock frequency in kilohertz;"] - #[doc = " - ::cudaDevAttrTextureAlignment: Alignment requirement; texture base"] - #[doc = " addresses aligned to ::textureAlign bytes do not need an offset applied"] - #[doc = " to texture fetches;"] - #[doc = " - ::cudaDevAttrTexturePitchAlignment: Pitch alignment requirement for 2D"] - #[doc = " texture references bound to pitched memory;"] - #[doc = " - ::cudaDevAttrGpuOverlap: 1 if the device can concurrently copy memory"] - #[doc = " between host and device while executing a kernel, or 0 if not;"] - #[doc = " - ::cudaDevAttrMultiProcessorCount: Number of multiprocessors on the device;"] - #[doc = " - ::cudaDevAttrKernelExecTimeout: 1 if there is a run time limit for kernels"] - #[doc = " executed on the device, or 0 if not;"] - #[doc = " - ::cudaDevAttrIntegrated: 1 if the device is integrated with the memory"] - #[doc = " subsystem, or 0 if not;"] - #[doc = " - ::cudaDevAttrCanMapHostMemory: 1 if the device can map host memory into"] - #[doc = " the CUDA address space, or 0 if not;"] - #[doc = " - ::cudaDevAttrComputeMode: Compute mode is the compute mode that the device"] - #[doc = " is currently in. 
Available modes are as follows:"] - #[doc = " - ::cudaComputeModeDefault: Default mode - Device is not restricted and"] - #[doc = " multiple threads can use ::cudaSetDevice() with this device."] - #[doc = " - ::cudaComputeModeExclusive: Compute-exclusive mode - Only one thread will"] - #[doc = " be able to use ::cudaSetDevice() with this device."] - #[doc = " - ::cudaComputeModeProhibited: Compute-prohibited mode - No threads can use"] - #[doc = " ::cudaSetDevice() with this device."] - #[doc = " - ::cudaComputeModeExclusiveProcess: Compute-exclusive-process mode - Many"] - #[doc = " threads in one process will be able to use ::cudaSetDevice() with this"] - #[doc = " device."] - #[doc = " - ::cudaDevAttrConcurrentKernels: 1 if the device supports executing"] - #[doc = " multiple kernels within the same context simultaneously, or 0 if"] - #[doc = " not. It is not guaranteed that multiple kernels will be resident on the"] - #[doc = " device concurrently so this feature should not be relied upon for"] - #[doc = " correctness;"] - #[doc = " - ::cudaDevAttrEccEnabled: 1 if error correction is enabled on the device,"] - #[doc = " 0 if error correction is disabled or not supported by the device;"] - #[doc = " - ::cudaDevAttrPciBusId: PCI bus identifier of the device;"] - #[doc = " - ::cudaDevAttrPciDeviceId: PCI device (also known as slot) identifier of"] - #[doc = " the device;"] - #[doc = " - ::cudaDevAttrTccDriver: 1 if the device is using a TCC driver. TCC is only"] - #[doc = " available on Tesla hardware running Windows Vista or later;"] - #[doc = " - ::cudaDevAttrMemoryClockRate: Peak memory clock frequency in kilohertz;"] - #[doc = " - ::cudaDevAttrGlobalMemoryBusWidth: Global memory bus width in bits;"] - #[doc = " - ::cudaDevAttrL2CacheSize: Size of L2 cache in bytes. 
0 if the device"] - #[doc = " doesn't have L2 cache;"] - #[doc = " - ::cudaDevAttrMaxThreadsPerMultiProcessor: Maximum resident threads per"] - #[doc = " multiprocessor;"] - #[doc = " - ::cudaDevAttrUnifiedAddressing: 1 if the device shares a unified address"] - #[doc = " space with the host, or 0 if not;"] - #[doc = " - ::cudaDevAttrComputeCapabilityMajor: Major compute capability version"] - #[doc = " number;"] - #[doc = " - ::cudaDevAttrComputeCapabilityMinor: Minor compute capability version"] - #[doc = " number;"] - #[doc = " - ::cudaDevAttrStreamPrioritiesSupported: 1 if the device supports stream"] - #[doc = " priorities, or 0 if not;"] - #[doc = " - ::cudaDevAttrGlobalL1CacheSupported: 1 if device supports caching globals"] - #[doc = " in L1 cache, 0 if not;"] - #[doc = " - ::cudaDevAttrLocalL1CacheSupported: 1 if device supports caching locals"] - #[doc = " in L1 cache, 0 if not;"] - #[doc = " - ::cudaDevAttrMaxSharedMemoryPerMultiprocessor: Maximum amount of shared memory"] - #[doc = " available to a multiprocessor in bytes; this amount is shared by all"] - #[doc = " thread blocks simultaneously resident on a multiprocessor;"] - #[doc = " - ::cudaDevAttrMaxRegistersPerMultiprocessor: Maximum number of 32-bit registers"] - #[doc = " available to a multiprocessor; this number is shared by all thread blocks"] - #[doc = " simultaneously resident on a multiprocessor;"] - #[doc = " - ::cudaDevAttrManagedMemory: 1 if device supports allocating"] - #[doc = " managed memory, 0 if not;"] - #[doc = " - ::cudaDevAttrIsMultiGpuBoard: 1 if device is on a multi-GPU board, 0 if not;"] - #[doc = " - ::cudaDevAttrMultiGpuBoardGroupID: Unique identifier for a group of devices on the"] - #[doc = " same multi-GPU board;"] - #[doc = " - ::cudaDevAttrHostNativeAtomicSupported: 1 if the link between the device and the"] - #[doc = " host supports native atomic operations;"] - #[doc = " - ::cudaDevAttrSingleToDoublePrecisionPerfRatio: Ratio of single precision performance"] - #[doc = " (in floating-point operations per second) to double precision performance;"] - #[doc = " - ::cudaDevAttrPageableMemoryAccess: 1 if the device supports coherently accessing"] - #[doc = " pageable memory without calling cudaHostRegister on it, and 0 otherwise."] - #[doc = " - ::cudaDevAttrConcurrentManagedAccess: 1 if the device can coherently access managed"] - #[doc = " memory concurrently with the CPU, and 0 otherwise."] - #[doc = " - ::cudaDevAttrComputePreemptionSupported: 1 if the device supports"] - #[doc = " Compute Preemption, 0 if not."] - #[doc = " - ::cudaDevAttrCanUseHostPointerForRegisteredMem: 1 if the device can access host"] - #[doc = " registered memory at the same virtual address as the CPU, and 0 otherwise."] - #[doc = " - ::cudaDevAttrCooperativeLaunch: 1 if the device supports launching cooperative kernels"] - #[doc = " via ::cudaLaunchCooperativeKernel, and 0 otherwise."] - #[doc = " - ::cudaDevAttrCooperativeMultiDeviceLaunch: 1 if the device supports launching cooperative"] - #[doc = " kernels via ::cudaLaunchCooperativeKernelMultiDevice, and 0 otherwise."] - #[doc = " - ::cudaDevAttrCanFlushRemoteWrites: 1 if the device supports flushing of outstanding"] - #[doc = " remote writes, and 0 otherwise."] - #[doc = " - ::cudaDevAttrHostRegisterSupported: 1 if the device supports host memory registration"] - #[doc = " via ::cudaHostRegister, and 0 otherwise."] - #[doc = " - ::cudaDevAttrPageableMemoryAccessUsesHostPageTables: 1 if the device accesses pageable memory via the"] - #[doc = " host's page tables, 
and 0 otherwise."] - #[doc = " - ::cudaDevAttrDirectManagedMemAccessFromHost: 1 if the host can directly access managed memory on the device"] - #[doc = " without migration, and 0 otherwise."] - #[doc = " - ::cudaDevAttrMaxSharedMemoryPerBlockOptin: Maximum per block shared memory size on the device. This value can"] - #[doc = " be opted into when using ::cudaFuncSetAttribute"] - #[doc = ""] - #[doc = " \\param value - Returned device attribute value"] - #[doc = " \\param attr - Device attribute to query"] - #[doc = " \\param device - Device number to query"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDevice,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice, ::cudaChooseDevice,"] - #[doc = " ::cudaGetDeviceProperties,"] - #[doc = " ::cuDeviceGetAttribute"] pub fn cudaDeviceGetAttribute( value: *mut ::std::os::raw::c_int, attr: cudaDeviceAttr, @@ -14644,42 +3925,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Queries attributes of the link between two devices."] - #[doc = ""] - #[doc = " Returns in \\p *value the value of the requested attribute \\p attrib of the"] - #[doc = " link between \\p srcDevice and \\p dstDevice. The supported attributes are:"] - #[doc = " - ::cudaDevP2PAttrPerformanceRank: A relative value indicating the"] - #[doc = " performance of the link between two devices. Lower value means better"] - #[doc = " performance (0 being the value used for most performant link)."] - #[doc = " - ::cudaDevP2PAttrAccessSupported: 1 if peer access is enabled."] - #[doc = " - ::cudaDevP2PAttrNativeAtomicSupported: 1 if native atomic operations over"] - #[doc = " the link are supported."] - #[doc = " - ::cudaDevP2PAttrCudaArrayAccessSupported: 1 if accessing CUDA arrays over"] - #[doc = " the link is supported."] - #[doc = ""] - #[doc = " Returns ::cudaErrorInvalidDevice if \\p srcDevice or \\p dstDevice are not valid"] - #[doc = " or if they represent the same device."] - #[doc = ""] - #[doc = " Returns ::cudaErrorInvalidValue if \\p attrib is not valid or if \\p value is"] - #[doc = " a null pointer."] - #[doc = ""] - #[doc = " \\param value - Returned value of the requested attribute"] - #[doc = " \\param attrib - The requested attribute of the link between \\p srcDevice and \\p dstDevice."] - #[doc = " \\param srcDevice - The source device of the target link."] - #[doc = " \\param dstDevice - The destination device of the target link."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDevice,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaCtxEnablePeerAccess,"] - #[doc = " ::cudaCtxDisablePeerAccess,"] - #[doc = " ::cudaCtxCanAccessPeer,"] - #[doc = " ::cuDeviceGetP2PAttribute"] pub fn cudaDeviceGetP2PAttribute( value: *mut ::std::os::raw::c_int, attr: cudaDeviceP2PAttr, @@ -14688,335 +3933,41 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Select compute-device which best matches criteria"] - #[doc = ""] - #[doc = " Returns in \\p *device the device which has properties that best match"] - #[doc = " \\p *prop."] - #[doc = ""] - #[doc = " \\param device - Device with best match"] - #[doc = " \\param prop - Desired device properties"] - #[doc = ""] - #[doc = " \\return"] 
- #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice,"] - #[doc = " ::cudaGetDeviceProperties"] pub fn cudaChooseDevice( device: *mut ::std::os::raw::c_int, prop: *const cudaDeviceProp, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Set device to be used for GPU executions"] - #[doc = ""] - #[doc = " Sets \\p device as the current device for the calling host thread."] - #[doc = " Valid device id's are 0 to (::cudaGetDeviceCount() - 1)."] - #[doc = ""] - #[doc = " Any device memory subsequently allocated from this host thread"] - #[doc = " using ::cudaMalloc(), ::cudaMallocPitch() or ::cudaMallocArray()"] - #[doc = " will be physically resident on \\p device. Any host memory allocated"] - #[doc = " from this host thread using ::cudaMallocHost() or ::cudaHostAlloc()"] - #[doc = " or ::cudaHostRegister() will have its lifetime associated with"] - #[doc = " \\p device. Any streams or events created from this host thread will"] - #[doc = " be associated with \\p device. Any kernels launched from this host"] - #[doc = " thread using the <<<>>> operator or ::cudaLaunchKernel() will be executed"] - #[doc = " on \\p device."] - #[doc = ""] - #[doc = " This call may be made from any host thread, to any device, and at"] - #[doc = " any time. This function will do no synchronization with the previous"] - #[doc = " or new device, and should be considered a very low overhead call."] - #[doc = ""] - #[doc = " \\param device - Device on which the active host thread should execute the"] - #[doc = " device code."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDevice,"] - #[doc = " ::cudaErrorDeviceAlreadyInUse"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaGetDeviceProperties,"] - #[doc = " ::cudaChooseDevice,"] - #[doc = " ::cuCtxSetCurrent"] pub fn cudaSetDevice(device: ::std::os::raw::c_int) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns which device is currently being used"] - #[doc = ""] - #[doc = " Returns in \\p *device the current device for the calling host thread."] - #[doc = ""] - #[doc = " \\param device - Returns the device on which the active host thread"] - #[doc = " executes the device code."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetDeviceCount, ::cudaSetDevice, ::cudaGetDeviceProperties,"] - #[doc = " ::cudaChooseDevice,"] - #[doc = " ::cuCtxGetCurrent"] pub fn cudaGetDevice(device: *mut ::std::os::raw::c_int) -> cudaError_t; } extern "C" { - #[doc = " \\brief Set a list of devices that can be used for CUDA"] - #[doc = ""] - #[doc = " Sets a list of devices for CUDA execution in priority order using"] - #[doc = " \\p device_arr. The parameter \\p len specifies the number of elements in the"] - #[doc = " list. CUDA will try devices from the list sequentially until it finds one"] - #[doc = " that works. 
If this function is not called, or if it is called with a \\p len"] - #[doc = " of 0, then CUDA will go back to its default behavior of trying devices"] - #[doc = " sequentially from a default list containing all of the available CUDA"] - #[doc = " devices in the system. If a specified device ID in the list does not exist,"] - #[doc = " this function will return ::cudaErrorInvalidDevice. If \\p len is not 0 and"] - #[doc = " \\p device_arr is NULL or if \\p len exceeds the number of devices in"] - #[doc = " the system, then ::cudaErrorInvalidValue is returned."] - #[doc = ""] - #[doc = " \\param device_arr - List of devices to try"] - #[doc = " \\param len - Number of devices in specified list"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetDeviceCount, ::cudaSetDevice, ::cudaGetDeviceProperties,"] - #[doc = " ::cudaSetDeviceFlags,"] - #[doc = " ::cudaChooseDevice"] pub fn cudaSetValidDevices( device_arr: *mut ::std::os::raw::c_int, len: ::std::os::raw::c_int, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets flags to be used for device executions"] - #[doc = ""] - #[doc = " Records \\p flags as the flags to use when initializing the current"] - #[doc = " device. If no device has been made current to the calling thread,"] - #[doc = " then \\p flags will be applied to the initialization of any device"] - #[doc = " initialized by the calling host thread, unless that device has had"] - #[doc = " its initialization flags set explicitly by this or any host thread."] - #[doc = ""] - #[doc = " If the current device has been set and that device has already been"] - #[doc = " initialized then this call will fail with the error"] - #[doc = " ::cudaErrorSetOnActiveProcess. In this case it is necessary"] - #[doc = " to reset \\p device using ::cudaDeviceReset() before the device's"] - #[doc = " initialization flags may be set."] - #[doc = ""] - #[doc = " The two LSBs of the \\p flags parameter can be used to control how the CPU"] - #[doc = " thread interacts with the OS scheduler when waiting for results from the"] - #[doc = " device."] - #[doc = ""] - #[doc = " - ::cudaDeviceScheduleAuto: The default value if the \\p flags parameter is"] - #[doc = " zero, uses a heuristic based on the number of active CUDA contexts in the"] - #[doc = " process \\p C and the number of logical processors in the system \\p P. If"] - #[doc = " \\p C \\> \\p P, then CUDA will yield to other OS threads when waiting for the"] - #[doc = " device, otherwise CUDA will not yield while waiting for results and"] - #[doc = " actively spin on the processor. Additionally, on Tegra devices,"] - #[doc = " ::cudaDeviceScheduleAuto uses a heuristic based on the power profile of"] - #[doc = " the platform and may choose ::cudaDeviceScheduleBlockingSync for low-powered"] - #[doc = " devices."] - #[doc = " - ::cudaDeviceScheduleSpin: Instruct CUDA to actively spin when waiting for"] - #[doc = " results from the device. This can decrease latency when waiting for the"] - #[doc = " device, but may lower the performance of CPU threads if they are performing"] - #[doc = " work in parallel with the CUDA thread."] - #[doc = " - ::cudaDeviceScheduleYield: Instruct CUDA to yield its thread when waiting"] - #[doc = " for results from the device. 
This can increase latency when waiting for the"] - #[doc = " device, but can increase the performance of CPU threads performing work in"] - #[doc = " parallel with the device."] - #[doc = " - ::cudaDeviceScheduleBlockingSync: Instruct CUDA to block the CPU thread"] - #[doc = " on a synchronization primitive when waiting for the device to finish work."] - #[doc = " - ::cudaDeviceBlockingSync: Instruct CUDA to block the CPU thread on a"] - #[doc = " synchronization primitive when waiting for the device to finish work.
"] - #[doc = " \\ref deprecated \"Deprecated:\" This flag was deprecated as of CUDA 4.0 and"] - #[doc = " replaced with ::cudaDeviceScheduleBlockingSync."] - #[doc = " - ::cudaDeviceMapHost: This flag enables allocating pinned"] - #[doc = " host memory that is accessible to the device. It is implicit for the"] - #[doc = " runtime but may be absent if a context is created using the driver API."] - #[doc = " If this flag is not set, ::cudaHostGetDevicePointer() will always return"] - #[doc = " a failure code."] - #[doc = " - ::cudaDeviceLmemResizeToMax: Instruct CUDA to not reduce local memory"] - #[doc = " after resizing local memory for a kernel. This can prevent thrashing by"] - #[doc = " local memory allocations when launching many kernels with high local"] - #[doc = " memory usage at the cost of potentially increased memory usage."] - #[doc = ""] - #[doc = " \\param flags - Parameters for device operation"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorSetOnActiveProcess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetDeviceFlags, ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaGetDeviceProperties,"] - #[doc = " ::cudaSetDevice, ::cudaSetValidDevices,"] - #[doc = " ::cudaChooseDevice,"] - #[doc = " ::cuDevicePrimaryCtxSetFlags"] pub fn cudaSetDeviceFlags(flags: ::std::os::raw::c_uint) -> cudaError_t; } extern "C" { - #[doc = " \\brief Gets the flags for the current device"] - #[doc = ""] - #[doc = " Returns in \\p flags the flags for the current device. If there is a"] - #[doc = " current device for the calling thread, and the device has been initialized"] - #[doc = " or flags have been set on that device specifically, the flags for the"] - #[doc = " device are returned. If there is no current device, but flags have been"] - #[doc = " set for the thread with ::cudaSetDeviceFlags, the thread flags are returned."] - #[doc = " Finally, if there is no current device and no thread flags, the flags for"] - #[doc = " the first device are returned, which may be the default flags. Compare"] - #[doc = " to the behavior of ::cudaSetDeviceFlags."] - #[doc = ""] - #[doc = " Typically, the flags returned should match the behavior that will be seen"] - #[doc = " if the calling thread uses a device after this call, without any change to"] - #[doc = " the flags or current device inbetween by this or another thread. Note that"] - #[doc = " if the device is not initialized, it is possible for another thread to"] - #[doc = " change the flags for the current device before it is initialized."] - #[doc = " Additionally, when using exclusive mode, if this thread has not requested a"] - #[doc = " specific device, it may use a device other than the first device, contrary"] - #[doc = " to the assumption made by this function."] - #[doc = ""] - #[doc = " If a context has been created via the driver API and is current to the"] - #[doc = " calling thread, the flags for that context are always returned."] - #[doc = ""] - #[doc = " Flags returned by this function may specifically include ::cudaDeviceMapHost"] - #[doc = " even though it is not accepted by ::cudaSetDeviceFlags because it is"] - #[doc = " implicit in runtime API flags. 
The reason for this is that the current"] - #[doc = " context may have been created via the driver API in which case the flag is"] - #[doc = " not implicit and may be unset."] - #[doc = ""] - #[doc = " \\param flags - Pointer to store the device flags"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess, ::cudaErrorInvalidDevice, ::cudaErrorInvalidValue"] - #[doc = ""] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetDevice, ::cudaGetDeviceProperties,"] - #[doc = " ::cudaSetDevice, ::cudaSetDeviceFlags,"] - #[doc = " ::cuCtxGetFlags,"] - #[doc = " ::cuDevicePrimaryCtxGetState"] pub fn cudaGetDeviceFlags( flags: *mut ::std::os::raw::c_uint, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Create an asynchronous stream"] - #[doc = ""] - #[doc = " Creates a new asynchronous stream."] - #[doc = ""] - #[doc = " \\param pStream - Pointer to new stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreateWithPriority,"] - #[doc = " ::cudaStreamCreateWithFlags,"] - #[doc = " ::cudaStreamGetPriority,"] - #[doc = " ::cudaStreamGetFlags,"] - #[doc = " ::cudaStreamQuery,"] - #[doc = " ::cudaStreamSynchronize,"] - #[doc = " ::cudaStreamWaitEvent,"] - #[doc = " ::cudaStreamAddCallback,"] - #[doc = " ::cudaStreamDestroy,"] - #[doc = " ::cuStreamCreate"] pub fn cudaStreamCreate(pStream: *mut cudaStream_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Create an asynchronous stream"] - #[doc = ""] - #[doc = " Creates a new asynchronous stream. The \\p flags argument determines the"] - #[doc = " behaviors of the stream. Valid values for \\p flags are"] - #[doc = " - ::cudaStreamDefault: Default stream creation flag."] - #[doc = " - ::cudaStreamNonBlocking: Specifies that work running in the created"] - #[doc = " stream may run concurrently with work in stream 0 (the NULL stream), and that"] - #[doc = " the created stream should perform no implicit synchronization with stream 0."] - #[doc = ""] - #[doc = " \\param pStream - Pointer to new stream identifier"] - #[doc = " \\param flags - Parameters for stream creation"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreate,"] - #[doc = " ::cudaStreamCreateWithPriority,"] - #[doc = " ::cudaStreamGetFlags,"] - #[doc = " ::cudaStreamQuery,"] - #[doc = " ::cudaStreamSynchronize,"] - #[doc = " ::cudaStreamWaitEvent,"] - #[doc = " ::cudaStreamAddCallback,"] - #[doc = " ::cudaStreamDestroy,"] - #[doc = " ::cuStreamCreate"] pub fn cudaStreamCreateWithFlags( pStream: *mut cudaStream_t, flags: ::std::os::raw::c_uint, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Create an asynchronous stream with the specified priority"] - #[doc = ""] - #[doc = " Creates a stream with the specified priority and returns a handle in \\p pStream."] - #[doc = " This API alters the scheduler priority of work in the stream. Work in a higher"] - #[doc = " priority stream may preempt work already executing in a low priority stream."] - #[doc = ""] - #[doc = " \\p priority follows a convention where lower numbers represent higher priorities."] - #[doc = " '0' represents default priority. 
The range of meaningful numerical priorities can"] - #[doc = " be queried using ::cudaDeviceGetStreamPriorityRange. If the specified priority is"] - #[doc = " outside the numerical range returned by ::cudaDeviceGetStreamPriorityRange,"] - #[doc = " it will automatically be clamped to the lowest or the highest number in the range."] - #[doc = ""] - #[doc = " \\param pStream - Pointer to new stream identifier"] - #[doc = " \\param flags - Flags for stream creation. See ::cudaStreamCreateWithFlags for a list of valid flags that can be passed"] - #[doc = " \\param priority - Priority of the stream. Lower numbers represent higher priorities."] - #[doc = " See ::cudaDeviceGetStreamPriorityRange for more information about"] - #[doc = " the meaningful stream priorities that can be passed."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\note Stream priorities are supported only on GPUs"] - #[doc = " with compute capability 3.5 or higher."] - #[doc = ""] - #[doc = " \\note In the current implementation, only compute kernels launched in"] - #[doc = " priority streams are affected by the stream's priority. Stream priorities have"] - #[doc = " no effect on host-to-device and device-to-host memory operations."] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreate,"] - #[doc = " ::cudaStreamCreateWithFlags,"] - #[doc = " ::cudaDeviceGetStreamPriorityRange,"] - #[doc = " ::cudaStreamGetPriority,"] - #[doc = " ::cudaStreamQuery,"] - #[doc = " ::cudaStreamWaitEvent,"] - #[doc = " ::cudaStreamAddCallback,"] - #[doc = " ::cudaStreamSynchronize,"] - #[doc = " ::cudaStreamDestroy,"] - #[doc = " ::cuStreamCreateWithPriority"] pub fn cudaStreamCreateWithPriority( pStream: *mut cudaStream_t, flags: ::std::os::raw::c_uint, @@ -15024,124 +3975,27 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Query the priority of a stream"] - #[doc = ""] - #[doc = " Query the priority of a stream. The priority is returned in in \\p priority."] - #[doc = " Note that if the stream was created with a priority outside the meaningful"] - #[doc = " numerical range returned by ::cudaDeviceGetStreamPriorityRange,"] - #[doc = " this function returns the clamped priority."] - #[doc = " See ::cudaStreamCreateWithPriority for details about priority clamping."] - #[doc = ""] - #[doc = " \\param hStream - Handle to the stream to be queried"] - #[doc = " \\param priority - Pointer to a signed integer in which the stream's priority is returned"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreateWithPriority,"] - #[doc = " ::cudaDeviceGetStreamPriorityRange,"] - #[doc = " ::cudaStreamGetFlags,"] - #[doc = " ::cuStreamGetPriority"] pub fn cudaStreamGetPriority( hStream: cudaStream_t, priority: *mut ::std::os::raw::c_int, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Query the flags of a stream"] - #[doc = ""] - #[doc = " Query the flags of a stream. 
The flags are returned in \\p flags."] - #[doc = " See ::cudaStreamCreateWithFlags for a list of valid flags."] - #[doc = ""] - #[doc = " \\param hStream - Handle to the stream to be queried"] - #[doc = " \\param flags - Pointer to an unsigned integer in which the stream's flags are returned"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreateWithPriority,"] - #[doc = " ::cudaStreamCreateWithFlags,"] - #[doc = " ::cudaStreamGetPriority,"] - #[doc = " ::cuStreamGetFlags"] pub fn cudaStreamGetFlags( hStream: cudaStream_t, flags: *mut ::std::os::raw::c_uint, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Destroys and cleans up an asynchronous stream"] - #[doc = ""] - #[doc = " Destroys and cleans up the asynchronous stream specified by \\p stream."] - #[doc = ""] - #[doc = " In case the device is still doing work in the stream \\p stream"] - #[doc = " when ::cudaStreamDestroy() is called, the function will return immediately"] - #[doc = " and the resources associated with \\p stream will be released automatically"] - #[doc = " once the device has completed all work in \\p stream."] - #[doc = ""] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreate,"] - #[doc = " ::cudaStreamCreateWithFlags,"] - #[doc = " ::cudaStreamQuery,"] - #[doc = " ::cudaStreamWaitEvent,"] - #[doc = " ::cudaStreamSynchronize,"] - #[doc = " ::cudaStreamAddCallback,"] - #[doc = " ::cuStreamDestroy"] pub fn cudaStreamDestroy(stream: cudaStream_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Make a compute stream wait on an event"] - #[doc = ""] - #[doc = " Makes all future work submitted to \\p stream wait for all work captured in"] - #[doc = " \\p event. 
See ::cudaEventRecord() for details on what is captured by an event."] - #[doc = " The synchronization will be performed efficiently on the device when applicable."] - #[doc = " \\p event may be from a different device than \\p stream."] - #[doc = ""] - #[doc = " \\param stream - Stream to wait"] - #[doc = " \\param event - Event to wait on"] - #[doc = " \\param flags - Parameters for the operation (must be 0)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamQuery, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy,"] - #[doc = " ::cuStreamWaitEvent"] pub fn cudaStreamWaitEvent( stream: cudaStream_t, event: cudaEvent_t, flags: ::std::os::raw::c_uint, ) -> cudaError_t; } -#[doc = " Type of stream callback functions."] -#[doc = " \\param stream The stream as passed to ::cudaStreamAddCallback, may be NULL."] -#[doc = " \\param status ::cudaSuccess or any persistent error on the stream."] -#[doc = " \\param userData User parameter provided at registration."] pub type cudaStreamCallback_t = ::std::option::Option< unsafe extern "C" fn( stream: cudaStream_t, @@ -15150,69 +4004,6 @@ pub type cudaStreamCallback_t = ::std::option::Option< ), >; extern "C" { - #[doc = " \\brief Add a callback to a compute stream"] - #[doc = ""] - #[doc = " \\note This function is slated for eventual deprecation and removal. If"] - #[doc = " you do not require the callback to execute in case of a device error,"] - #[doc = " consider using ::cudaLaunchHostFunc. Additionally, this function is not"] - #[doc = " supported with ::cudaStreamBeginCapture and ::cudaStreamEndCapture, unlike"] - #[doc = " ::cudaLaunchHostFunc."] - #[doc = ""] - #[doc = " Adds a callback to be called on the host after all currently enqueued"] - #[doc = " items in the stream have completed. For each"] - #[doc = " cudaStreamAddCallback call, a callback will be executed exactly once."] - #[doc = " The callback will block later work in the stream until it is finished."] - #[doc = ""] - #[doc = " The callback may be passed ::cudaSuccess or an error code. In the event"] - #[doc = " of a device error, all subsequently executed callbacks will receive an"] - #[doc = " appropriate ::cudaError_t."] - #[doc = ""] - #[doc = " Callbacks must not make any CUDA API calls. Attempting to use CUDA APIs"] - #[doc = " may result in ::cudaErrorNotPermitted. Callbacks must not perform any"] - #[doc = " synchronization that may depend on outstanding device work or other callbacks"] - #[doc = " that are not mandated to run earlier. 
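// A minimal usage sketch (not part of the generated bindings): cross-stream
// ordering with cudaStreamWaitEvent above. An event records the work queued on
// `producer`, and `consumer` is gated on it. cudaEventCreate, cudaEventRecord
// and cudaEventDestroy are declared further down in this same wrapper; error
// handling is omitted for brevity, and the flag argument must be 0 per the docs.
unsafe fn chain_streams(producer: cudaStream_t, consumer: cudaStream_t) -> cudaError_t {
    let mut done: cudaEvent_t = ::std::ptr::null_mut();
    cudaEventCreate(&mut done);
    // Capture the work currently queued on `producer`...
    cudaEventRecord(done, producer);
    // ...and make `consumer` wait for it (flags must be 0).
    let err = cudaStreamWaitEvent(consumer, done, 0);
    // The wait has already captured the event's state, so it can be destroyed.
    cudaEventDestroy(done);
    err
}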
Callbacks without a mandated order"] - #[doc = " (in independent streams) execute in undefined order and may be serialized."] - #[doc = ""] - #[doc = " For the purposes of Unified Memory, callback execution makes a number of"] - #[doc = " guarantees:"] - #[doc = " "] - #[doc = ""] - #[doc = " \\param stream - Stream to add callback to"] - #[doc = " \\param callback - The function to call once preceding stream operations are complete"] - #[doc = " \\param userData - User specified data to be passed to the callback function"] - #[doc = " \\param flags - Reserved for future use, must be 0"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorNotSupported"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamQuery, ::cudaStreamSynchronize, ::cudaStreamWaitEvent, ::cudaStreamDestroy, ::cudaMallocManaged, ::cudaStreamAttachMemAsync,"] - #[doc = " ::cudaLaunchHostFunc, ::cuStreamAddCallback"] pub fn cudaStreamAddCallback( stream: cudaStream_t, callback: cudaStreamCallback_t, @@ -15221,131 +4012,12 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Waits for stream tasks to complete"] - #[doc = ""] - #[doc = " Blocks until \\p stream has completed all operations. If the"] - #[doc = " ::cudaDeviceScheduleBlockingSync flag was set for this device,"] - #[doc = " the host thread will block until the stream is finished with"] - #[doc = " all of its tasks."] - #[doc = ""] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamQuery, ::cudaStreamWaitEvent, ::cudaStreamAddCallback, ::cudaStreamDestroy,"] - #[doc = " ::cuStreamSynchronize"] pub fn cudaStreamSynchronize(stream: cudaStream_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Queries an asynchronous stream for completion status"] - #[doc = ""] - #[doc = " Returns ::cudaSuccess if all operations in \\p stream have"] - #[doc = " completed, or ::cudaErrorNotReady if not."] - #[doc = ""] - #[doc = " For the purposes of Unified Memory, a return value of ::cudaSuccess"] - #[doc = " is equivalent to having called ::cudaStreamSynchronize()."] - #[doc = ""] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorNotReady,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy,"] - #[doc = " ::cuStreamQuery"] pub fn cudaStreamQuery(stream: cudaStream_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Attach memory to a stream asynchronously"] - #[doc = ""] - #[doc = " Enqueues an operation in \\p stream to specify stream association of"] - #[doc = " \\p length bytes of memory starting from \\p devPtr. 
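// A minimal usage sketch (not part of the generated bindings): registering a
// host callback through cudaStreamAddCallback above. cudaStreamCallback_t is an
// Option-wrapped `unsafe extern "C" fn`, so a free Rust function with the
// matching (stream, status, user-data) signature can be passed as Some(..).
// Per the docs above, the callback must not call back into the CUDA API.
unsafe extern "C" fn on_stream_complete(
    _stream: cudaStream_t,
    status: cudaError_t,
    _user_data: *mut ::std::os::raw::c_void,
) {
    // 0 == cudaSuccess in this wrapper's constified-enum style (assumed).
    if status != 0 {
        eprintln!("stream work failed with cudaError_t {}", status);
    }
}

unsafe fn notify_when_done(stream: cudaStream_t) -> cudaError_t {
    // `flags` is reserved and must be 0; no user data is passed here.
    cudaStreamAddCallback(stream, Some(on_stream_complete), ::std::ptr::null_mut(), 0)
}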
This function is a"] - #[doc = " stream-ordered operation, meaning that it is dependent on, and will"] - #[doc = " only take effect when, previous work in stream has completed. Any"] - #[doc = " previous association is automatically replaced."] - #[doc = ""] - #[doc = " \\p devPtr must point to an one of the following types of memories:"] - #[doc = " - managed memory declared using the __managed__ keyword or allocated with"] - #[doc = " ::cudaMallocManaged."] - #[doc = " - a valid host-accessible region of system-allocated pageable memory. This"] - #[doc = " type of memory may only be specified if the device associated with the"] - #[doc = " stream reports a non-zero value for the device attribute"] - #[doc = " ::cudaDevAttrPageableMemoryAccess."] - #[doc = ""] - #[doc = " For managed allocations, \\p length must be either zero or the entire"] - #[doc = " allocation's size. Both indicate that the entire allocation's stream"] - #[doc = " association is being changed. Currently, it is not possible to change stream"] - #[doc = " association for a portion of a managed allocation."] - #[doc = ""] - #[doc = " For pageable allocations, \\p length must be non-zero."] - #[doc = ""] - #[doc = " The stream association is specified using \\p flags which must be"] - #[doc = " one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle."] - #[doc = " The default value for \\p flags is ::cudaMemAttachSingle"] - #[doc = " If the ::cudaMemAttachGlobal flag is specified, the memory can be accessed"] - #[doc = " by any stream on any device."] - #[doc = " If the ::cudaMemAttachHost flag is specified, the program makes a guarantee"] - #[doc = " that it won't access the memory on the device from any stream on a device that"] - #[doc = " has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess."] - #[doc = " If the ::cudaMemAttachSingle flag is specified and \\p stream is associated with"] - #[doc = " a device that has a zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess,"] - #[doc = " the program makes a guarantee that it will only access the memory on the device"] - #[doc = " from \\p stream. It is illegal to attach singly to the NULL stream, because the"] - #[doc = " NULL stream is a virtual global stream and not a specific stream. An error will"] - #[doc = " be returned in this case."] - #[doc = ""] - #[doc = " When memory is associated with a single stream, the Unified Memory system will"] - #[doc = " allow CPU access to this memory region so long as all operations in \\p stream"] - #[doc = " have completed, regardless of whether other streams are active. In effect,"] - #[doc = " this constrains exclusive ownership of the managed memory region by"] - #[doc = " an active GPU to per-stream activity instead of whole-GPU activity."] - #[doc = ""] - #[doc = " Accessing memory on the device from streams that are not associated with"] - #[doc = " it will produce undefined results. No error checking is performed by the"] - #[doc = " Unified Memory system to ensure that kernels launched into other streams"] - #[doc = " do not access this region."] - #[doc = ""] - #[doc = " It is a program's responsibility to order calls to ::cudaStreamAttachMemAsync"] - #[doc = " via events, synchronization or other means to ensure legal access to memory"] - #[doc = " at all times. 
Data visibility and coherency will be changed appropriately"] - #[doc = " for all kernels which follow a stream-association change."] - #[doc = ""] - #[doc = " If \\p stream is destroyed while data is associated with it, the association is"] - #[doc = " removed and the association reverts to the default visibility of the allocation"] - #[doc = " as specified at ::cudaMallocManaged. For __managed__ variables, the default"] - #[doc = " association is always ::cudaMemAttachGlobal. Note that destroying a stream is an"] - #[doc = " asynchronous operation, and as a result, the change to default association won't"] - #[doc = " happen until all work in the stream has completed."] - #[doc = ""] - #[doc = " \\param stream - Stream in which to enqueue the attach operation"] - #[doc = " \\param devPtr - Pointer to memory (must be a pointer to managed memory or"] - #[doc = " to a valid host-accessible region of system-allocated"] - #[doc = " memory)"] - #[doc = " \\param length - Length of memory (defaults to zero)"] - #[doc = " \\param flags - Must be one of ::cudaMemAttachGlobal, ::cudaMemAttachHost or ::cudaMemAttachSingle (defaults to ::cudaMemAttachSingle)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorNotReady,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreate, ::cudaStreamCreateWithFlags, ::cudaStreamWaitEvent, ::cudaStreamSynchronize, ::cudaStreamAddCallback, ::cudaStreamDestroy, ::cudaMallocManaged,"] - #[doc = " ::cuStreamAttachMemAsync"] pub fn cudaStreamAttachMemAsync( stream: cudaStream_t, devPtr: *mut ::std::os::raw::c_void, @@ -15354,190 +4026,29 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Begins graph capture on a stream"] - #[doc = ""] - #[doc = " Begin graph capture on \\p stream. When a stream is in capture mode, all operations"] - #[doc = " pushed into the stream will not be executed, but will instead be captured into"] - #[doc = " a graph, which will be returned via ::cudaStreamEndCapture. Capture may not be initiated"] - #[doc = " if \\p stream is ::cudaStreamLegacy. Capture must be ended on the same stream in which"] - #[doc = " it was initiated, and it may only be initiated if the stream is not already in capture"] - #[doc = " mode. The capture mode may be queried via ::cudaStreamIsCapturing. A unique id"] - #[doc = " representing the capture sequence may be queried via ::cudaStreamGetCaptureInfo."] - #[doc = ""] - #[doc = " If \\p mode is not ::cudaStreamCaptureModeRelaxed, ::cudaStreamEndCapture must be"] - #[doc = " called on this stream from the same thread."] - #[doc = ""] - #[doc = " \\note Kernels captured using this API must not use texture and surface references."] - #[doc = " Reading or writing through any texture or surface reference is undefined"] - #[doc = " behavior. This restriction does not apply to texture and surface objects."] - #[doc = ""] - #[doc = " \\param stream - Stream in which to initiate capture"] - #[doc = " \\param mode - Controls the interaction of this capture sequence with other API"] - #[doc = " calls that are potentially unsafe. 
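// A minimal usage sketch (not part of the generated bindings): restricting a
// managed allocation to one stream via cudaStreamAttachMemAsync above.
// `managed_ptr` is assumed to come from cudaMallocManaged (declared elsewhere
// in this wrapper), and 4 is assumed to correspond to cudaMemAttachSingle.
// Length 0 means "the whole allocation" for managed memory.
unsafe fn attach_to_stream(
    stream: cudaStream_t,
    managed_ptr: *mut ::std::os::raw::c_void,
) -> cudaError_t {
    let err = cudaStreamAttachMemAsync(stream, managed_ptr, 0, 4);
    if err == 0 {
        // The association only takes effect once prior work in `stream` is done.
        cudaStreamSynchronize(stream)
    } else {
        err
    }
}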
For more details see"] - #[doc = " ::cudaThreadExchangeStreamCaptureMode."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaStreamCreate,"] - #[doc = " ::cudaStreamIsCapturing,"] - #[doc = " ::cudaStreamEndCapture,"] - #[doc = " ::cudaThreadExchangeStreamCaptureMode"] pub fn cudaStreamBeginCapture( stream: cudaStream_t, mode: cudaStreamCaptureMode, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Swaps the stream capture interaction mode for a thread"] - #[doc = ""] - #[doc = " Sets the calling thread's stream capture interaction mode to the value contained"] - #[doc = " in \\p *mode, and overwrites \\p *mode with the previous mode for the thread. To"] - #[doc = " facilitate deterministic behavior across function or module boundaries, callers"] - #[doc = " are encouraged to use this API in a push-pop fashion: \\code"] - #[doc = "cudaStreamCaptureMode mode = desiredMode;"] - #[doc = "cudaThreadExchangeStreamCaptureMode(&mode);"] - #[doc = "..."] - #[doc = "cudaThreadExchangeStreamCaptureMode(&mode); // restore previous mode"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " During stream capture (see ::cudaStreamBeginCapture), some actions, such as a call"] - #[doc = " to ::cudaMalloc, may be unsafe. In the case of ::cudaMalloc, the operation is"] - #[doc = " not enqueued asynchronously to a stream, and is not observed by stream capture."] - #[doc = " Therefore, if the sequence of operations captured via ::cudaStreamBeginCapture"] - #[doc = " depended on the allocation being replayed whenever the graph is launched, the"] - #[doc = " captured graph would be invalid."] - #[doc = ""] - #[doc = " Therefore, stream capture places restrictions on API calls that can be made within"] - #[doc = " or concurrently to a ::cudaStreamBeginCapture-::cudaStreamEndCapture sequence. This"] - #[doc = " behavior can be controlled via this API and flags to ::cudaStreamBeginCapture."] - #[doc = ""] - #[doc = " A thread's mode is one of the following:"] - #[doc = " - \\p cudaStreamCaptureModeGlobal: This is the default mode. If the local thread has"] - #[doc = " an ongoing capture sequence that was not initiated with"] - #[doc = " \\p cudaStreamCaptureModeRelaxed at \\p cuStreamBeginCapture, or if any other thread"] - #[doc = " has a concurrent capture sequence initiated with \\p cudaStreamCaptureModeGlobal,"] - #[doc = " this thread is prohibited from potentially unsafe API calls."] - #[doc = " - \\p cudaStreamCaptureModeThreadLocal: If the local thread has an ongoing capture"] - #[doc = " sequence not initiated with \\p cudaStreamCaptureModeRelaxed, it is prohibited"] - #[doc = " from potentially unsafe API calls. Concurrent capture sequences in other threads"] - #[doc = " are ignored."] - #[doc = " - \\p cudaStreamCaptureModeRelaxed: The local thread is not prohibited from potentially"] - #[doc = " unsafe API calls. 
Note that the thread is still prohibited from API calls which"] - #[doc = " necessarily conflict with stream capture, for example, attempting ::cudaEventQuery"] - #[doc = " on an event that was last recorded inside a capture sequence."] - #[doc = ""] - #[doc = " \\param mode - Pointer to mode value to swap with the current mode"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaStreamBeginCapture"] pub fn cudaThreadExchangeStreamCaptureMode( mode: *mut cudaStreamCaptureMode, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Ends capture on a stream, returning the captured graph"] - #[doc = ""] - #[doc = " End capture on \\p stream, returning the captured graph via \\p pGraph."] - #[doc = " Capture must have been initiated on \\p stream via a call to ::cudaStreamBeginCapture."] - #[doc = " If capture was invalidated, due to a violation of the rules of stream capture, then"] - #[doc = " a NULL graph will be returned."] - #[doc = ""] - #[doc = " If the \\p mode argument to ::cudaStreamBeginCapture was not"] - #[doc = " ::cudaStreamCaptureModeRelaxed, this call must be from the same thread as"] - #[doc = " ::cudaStreamBeginCapture."] - #[doc = ""] - #[doc = " \\param stream - Stream to query"] - #[doc = " \\param pGraph - The captured graph"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorStreamCaptureWrongThread"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaStreamCreate,"] - #[doc = " ::cudaStreamBeginCapture,"] - #[doc = " ::cudaStreamIsCapturing"] pub fn cudaStreamEndCapture( stream: cudaStream_t, pGraph: *mut cudaGraph_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a stream's capture status"] - #[doc = ""] - #[doc = " Return the capture status of \\p stream via \\p pCaptureStatus. After a successful"] - #[doc = " call, \\p *pCaptureStatus will contain one of the following:"] - #[doc = " - ::cudaStreamCaptureStatusNone: The stream is not capturing."] - #[doc = " - ::cudaStreamCaptureStatusActive: The stream is capturing."] - #[doc = " - ::cudaStreamCaptureStatusInvalidated: The stream was capturing but an error"] - #[doc = " has invalidated the capture sequence. The capture sequence must be terminated"] - #[doc = " with ::cudaStreamEndCapture on the stream where it was initiated in order to"] - #[doc = " continue using \\p stream."] - #[doc = ""] - #[doc = " Note that, if this is called on ::cudaStreamLegacy (the \"null stream\") while"] - #[doc = " a blocking stream on the same device is capturing, it will return"] - #[doc = " ::cudaErrorStreamCaptureImplicit and \\p *pCaptureStatus is unspecified"] - #[doc = " after the call. The blocking stream capture is not invalidated."] - #[doc = ""] - #[doc = " When a blocking stream is capturing, the legacy stream is in an"] - #[doc = " unusable state until the blocking stream capture is terminated. 
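// A minimal usage sketch (not part of the generated bindings): the push/pop
// idiom from the cudaThreadExchangeStreamCaptureMode docs above, expressed in
// Rust. Assumes this wrapper's constified-enum style, where
// cudaStreamCaptureModeRelaxed is the integer 2; `f` is a hypothetical closure
// doing work that should not be restricted by a concurrent global capture.
unsafe fn with_relaxed_capture_mode<F: FnOnce()>(f: F) -> cudaError_t {
    let mut mode: cudaStreamCaptureMode = 2; // cudaStreamCaptureModeRelaxed (assumed)
    // Push: after the call, `mode` holds the thread's previous mode.
    let err = cudaThreadExchangeStreamCaptureMode(&mut mode);
    if err != 0 {
        return err;
    }
    f();
    // Pop: restore the previous mode.
    cudaThreadExchangeStreamCaptureMode(&mut mode)
}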
The legacy"] - #[doc = " stream is not supported for stream capture, but attempted use would have an"] - #[doc = " implicit dependency on the capturing stream(s)."] - #[doc = ""] - #[doc = " \\param stream - Stream to query"] - #[doc = " \\param pCaptureStatus - Returns the stream's capture status"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorStreamCaptureImplicit"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaStreamCreate,"] - #[doc = " ::cudaStreamBeginCapture,"] - #[doc = " ::cudaStreamEndCapture"] pub fn cudaStreamIsCapturing( stream: cudaStream_t, pCaptureStatus: *mut cudaStreamCaptureStatus, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Query capture status of a stream"] - #[doc = ""] - #[doc = " Query the capture status of a stream and get a unique id representing"] - #[doc = " the capture sequence over the lifetime of the process."] - #[doc = ""] - #[doc = " If called on ::cudaStreamLegacy (the \"null stream\") while a stream not created"] - #[doc = " with ::cudaStreamNonBlocking is capturing, returns ::cudaErrorStreamCaptureImplicit."] - #[doc = ""] - #[doc = " A valid id is returned only if both of the following are true:"] - #[doc = " - the call returns ::cudaSuccess"] - #[doc = " - captureStatus is set to ::cudaStreamCaptureStatusActive"] - #[doc = ""] - #[doc = " \\param stream - Stream to query"] - #[doc = " \\param pCaptureStatus - Returns the stream's capture status"] - #[doc = " \\param pId - Returns the unique id of the capture sequence"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorStreamCaptureImplicit"] - #[doc = " \\notefnerr"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaStreamBeginCapture,"] - #[doc = " ::cudaStreamIsCapturing"] pub fn cudaStreamGetCaptureInfo( stream: cudaStream_t, pCaptureStatus: *mut cudaStreamCaptureStatus, @@ -15545,233 +4056,30 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates an event object"] - #[doc = ""] - #[doc = " Creates an event object for the current device using ::cudaEventDefault."] - #[doc = ""] - #[doc = " \\param event - Newly created event"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorLaunchFailure,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaEventCreate(cudaEvent_t*, unsigned int) \"cudaEventCreate (C++ API)\","] - #[doc = " ::cudaEventCreateWithFlags, ::cudaEventRecord, ::cudaEventQuery,"] - #[doc = " ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime,"] - #[doc = " ::cudaStreamWaitEvent,"] - #[doc = " ::cuEventCreate"] pub fn cudaEventCreate(event: *mut cudaEvent_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates an event object with the specified flags"] - #[doc = ""] - #[doc = " Creates an event object for the current device with the specified flags. Valid"] - #[doc = " flags include:"] - #[doc = " - ::cudaEventDefault: Default event creation flag."] - #[doc = " - ::cudaEventBlockingSync: Specifies that event should use blocking"] - #[doc = " synchronization. 
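// A minimal usage sketch (not part of the generated bindings): capturing queued
// work into a graph with the capture functions above. Assumes the
// constified-enum style of this wrapper (cudaStreamCaptureModeGlobal == 0,
// cudaStreamCaptureStatusActive == 1); `enqueue_work` is a hypothetical closure
// that issues the async calls to be captured rather than executed.
unsafe fn capture_graph<F: FnOnce(cudaStream_t)>(
    stream: cudaStream_t,
    enqueue_work: F,
) -> Result<cudaGraph_t, cudaError_t> {
    let err = cudaStreamBeginCapture(stream, 0); // 0 == cudaStreamCaptureModeGlobal (assumed)
    if err != 0 {
        return Err(err);
    }
    // Optional sanity check: the stream should now report an active capture.
    let mut status: cudaStreamCaptureStatus = 0;
    cudaStreamIsCapturing(stream, &mut status);
    debug_assert_eq!(status, 1); // cudaStreamCaptureStatusActive (assumed)

    enqueue_work(stream);

    let mut graph: cudaGraph_t = ::std::ptr::null_mut();
    let err = cudaStreamEndCapture(stream, &mut graph);
    if err != 0 { Err(err) } else { Ok(graph) }
}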
A host thread that uses ::cudaEventSynchronize() to wait"] - #[doc = " on an event created with this flag will block until the event actually"] - #[doc = " completes."] - #[doc = " - ::cudaEventDisableTiming: Specifies that the created event does not need"] - #[doc = " to record timing data. Events created with this flag specified and"] - #[doc = " the ::cudaEventBlockingSync flag not specified will provide the best"] - #[doc = " performance when used with ::cudaStreamWaitEvent() and ::cudaEventQuery()."] - #[doc = " - ::cudaEventInterprocess: Specifies that the created event may be used as an"] - #[doc = " interprocess event by ::cudaIpcGetEventHandle(). ::cudaEventInterprocess must"] - #[doc = " be specified along with ::cudaEventDisableTiming."] - #[doc = ""] - #[doc = " \\param event - Newly created event"] - #[doc = " \\param flags - Flags for new event"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorLaunchFailure,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaEventCreate(cudaEvent_t*) \"cudaEventCreate (C API)\","] - #[doc = " ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime,"] - #[doc = " ::cudaStreamWaitEvent,"] - #[doc = " ::cuEventCreate"] pub fn cudaEventCreateWithFlags( event: *mut cudaEvent_t, flags: ::std::os::raw::c_uint, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Records an event"] - #[doc = ""] - #[doc = " Captures in \\p event the contents of \\p stream at the time of this call."] - #[doc = " \\p event and \\p stream must be on the same device."] - #[doc = " Calls such as ::cudaEventQuery() or ::cudaStreamWaitEvent() will then"] - #[doc = " examine or wait for completion of the work that was captured. Uses of"] - #[doc = " \\p stream after this call do not modify \\p event. See note on default"] - #[doc = " stream behavior for what is captured in the default case."] - #[doc = ""] - #[doc = " ::cudaEventRecord() can be called multiple times on the same event and"] - #[doc = " will overwrite the previously captured state. Other APIs such as"] - #[doc = " ::cudaStreamWaitEvent() use the most recently captured state at the time"] - #[doc = " of the API call, and are not affected by later calls to"] - #[doc = " ::cudaEventRecord(). 
Before the first call to ::cudaEventRecord(), an"] - #[doc = " event represents an empty set of work, so for example ::cudaEventQuery()"] - #[doc = " would return ::cudaSuccess."] - #[doc = ""] - #[doc = " \\param event - Event to record"] - #[doc = " \\param stream - Stream in which to record event"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorLaunchFailure"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaEventCreate(cudaEvent_t*) \"cudaEventCreate (C API)\","] - #[doc = " ::cudaEventCreateWithFlags, ::cudaEventQuery,"] - #[doc = " ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime,"] - #[doc = " ::cudaStreamWaitEvent,"] - #[doc = " ::cuEventRecord"] pub fn cudaEventRecord( event: cudaEvent_t, stream: cudaStream_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Queries an event's status"] - #[doc = ""] - #[doc = " Queries the status of all work currently captured by \\p event. See"] - #[doc = " ::cudaEventRecord() for details on what is captured by an event."] - #[doc = ""] - #[doc = " Returns ::cudaSuccess if all captured work has been completed, or"] - #[doc = " ::cudaErrorNotReady if any captured work is incomplete."] - #[doc = ""] - #[doc = " For the purposes of Unified Memory, a return value of ::cudaSuccess"] - #[doc = " is equivalent to having called ::cudaEventSynchronize()."] - #[doc = ""] - #[doc = " \\param event - Event to query"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorNotReady,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorLaunchFailure"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaEventCreate(cudaEvent_t*) \"cudaEventCreate (C API)\","] - #[doc = " ::cudaEventCreateWithFlags, ::cudaEventRecord,"] - #[doc = " ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventElapsedTime,"] - #[doc = " ::cuEventQuery"] pub fn cudaEventQuery(event: cudaEvent_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Waits for an event to complete"] - #[doc = ""] - #[doc = " Waits until the completion of all work currently captured in \\p event."] - #[doc = " See ::cudaEventRecord() for details on what is captured by an event."] - #[doc = ""] - #[doc = " Waiting for an event that was created with the ::cudaEventBlockingSync"] - #[doc = " flag will cause the calling CPU thread to block until the event has"] - #[doc = " been completed by the device. 
If the ::cudaEventBlockingSync flag has"] - #[doc = " not been set, then the CPU thread will busy-wait until the event has"] - #[doc = " been completed by the device."] - #[doc = ""] - #[doc = " \\param event - Event to wait for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorLaunchFailure"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaEventCreate(cudaEvent_t*) \"cudaEventCreate (C API)\","] - #[doc = " ::cudaEventCreateWithFlags, ::cudaEventRecord,"] - #[doc = " ::cudaEventQuery, ::cudaEventDestroy, ::cudaEventElapsedTime,"] - #[doc = " ::cuEventSynchronize"] pub fn cudaEventSynchronize(event: cudaEvent_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Destroys an event object"] - #[doc = ""] - #[doc = " Destroys the event specified by \\p event."] - #[doc = ""] - #[doc = " An event may be destroyed before it is complete (i.e., while"] - #[doc = " ::cudaEventQuery() would return ::cudaErrorNotReady). In this case, the"] - #[doc = " call does not block on completion of the event, and any associated"] - #[doc = " resources will automatically be released asynchronously at completion."] - #[doc = ""] - #[doc = " \\param event - Event to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorLaunchFailure"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaEventCreate(cudaEvent_t*) \"cudaEventCreate (C API)\","] - #[doc = " ::cudaEventCreateWithFlags, ::cudaEventQuery,"] - #[doc = " ::cudaEventSynchronize, ::cudaEventRecord, ::cudaEventElapsedTime,"] - #[doc = " ::cuEventDestroy"] pub fn cudaEventDestroy(event: cudaEvent_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Computes the elapsed time between events"] - #[doc = ""] - #[doc = " Computes the elapsed time between two events (in milliseconds with a"] - #[doc = " resolution of around 0.5 microseconds)."] - #[doc = ""] - #[doc = " If either event was last recorded in a non-NULL stream, the resulting time"] - #[doc = " may be greater than expected (even if both used the same stream handle). This"] - #[doc = " happens because the ::cudaEventRecord() operation takes place asynchronously"] - #[doc = " and there is no guarantee that the measured latency is actually just between"] - #[doc = " the two events. Any number of other different stream operations could execute"] - #[doc = " in between the two measured events, thus altering the timing in a significant"] - #[doc = " way."] - #[doc = ""] - #[doc = " If ::cudaEventRecord() has not been called on either event, then"] - #[doc = " ::cudaErrorInvalidResourceHandle is returned. If ::cudaEventRecord() has been"] - #[doc = " called on both events but one or both of them has not yet been completed"] - #[doc = " (that is, ::cudaEventQuery() would return ::cudaErrorNotReady on at least one"] - #[doc = " of the events), ::cudaErrorNotReady is returned. 
If either event was created"] - #[doc = " with the ::cudaEventDisableTiming flag, then this function will return"] - #[doc = " ::cudaErrorInvalidResourceHandle."] - #[doc = ""] - #[doc = " \\param ms - Time between \\p start and \\p end in ms"] - #[doc = " \\param start - Starting event"] - #[doc = " \\param end - Ending event"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorNotReady,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorLaunchFailure"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaEventCreate(cudaEvent_t*) \"cudaEventCreate (C API)\","] - #[doc = " ::cudaEventCreateWithFlags, ::cudaEventQuery,"] - #[doc = " ::cudaEventSynchronize, ::cudaEventDestroy, ::cudaEventRecord,"] - #[doc = " ::cuEventElapsedTime"] pub fn cudaEventElapsedTime( ms: *mut f32, start: cudaEvent_t, @@ -15779,183 +4087,12 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Imports an external memory object"] - #[doc = ""] - #[doc = " Imports an externally allocated memory object and returns"] - #[doc = " a handle to that in \\p extMem_out."] - #[doc = ""] - #[doc = " The properties of the handle being imported must be described in"] - #[doc = " \\p memHandleDesc. The ::cudaExternalMemoryHandleDesc structure"] - #[doc = " is defined as follows:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef struct cudaExternalMemoryHandleDesc_st {"] - #[doc = "cudaExternalMemoryHandleType type;"] - #[doc = "union {"] - #[doc = "int fd;"] - #[doc = "struct {"] - #[doc = "void *handle;"] - #[doc = "const void *name;"] - #[doc = "} win32;"] - #[doc = "} handle;"] - #[doc = "unsigned long long size;"] - #[doc = "unsigned int flags;"] - #[doc = "} cudaExternalMemoryHandleDesc;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " where ::cudaExternalMemoryHandleDesc::type specifies the type"] - #[doc = " of handle being imported. ::cudaExternalMemoryHandleType is"] - #[doc = " defined as:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef enum cudaExternalMemoryHandleType_enum {"] - #[doc = "cudaExternalMemoryHandleTypeOpaqueFd = 1,"] - #[doc = "cudaExternalMemoryHandleTypeOpaqueWin32 = 2,"] - #[doc = "cudaExternalMemoryHandleTypeOpaqueWin32Kmt = 3,"] - #[doc = "cudaExternalMemoryHandleTypeD3D12Heap = 4,"] - #[doc = "cudaExternalMemoryHandleTypeD3D12Resource = 5"] - #[doc = "} cudaExternalMemoryHandleType;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " If ::cudaExternalMemoryHandleDesc::type is"] - #[doc = " ::cudaExternalMemoryHandleTypeOpaqueFd, then"] - #[doc = " ::cudaExternalMemoryHandleDesc::handle::fd must be a valid"] - #[doc = " file descriptor referencing a memory object. Ownership of"] - #[doc = " the file descriptor is transferred to the CUDA driver when the"] - #[doc = " handle is imported successfully. Performing any operations on the"] - #[doc = " file descriptor after it is imported results in undefined behavior."] - #[doc = ""] - #[doc = " If ::cudaExternalMemoryHandleDesc::type is"] - #[doc = " ::cudaExternalMemoryHandleTypeOpaqueWin32, then exactly one"] - #[doc = " of ::cudaExternalMemoryHandleDesc::handle::win32::handle and"] - #[doc = " ::cudaExternalMemoryHandleDesc::handle::win32::name must not be"] - #[doc = " NULL. 
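// A minimal usage sketch (not part of the generated bindings): timing a span of
// stream work with the event functions above. Flag value 1 is assumed to
// correspond to cudaEventBlockingSync; `issue_work` is a hypothetical closure
// that enqueues async work. Per the docs above, timing only works for events
// created without cudaEventDisableTiming, and the result is in milliseconds.
unsafe fn time_stream_work<F: FnOnce(cudaStream_t)>(stream: cudaStream_t, issue_work: F) -> f32 {
    let mut start: cudaEvent_t = ::std::ptr::null_mut();
    let mut stop: cudaEvent_t = ::std::ptr::null_mut();
    cudaEventCreate(&mut start);
    cudaEventCreateWithFlags(&mut stop, 1); // blocking sync so the host thread sleeps

    cudaEventRecord(start, stream);
    issue_work(stream);
    cudaEventRecord(stop, stream);
    cudaEventSynchronize(stop); // wait for everything between the two records

    let mut ms = 0.0f32;
    cudaEventElapsedTime(&mut ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    ms
}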
If ::cudaExternalMemoryHandleDesc::handle::win32::handle"] - #[doc = " is not NULL, then it must represent a valid shared NT handle that"] - #[doc = " references a memory object. Ownership of this handle is"] - #[doc = " not transferred to CUDA after the import operation, so the"] - #[doc = " application must release the handle using the appropriate system"] - #[doc = " call. If ::cudaExternalMemoryHandleDesc::handle::win32::name"] - #[doc = " is not NULL, then it must point to a NULL-terminated array of"] - #[doc = " UTF-16 characters that refers to a memory object."] - #[doc = ""] - #[doc = " If ::cudaExternalMemoryHandleDesc::type is"] - #[doc = " ::cudaExternalMemoryHandleTypeOpaqueWin32Kmt, then"] - #[doc = " ::cudaExternalMemoryHandleDesc::handle::win32::handle must"] - #[doc = " be non-NULL and"] - #[doc = " ::cudaExternalMemoryHandleDesc::handle::win32::name"] - #[doc = " must be NULL. The handle specified must be a globally shared KMT"] - #[doc = " handle. This handle does not hold a reference to the underlying"] - #[doc = " object, and thus will be invalid when all references to the"] - #[doc = " memory object are destroyed."] - #[doc = ""] - #[doc = " If ::cudaExternalMemoryHandleDesc::type is"] - #[doc = " ::cudaExternalMemoryHandleTypeD3D12Heap, then exactly one"] - #[doc = " of ::cudaExternalMemoryHandleDesc::handle::win32::handle and"] - #[doc = " ::cudaExternalMemoryHandleDesc::handle::win32::name must not be"] - #[doc = " NULL. If ::cudaExternalMemoryHandleDesc::handle::win32::handle"] - #[doc = " is not NULL, then it must represent a valid shared NT handle that"] - #[doc = " is returned by ID3DDevice::CreateSharedHandle when referring to a"] - #[doc = " ID3D12Heap object. This handle holds a reference to the underlying"] - #[doc = " object. If ::cudaExternalMemoryHandleDesc::handle::win32::name"] - #[doc = " is not NULL, then it must point to a NULL-terminated array of"] - #[doc = " UTF-16 characters that refers to a ID3D12Heap object."] - #[doc = ""] - #[doc = " If ::cudaExternalMemoryHandleDesc::type is"] - #[doc = " ::cudaExternalMemoryHandleTypeD3D12Resource, then exactly one"] - #[doc = " of ::cudaExternalMemoryHandleDesc::handle::win32::handle and"] - #[doc = " ::cudaExternalMemoryHandleDesc::handle::win32::name must not be"] - #[doc = " NULL. If ::cudaExternalMemoryHandleDesc::handle::win32::handle"] - #[doc = " is not NULL, then it must represent a valid shared NT handle that"] - #[doc = " is returned by ID3DDevice::CreateSharedHandle when referring to a"] - #[doc = " ID3D12Resource object. This handle holds a reference to the"] - #[doc = " underlying object. If"] - #[doc = " ::cudaExternalMemoryHandleDesc::handle::win32::name"] - #[doc = " is not NULL, then it must point to a NULL-terminated array of"] - #[doc = " UTF-16 characters that refers to a ID3D12Resource object."] - #[doc = ""] - #[doc = " The size of the memory object must be specified in"] - #[doc = " ::cudaExternalMemoryHandleDesc::size."] - #[doc = ""] - #[doc = " Specifying the flag ::cudaExternalMemoryDedicated in"] - #[doc = " ::cudaExternalMemoryHandleDesc::flags indicates that the"] - #[doc = " resource is a dedicated resource. 
The definition of what a"] - #[doc = " dedicated resource is outside the scope of this extension."] - #[doc = ""] - #[doc = " \\param extMem_out - Returned handle to an external memory object"] - #[doc = " \\param memHandleDesc - Memory import handle descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\note If the Vulkan memory imported into CUDA is mapped on the CPU then the"] - #[doc = " application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges"] - #[doc = " as well as appropriate Vulkan pipeline barriers to maintain coherence between"] - #[doc = " CPU and GPU. For more information on these APIs, please refer to \"Synchronization"] - #[doc = " and Cache Control\" chapter from Vulkan specification."] - #[doc = ""] - #[doc = " \\sa ::cudaDestroyExternalMemory,"] - #[doc = " ::cudaExternalMemoryGetMappedBuffer,"] - #[doc = " ::cudaExternalMemoryGetMappedMipmappedArray"] pub fn cudaImportExternalMemory( extMem_out: *mut cudaExternalMemory_t, memHandleDesc: *const cudaExternalMemoryHandleDesc, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Maps a buffer onto an imported memory object"] - #[doc = ""] - #[doc = " Maps a buffer onto an imported memory object and returns a device"] - #[doc = " pointer in \\p devPtr."] - #[doc = ""] - #[doc = " The properties of the buffer being mapped must be described in"] - #[doc = " \\p bufferDesc. The ::cudaExternalMemoryBufferDesc structure is"] - #[doc = " defined as follows:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef struct cudaExternalMemoryBufferDesc_st {"] - #[doc = "unsigned long long offset;"] - #[doc = "unsigned long long size;"] - #[doc = "unsigned int flags;"] - #[doc = "} cudaExternalMemoryBufferDesc;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " where ::cudaExternalMemoryBufferDesc::offset is the offset in"] - #[doc = " the memory object where the buffer's base address is."] - #[doc = " ::cudaExternalMemoryBufferDesc::size is the size of the buffer."] - #[doc = " ::cudaExternalMemoryBufferDesc::flags must be zero."] - #[doc = ""] - #[doc = " The offset and size have to be suitably aligned to match the"] - #[doc = " requirements of the external API. Mapping two buffers whose ranges"] - #[doc = " overlap may or may not result in the same virtual address being"] - #[doc = " returned for the overlapped portion. In such cases, the application"] - #[doc = " must ensure that all accesses to that region from the GPU are"] - #[doc = " volatile. Otherwise writes made via one address are not guaranteed"] - #[doc = " to be visible via the other address, even if they're issued by the"] - #[doc = " same thread. 
It is recommended that applications map the combined"] - #[doc = " range instead of mapping separate buffers and then apply the"] - #[doc = " appropriate offsets to the returned pointer to derive the"] - #[doc = " individual buffers."] - #[doc = ""] - #[doc = " The returned pointer \\p devPtr must be freed using ::cudaFree."] - #[doc = ""] - #[doc = " \\param devPtr - Returned device pointer to buffer"] - #[doc = " \\param extMem - Handle to external memory object"] - #[doc = " \\param bufferDesc - Buffer descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaImportExternalMemory"] - #[doc = " ::cudaDestroyExternalMemory,"] - #[doc = " ::cudaExternalMemoryGetMappedMipmappedArray"] pub fn cudaExternalMemoryGetMappedBuffer( devPtr: *mut *mut ::std::os::raw::c_void, extMem: cudaExternalMemory_t, @@ -15963,57 +4100,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Maps a CUDA mipmapped array onto an external memory object"] - #[doc = ""] - #[doc = " Maps a CUDA mipmapped array onto an external object and returns a"] - #[doc = " handle to it in \\p mipmap."] - #[doc = ""] - #[doc = " The properties of the CUDA mipmapped array being mapped must be"] - #[doc = " described in \\p mipmapDesc. The structure"] - #[doc = " ::cudaExternalMemoryMipmappedArrayDesc is defined as follows:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef struct cudaExternalMemoryMipmappedArrayDesc_st {"] - #[doc = "unsigned long long offset;"] - #[doc = "cudaChannelFormatDesc formatDesc;"] - #[doc = "cudaExtent extent;"] - #[doc = "unsigned int flags;"] - #[doc = "unsigned int numLevels;"] - #[doc = "} cudaExternalMemoryMipmappedArrayDesc;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " where ::cudaExternalMemoryMipmappedArrayDesc::offset is the"] - #[doc = " offset in the memory object where the base level of the mipmap"] - #[doc = " chain is."] - #[doc = " ::cudaExternalMemoryMipmappedArrayDesc::formatDesc describes the"] - #[doc = " format of the data."] - #[doc = " ::cudaExternalMemoryMipmappedArrayDesc::extent specifies the"] - #[doc = " dimensions of the base level of the mipmap chain."] - #[doc = " ::cudaExternalMemoryMipmappedArrayDesc::flags are flags associated"] - #[doc = " with CUDA mipmapped arrays. For further details, please refer to"] - #[doc = " the documentation for ::cudaMalloc3DArray. 
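// A minimal usage sketch (not part of the generated bindings): mapping a buffer
// out of an already-imported external memory object via
// cudaExternalMemoryGetMappedBuffer above. The descriptor is zero-initialised
// and only the fields named in the docs (offset, size; flags must stay 0) are
// filled in; their exact Rust field names are assumed to match the C struct.
unsafe fn map_external_buffer(
    ext_mem: cudaExternalMemory_t,
    offset: u64,
    size: u64,
) -> Result<*mut ::std::os::raw::c_void, cudaError_t> {
    let mut desc: cudaExternalMemoryBufferDesc = ::std::mem::zeroed();
    desc.offset = offset;
    desc.size = size;

    let mut dev_ptr: *mut ::std::os::raw::c_void = ::std::ptr::null_mut();
    let err = cudaExternalMemoryGetMappedBuffer(&mut dev_ptr, ext_mem, &desc);
    // Per the docs above, the returned pointer must eventually be freed with cudaFree.
    if err == 0 { Ok(dev_ptr) } else { Err(err) }
}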
Note that if the mipmapped"] - #[doc = " array is bound as a color target in the graphics API, then the flag"] - #[doc = " ::cudaArrayColorAttachment must be specified in"] - #[doc = " ::cudaExternalMemoryMipmappedArrayDesc::flags."] - #[doc = " ::cudaExternalMemoryMipmappedArrayDesc::numLevels specifies"] - #[doc = " the total number of levels in the mipmap chain."] - #[doc = ""] - #[doc = " The returned CUDA mipmapped array must be freed using ::cudaFreeMipmappedArray."] - #[doc = ""] - #[doc = " \\param mipmap - Returned CUDA mipmapped array"] - #[doc = " \\param extMem - Handle to external memory object"] - #[doc = " \\param mipmapDesc - CUDA array descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaImportExternalMemory"] - #[doc = " ::cudaDestroyExternalMemory,"] - #[doc = " ::cudaExternalMemoryGetMappedBuffer"] pub fn cudaExternalMemoryGetMappedMipmappedArray( mipmap: *mut cudaMipmappedArray_t, extMem: cudaExternalMemory_t, @@ -16021,161 +4107,17 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Destroys an external memory object."] - #[doc = ""] - #[doc = " Destroys the specified external memory object. Any existing buffers"] - #[doc = " and CUDA mipmapped arrays mapped onto this object must no longer be"] - #[doc = " used and must be explicitly freed using ::cudaFree and"] - #[doc = " ::cudaFreeMipmappedArray respectively."] - #[doc = ""] - #[doc = " \\param extMem - External memory object to be destroyed"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaImportExternalMemory"] - #[doc = " ::cudaExternalMemoryGetMappedBuffer,"] - #[doc = " ::cudaExternalMemoryGetMappedMipmappedArray"] pub fn cudaDestroyExternalMemory( extMem: cudaExternalMemory_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Imports an external semaphore"] - #[doc = ""] - #[doc = " Imports an externally allocated synchronization object and returns"] - #[doc = " a handle to that in \\p extSem_out."] - #[doc = ""] - #[doc = " The properties of the handle being imported must be described in"] - #[doc = " \\p semHandleDesc. The ::cudaExternalSemaphoreHandleDesc is defined"] - #[doc = " as follows:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef struct cudaExternalSemaphoreHandleDesc_st {"] - #[doc = "cudaExternalSemaphoreHandleType type;"] - #[doc = "union {"] - #[doc = "int fd;"] - #[doc = "struct {"] - #[doc = "void *handle;"] - #[doc = "const void *name;"] - #[doc = "} win32;"] - #[doc = "} handle;"] - #[doc = "unsigned int flags;"] - #[doc = "} cudaExternalSemaphoreHandleDesc;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " where ::cudaExternalSemaphoreHandleDesc::type specifies the type of"] - #[doc = " handle being imported. 
::cudaExternalSemaphoreHandleType is defined"] - #[doc = " as:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = "typedef enum cudaExternalSemaphoreHandleType_enum {"] - #[doc = "cudaExternalSemaphoreHandleTypeOpaqueFd = 1,"] - #[doc = "cudaExternalSemaphoreHandleTypeOpaqueWin32 = 2,"] - #[doc = "cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt = 3,"] - #[doc = "cudaExternalSemaphoreHandleTypeD3D12Fence = 4"] - #[doc = "} cudaExternalSemaphoreHandleType;"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " If ::cudaExternalSemaphoreHandleDesc::type is"] - #[doc = " ::cudaExternalSemaphoreHandleTypeOpaqueFd, then"] - #[doc = " ::cudaExternalSemaphoreHandleDesc::handle::fd must be a valid file"] - #[doc = " descriptor referencing a synchronization object. Ownership of the"] - #[doc = " file descriptor is transferred to the CUDA driver when the handle"] - #[doc = " is imported successfully. Performing any operations on the file"] - #[doc = " descriptor after it is imported results in undefined behavior."] - #[doc = ""] - #[doc = " If ::cudaExternalSemaphoreHandleDesc::type is"] - #[doc = " ::cudaExternalSemaphoreHandleTypeOpaqueWin32, then exactly one of"] - #[doc = " ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and"] - #[doc = " ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be"] - #[doc = " NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle"] - #[doc = " is not NULL, then it must represent a valid shared NT handle that"] - #[doc = " references a synchronization object. Ownership of this handle is"] - #[doc = " not transferred to CUDA after the import operation, so the"] - #[doc = " application must release the handle using the appropriate system"] - #[doc = " call. If ::cudaExternalSemaphoreHandleDesc::handle::win32::name is"] - #[doc = " not NULL, then it must name a valid synchronization object."] - #[doc = ""] - #[doc = " If ::cudaExternalSemaphoreHandleDesc::type is"] - #[doc = " ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt, then"] - #[doc = " ::cudaExternalSemaphoreHandleDesc::handle::win32::handle must be"] - #[doc = " non-NULL and ::cudaExternalSemaphoreHandleDesc::handle::win32::name"] - #[doc = " must be NULL. The handle specified must be a globally shared KMT"] - #[doc = " handle. This handle does not hold a reference to the underlying"] - #[doc = " object, and thus will be invalid when all references to the"] - #[doc = " synchronization object are destroyed."] - #[doc = ""] - #[doc = " If ::cudaExternalSemaphoreHandleDesc::type is"] - #[doc = " ::cudaExternalSemaphoreHandleTypeD3D12Fence, then exactly one of"] - #[doc = " ::cudaExternalSemaphoreHandleDesc::handle::win32::handle and"] - #[doc = " ::cudaExternalSemaphoreHandleDesc::handle::win32::name must not be"] - #[doc = " NULL. If ::cudaExternalSemaphoreHandleDesc::handle::win32::handle"] - #[doc = " is not NULL, then it must represent a valid shared NT handle that"] - #[doc = " is returned by ID3DDevice::CreateSharedHandle when referring to a"] - #[doc = " ID3D12Fence object. This handle holds a reference to the underlying"] - #[doc = " object. 
If ::cudaExternalSemaphoreHandleDesc::handle::win32::name"] - #[doc = " is not NULL, then it must name a valid synchronization object that"] - #[doc = " refers to a valid ID3D12Fence object."] - #[doc = ""] - #[doc = " \\param extSem_out - Returned handle to an external semaphore"] - #[doc = " \\param semHandleDesc - Semaphore import handle descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDestroyExternalSemaphore,"] - #[doc = " ::cudaSignalExternalSemaphoresAsync,"] - #[doc = " ::cudaWaitExternalSemaphoresAsync"] pub fn cudaImportExternalSemaphore( extSem_out: *mut cudaExternalSemaphore_t, semHandleDesc: *const cudaExternalSemaphoreHandleDesc, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Signals a set of external semaphore objects"] - #[doc = ""] - #[doc = " Enqueues a signal operation on a set of externally allocated"] - #[doc = " semaphore object in the specified stream. The operations will be"] - #[doc = " executed when all prior operations in the stream complete."] - #[doc = ""] - #[doc = " The exact semantics of signaling a semaphore depends on the type of"] - #[doc = " the object."] - #[doc = ""] - #[doc = " If the semaphore object is any one of the following types:"] - #[doc = " ::cudaExternalSemaphoreHandleTypeOpaqueFd,"] - #[doc = " ::cudaExternalSemaphoreHandleTypeOpaqueWin32,"] - #[doc = " ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt"] - #[doc = " then signaling the semaphore will set it to the signaled state."] - #[doc = ""] - #[doc = " If the semaphore object is of the type"] - #[doc = " ::cudaExternalSemaphoreHandleTypeD3D12Fence, then the"] - #[doc = " semaphore will be set to the value specified in"] - #[doc = " ::cudaExternalSemaphoreSignalParams::params::fence::value."] - #[doc = ""] - #[doc = " \\param extSemArray - Set of external semaphores to be signaled"] - #[doc = " \\param paramsArray - Array of semaphore parameters"] - #[doc = " \\param numExtSems - Number of semaphores to signal"] - #[doc = " \\param stream - Stream to enqueue the signal operations in"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaImportExternalSemaphore,"] - #[doc = " ::cudaDestroyExternalSemaphore,"] - #[doc = " ::cudaWaitExternalSemaphoresAsync"] pub fn cudaSignalExternalSemaphoresAsync( extSemArray: *const cudaExternalSemaphore_t, paramsArray: *const cudaExternalSemaphoreSignalParams, @@ -16184,45 +4126,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Waits on a set of external semaphore objects"] - #[doc = ""] - #[doc = " Enqueues a wait operation on a set of externally allocated"] - #[doc = " semaphore object in the specified stream. 
The operations will be"] - #[doc = " executed when all prior operations in the stream complete."] - #[doc = ""] - #[doc = " The exact semantics of waiting on a semaphore depends on the type"] - #[doc = " of the object."] - #[doc = ""] - #[doc = " If the semaphore object is any one of the following types:"] - #[doc = " ::cudaExternalSemaphoreHandleTypeOpaqueFd,"] - #[doc = " ::cudaExternalSemaphoreHandleTypeOpaqueWin32,"] - #[doc = " ::cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt"] - #[doc = " then waiting on the semaphore will wait until the semaphore reaches"] - #[doc = " the signaled state. The semaphore will then be reset to the"] - #[doc = " unsignaled state. Therefore for every signal operation, there can"] - #[doc = " only be one wait operation."] - #[doc = ""] - #[doc = " If the semaphore object is of the type"] - #[doc = " ::cudaExternalSemaphoreHandleTypeD3D12Fence, then waiting on"] - #[doc = " the semaphore will wait until the value of the semaphore is"] - #[doc = " greater than or equal to"] - #[doc = " ::cudaExternalSemaphoreWaitParams::params::fence::value."] - #[doc = ""] - #[doc = " \\param extSemArray - External semaphores to be waited on"] - #[doc = " \\param paramsArray - Array of semaphore parameters"] - #[doc = " \\param numExtSems - Number of semaphores to wait on"] - #[doc = " \\param stream - Stream to enqueue the wait operations in"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaImportExternalSemaphore,"] - #[doc = " ::cudaDestroyExternalSemaphore,"] - #[doc = " ::cudaSignalExternalSemaphoresAsync"] pub fn cudaWaitExternalSemaphoresAsync( extSemArray: *const cudaExternalSemaphore_t, paramsArray: *const cudaExternalSemaphoreWaitParams, @@ -16231,73 +4134,11 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Destroys an external semaphore"] - #[doc = ""] - #[doc = " Destroys an external semaphore object and releases any references"] - #[doc = " to the underlying resource. Any outstanding signals or waits must"] - #[doc = " have completed before the semaphore is destroyed."] - #[doc = ""] - #[doc = " \\param extSem - External semaphore to be destroyed"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaImportExternalSemaphore,"] - #[doc = " ::cudaSignalExternalSemaphoresAsync,"] - #[doc = " ::cudaWaitExternalSemaphoresAsync"] pub fn cudaDestroyExternalSemaphore( extSem: cudaExternalSemaphore_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Launches a device function"] - #[doc = ""] - #[doc = " The function invokes kernel \\p func on \\p gridDim (\\p gridDim.x \u{d7} \\p gridDim.y"] - #[doc = " \u{d7} \\p gridDim.z) grid of blocks. 
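// A minimal usage sketch (not part of the generated bindings): a stream-ordered
// signal of a single imported semaphore using cudaSignalExternalSemaphoresAsync
// above. The params struct is zero-initialised, which is assumed to be enough
// for the opaque-fd / Win32 semaphore types; a D3D12 fence would additionally
// need its fence value filled in.
unsafe fn signal_one_semaphore(
    sem: cudaExternalSemaphore_t,
    stream: cudaStream_t,
) -> cudaError_t {
    let params: cudaExternalSemaphoreSignalParams = ::std::mem::zeroed();
    // One semaphore, one parameter block; the signal is enqueued after all
    // work currently in `stream` has completed.
    cudaSignalExternalSemaphoresAsync(&sem, &params, 1, stream)
}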
Each block contains \\p blockDim (\\p blockDim.x \u{d7}"] - #[doc = " \\p blockDim.y \u{d7} \\p blockDim.z) threads."] - #[doc = ""] - #[doc = " If the kernel has N parameters the \\p args should point to array of N pointers."] - #[doc = " Each pointer, from args[0] to args[N - 1], point to the region"] - #[doc = " of memory from which the actual parameter will be copied."] - #[doc = ""] - #[doc = " For templated functions, pass the function symbol as follows:"] - #[doc = " func_name"] - #[doc = ""] - #[doc = " \\p sharedMem sets the amount of dynamic shared memory that will be available to"] - #[doc = " each thread block."] - #[doc = ""] - #[doc = " \\p stream specifies a stream the invocation is associated to."] - #[doc = ""] - #[doc = " \\param func - Device function symbol"] - #[doc = " \\param gridDim - Grid dimentions"] - #[doc = " \\param blockDim - Block dimentions"] - #[doc = " \\param args - Arguments"] - #[doc = " \\param sharedMem - Shared memory"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDeviceFunction,"] - #[doc = " ::cudaErrorInvalidConfiguration,"] - #[doc = " ::cudaErrorLaunchFailure,"] - #[doc = " ::cudaErrorLaunchTimeout,"] - #[doc = " ::cudaErrorLaunchOutOfResources,"] - #[doc = " ::cudaErrorSharedObjectInitFailed,"] - #[doc = " ::cudaErrorInvalidPtx,"] - #[doc = " ::cudaErrorNoKernelImageForDevice,"] - #[doc = " ::cudaErrorJitCompilerNotFound"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " \\ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) \"cudaLaunchKernel (C++ API)\","] - #[doc = " ::cuLaunchKernel"] pub fn cudaLaunchKernel( func: *const ::std::os::raw::c_void, gridDim: dim3, @@ -16308,59 +4149,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Launches a device function where thread blocks can cooperate and synchronize as they execute"] - #[doc = ""] - #[doc = " The function invokes kernel \\p func on \\p gridDim (\\p gridDim.x \u{d7} \\p gridDim.y"] - #[doc = " \u{d7} \\p gridDim.z) grid of blocks. 
Each block contains \\p blockDim (\\p blockDim.x \u{d7}"] - #[doc = " \\p blockDim.y \u{d7} \\p blockDim.z) threads."] - #[doc = ""] - #[doc = " The device on which this kernel is invoked must have a non-zero value for"] - #[doc = " the device attribute ::cudaDevAttrCooperativeLaunch."] - #[doc = ""] - #[doc = " The total number of blocks launched cannot exceed the maximum number of blocks per"] - #[doc = " multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or"] - #[doc = " ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors"] - #[doc = " as specified by the device attribute ::cudaDevAttrMultiProcessorCount."] - #[doc = ""] - #[doc = " The kernel cannot make use of CUDA dynamic parallelism."] - #[doc = ""] - #[doc = " If the kernel has N parameters the \\p args should point to array of N pointers."] - #[doc = " Each pointer, from args[0] to args[N - 1], point to the region"] - #[doc = " of memory from which the actual parameter will be copied."] - #[doc = ""] - #[doc = " For templated functions, pass the function symbol as follows:"] - #[doc = " func_name"] - #[doc = ""] - #[doc = " \\p sharedMem sets the amount of dynamic shared memory that will be available to"] - #[doc = " each thread block."] - #[doc = ""] - #[doc = " \\p stream specifies a stream the invocation is associated to."] - #[doc = ""] - #[doc = " \\param func - Device function symbol"] - #[doc = " \\param gridDim - Grid dimentions"] - #[doc = " \\param blockDim - Block dimentions"] - #[doc = " \\param args - Arguments"] - #[doc = " \\param sharedMem - Shared memory"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDeviceFunction,"] - #[doc = " ::cudaErrorInvalidConfiguration,"] - #[doc = " ::cudaErrorLaunchFailure,"] - #[doc = " ::cudaErrorLaunchTimeout,"] - #[doc = " ::cudaErrorLaunchOutOfResources,"] - #[doc = " ::cudaErrorCooperativeLaunchTooLarge,"] - #[doc = " ::cudaErrorSharedObjectInitFailed"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " \\ref ::cudaLaunchCooperativeKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) \"cudaLaunchCooperativeKernel (C++ API)\","] - #[doc = " ::cudaLaunchCooperativeKernelMultiDevice,"] - #[doc = " ::cuLaunchCooperativeKernel"] pub fn cudaLaunchCooperativeKernel( func: *const ::std::os::raw::c_void, gridDim: dim3, @@ -16371,101 +4159,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Launches device functions on multiple devices where thread blocks can cooperate and synchronize as they execute"] - #[doc = ""] - #[doc = " Invokes kernels as specified in the \\p launchParamsList array where each element"] - #[doc = " of the array specifies all the parameters required to perform a single kernel launch."] - #[doc = " These kernels can cooperate and synchronize as they execute. The size of the array is"] - #[doc = " specified by \\p numDevices."] - #[doc = ""] - #[doc = " No two kernels can be launched on the same device. All the devices targeted by this"] - #[doc = " multi-device launch must be identical. All devices must have a non-zero value for the"] - #[doc = " device attribute ::cudaDevAttrCooperativeMultiDeviceLaunch."] - #[doc = ""] - #[doc = " The same kernel must be launched on all devices. 
Note that any __device__ or __constant__"] - #[doc = " variables are independently instantiated on every device. It is the application's"] - #[doc = " responsiblity to ensure these variables are initialized and used appropriately."] - #[doc = ""] - #[doc = " The size of the grids as specified in blocks, the size of the blocks themselves and the"] - #[doc = " amount of shared memory used by each thread block must also match across all launched kernels."] - #[doc = ""] - #[doc = " The streams used to launch these kernels must have been created via either ::cudaStreamCreate"] - #[doc = " or ::cudaStreamCreateWithPriority or ::cudaStreamCreateWithPriority. The NULL stream or"] - #[doc = " ::cudaStreamLegacy or ::cudaStreamPerThread cannot be used."] - #[doc = ""] - #[doc = " The total number of blocks launched per kernel cannot exceed the maximum number of blocks"] - #[doc = " per multiprocessor as returned by ::cudaOccupancyMaxActiveBlocksPerMultiprocessor (or"] - #[doc = " ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors"] - #[doc = " as specified by the device attribute ::cudaDevAttrMultiProcessorCount. Since the"] - #[doc = " total number of blocks launched per device has to match across all devices, the maximum"] - #[doc = " number of blocks that can be launched per device will be limited by the device with the"] - #[doc = " least number of multiprocessors."] - #[doc = ""] - #[doc = " The kernel cannot make use of CUDA dynamic parallelism."] - #[doc = ""] - #[doc = " The ::cudaLaunchParams structure is defined as:"] - #[doc = " \\code"] - #[doc = "struct cudaLaunchParams"] - #[doc = "{"] - #[doc = "void *func;"] - #[doc = "dim3 gridDim;"] - #[doc = "dim3 blockDim;"] - #[doc = "void **args;"] - #[doc = "size_t sharedMem;"] - #[doc = "cudaStream_t stream;"] - #[doc = "};"] - #[doc = " \\endcode"] - #[doc = " where:"] - #[doc = " - ::cudaLaunchParams::func specifies the kernel to be launched. This same functions must"] - #[doc = " be launched on all devices. For templated functions, pass the function symbol as follows:"] - #[doc = " func_name"] - #[doc = " - ::cudaLaunchParams::gridDim specifies the width, height and depth of the grid in blocks."] - #[doc = " This must match across all kernels launched."] - #[doc = " - ::cudaLaunchParams::blockDim is the width, height and depth of each thread block. This"] - #[doc = " must match across all kernels launched."] - #[doc = " - ::cudaLaunchParams::args specifies the arguments to the kernel. If the kernel has"] - #[doc = " N parameters then ::cudaLaunchParams::args should point to array of N pointers. Each"] - #[doc = " pointer, from ::cudaLaunchParams::args[0] to ::cudaLaunchParams::args[N - 1],"] - #[doc = " point to the region of memory from which the actual parameter will be copied."] - #[doc = " - ::cudaLaunchParams::sharedMem is the dynamic shared-memory size per thread block in bytes."] - #[doc = " This must match across all kernels launched."] - #[doc = " - ::cudaLaunchParams::stream is the handle to the stream to perform the launch in. This cannot"] - #[doc = " be the NULL stream or ::cudaStreamLegacy or ::cudaStreamPerThread."] - #[doc = ""] - #[doc = " By default, the kernel won't begin execution on any GPU until all prior work in all the specified"] - #[doc = " streams has completed. This behavior can be overridden by specifying the flag"] - #[doc = " ::cudaCooperativeLaunchMultiDeviceNoPreSync. 
When this flag is specified, each kernel"] - #[doc = " will only wait for prior work in the stream corresponding to that GPU to complete before it begins"] - #[doc = " execution."] - #[doc = ""] - #[doc = " Similarly, by default, any subsequent work pushed in any of the specified streams will not begin"] - #[doc = " execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying"] - #[doc = " the flag ::cudaCooperativeLaunchMultiDeviceNoPostSync. When this flag is specified,"] - #[doc = " any subsequent work pushed in any of the specified streams will only wait for the kernel launched"] - #[doc = " on the GPU corresponding to that stream to complete before it begins execution."] - #[doc = ""] - #[doc = " \\param launchParamsList - List of launch parameters, one per device"] - #[doc = " \\param numDevices - Size of the \\p launchParamsList array"] - #[doc = " \\param flags - Flags to control launch behavior"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDeviceFunction,"] - #[doc = " ::cudaErrorInvalidConfiguration,"] - #[doc = " ::cudaErrorLaunchFailure,"] - #[doc = " ::cudaErrorLaunchTimeout,"] - #[doc = " ::cudaErrorLaunchOutOfResources,"] - #[doc = " ::cudaErrorCooperativeLaunchTooLarge,"] - #[doc = " ::cudaErrorSharedObjectInitFailed"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " \\ref ::cudaLaunchCooperativeKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) \"cudaLaunchCooperativeKernel (C++ API)\","] - #[doc = " ::cudaLaunchCooperativeKernel,"] - #[doc = " ::cuLaunchCooperativeKernelMultiDevice"] pub fn cudaLaunchCooperativeKernelMultiDevice( launchParamsList: *mut cudaLaunchParams, numDevices: ::std::os::raw::c_uint, @@ -16473,185 +4166,24 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets the preferred cache configuration for a device function"] - #[doc = ""] - #[doc = " On devices where the L1 cache and shared memory use the same hardware"] - #[doc = " resources, this sets through \\p cacheConfig the preferred cache configuration"] - #[doc = " for the function specified via \\p func. This is only a preference. The"] - #[doc = " runtime will use the requested configuration if possible, but it is free to"] - #[doc = " choose a different configuration if required to execute \\p func."] - #[doc = ""] - #[doc = " \\p func is a device function symbol and must be declared as a"] - #[doc = " \\c __global__ function. If the specified function does not exist,"] - #[doc = " then ::cudaErrorInvalidDeviceFunction is returned. 
For templated functions,"] - #[doc = " pass the function symbol as follows: func_name"] - #[doc = ""] - #[doc = " This setting does nothing on devices where the size of the L1 cache and"] - #[doc = " shared memory are fixed."] - #[doc = ""] - #[doc = " Launching a kernel with a different preference than the most recent"] - #[doc = " preference setting may insert a device-side synchronization point."] - #[doc = ""] - #[doc = " The supported cache configurations are:"] - #[doc = " - ::cudaFuncCachePreferNone: no preference for shared memory or L1 (default)"] - #[doc = " - ::cudaFuncCachePreferShared: prefer larger shared memory and smaller L1 cache"] - #[doc = " - ::cudaFuncCachePreferL1: prefer larger L1 cache and smaller shared memory"] - #[doc = " - ::cudaFuncCachePreferEqual: prefer equal size L1 cache and shared memory"] - #[doc = ""] - #[doc = " \\param func - Device function symbol"] - #[doc = " \\param cacheConfig - Requested cache configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDeviceFunction"] - #[doc = " \\notefnerr"] - #[doc = " \\note_string_api_deprecation2"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " \\ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C++ API)\","] - #[doc = " \\ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) \"cudaFuncGetAttributes (C API)\","] - #[doc = " \\ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) \"cudaLaunchKernel (C API)\","] - #[doc = " ::cudaSetDoubleForDevice,"] - #[doc = " ::cudaSetDoubleForHost,"] - #[doc = " ::cudaThreadGetCacheConfig,"] - #[doc = " ::cudaThreadSetCacheConfig,"] - #[doc = " ::cuFuncSetCacheConfig"] pub fn cudaFuncSetCacheConfig( func: *const ::std::os::raw::c_void, cacheConfig: cudaFuncCache, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets the shared memory configuration for a device function"] - #[doc = ""] - #[doc = " On devices with configurable shared memory banks, this function will"] - #[doc = " force all subsequent launches of the specified device function to have"] - #[doc = " the given shared memory bank size configuration. On any given launch of the"] - #[doc = " function, the shared memory configuration of the device will be temporarily"] - #[doc = " changed if needed to suit the function's preferred configuration. 
Changes in"] - #[doc = " shared memory configuration between subsequent launches of functions,"] - #[doc = " may introduce a device side synchronization point."] - #[doc = ""] - #[doc = " Any per-function setting of shared memory bank size set via"] - #[doc = " ::cudaFuncSetSharedMemConfig will override the device wide setting set by"] - #[doc = " ::cudaDeviceSetSharedMemConfig."] - #[doc = ""] - #[doc = " Changing the shared memory bank size will not increase shared memory usage"] - #[doc = " or affect occupancy of kernels, but may have major effects on performance."] - #[doc = " Larger bank sizes will allow for greater potential bandwidth to shared memory,"] - #[doc = " but will change what kinds of accesses to shared memory will result in bank"] - #[doc = " conflicts."] - #[doc = ""] - #[doc = " This function will do nothing on devices with fixed shared memory bank size."] - #[doc = ""] - #[doc = " For templated functions, pass the function symbol as follows:"] - #[doc = " func_name"] - #[doc = ""] - #[doc = " The supported bank configurations are:"] - #[doc = " - ::cudaSharedMemBankSizeDefault: use the device's shared memory configuration"] - #[doc = " when launching this function."] - #[doc = " - ::cudaSharedMemBankSizeFourByte: set shared memory bank width to be"] - #[doc = " four bytes natively when launching this function."] - #[doc = " - ::cudaSharedMemBankSizeEightByte: set shared memory bank width to be eight"] - #[doc = " bytes natively when launching this function."] - #[doc = ""] - #[doc = " \\param func - Device function symbol"] - #[doc = " \\param config - Requested shared memory configuration"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDeviceFunction,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " \\notefnerr"] - #[doc = " \\note_string_api_deprecation2"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceSetSharedMemConfig,"] - #[doc = " ::cudaDeviceGetSharedMemConfig,"] - #[doc = " ::cudaDeviceSetCacheConfig,"] - #[doc = " ::cudaDeviceGetCacheConfig,"] - #[doc = " ::cudaFuncSetCacheConfig,"] - #[doc = " ::cuFuncSetSharedMemConfig"] pub fn cudaFuncSetSharedMemConfig( func: *const ::std::os::raw::c_void, config: cudaSharedMemConfig, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Find out attributes for a given function"] - #[doc = ""] - #[doc = " This function obtains the attributes of a function specified via \\p func."] - #[doc = " \\p func is a device function symbol and must be declared as a"] - #[doc = " \\c __global__ function. The fetched attributes are placed in \\p attr."] - #[doc = " If the specified function does not exist, then"] - #[doc = " ::cudaErrorInvalidDeviceFunction is returned. 
For templated functions, pass"] - #[doc = " the function symbol as follows: func_name"] - #[doc = ""] - #[doc = " Note that some function attributes such as"] - #[doc = " \\ref ::cudaFuncAttributes::maxThreadsPerBlock \"maxThreadsPerBlock\""] - #[doc = " may vary based on the device that is currently being used."] - #[doc = ""] - #[doc = " \\param attr - Return pointer to function's attributes"] - #[doc = " \\param func - Device function symbol"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDeviceFunction"] - #[doc = " \\notefnerr"] - #[doc = " \\note_string_api_deprecation2"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " \\ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C API)\","] - #[doc = " \\ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, T*) \"cudaFuncGetAttributes (C++ API)\","] - #[doc = " \\ref ::cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) \"cudaLaunchKernel (C API)\","] - #[doc = " ::cudaSetDoubleForDevice,"] - #[doc = " ::cudaSetDoubleForHost,"] - #[doc = " ::cuFuncGetAttribute"] pub fn cudaFuncGetAttributes( attr: *mut cudaFuncAttributes, func: *const ::std::os::raw::c_void, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Set attributes for a given function"] - #[doc = ""] - #[doc = " This function sets the attributes of a function specified via \\p func."] - #[doc = " The parameter \\p func must be a pointer to a function that executes"] - #[doc = " on the device. The parameter specified by \\p func must be declared as a \\p __global__"] - #[doc = " function. The enumeration defined by \\p attr is set to the value defined by \\p value."] - #[doc = " If the specified function does not exist, then ::cudaErrorInvalidDeviceFunction is returned."] - #[doc = " If the specified attribute cannot be written, or if the value is incorrect,"] - #[doc = " then ::cudaErrorInvalidValue is returned."] - #[doc = ""] - #[doc = " Valid values for \\p attr are:"] - #[doc = " - ::cudaFuncAttributeMaxDynamicSharedMemorySize - The requested maximum size in bytes of dynamically-allocated shared memory. The sum of this value and the function attribute ::sharedSizeBytes"] - #[doc = " cannot exceed the device attribute ::cudaDevAttrMaxSharedMemoryPerBlockOptin. The maximal size of requestable dynamic shared memory may differ by GPU architecture."] - #[doc = " - ::cudaFuncAttributePreferredSharedMemoryCarveout - On devices where the L1 cache and shared memory use the same hardware resources,"] - #[doc = " this sets the shared memory carveout preference, in percent of the total shared memory. 
See ::cudaDevAttrMaxSharedMemoryPerMultiprocessor."] - #[doc = " This is only a hint, and the driver can choose a different ratio if required to execute the function."] - #[doc = ""] - #[doc = " \\param func - Function to get attributes of"] - #[doc = " \\param attr - Attribute to set"] - #[doc = " \\param value - Value to set"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDeviceFunction,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\ref ::cudaLaunchKernel(const T *func, dim3 gridDim, dim3 blockDim, void **args, size_t sharedMem, cudaStream_t stream) \"cudaLaunchKernel (C++ API)\","] - #[doc = " \\ref ::cudaFuncSetCacheConfig(T*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C++ API)\","] - #[doc = " \\ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) \"cudaFuncGetAttributes (C API)\","] - #[doc = " ::cudaSetDoubleForDevice,"] - #[doc = " ::cudaSetDoubleForHost"] pub fn cudaFuncSetAttribute( func: *const ::std::os::raw::c_void, attr: cudaFuncAttribute, @@ -16659,114 +4191,12 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Converts a double argument to be executed on a device"] - #[doc = ""] - #[doc = " \\param d - Double to convert"] - #[doc = ""] - #[doc = " \\deprecated This function is deprecated as of CUDA 7.5"] - #[doc = ""] - #[doc = " Converts the double value of \\p d to an internal float representation if"] - #[doc = " the device does not support double arithmetic. If the device does natively"] - #[doc = " support doubles, then this function does nothing."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " \\ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C API)\","] - #[doc = " \\ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) \"cudaFuncGetAttributes (C API)\","] - #[doc = " ::cudaSetDoubleForHost"] pub fn cudaSetDoubleForDevice(d: *mut f64) -> cudaError_t; } extern "C" { - #[doc = " \\brief Converts a double argument after execution on a device"] - #[doc = ""] - #[doc = " \\deprecated This function is deprecated as of CUDA 7.5"] - #[doc = ""] - #[doc = " Converts the double value of \\p d from a potentially internal float"] - #[doc = " representation if the device does not support double arithmetic. If the"] - #[doc = " device does natively support doubles, then this function does nothing."] - #[doc = ""] - #[doc = " \\param d - Double to convert"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " \\ref ::cudaFuncSetCacheConfig(const void*, enum cudaFuncCache) \"cudaFuncSetCacheConfig (C API)\","] - #[doc = " \\ref ::cudaFuncGetAttributes(struct cudaFuncAttributes*, const void*) \"cudaFuncGetAttributes (C API)\","] - #[doc = " ::cudaSetDoubleForDevice"] pub fn cudaSetDoubleForHost(d: *mut f64) -> cudaError_t; } extern "C" { - #[doc = " \\brief Enqueues a host function call in a stream"] - #[doc = ""] - #[doc = " Enqueues a host function to run in a stream. The function will be called"] - #[doc = " after currently enqueued work and will block work added after it."] - #[doc = ""] - #[doc = " The host function must not make any CUDA API calls. 
Attempting to use a"] - #[doc = " CUDA API may result in ::cudaErrorNotPermitted, but this is not required."] - #[doc = " The host function must not perform any synchronization that may depend on"] - #[doc = " outstanding CUDA work not mandated to run earlier. Host functions without a"] - #[doc = " mandated order (such as in independent streams) execute in undefined order"] - #[doc = " and may be serialized."] - #[doc = ""] - #[doc = " For the purposes of Unified Memory, execution makes a number of guarantees:"] - #[doc = "
    "] - #[doc = "
  • The stream is considered idle for the duration of the function's"] - #[doc = " execution. Thus, for example, the function may always use memory attached"] - #[doc = " to the stream it was enqueued in.
  • "] - #[doc = "
  • The start of execution of the function has the same effect as"] - #[doc = " synchronizing an event recorded in the same stream immediately prior to"] - #[doc = " the function. It thus synchronizes streams which have been \"joined\""] - #[doc = " prior to the function.
  • "] - #[doc = "
  • Adding device work to any stream does not have the effect of making"] - #[doc = " the stream active until all preceding host functions and stream callbacks"] - #[doc = " have executed. Thus, for"] - #[doc = " example, a function might use global attached memory even if work has"] - #[doc = " been added to another stream, if the work has been ordered behind the"] - #[doc = " function call with an event.
  • "] - #[doc = "
  • Completion of the function does not cause a stream to become"] - #[doc = " active except as described above. The stream will remain idle"] - #[doc = " if no device work follows the function, and will remain idle across"] - #[doc = " consecutive host functions or stream callbacks without device work in"] - #[doc = " between. Thus, for example,"] - #[doc = " stream synchronization can be done by signaling from a host function at the"] - #[doc = " end of the stream.
  • "] - #[doc = "
"] - #[doc = ""] - #[doc = " Note that, in constrast to ::cuStreamAddCallback, the function will not be"] - #[doc = " called in the event of an error in the CUDA context."] - #[doc = ""] - #[doc = " \\param hStream - Stream to enqueue function call in"] - #[doc = " \\param fn - The function to call once preceding stream operations are complete"] - #[doc = " \\param userData - User-specified data to be passed to the function"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorNotSupported"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaStreamCreate,"] - #[doc = " ::cudaStreamQuery,"] - #[doc = " ::cudaStreamSynchronize,"] - #[doc = " ::cudaStreamWaitEvent,"] - #[doc = " ::cudaStreamDestroy,"] - #[doc = " ::cudaMallocManaged,"] - #[doc = " ::cudaStreamAttachMemAsync,"] - #[doc = " ::cudaStreamAddCallback,"] - #[doc = " ::cuLaunchHostFunc"] pub fn cudaLaunchHostFunc( stream: cudaStream_t, fn_: cudaHostFn_t, @@ -16774,32 +4204,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns occupancy for a device function"] - #[doc = ""] - #[doc = " Returns in \\p *numBlocks the maximum number of active blocks per"] - #[doc = " streaming multiprocessor for the device function."] - #[doc = ""] - #[doc = " \\param numBlocks - Returned occupancy"] - #[doc = " \\param func - Kernel function for which occupancy is calculated"] - #[doc = " \\param blockSize - Block size the kernel is intended to be launched with"] - #[doc = " \\param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDevice,"] - #[doc = " ::cudaErrorInvalidDeviceFunction,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorUnknown,"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags,"] - #[doc = " \\ref ::cudaOccupancyMaxPotentialBlockSize(int*, int*, T, size_t, int) \"cudaOccupancyMaxPotentialBlockSize (C++ API)\","] - #[doc = " \\ref ::cudaOccupancyMaxPotentialBlockSizeWithFlags(int*, int*, T, size_t, int, unsigned int) \"cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API)\","] - #[doc = " \\ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMem(int*, int*, T, UnaryFunction, int) \"cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)\","] - #[doc = " \\ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(int*, int*, T, UnaryFunction, int, unsigned int) \"cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API)\","] - #[doc = " ::cuOccupancyMaxActiveBlocksPerMultiprocessor"] pub fn cudaOccupancyMaxActiveBlocksPerMultiprocessor( numBlocks: *mut ::std::os::raw::c_int, func: *const ::std::os::raw::c_void, @@ -16808,46 +4212,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns occupancy for a device function with the specified flags"] - #[doc = ""] - #[doc = " Returns in \\p *numBlocks the maximum number of active blocks per"] - #[doc = " streaming multiprocessor for the device function."] - #[doc = ""] - #[doc = " The \\p flags parameter controls how special cases are handled. 
Valid flags include:"] - #[doc = ""] - #[doc = " - ::cudaOccupancyDefault: keeps the default behavior as"] - #[doc = " ::cudaOccupancyMaxActiveBlocksPerMultiprocessor"] - #[doc = ""] - #[doc = " - ::cudaOccupancyDisableCachingOverride: This flag suppresses the default behavior"] - #[doc = " on platform where global caching affects occupancy. On such platforms, if caching"] - #[doc = " is enabled, but per-block SM resource usage would result in zero occupancy, the"] - #[doc = " occupancy calculator will calculate the occupancy as if caching is disabled."] - #[doc = " Setting this flag makes the occupancy calculator to return 0 in such cases."] - #[doc = " More information can be found about this feature in the \"Unified L1/Texture Cache\""] - #[doc = " section of the Maxwell tuning guide."] - #[doc = ""] - #[doc = " \\param numBlocks - Returned occupancy"] - #[doc = " \\param func - Kernel function for which occupancy is calculated"] - #[doc = " \\param blockSize - Block size the kernel is intended to be launched with"] - #[doc = " \\param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes"] - #[doc = " \\param flags - Requested behavior for the occupancy calculator"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDevice,"] - #[doc = " ::cudaErrorInvalidDeviceFunction,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorUnknown,"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaOccupancyMaxActiveBlocksPerMultiprocessor,"] - #[doc = " \\ref ::cudaOccupancyMaxPotentialBlockSize(int*, int*, T, size_t, int) \"cudaOccupancyMaxPotentialBlockSize (C++ API)\","] - #[doc = " \\ref ::cudaOccupancyMaxPotentialBlockSizeWithFlags(int*, int*, T, size_t, int, unsigned int) \"cudaOccupancyMaxPotentialBlockSizeWithFlags (C++ API)\","] - #[doc = " \\ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMem(int*, int*, T, UnaryFunction, int) \"cudaOccupancyMaxPotentialBlockSizeVariableSMem (C++ API)\","] - #[doc = " \\ref ::cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags(int*, int*, T, UnaryFunction, int, unsigned int) \"cudaOccupancyMaxPotentialBlockSizeVariableSMemWithFlags (C++ API)\","] - #[doc = " ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags"] pub fn cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags( numBlocks: *mut ::std::os::raw::c_int, func: *const ::std::os::raw::c_void, @@ -16857,105 +4221,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Allocates memory that will be automatically managed by the Unified Memory system"] - #[doc = ""] - #[doc = " Allocates \\p size bytes of managed memory on the device and returns in"] - #[doc = " \\p *devPtr a pointer to the allocated memory. If the device doesn't support"] - #[doc = " allocating managed memory, ::cudaErrorNotSupported is returned. Support"] - #[doc = " for managed memory can be queried using the device attribute"] - #[doc = " ::cudaDevAttrManagedMemory. The allocated memory is suitably"] - #[doc = " aligned for any kind of variable. The memory is not cleared. If \\p size"] - #[doc = " is 0, ::cudaMallocManaged returns ::cudaErrorInvalidValue. 
The pointer"] - #[doc = " is valid on the CPU and on all GPUs in the system that support managed memory."] - #[doc = " All accesses to this pointer must obey the Unified Memory programming model."] - #[doc = ""] - #[doc = " \\p flags specifies the default stream association for this allocation."] - #[doc = " \\p flags must be one of ::cudaMemAttachGlobal or ::cudaMemAttachHost. The"] - #[doc = " default value for \\p flags is ::cudaMemAttachGlobal."] - #[doc = " If ::cudaMemAttachGlobal is specified, then this memory is accessible from"] - #[doc = " any stream on any device. If ::cudaMemAttachHost is specified, then the"] - #[doc = " allocation should not be accessed from devices that have a zero value for the"] - #[doc = " device attribute ::cudaDevAttrConcurrentManagedAccess; an explicit call to"] - #[doc = " ::cudaStreamAttachMemAsync will be required to enable access on such devices."] - #[doc = ""] - #[doc = " If the association is later changed via ::cudaStreamAttachMemAsync to"] - #[doc = " a single stream, the default association, as specifed during ::cudaMallocManaged,"] - #[doc = " is restored when that stream is destroyed. For __managed__ variables, the"] - #[doc = " default association is always ::cudaMemAttachGlobal. Note that destroying a"] - #[doc = " stream is an asynchronous operation, and as a result, the change to default"] - #[doc = " association won't happen until all work in the stream has completed."] - #[doc = ""] - #[doc = " Memory allocated with ::cudaMallocManaged should be released with ::cudaFree."] - #[doc = ""] - #[doc = " Device memory oversubscription is possible for GPUs that have a non-zero value for the"] - #[doc = " device attribute ::cudaDevAttrConcurrentManagedAccess. Managed memory on"] - #[doc = " such GPUs may be evicted from device memory to host memory at any time by the Unified"] - #[doc = " Memory driver in order to make room for other allocations."] - #[doc = ""] - #[doc = " In a multi-GPU system where all GPUs have a non-zero value for the device attribute"] - #[doc = " ::cudaDevAttrConcurrentManagedAccess, managed memory may not be populated when this"] - #[doc = " API returns and instead may be populated on access. In such systems, managed memory can"] - #[doc = " migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to"] - #[doc = " maintain data locality and prevent excessive page faults to the extent possible. The application"] - #[doc = " can also guide the driver about memory usage patterns via ::cudaMemAdvise. The application"] - #[doc = " can also explicitly migrate memory to a desired processor's memory via"] - #[doc = " ::cudaMemPrefetchAsync."] - #[doc = ""] - #[doc = " In a multi-GPU system where all of the GPUs have a zero value for the device attribute"] - #[doc = " ::cudaDevAttrConcurrentManagedAccess and all the GPUs have peer-to-peer support"] - #[doc = " with each other, the physical storage for managed memory is created on the GPU which is active"] - #[doc = " at the time ::cudaMallocManaged is called. All other GPUs will reference the data at reduced"] - #[doc = " bandwidth via peer mappings over the PCIe bus. 
The Unified Memory driver does not migrate"] - #[doc = " memory among such GPUs."] - #[doc = ""] - #[doc = " In a multi-GPU system where not all GPUs have peer-to-peer support with each other and"] - #[doc = " where the value of the device attribute ::cudaDevAttrConcurrentManagedAccess"] - #[doc = " is zero for at least one of those GPUs, the location chosen for physical storage of managed"] - #[doc = " memory is system-dependent."] - #[doc = " - On Linux, the location chosen will be device memory as long as the current set of active"] - #[doc = " contexts are on devices that either have peer-to-peer support with each other or have a"] - #[doc = " non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess."] - #[doc = " If there is an active context on a GPU that does not have a non-zero value for that device"] - #[doc = " attribute and it does not have peer-to-peer support with the other devices that have active"] - #[doc = " contexts on them, then the location for physical storage will be 'zero-copy' or host memory."] - #[doc = " Note that this means that managed memory that is located in device memory is migrated to"] - #[doc = " host memory if a new context is created on a GPU that doesn't have a non-zero value for"] - #[doc = " the device attribute and does not support peer-to-peer with at least one of the other devices"] - #[doc = " that has an active context. This in turn implies that context creation may fail if there is"] - #[doc = " insufficient host memory to migrate all managed allocations."] - #[doc = " - On Windows, the physical storage is always created in 'zero-copy' or host memory."] - #[doc = " All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these"] - #[doc = " circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to"] - #[doc = " restrict CUDA to only use those GPUs that have peer-to-peer support."] - #[doc = " Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a non-zero"] - #[doc = " value to force the driver to always use device memory for physical storage."] - #[doc = " When this environment variable is set to a non-zero value, all devices used in"] - #[doc = " that process that support managed memory have to be peer-to-peer compatible"] - #[doc = " with each other. The error ::cudaErrorInvalidDevice will be returned if a device"] - #[doc = " that supports managed memory is used and it is not peer-to-peer compatible with"] - #[doc = " any of the other managed memory supporting devices that were previously used in"] - #[doc = " that process, even if ::cudaDeviceReset has been called on those devices. 
These"] - #[doc = " environment variables are described in the CUDA programming guide under the"] - #[doc = " \"CUDA environment variables\" section."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to allocated device memory"] - #[doc = " \\param size - Requested allocation size in bytes"] - #[doc = " \\param flags - Must be either ::cudaMemAttachGlobal or ::cudaMemAttachHost (defaults to ::cudaMemAttachGlobal)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorMemoryAllocation,"] - #[doc = " ::cudaErrorNotSupported,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray,"] - #[doc = " ::cudaMalloc3D, ::cudaMalloc3DArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaHostAlloc, ::cudaDeviceGetAttribute, ::cudaStreamAttachMemAsync,"] - #[doc = " ::cuMemAllocManaged"] pub fn cudaMallocManaged( devPtr: *mut *mut ::std::os::raw::c_void, size: usize, @@ -16963,112 +4228,18 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Allocate memory on the device"] - #[doc = ""] - #[doc = " Allocates \\p size bytes of linear memory on the device and returns in"] - #[doc = " \\p *devPtr a pointer to the allocated memory. The allocated memory is"] - #[doc = " suitably aligned for any kind of variable. The memory is not cleared."] - #[doc = " ::cudaMalloc() returns ::cudaErrorMemoryAllocation in case of failure."] - #[doc = ""] - #[doc = " The device version of ::cudaFree cannot be used with a \\p *devPtr"] - #[doc = " allocated using the host API, and vice versa."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to allocated device memory"] - #[doc = " \\param size - Requested allocation size in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray,"] - #[doc = " ::cudaMalloc3D, ::cudaMalloc3DArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaHostAlloc,"] - #[doc = " ::cuMemAlloc"] pub fn cudaMalloc( devPtr: *mut *mut ::std::os::raw::c_void, size: usize, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Allocates page-locked memory on the host"] - #[doc = ""] - #[doc = " Allocates \\p size bytes of host memory that is page-locked and accessible"] - #[doc = " to the device. The driver tracks the virtual memory ranges allocated with"] - #[doc = " this function and automatically accelerates calls to functions such as"] - #[doc = " ::cudaMemcpy*(). Since the memory can be accessed directly by the device,"] - #[doc = " it can be read or written with much higher bandwidth than pageable memory"] - #[doc = " obtained with functions such as ::malloc(). Allocating excessive amounts of"] - #[doc = " memory with ::cudaMallocHost() may degrade system performance, since it"] - #[doc = " reduces the amount of memory available to the system for paging. 
As a"] - #[doc = " result, this function is best used sparingly to allocate staging areas for"] - #[doc = " data exchange between host and device."] - #[doc = ""] - #[doc = " \\param ptr - Pointer to allocated host memory"] - #[doc = " \\param size - Requested allocation size in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc, ::cudaMallocPitch, ::cudaMallocArray, ::cudaMalloc3D,"] - #[doc = " ::cudaMalloc3DArray, ::cudaHostAlloc, ::cudaFree, ::cudaFreeArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t, unsigned int) \"cudaMallocHost (C++ API)\","] - #[doc = " ::cudaFreeHost, ::cudaHostAlloc,"] - #[doc = " ::cuMemAllocHost"] pub fn cudaMallocHost( ptr: *mut *mut ::std::os::raw::c_void, size: usize, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Allocates pitched memory on the device"] - #[doc = ""] - #[doc = " Allocates at least \\p width (in bytes) * \\p height bytes of linear memory"] - #[doc = " on the device and returns in \\p *devPtr a pointer to the allocated memory."] - #[doc = " The function may pad the allocation to ensure that corresponding pointers"] - #[doc = " in any given row will continue to meet the alignment requirements for"] - #[doc = " coalescing as the address is updated from row to row. The pitch returned in"] - #[doc = " \\p *pitch by ::cudaMallocPitch() is the width in bytes of the allocation."] - #[doc = " The intended usage of \\p pitch is as a separate parameter of the allocation,"] - #[doc = " used to compute addresses within the 2D array. Given the row and column of"] - #[doc = " an array element of type \\p T, the address is computed as:"] - #[doc = " \\code"] - #[doc = "T* pElement = (T*)((char*)BaseAddress + Row * pitch) + Column;"] - #[doc = "\\endcode"] - #[doc = ""] - #[doc = " For allocations of 2D arrays, it is recommended that programmers consider"] - #[doc = " performing pitch allocations using ::cudaMallocPitch(). 
Due to pitch"] - #[doc = " alignment restrictions in the hardware, this is especially true if the"] - #[doc = " application will be performing 2D memory copies between different regions"] - #[doc = " of device memory (whether linear memory or CUDA arrays)."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to allocated pitched device memory"] - #[doc = " \\param pitch - Pitch for allocation"] - #[doc = " \\param width - Requested pitched allocation width (in bytes)"] - #[doc = " \\param height - Requested pitched allocation height"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc, ::cudaFree, ::cudaMallocArray, ::cudaFreeArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaMalloc3D, ::cudaMalloc3DArray,"] - #[doc = " ::cudaHostAlloc,"] - #[doc = " ::cuMemAllocPitch"] pub fn cudaMallocPitch( devPtr: *mut *mut ::std::os::raw::c_void, pitch: *mut usize, @@ -17077,48 +4248,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Allocate an array on the device"] - #[doc = ""] - #[doc = " Allocates a CUDA array according to the ::cudaChannelFormatDesc structure"] - #[doc = " \\p desc and returns a handle to the new CUDA array in \\p *array."] - #[doc = ""] - #[doc = " The ::cudaChannelFormatDesc is defined as:"] - #[doc = " \\code"] - #[doc = "struct cudaChannelFormatDesc {"] - #[doc = "int x, y, z, w;"] - #[doc = "enum cudaChannelFormatKind f;"] - #[doc = "};"] - #[doc = "\\endcode"] - #[doc = " where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned,"] - #[doc = " ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat."] - #[doc = ""] - #[doc = " The \\p flags parameter enables different options to be specified that affect"] - #[doc = " the allocation, as follows."] - #[doc = " - ::cudaArrayDefault: This flag's value is defined to be 0 and provides default array allocation"] - #[doc = " - ::cudaArraySurfaceLoadStore: Allocates an array that can be read from or written to using a surface reference"] - #[doc = " - ::cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the array."] - #[doc = ""] - #[doc = " \\p width and \\p height must meet certain size requirements. 
See ::cudaMalloc3DArray() for more details."] - #[doc = ""] - #[doc = " \\param array - Pointer to allocated array in device memory"] - #[doc = " \\param desc - Requested channel format"] - #[doc = " \\param width - Requested array allocation width"] - #[doc = " \\param height - Requested array allocation height"] - #[doc = " \\param flags - Requested properties of allocated array"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaFreeArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaMalloc3D, ::cudaMalloc3DArray,"] - #[doc = " ::cudaHostAlloc,"] - #[doc = " ::cuArrayCreate"] pub fn cudaMallocArray( array: *mut cudaArray_t, desc: *const cudaChannelFormatDesc, @@ -17128,163 +4257,20 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Frees memory on the device"] - #[doc = ""] - #[doc = " Frees the memory space pointed to by \\p devPtr, which must have been"] - #[doc = " returned by a previous call to ::cudaMalloc() or ::cudaMallocPitch()."] - #[doc = " Otherwise, or if ::cudaFree(\\p devPtr) has already been called before,"] - #[doc = " an error is returned. If \\p devPtr is 0, no operation is performed."] - #[doc = " ::cudaFree() returns ::cudaErrorValue in case of failure."] - #[doc = ""] - #[doc = " The device version of ::cudaFree cannot be used with a \\p *devPtr"] - #[doc = " allocated using the host API, and vice versa."] - #[doc = ""] - #[doc = " \\param devPtr - Device pointer to memory to free"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc, ::cudaMallocPitch, ::cudaMallocArray, ::cudaFreeArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaMalloc3D, ::cudaMalloc3DArray,"] - #[doc = " ::cudaHostAlloc,"] - #[doc = " ::cuMemFree"] pub fn cudaFree(devPtr: *mut ::std::os::raw::c_void) -> cudaError_t; } extern "C" { - #[doc = " \\brief Frees page-locked memory"] - #[doc = ""] - #[doc = " Frees the memory space pointed to by \\p hostPtr, which must have been"] - #[doc = " returned by a previous call to ::cudaMallocHost() or ::cudaHostAlloc()."] - #[doc = ""] - #[doc = " \\param ptr - Pointer to memory to free"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray,"] - #[doc = " ::cudaFreeArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaHostAlloc,"] - #[doc = " ::cuMemFreeHost"] pub fn cudaFreeHost(ptr: *mut ::std::os::raw::c_void) -> cudaError_t; } extern "C" { - #[doc = " \\brief Frees an array on the device"] - #[doc = ""] - #[doc = " Frees the CUDA array \\p array, which must have been returned by a"] - #[doc = " previous call to ::cudaMallocArray(). 
If \\p devPtr is 0,"] - #[doc = " no operation is performed."] - #[doc = ""] - #[doc = " \\param array - Pointer to array to free"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaHostAlloc,"] - #[doc = " ::cuArrayDestroy"] pub fn cudaFreeArray(array: cudaArray_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Frees a mipmapped array on the device"] - #[doc = ""] - #[doc = " Frees the CUDA mipmapped array \\p mipmappedArray, which must have been"] - #[doc = " returned by a previous call to ::cudaMallocMipmappedArray(). If \\p devPtr"] - #[doc = " is 0, no operation is performed."] - #[doc = ""] - #[doc = " \\param mipmappedArray - Pointer to mipmapped array to free"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc, ::cudaMallocPitch, ::cudaFree, ::cudaMallocArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaHostAlloc,"] - #[doc = " ::cuMipmappedArrayDestroy"] pub fn cudaFreeMipmappedArray( mipmappedArray: cudaMipmappedArray_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Allocates page-locked memory on the host"] - #[doc = ""] - #[doc = " Allocates \\p size bytes of host memory that is page-locked and accessible"] - #[doc = " to the device. The driver tracks the virtual memory ranges allocated with"] - #[doc = " this function and automatically accelerates calls to functions such as"] - #[doc = " ::cudaMemcpy(). Since the memory can be accessed directly by the device, it"] - #[doc = " can be read or written with much higher bandwidth than pageable memory"] - #[doc = " obtained with functions such as ::malloc(). Allocating excessive amounts of"] - #[doc = " pinned memory may degrade system performance, since it reduces the amount"] - #[doc = " of memory available to the system for paging. As a result, this function is"] - #[doc = " best used sparingly to allocate staging areas for data exchange between host"] - #[doc = " and device."] - #[doc = ""] - #[doc = " The \\p flags parameter enables different options to be specified that affect"] - #[doc = " the allocation, as follows."] - #[doc = " - ::cudaHostAllocDefault: This flag's value is defined to be 0 and causes"] - #[doc = " ::cudaHostAlloc() to emulate ::cudaMallocHost()."] - #[doc = " - ::cudaHostAllocPortable: The memory returned by this call will be"] - #[doc = " considered as pinned memory by all CUDA contexts, not just the one that"] - #[doc = " performed the allocation."] - #[doc = " - ::cudaHostAllocMapped: Maps the allocation into the CUDA address space."] - #[doc = " The device pointer to the memory may be obtained by calling"] - #[doc = " ::cudaHostGetDevicePointer()."] - #[doc = " - ::cudaHostAllocWriteCombined: Allocates the memory as write-combined (WC)."] - #[doc = " WC memory can be transferred across the PCI Express bus more quickly on some"] - #[doc = " system configurations, but cannot be read efficiently by most CPUs. 
WC"] - #[doc = " memory is a good option for buffers that will be written by the CPU and read"] - #[doc = " by the device via mapped pinned memory or host->device transfers."] - #[doc = ""] - #[doc = " All of these flags are orthogonal to one another: a developer may allocate"] - #[doc = " memory that is portable, mapped and/or write-combined with no restrictions."] - #[doc = ""] - #[doc = " In order for the ::cudaHostAllocMapped flag to have any effect, the CUDA context"] - #[doc = " must support the ::cudaDeviceMapHost flag, which can be checked via"] - #[doc = " ::cudaGetDeviceFlags(). The ::cudaDeviceMapHost flag is implicitly set for"] - #[doc = " contexts created via the runtime API."] - #[doc = ""] - #[doc = " The ::cudaHostAllocMapped flag may be specified on CUDA contexts for devices"] - #[doc = " that do not support mapped pinned memory. The failure is deferred to"] - #[doc = " ::cudaHostGetDevicePointer() because the memory may be mapped into other"] - #[doc = " CUDA contexts via the ::cudaHostAllocPortable flag."] - #[doc = ""] - #[doc = " Memory allocated by this function must be freed with ::cudaFreeHost()."] - #[doc = ""] - #[doc = " \\param pHost - Device pointer to allocated memory"] - #[doc = " \\param size - Requested allocation size in bytes"] - #[doc = " \\param flags - Requested properties of allocated memory"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaSetDeviceFlags,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost,"] - #[doc = " ::cudaGetDeviceFlags,"] - #[doc = " ::cuMemHostAlloc"] pub fn cudaHostAlloc( pHost: *mut *mut ::std::os::raw::c_void, size: usize, @@ -17292,86 +4278,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Registers an existing host memory range for use by CUDA"] - #[doc = ""] - #[doc = " Page-locks the memory range specified by \\p ptr and \\p size and maps it"] - #[doc = " for the device(s) as specified by \\p flags. This memory range also is added"] - #[doc = " to the same tracking mechanism as ::cudaHostAlloc() to automatically accelerate"] - #[doc = " calls to functions such as ::cudaMemcpy(). Since the memory can be accessed"] - #[doc = " directly by the device, it can be read or written with much higher bandwidth"] - #[doc = " than pageable memory that has not been registered. Page-locking excessive"] - #[doc = " amounts of memory may degrade system performance, since it reduces the amount"] - #[doc = " of memory available to the system for paging. As a result, this function is"] - #[doc = " best used sparingly to register staging areas for data exchange between"] - #[doc = " host and device."] - #[doc = ""] - #[doc = " ::cudaHostRegister is supported only on I/O coherent devices that have a non-zero"] - #[doc = " value for the device attribute ::cudaDevAttrHostRegisterSupported."] - #[doc = ""] - #[doc = " The \\p flags parameter enables different options to be specified that"] - #[doc = " affect the allocation, as follows."] - #[doc = ""] - #[doc = " - ::cudaHostRegisterDefault: On a system with unified virtual addressing,"] - #[doc = " the memory will be both mapped and portable. 
On a system with no unified"] - #[doc = " virtual addressing, the memory will be neither mapped nor portable."] - #[doc = ""] - #[doc = " - ::cudaHostRegisterPortable: The memory returned by this call will be"] - #[doc = " considered as pinned memory by all CUDA contexts, not just the one that"] - #[doc = " performed the allocation."] - #[doc = ""] - #[doc = " - ::cudaHostRegisterMapped: Maps the allocation into the CUDA address"] - #[doc = " space. The device pointer to the memory may be obtained by calling"] - #[doc = " ::cudaHostGetDevicePointer()."] - #[doc = ""] - #[doc = " - ::cudaHostRegisterIoMemory: The passed memory pointer is treated as"] - #[doc = " pointing to some memory-mapped I/O space, e.g. belonging to a"] - #[doc = " third-party PCIe device, and it will marked as non cache-coherent and"] - #[doc = " contiguous."] - #[doc = ""] - #[doc = " All of these flags are orthogonal to one another: a developer may page-lock"] - #[doc = " memory that is portable or mapped with no restrictions."] - #[doc = ""] - #[doc = " The CUDA context must have been created with the ::cudaMapHost flag in"] - #[doc = " order for the ::cudaHostRegisterMapped flag to have any effect."] - #[doc = ""] - #[doc = " The ::cudaHostRegisterMapped flag may be specified on CUDA contexts for"] - #[doc = " devices that do not support mapped pinned memory. The failure is deferred"] - #[doc = " to ::cudaHostGetDevicePointer() because the memory may be mapped into"] - #[doc = " other CUDA contexts via the ::cudaHostRegisterPortable flag."] - #[doc = ""] - #[doc = " For devices that have a non-zero value for the device attribute"] - #[doc = " ::cudaDevAttrCanUseHostPointerForRegisteredMem, the memory"] - #[doc = " can also be accessed from the device using the host pointer \\p ptr."] - #[doc = " The device pointer returned by ::cudaHostGetDevicePointer() may or may not"] - #[doc = " match the original host pointer \\p ptr and depends on the devices visible to the"] - #[doc = " application. If all devices visible to the application have a non-zero value for the"] - #[doc = " device attribute, the device pointer returned by ::cudaHostGetDevicePointer()"] - #[doc = " will match the original pointer \\p ptr. If any device visible to the application"] - #[doc = " has a zero value for the device attribute, the device pointer returned by"] - #[doc = " ::cudaHostGetDevicePointer() will not match the original host pointer \\p ptr,"] - #[doc = " but it will be suitable for use on all devices provided Unified Virtual Addressing"] - #[doc = " is enabled. In such systems, it is valid to access the memory using either pointer"] - #[doc = " on devices that have a non-zero value for the device attribute. 
Note however that"] - #[doc = " such devices should access the memory using only of the two pointers and not both."] - #[doc = ""] - #[doc = " The memory page-locked by this function must be unregistered with ::cudaHostUnregister()."] - #[doc = ""] - #[doc = " \\param ptr - Host pointer to memory to page-lock"] - #[doc = " \\param size - Size in bytes of the address range to page-lock in bytes"] - #[doc = " \\param flags - Flags for allocation request"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation,"] - #[doc = " ::cudaErrorHostMemoryAlreadyRegistered,"] - #[doc = " ::cudaErrorNotSupported"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaHostUnregister, ::cudaHostGetFlags, ::cudaHostGetDevicePointer,"] - #[doc = " ::cuMemHostRegister"] pub fn cudaHostRegister( ptr: *mut ::std::os::raw::c_void, size: usize, @@ -17379,69 +4285,9 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Unregisters a memory range that was registered with cudaHostRegister"] - #[doc = ""] - #[doc = " Unmaps the memory range whose base address is specified by \\p ptr, and makes"] - #[doc = " it pageable again."] - #[doc = ""] - #[doc = " The base address must be the same one specified to ::cudaHostRegister()."] - #[doc = ""] - #[doc = " \\param ptr - Host pointer to memory to unregister"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorHostMemoryNotRegistered"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaHostUnregister,"] - #[doc = " ::cuMemHostUnregister"] pub fn cudaHostUnregister(ptr: *mut ::std::os::raw::c_void) -> cudaError_t; } extern "C" { - #[doc = " \\brief Passes back device pointer of mapped host memory allocated by"] - #[doc = " cudaHostAlloc or registered by cudaHostRegister"] - #[doc = ""] - #[doc = " Passes back the device pointer corresponding to the mapped, pinned host"] - #[doc = " buffer allocated by ::cudaHostAlloc() or registered by ::cudaHostRegister()."] - #[doc = ""] - #[doc = " ::cudaHostGetDevicePointer() will fail if the ::cudaDeviceMapHost flag was"] - #[doc = " not specified before deferred context creation occurred, or if called on a"] - #[doc = " device that does not support mapped, pinned memory."] - #[doc = ""] - #[doc = " For devices that have a non-zero value for the device attribute"] - #[doc = " ::cudaDevAttrCanUseHostPointerForRegisteredMem, the memory"] - #[doc = " can also be accessed from the device using the host pointer \\p pHost."] - #[doc = " The device pointer returned by ::cudaHostGetDevicePointer() may or may not"] - #[doc = " match the original host pointer \\p pHost and depends on the devices visible to the"] - #[doc = " application. If all devices visible to the application have a non-zero value for the"] - #[doc = " device attribute, the device pointer returned by ::cudaHostGetDevicePointer()"] - #[doc = " will match the original pointer \\p pHost. If any device visible to the application"] - #[doc = " has a zero value for the device attribute, the device pointer returned by"] - #[doc = " ::cudaHostGetDevicePointer() will not match the original host pointer \\p pHost,"] - #[doc = " but it will be suitable for use on all devices provided Unified Virtual Addressing"] - #[doc = " is enabled. 
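From the Rust side these raw externs compose in the usual way: allocate mapped, pinned host memory with cudaHostAlloc, then ask for the device-side alias with cudaHostGetDevicePointer. A minimal sketch, assuming the generated items in this file are in scope and that the header's cudaHostAllocMapped flag (0x02) is emitted by bindgen under that value; neither appears in this hunk, so treat both as assumptions:

use std::os::raw::c_void;
use std::ptr;

unsafe {
    // Page-locked host memory, mapped into the device address space.
    // The flag value 2 is assumed to match the header's cudaHostAllocMapped.
    let mut host: *mut c_void = ptr::null_mut();
    assert_eq!(cudaHostAlloc(&mut host, 1 << 20, 2), 0); // 0 == cudaSuccess (assumed constified enum)

    // Device-side alias of the same allocation; flags must currently be 0.
    let mut dev: *mut c_void = ptr::null_mut();
    assert_eq!(cudaHostGetDevicePointer(&mut dev, host, 0), 0);

    // ... hand `dev` to kernels, write through `host` from the CPU ...

    assert_eq!(cudaFreeHost(host), 0);
}

cudaFreeHost releases the pinned allocation; dropping the pointer without it would leak page-locked memory.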
In such systems, it is valid to access the memory using either pointer"] - #[doc = " on devices that have a non-zero value for the device attribute. Note however that"] - #[doc = " such devices should access the memory using only of the two pointers and not both."] - #[doc = ""] - #[doc = " \\p flags provides for future releases. For now, it must be set to 0."] - #[doc = ""] - #[doc = " \\param pDevice - Returned device pointer for mapped memory"] - #[doc = " \\param pHost - Requested host pointer mapping"] - #[doc = " \\param flags - Flags for extensions (must be 0 for now)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaSetDeviceFlags, ::cudaHostAlloc,"] - #[doc = " ::cuMemHostGetDevicePointer"] pub fn cudaHostGetDevicePointer( pDevice: *mut *mut ::std::os::raw::c_void, pHost: *mut ::std::os::raw::c_void, @@ -17449,206 +4295,18 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Passes back flags used to allocate pinned host memory allocated by"] - #[doc = " cudaHostAlloc"] - #[doc = ""] - #[doc = " ::cudaHostGetFlags() will fail if the input pointer does not"] - #[doc = " reside in an address range allocated by ::cudaHostAlloc()."] - #[doc = ""] - #[doc = " \\param pFlags - Returned flags word"] - #[doc = " \\param pHost - Host pointer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaHostAlloc,"] - #[doc = " ::cuMemHostGetFlags"] pub fn cudaHostGetFlags( pFlags: *mut ::std::os::raw::c_uint, pHost: *mut ::std::os::raw::c_void, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Allocates logical 1D, 2D, or 3D memory objects on the device"] - #[doc = ""] - #[doc = " Allocates at least \\p width * \\p height * \\p depth bytes of linear memory"] - #[doc = " on the device and returns a ::cudaPitchedPtr in which \\p ptr is a pointer"] - #[doc = " to the allocated memory. The function may pad the allocation to ensure"] - #[doc = " hardware alignment requirements are met. The pitch returned in the \\p pitch"] - #[doc = " field of \\p pitchedDevPtr is the width in bytes of the allocation."] - #[doc = ""] - #[doc = " The returned ::cudaPitchedPtr contains additional fields \\p xsize and"] - #[doc = " \\p ysize, the logical width and height of the allocation, which are"] - #[doc = " equivalent to the \\p width and \\p height \\p extent parameters provided by"] - #[doc = " the programmer during allocation."] - #[doc = ""] - #[doc = " For allocations of 2D and 3D objects, it is highly recommended that"] - #[doc = " programmers perform allocations using ::cudaMalloc3D() or"] - #[doc = " ::cudaMallocPitch(). 
Due to alignment restrictions in the hardware, this is"] - #[doc = " especially true if the application will be performing memory copies"] - #[doc = " involving 2D or 3D objects (whether linear memory or CUDA arrays)."] - #[doc = ""] - #[doc = " \\param pitchedDevPtr - Pointer to allocated pitched device memory"] - #[doc = " \\param extent - Requested allocation size (\\p width field in bytes)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMallocPitch, ::cudaFree, ::cudaMemcpy3D, ::cudaMemset3D,"] - #[doc = " ::cudaMalloc3DArray, ::cudaMallocArray, ::cudaFreeArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaHostAlloc, ::make_cudaPitchedPtr, ::make_cudaExtent,"] - #[doc = " ::cuMemAllocPitch"] pub fn cudaMalloc3D( pitchedDevPtr: *mut cudaPitchedPtr, extent: cudaExtent, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Allocate an array on the device"] - #[doc = ""] - #[doc = " Allocates a CUDA array according to the ::cudaChannelFormatDesc structure"] - #[doc = " \\p desc and returns a handle to the new CUDA array in \\p *array."] - #[doc = ""] - #[doc = " The ::cudaChannelFormatDesc is defined as:"] - #[doc = " \\code"] - #[doc = "struct cudaChannelFormatDesc {"] - #[doc = "int x, y, z, w;"] - #[doc = "enum cudaChannelFormatKind f;"] - #[doc = "};"] - #[doc = "\\endcode"] - #[doc = " where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned,"] - #[doc = " ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat."] - #[doc = ""] - #[doc = " ::cudaMalloc3DArray() can allocate the following:"] - #[doc = ""] - #[doc = " - A 1D array is allocated if the height and depth extents are both zero."] - #[doc = " - A 2D array is allocated if only the depth extent is zero."] - #[doc = " - A 3D array is allocated if all three extents are non-zero."] - #[doc = " - A 1D layered CUDA array is allocated if only the height extent is zero and"] - #[doc = " the cudaArrayLayered flag is set. Each layer is a 1D array. The number of layers is"] - #[doc = " determined by the depth extent."] - #[doc = " - A 2D layered CUDA array is allocated if all three extents are non-zero and"] - #[doc = " the cudaArrayLayered flag is set. Each layer is a 2D array. The number of layers is"] - #[doc = " determined by the depth extent."] - #[doc = " - A cubemap CUDA array is allocated if all three extents are non-zero and the"] - #[doc = " cudaArrayCubemap flag is set. Width must be equal to height, and depth must be six. A cubemap is"] - #[doc = " a special type of 2D layered CUDA array, where the six layers represent the six faces of a cube."] - #[doc = " The order of the six layers in memory is the same as that listed in ::cudaGraphicsCubeFace."] - #[doc = " - A cubemap layered CUDA array is allocated if all three extents are non-zero, and both,"] - #[doc = " cudaArrayCubemap and cudaArrayLayered flags are set. Width must be equal to height, and depth must be"] - #[doc = " a multiple of six. A cubemap layered CUDA array is a special type of 2D layered CUDA array that consists"] - #[doc = " of a collection of cubemaps. 
The first six layers represent the first cubemap, the next six layers form"] - #[doc = " the second cubemap, and so on."] - #[doc = ""] - #[doc = ""] - #[doc = " The \\p flags parameter enables different options to be specified that affect"] - #[doc = " the allocation, as follows."] - #[doc = " - ::cudaArrayDefault: This flag's value is defined to be 0 and provides default array allocation"] - #[doc = " - ::cudaArrayLayered: Allocates a layered CUDA array, with the depth extent indicating the number of layers"] - #[doc = " - ::cudaArrayCubemap: Allocates a cubemap CUDA array. Width must be equal to height, and depth must be six."] - #[doc = " If the cudaArrayLayered flag is also set, depth must be a multiple of six."] - #[doc = " - ::cudaArraySurfaceLoadStore: Allocates a CUDA array that could be read from or written to using a surface"] - #[doc = " reference."] - #[doc = " - ::cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the CUDA"] - #[doc = " array. Texture gather can only be performed on 2D CUDA arrays."] - #[doc = ""] - #[doc = " The width, height and depth extents must meet certain size requirements as listed in the following table."] - #[doc = " All values are specified in elements."] - #[doc = ""] - #[doc = " Note that 2D CUDA arrays have different size requirements if the ::cudaArrayTextureGather flag is set. In that"] - #[doc = " case, the valid range for (width, height, depth) is ((1,maxTexture2DGather[0]), (1,maxTexture2DGather[1]), 0)."] - #[doc = ""] - #[doc = " \\xmlonly"] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " CUDA array type"] - #[doc = " Valid extents that must always be met {(width range in elements),"] - #[doc = " (height range), (depth range)}"] - #[doc = " Valid extents with cudaArraySurfaceLoadStore set {(width range in"] - #[doc = " elements), (height range), (depth range)}"] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " 1D"] - #[doc = " { (1,maxTexture1D), 0, 0 }"] - #[doc = " { (1,maxSurface1D), 0, 0 }"] - #[doc = " "] - #[doc = " "] - #[doc = " 2D"] - #[doc = " { (1,maxTexture2D[0]), (1,maxTexture2D[1]), 0 }"] - #[doc = " { (1,maxSurface2D[0]), (1,maxSurface2D[1]), 0 }"] - #[doc = " "] - #[doc = " "] - #[doc = " 3D"] - #[doc = " { (1,maxTexture3D[0]), (1,maxTexture3D[1]), (1,maxTexture3D[2]) }"] - #[doc = " OR { (1,maxTexture3DAlt[0]), (1,maxTexture3DAlt[1]),"] - #[doc = " (1,maxTexture3DAlt[2]) }"] - #[doc = " { (1,maxSurface3D[0]), (1,maxSurface3D[1]), (1,maxSurface3D[2]) }"] - #[doc = " "] - #[doc = " "] - #[doc = " 1D Layered"] - #[doc = " { (1,maxTexture1DLayered[0]), 0, (1,maxTexture1DLayered[1]) }"] - #[doc = " { (1,maxSurface1DLayered[0]), 0, (1,maxSurface1DLayered[1]) }"] - #[doc = " "] - #[doc = " "] - #[doc = " 2D Layered"] - #[doc = " { (1,maxTexture2DLayered[0]), (1,maxTexture2DLayered[1]),"] - #[doc = " (1,maxTexture2DLayered[2]) }"] - #[doc = " { (1,maxSurface2DLayered[0]), (1,maxSurface2DLayered[1]),"] - #[doc = " (1,maxSurface2DLayered[2]) }"] - #[doc = " "] - #[doc = " "] - #[doc = " Cubemap"] - #[doc = " { (1,maxTextureCubemap), (1,maxTextureCubemap), 6 }"] - #[doc = " { (1,maxSurfaceCubemap), (1,maxSurfaceCubemap), 6 }"] - #[doc = " "] - #[doc = " "] - #[doc = " Cubemap Layered"] - #[doc = " { (1,maxTextureCubemapLayered[0]), (1,maxTextureCubemapLayered[0]),"] - #[doc = " (1,maxTextureCubemapLayered[1]) }"] - #[doc = " { (1,maxSurfaceCubemapLayered[0]), (1,maxSurfaceCubemapLayered[0]),"] - 
#[doc = " (1,maxSurfaceCubemapLayered[1]) }"] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = "
"] - #[doc = " \\endxmlonly"] - #[doc = ""] - #[doc = " \\param array - Pointer to allocated array in device memory"] - #[doc = " \\param desc - Requested channel format"] - #[doc = " \\param extent - Requested allocation size (\\p width field in elements)"] - #[doc = " \\param flags - Flags for extensions"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc3D, ::cudaMalloc, ::cudaMallocPitch, ::cudaFree,"] - #[doc = " ::cudaFreeArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaHostAlloc,"] - #[doc = " ::make_cudaExtent,"] - #[doc = " ::cuArray3DCreate"] pub fn cudaMalloc3DArray( array: *mut cudaArray_t, desc: *const cudaChannelFormatDesc, @@ -17657,141 +4315,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Allocate a mipmapped array on the device"] - #[doc = ""] - #[doc = " Allocates a CUDA mipmapped array according to the ::cudaChannelFormatDesc structure"] - #[doc = " \\p desc and returns a handle to the new CUDA mipmapped array in \\p *mipmappedArray."] - #[doc = " \\p numLevels specifies the number of mipmap levels to be allocated. This value is"] - #[doc = " clamped to the range [1, 1 + floor(log2(max(width, height, depth)))]."] - #[doc = ""] - #[doc = " The ::cudaChannelFormatDesc is defined as:"] - #[doc = " \\code"] - #[doc = "struct cudaChannelFormatDesc {"] - #[doc = "int x, y, z, w;"] - #[doc = "enum cudaChannelFormatKind f;"] - #[doc = "};"] - #[doc = "\\endcode"] - #[doc = " where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned,"] - #[doc = " ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat."] - #[doc = ""] - #[doc = " ::cudaMallocMipmappedArray() can allocate the following:"] - #[doc = ""] - #[doc = " - A 1D mipmapped array is allocated if the height and depth extents are both zero."] - #[doc = " - A 2D mipmapped array is allocated if only the depth extent is zero."] - #[doc = " - A 3D mipmapped array is allocated if all three extents are non-zero."] - #[doc = " - A 1D layered CUDA mipmapped array is allocated if only the height extent is zero and"] - #[doc = " the cudaArrayLayered flag is set. Each layer is a 1D mipmapped array. The number of layers is"] - #[doc = " determined by the depth extent."] - #[doc = " - A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and"] - #[doc = " the cudaArrayLayered flag is set. Each layer is a 2D mipmapped array. The number of layers is"] - #[doc = " determined by the depth extent."] - #[doc = " - A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the"] - #[doc = " cudaArrayCubemap flag is set. Width must be equal to height, and depth must be six."] - #[doc = " The order of the six layers in memory is the same as that listed in ::cudaGraphicsCubeFace."] - #[doc = " - A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero, and both,"] - #[doc = " cudaArrayCubemap and cudaArrayLayered flags are set. Width must be equal to height, and depth must be"] - #[doc = " a multiple of six. A cubemap layered CUDA mipmapped array is a special type of 2D layered CUDA mipmapped"] - #[doc = " array that consists of a collection of cubemap mipmapped arrays. 
The first six layers represent the"] - #[doc = " first cubemap mipmapped array, the next six layers form the second cubemap mipmapped array, and so on."] - #[doc = ""] - #[doc = ""] - #[doc = " The \\p flags parameter enables different options to be specified that affect"] - #[doc = " the allocation, as follows."] - #[doc = " - ::cudaArrayDefault: This flag's value is defined to be 0 and provides default mipmapped array allocation"] - #[doc = " - ::cudaArrayLayered: Allocates a layered CUDA mipmapped array, with the depth extent indicating the number of layers"] - #[doc = " - ::cudaArrayCubemap: Allocates a cubemap CUDA mipmapped array. Width must be equal to height, and depth must be six."] - #[doc = " If the cudaArrayLayered flag is also set, depth must be a multiple of six."] - #[doc = " - ::cudaArraySurfaceLoadStore: This flag indicates that individual mipmap levels of the CUDA mipmapped array"] - #[doc = " will be read from or written to using a surface reference."] - #[doc = " - ::cudaArrayTextureGather: This flag indicates that texture gather operations will be performed on the CUDA"] - #[doc = " array. Texture gather can only be performed on 2D CUDA mipmapped arrays, and the gather operations are"] - #[doc = " performed only on the most detailed mipmap level."] - #[doc = ""] - #[doc = " The width, height and depth extents must meet certain size requirements as listed in the following table."] - #[doc = " All values are specified in elements."] - #[doc = ""] - #[doc = " \\xmlonly"] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " CUDA array type"] - #[doc = " Valid extents that must always be met {(width range in elements),"] - #[doc = " (height range), (depth range)}"] - #[doc = " Valid extents with cudaArraySurfaceLoadStore set {(width range in"] - #[doc = " elements), (height range), (depth range)}"] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = " 1D"] - #[doc = " { (1,maxTexture1DMipmap), 0, 0 }"] - #[doc = " { (1,maxSurface1D), 0, 0 }"] - #[doc = " "] - #[doc = " "] - #[doc = " 2D"] - #[doc = " { (1,maxTexture2DMipmap[0]), (1,maxTexture2DMipmap[1]), 0 }"] - #[doc = " { (1,maxSurface2D[0]), (1,maxSurface2D[1]), 0 }"] - #[doc = " "] - #[doc = " "] - #[doc = " 3D"] - #[doc = " { (1,maxTexture3D[0]), (1,maxTexture3D[1]), (1,maxTexture3D[2]) }"] - #[doc = " OR { (1,maxTexture3DAlt[0]), (1,maxTexture3DAlt[1]),"] - #[doc = " (1,maxTexture3DAlt[2]) }"] - #[doc = " { (1,maxSurface3D[0]), (1,maxSurface3D[1]), (1,maxSurface3D[2]) }"] - #[doc = " "] - #[doc = " "] - #[doc = " 1D Layered"] - #[doc = " { (1,maxTexture1DLayered[0]), 0, (1,maxTexture1DLayered[1]) }"] - #[doc = " { (1,maxSurface1DLayered[0]), 0, (1,maxSurface1DLayered[1]) }"] - #[doc = " "] - #[doc = " "] - #[doc = " 2D Layered"] - #[doc = " { (1,maxTexture2DLayered[0]), (1,maxTexture2DLayered[1]),"] - #[doc = " (1,maxTexture2DLayered[2]) }"] - #[doc = " { (1,maxSurface2DLayered[0]), (1,maxSurface2DLayered[1]),"] - #[doc = " (1,maxSurface2DLayered[2]) }"] - #[doc = " "] - #[doc = " "] - #[doc = " Cubemap"] - #[doc = " { (1,maxTextureCubemap), (1,maxTextureCubemap), 6 }"] - #[doc = " { (1,maxSurfaceCubemap), (1,maxSurfaceCubemap), 6 }"] - #[doc = " "] - #[doc = " "] - #[doc = " Cubemap Layered"] - #[doc = " { (1,maxTextureCubemapLayered[0]), (1,maxTextureCubemapLayered[0]),"] - #[doc = " (1,maxTextureCubemapLayered[1]) }"] - #[doc = " { (1,maxSurfaceCubemapLayered[0]), (1,maxSurfaceCubemapLayered[0]),"] - #[doc = " 
(1,maxSurfaceCubemapLayered[1]) }"] - #[doc = " "] - #[doc = " "] - #[doc = " "] - #[doc = "
"] - #[doc = " \\endxmlonly"] - #[doc = ""] - #[doc = " \\param mipmappedArray - Pointer to allocated mipmapped array in device memory"] - #[doc = " \\param desc - Requested channel format"] - #[doc = " \\param extent - Requested allocation size (\\p width field in elements)"] - #[doc = " \\param numLevels - Number of mipmap levels to allocate"] - #[doc = " \\param flags - Flags for extensions"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc3D, ::cudaMalloc, ::cudaMallocPitch, ::cudaFree,"] - #[doc = " ::cudaFreeArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaHostAlloc,"] - #[doc = " ::make_cudaExtent,"] - #[doc = " ::cuMipmappedArrayCreate"] pub fn cudaMallocMipmappedArray( mipmappedArray: *mut cudaMipmappedArray_t, desc: *const cudaChannelFormatDesc, @@ -17801,31 +4324,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Gets a mipmap level of a CUDA mipmapped array"] - #[doc = ""] - #[doc = " Returns in \\p *levelArray a CUDA array that represents a single mipmap level"] - #[doc = " of the CUDA mipmapped array \\p mipmappedArray."] - #[doc = ""] - #[doc = " If \\p level is greater than the maximum number of levels in this mipmapped array,"] - #[doc = " ::cudaErrorInvalidValue is returned."] - #[doc = ""] - #[doc = " \\param levelArray - Returned mipmap level CUDA array"] - #[doc = " \\param mipmappedArray - CUDA mipmapped array"] - #[doc = " \\param level - Mipmap level"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc3D, ::cudaMalloc, ::cudaMallocPitch, ::cudaFree,"] - #[doc = " ::cudaFreeArray,"] - #[doc = " \\ref ::cudaMallocHost(void**, size_t) \"cudaMallocHost (C API)\","] - #[doc = " ::cudaFreeHost, ::cudaHostAlloc,"] - #[doc = " ::make_cudaExtent,"] - #[doc = " ::cuMipmappedArrayGetLevel"] pub fn cudaGetMipmappedArrayLevel( levelArray: *mut cudaArray_t, mipmappedArray: cudaMipmappedArray_const_t, @@ -17833,331 +4331,27 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between 3D objects"] - #[doc = ""] - #[doc = "\\code"] - #[doc = "struct cudaExtent {"] - #[doc = "size_t width;"] - #[doc = "size_t height;"] - #[doc = "size_t depth;"] - #[doc = "};"] - #[doc = "struct cudaExtent make_cudaExtent(size_t w, size_t h, size_t d);"] - #[doc = ""] - #[doc = "struct cudaPos {"] - #[doc = "size_t x;"] - #[doc = "size_t y;"] - #[doc = "size_t z;"] - #[doc = "};"] - #[doc = "struct cudaPos make_cudaPos(size_t x, size_t y, size_t z);"] - #[doc = ""] - #[doc = "struct cudaMemcpy3DParms {"] - #[doc = "cudaArray_t srcArray;"] - #[doc = "struct cudaPos srcPos;"] - #[doc = "struct cudaPitchedPtr srcPtr;"] - #[doc = "cudaArray_t dstArray;"] - #[doc = "struct cudaPos dstPos;"] - #[doc = "struct cudaPitchedPtr dstPtr;"] - #[doc = "struct cudaExtent extent;"] - #[doc = "enum cudaMemcpyKind kind;"] - #[doc = "};"] - #[doc = "\\endcode"] - #[doc = ""] - #[doc = " ::cudaMemcpy3D() copies data betwen two 3D objects. The source and"] - #[doc = " destination objects may be in either host memory, device memory, or a CUDA"] - #[doc = " array. 
The source, destination, extent, and kind of copy performed is"] - #[doc = " specified by the ::cudaMemcpy3DParms struct which should be initialized to"] - #[doc = " zero before use:"] - #[doc = "\\code"] - #[doc = "cudaMemcpy3DParms myParms = {0};"] - #[doc = "\\endcode"] - #[doc = ""] - #[doc = " The struct passed to ::cudaMemcpy3D() must specify one of \\p srcArray or"] - #[doc = " \\p srcPtr and one of \\p dstArray or \\p dstPtr. Passing more than one"] - #[doc = " non-zero source or destination will cause ::cudaMemcpy3D() to return an"] - #[doc = " error."] - #[doc = ""] - #[doc = " The \\p srcPos and \\p dstPos fields are optional offsets into the source and"] - #[doc = " destination objects and are defined in units of each object's elements. The"] - #[doc = " element for a host or device pointer is assumed to be unsigned char."] - #[doc = ""] - #[doc = " The \\p extent field defines the dimensions of the transferred area in"] - #[doc = " elements. If a CUDA array is participating in the copy, the extent is"] - #[doc = " defined in terms of that array's elements. If no CUDA array is"] - #[doc = " participating in the copy then the extents are defined in elements of"] - #[doc = " unsigned char."] - #[doc = ""] - #[doc = " The \\p kind field defines the direction of the copy. It must be one of"] - #[doc = " ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = " For ::cudaMemcpyHostToHost or ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost"] - #[doc = " passed as kind and cudaArray type passed as source or destination, if the kind"] - #[doc = " implies cudaArray type to be present on the host, ::cudaMemcpy3D() will"] - #[doc = " disregard that implication and silently correct the kind based on the fact that"] - #[doc = " cudaArray type can only be present on the device."] - #[doc = ""] - #[doc = " If the source and destination are both arrays, ::cudaMemcpy3D() will return"] - #[doc = " an error if they do not have the same element size."] - #[doc = ""] - #[doc = " The source and destination object may not overlap. If overlapping source"] - #[doc = " and destination objects are specified, undefined behavior will result."] - #[doc = ""] - #[doc = " The source object must lie entirely within the region defined by \\p srcPos"] - #[doc = " and \\p extent. The destination object must lie entirely within the region"] - #[doc = " defined by \\p dstPos and \\p extent."] - #[doc = ""] - #[doc = " ::cudaMemcpy3D() returns an error if the pitch of \\p srcPtr or \\p dstPtr"] - #[doc = " exceeds the maximum allowed. 
The pitch of a ::cudaPitchedPtr allocated"] - #[doc = " with ::cudaMalloc3D() will always be valid."] - #[doc = ""] - #[doc = " \\param p - 3D memory copy parameters"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidPitchValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaMemset3D, ::cudaMemcpy3DAsync,"] - #[doc = " ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::make_cudaExtent, ::make_cudaPos,"] - #[doc = " ::cuMemcpy3D"] pub fn cudaMemcpy3D(p: *const cudaMemcpy3DParms) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies memory between devices"] - #[doc = ""] - #[doc = " Perform a 3D memory copy according to the parameters specified in"] - #[doc = " \\p p. See the definition of the ::cudaMemcpy3DPeerParms structure"] - #[doc = " for documentation of its parameters."] - #[doc = ""] - #[doc = " Note that this function is synchronous with respect to the host only if"] - #[doc = " the source or destination of the transfer is host memory. Note also"] - #[doc = " that this copy is serialized with respect to all pending and future"] - #[doc = " asynchronous work in to the current device, the copy's source device,"] - #[doc = " and the copy's destination device (use ::cudaMemcpy3DPeerAsync to avoid"] - #[doc = " this synchronization)."] - #[doc = ""] - #[doc = " \\param p - Parameters for the memory copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync, ::cudaMemcpyPeerAsync,"] - #[doc = " ::cudaMemcpy3DPeerAsync,"] - #[doc = " ::cuMemcpy3DPeer"] pub fn cudaMemcpy3DPeer(p: *const cudaMemcpy3DPeerParms) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between 3D objects"] - #[doc = ""] - #[doc = "\\code"] - #[doc = "struct cudaExtent {"] - #[doc = "size_t width;"] - #[doc = "size_t height;"] - #[doc = "size_t depth;"] - #[doc = "};"] - #[doc = "struct cudaExtent make_cudaExtent(size_t w, size_t h, size_t d);"] - #[doc = ""] - #[doc = "struct cudaPos {"] - #[doc = "size_t x;"] - #[doc = "size_t y;"] - #[doc = "size_t z;"] - #[doc = "};"] - #[doc = "struct cudaPos make_cudaPos(size_t x, size_t y, size_t z);"] - #[doc = ""] - #[doc = "struct cudaMemcpy3DParms {"] - #[doc = "cudaArray_t srcArray;"] - #[doc = "struct cudaPos srcPos;"] - #[doc = "struct cudaPitchedPtr srcPtr;"] - #[doc = "cudaArray_t dstArray;"] - #[doc = "struct cudaPos dstPos;"] - #[doc = "struct cudaPitchedPtr dstPtr;"] - #[doc = "struct cudaExtent extent;"] - #[doc = "enum cudaMemcpyKind kind;"] - #[doc = "};"] - #[doc = "\\endcode"] - #[doc = ""] - #[doc = " ::cudaMemcpy3DAsync() copies data betwen two 3D objects. 
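Both the synchronous and asynchronous 3D copies take the same zero-initialized cudaMemcpy3DParms. A minimal sketch of the synchronous path between two pitched linear allocations, using std::mem::zeroed for the "initialize to zero" step and assuming bindgen's Enum_Variant naming for cudaMemcpyKind:

use std::mem;

unsafe {
    // Width is in bytes for linear (pitched) memory: 256 floats per row.
    let extent = cudaExtent { width: 256 * 4, height: 128, depth: 32 };

    let mut src: cudaPitchedPtr = mem::zeroed();
    let mut dst: cudaPitchedPtr = mem::zeroed();
    assert_eq!(cudaMalloc3D(&mut src, extent), 0); // 0 == cudaSuccess
    assert_eq!(cudaMalloc3D(&mut dst, extent), 0);

    // Zero the descriptor first, then set only the fields that apply.
    let mut p: cudaMemcpy3DParms = mem::zeroed();
    p.srcPtr = src;
    p.dstPtr = dst;
    p.extent = extent;
    p.kind = cudaMemcpyKind_cudaMemcpyDeviceToDevice; // assumed bindgen constant name
    assert_eq!(cudaMemcpy3D(&p), 0);

    assert_eq!(cudaFree(src.ptr), 0);
    assert_eq!(cudaFree(dst.ptr), 0);
}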
The source and"] - #[doc = " destination objects may be in either host memory, device memory, or a CUDA"] - #[doc = " array. The source, destination, extent, and kind of copy performed is"] - #[doc = " specified by the ::cudaMemcpy3DParms struct which should be initialized to"] - #[doc = " zero before use:"] - #[doc = "\\code"] - #[doc = "cudaMemcpy3DParms myParms = {0};"] - #[doc = "\\endcode"] - #[doc = ""] - #[doc = " The struct passed to ::cudaMemcpy3DAsync() must specify one of \\p srcArray"] - #[doc = " or \\p srcPtr and one of \\p dstArray or \\p dstPtr. Passing more than one"] - #[doc = " non-zero source or destination will cause ::cudaMemcpy3DAsync() to return an"] - #[doc = " error."] - #[doc = ""] - #[doc = " The \\p srcPos and \\p dstPos fields are optional offsets into the source and"] - #[doc = " destination objects and are defined in units of each object's elements. The"] - #[doc = " element for a host or device pointer is assumed to be unsigned char."] - #[doc = " For CUDA arrays, positions must be in the range [0, 2048) for any"] - #[doc = " dimension."] - #[doc = ""] - #[doc = " The \\p extent field defines the dimensions of the transferred area in"] - #[doc = " elements. If a CUDA array is participating in the copy, the extent is"] - #[doc = " defined in terms of that array's elements. If no CUDA array is"] - #[doc = " participating in the copy then the extents are defined in elements of"] - #[doc = " unsigned char."] - #[doc = ""] - #[doc = " The \\p kind field defines the direction of the copy. It must be one of"] - #[doc = " ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = " For ::cudaMemcpyHostToHost or ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost"] - #[doc = " passed as kind and cudaArray type passed as source or destination, if the kind"] - #[doc = " implies cudaArray type to be present on the host, ::cudaMemcpy3DAsync() will"] - #[doc = " disregard that implication and silently correct the kind based on the fact that"] - #[doc = " cudaArray type can only be present on the device."] - #[doc = ""] - #[doc = " If the source and destination are both arrays, ::cudaMemcpy3DAsync() will"] - #[doc = " return an error if they do not have the same element size."] - #[doc = ""] - #[doc = " The source and destination object may not overlap. If overlapping source"] - #[doc = " and destination objects are specified, undefined behavior will result."] - #[doc = ""] - #[doc = " The source object must lie entirely within the region defined by \\p srcPos"] - #[doc = " and \\p extent. The destination object must lie entirely within the region"] - #[doc = " defined by \\p dstPos and \\p extent."] - #[doc = ""] - #[doc = " ::cudaMemcpy3DAsync() returns an error if the pitch of \\p srcPtr or"] - #[doc = " \\p dstPtr exceeds the maximum allowed. The pitch of a"] - #[doc = " ::cudaPitchedPtr allocated with ::cudaMalloc3D() will always be valid."] - #[doc = ""] - #[doc = " ::cudaMemcpy3DAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the copy is complete. The copy can optionally"] - #[doc = " be associated to a stream by passing a non-zero \\p stream argument. 
If"] - #[doc = " \\p kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \\p stream"] - #[doc = " is non-zero, the copy may overlap with operations in other streams."] - #[doc = ""] - #[doc = " The device version of this function only handles device to device copies and"] - #[doc = " cannot be given local or shared pointers."] - #[doc = ""] - #[doc = " \\param p - 3D memory copy parameters"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidPitchValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMalloc3D, ::cudaMalloc3DArray, ::cudaMemset3D, ::cudaMemcpy3D,"] - #[doc = " ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, :::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::make_cudaExtent, ::make_cudaPos,"] - #[doc = " ::cuMemcpy3DAsync"] pub fn cudaMemcpy3DAsync( p: *const cudaMemcpy3DParms, stream: cudaStream_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies memory between devices asynchronously."] - #[doc = ""] - #[doc = " Perform a 3D memory copy according to the parameters specified in"] - #[doc = " \\p p. See the definition of the ::cudaMemcpy3DPeerParms structure"] - #[doc = " for documentation of its parameters."] - #[doc = ""] - #[doc = " \\param p - Parameters for the memory copy"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync, ::cudaMemcpyPeerAsync,"] - #[doc = " ::cudaMemcpy3DPeerAsync,"] - #[doc = " ::cuMemcpy3DPeerAsync"] pub fn cudaMemcpy3DPeerAsync( p: *const cudaMemcpy3DPeerParms, stream: cudaStream_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Gets free and total device memory"] - #[doc = ""] - #[doc = " Returns in \\p *free and \\p *total respectively, the free and total amount of"] - #[doc = " memory available for allocation by the device in bytes."] - #[doc = ""] - #[doc = " \\param free - Returned free memory in bytes"] - #[doc = " \\param total - Returned total memory in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorLaunchFailure"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemGetInfo"] pub fn cudaMemGetInfo(free: *mut usize, total: *mut usize) -> cudaError_t; } extern "C" { - #[doc = " \\brief Gets info about the specified cudaArray"] - #[doc = ""] - #[doc = " Returns in \\p *desc, \\p *extent and \\p *flags respectively, the type, shape"] - #[doc = " and flags of \\p array."] - #[doc = ""] - #[doc = " Any of \\p *desc, \\p *extent and \\p *flags may be specified as NULL."] 
- #[doc = ""] - #[doc = " \\param desc - Returned array type"] - #[doc = " \\param extent - Returned array shape. 2D arrays will have depth of zero"] - #[doc = " \\param flags - Returned array flags"] - #[doc = " \\param array - The ::cudaArray to get info for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuArrayGetDescriptor,"] - #[doc = " ::cuArray3DGetDescriptor"] pub fn cudaArrayGetInfo( desc: *mut cudaChannelFormatDesc, extent: *mut cudaExtent, @@ -18166,45 +4360,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the memory area pointed to by \\p src to the"] - #[doc = " memory area pointed to by \\p dst, where \\p kind specifies the direction"] - #[doc = " of the copy, and must be one of ::cudaMemcpyHostToHost,"] - #[doc = " ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing. Calling"] - #[doc = " ::cudaMemcpy() with dst and src pointers that do not match the direction of"] - #[doc = " the copy results in an undefined behavior."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param kind - Type of transfer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\note_sync"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpyDtoH,"] - #[doc = " ::cuMemcpyHtoD,"] - #[doc = " ::cuMemcpyDtoD,"] - #[doc = " ::cuMemcpy"] pub fn cudaMemcpy( dst: *mut ::std::os::raw::c_void, src: *const ::std::os::raw::c_void, @@ -18213,37 +4368,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies memory between two devices"] - #[doc = ""] - #[doc = " Copies memory from one device to memory on another device. \\p dst is the"] - #[doc = " base device pointer of the destination memory and \\p dstDevice is the"] - #[doc = " destination device. \\p src is the base device pointer of the source memory"] - #[doc = " and \\p srcDevice is the source device. 
\\p count specifies the number of bytes"] - #[doc = " to copy."] - #[doc = ""] - #[doc = " Note that this function is asynchronous with respect to the host, but"] - #[doc = " serialized with respect all pending and future asynchronous work in to the"] - #[doc = " current device, \\p srcDevice, and \\p dstDevice (use ::cudaMemcpyPeerAsync"] - #[doc = " to avoid this synchronization)."] - #[doc = ""] - #[doc = " \\param dst - Destination device pointer"] - #[doc = " \\param dstDevice - Destination device"] - #[doc = " \\param src - Source device pointer"] - #[doc = " \\param srcDevice - Source device"] - #[doc = " \\param count - Size of memory copy in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpyAsync, ::cudaMemcpyPeerAsync,"] - #[doc = " ::cudaMemcpy3DPeerAsync,"] - #[doc = " ::cuMemcpyPeer"] pub fn cudaMemcpyPeer( dst: *mut ::std::os::raw::c_void, dstDevice: ::std::os::raw::c_int, @@ -18253,50 +4377,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " Copies a matrix (\\p height rows of \\p width bytes each) from the memory"] - #[doc = " area pointed to by \\p src to the memory area pointed to by \\p dst, where"] - #[doc = " \\p kind specifies the direction of the copy, and must be one of"] - #[doc = " ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing. \\p dpitch and"] - #[doc = " \\p spitch are the widths in memory in bytes of the 2D arrays pointed to by"] - #[doc = " \\p dst and \\p src, including any padding added to the end of each row. The"] - #[doc = " memory areas may not overlap. \\p width must not exceed either \\p dpitch or"] - #[doc = " \\p spitch. 
Calling ::cudaMemcpy2D() with \\p dst and \\p src pointers that do"] - #[doc = " not match the direction of the copy results in an undefined behavior."] - #[doc = " ::cudaMemcpy2D() returns an error if \\p dpitch or \\p spitch exceeds"] - #[doc = " the maximum allowed."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param dpitch - Pitch of destination memory"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param spitch - Pitch of source memory"] - #[doc = " \\param width - Width of matrix transfer (columns in bytes)"] - #[doc = " \\param height - Height of matrix transfer (rows)"] - #[doc = " \\param kind - Type of transfer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidPitchValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpy2D,"] - #[doc = " ::cuMemcpy2DUnaligned"] pub fn cudaMemcpy2D( dst: *mut ::std::os::raw::c_void, dpitch: usize, @@ -18308,51 +4388,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " Copies a matrix (\\p height rows of \\p width bytes each) from the memory"] - #[doc = " area pointed to by \\p src to the CUDA array \\p dst starting at the"] - #[doc = " upper left corner (\\p wOffset, \\p hOffset) where \\p kind specifies the"] - #[doc = " direction of the copy, and must be one of ::cudaMemcpyHostToHost,"] - #[doc = " ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = " \\p spitch is the width in memory in bytes of the 2D array pointed to by"] - #[doc = " \\p src, including any padding added to the end of each row. \\p wOffset +"] - #[doc = " \\p width must not exceed the width of the CUDA array \\p dst. \\p width must"] - #[doc = " not exceed \\p spitch. 
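The pitched 2D variants differ mainly in the pitch bookkeeping; a minimal sketch pairing cudaMallocPitch with cudaMemcpy2D, assuming a tightly packed host image whose source pitch is therefore just its row width in bytes:

use std::mem;
use std::os::raw::c_void;
use std::ptr;

unsafe {
    let (width, height) = (640usize, 480usize);
    let width_bytes = width * mem::size_of::<f32>();
    let host = vec![0.0f32; width * height];

    // The runtime returns a pitch >= width_bytes that satisfies the device's
    // alignment rules; always use that pitch for the device-side rows.
    let mut dev: *mut c_void = ptr::null_mut();
    let mut pitch: usize = 0;
    assert_eq!(cudaMallocPitch(&mut dev, &mut pitch, width_bytes, height), 0);

    // Host rows are tightly packed, so the source pitch is width_bytes.
    assert_eq!(
        cudaMemcpy2D(dev, pitch,
                     host.as_ptr() as *const c_void, width_bytes,
                     width_bytes, height,
                     cudaMemcpyKind_cudaMemcpyHostToDevice), // assumed constant name
        0 // 0 == cudaSuccess
    );
    assert_eq!(cudaFree(dev), 0);
}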
::cudaMemcpy2DToArray() returns an error if \\p spitch"] - #[doc = " exceeds the maximum allowed."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param wOffset - Destination starting X offset"] - #[doc = " \\param hOffset - Destination starting Y offset"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param spitch - Pitch of source memory"] - #[doc = " \\param width - Width of matrix transfer (columns in bytes)"] - #[doc = " \\param height - Height of matrix transfer (rows)"] - #[doc = " \\param kind - Type of transfer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidPitchValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpy2D,"] - #[doc = " ::cuMemcpy2DUnaligned"] pub fn cudaMemcpy2DToArray( dst: cudaArray_t, wOffset: usize, @@ -18365,51 +4400,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " Copies a matrix (\\p height rows of \\p width bytes each) from the CUDA"] - #[doc = " array \\p srcArray starting at the upper left corner"] - #[doc = " (\\p wOffset, \\p hOffset) to the memory area pointed to by \\p dst, where"] - #[doc = " \\p kind specifies the direction of the copy, and must be one of"] - #[doc = " ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing. \\p dpitch is the"] - #[doc = " width in memory in bytes of the 2D array pointed to by \\p dst, including any"] - #[doc = " padding added to the end of each row. \\p wOffset + \\p width must not exceed"] - #[doc = " the width of the CUDA array \\p src. 
\\p width must not exceed \\p dpitch."] - #[doc = " ::cudaMemcpy2DFromArray() returns an error if \\p dpitch exceeds the maximum"] - #[doc = " allowed."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param dpitch - Pitch of destination memory"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param wOffset - Source starting X offset"] - #[doc = " \\param hOffset - Source starting Y offset"] - #[doc = " \\param width - Width of matrix transfer (columns in bytes)"] - #[doc = " \\param height - Height of matrix transfer (rows)"] - #[doc = " \\param kind - Type of transfer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidPitchValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpy2D,"] - #[doc = " ::cuMemcpy2DUnaligned"] pub fn cudaMemcpy2DFromArray( dst: *mut ::std::os::raw::c_void, dpitch: usize, @@ -18422,49 +4412,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " Copies a matrix (\\p height rows of \\p width bytes each) from the CUDA"] - #[doc = " array \\p srcArray starting at the upper left corner"] - #[doc = " (\\p wOffsetSrc, \\p hOffsetSrc) to the CUDA array \\p dst starting at"] - #[doc = " the upper left corner (\\p wOffsetDst, \\p hOffsetDst), where \\p kind"] - #[doc = " specifies the direction of the copy, and must be one of"] - #[doc = " ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. 
However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = " \\p wOffsetDst + \\p width must not exceed the width of the CUDA array \\p dst."] - #[doc = " \\p wOffsetSrc + \\p width must not exceed the width of the CUDA array \\p src."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param wOffsetDst - Destination starting X offset"] - #[doc = " \\param hOffsetDst - Destination starting Y offset"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param wOffsetSrc - Source starting X offset"] - #[doc = " \\param hOffsetSrc - Source starting Y offset"] - #[doc = " \\param width - Width of matrix transfer (columns in bytes)"] - #[doc = " \\param height - Height of matrix transfer (rows)"] - #[doc = " \\param kind - Type of transfer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpy2D,"] - #[doc = " ::cuMemcpy2DUnaligned"] pub fn cudaMemcpy2DArrayToArray( dst: cudaArray_t, wOffsetDst: usize, @@ -18478,45 +4425,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data to the given symbol on the device"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the memory area pointed to by \\p src"] - #[doc = " to the memory area pointed to by \\p offset bytes from the start of symbol"] - #[doc = " \\p symbol. The memory areas may not overlap. \\p symbol is a variable that"] - #[doc = " resides in global or constant memory space. \\p kind can be either"] - #[doc = " ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault."] - #[doc = " Passing ::cudaMemcpyDefault is recommended, in which case the type of"] - #[doc = " transfer is inferred from the pointer values. 
However, ::cudaMemcpyDefault"] - #[doc = " is only allowed on systems that support unified virtual addressing."] - #[doc = ""] - #[doc = " \\param symbol - Device symbol address"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param offset - Offset from start of symbol in bytes"] - #[doc = " \\param kind - Type of transfer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidSymbol,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection,"] - #[doc = " ::cudaErrorNoKernelImageForDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = " \\note_string_api_deprecation"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpy,"] - #[doc = " ::cuMemcpyHtoD,"] - #[doc = " ::cuMemcpyDtoD"] pub fn cudaMemcpyToSymbol( symbol: *const ::std::os::raw::c_void, src: *const ::std::os::raw::c_void, @@ -18526,45 +4434,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data from the given symbol on the device"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the memory area pointed to by \\p offset bytes"] - #[doc = " from the start of symbol \\p symbol to the memory area pointed to by \\p dst."] - #[doc = " The memory areas may not overlap. \\p symbol is a variable that"] - #[doc = " resides in global or constant memory space. \\p kind can be either"] - #[doc = " ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault."] - #[doc = " Passing ::cudaMemcpyDefault is recommended, in which case the type of"] - #[doc = " transfer is inferred from the pointer values. 
However, ::cudaMemcpyDefault"] - #[doc = " is only allowed on systems that support unified virtual addressing."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param symbol - Device symbol address"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param offset - Offset from start of symbol in bytes"] - #[doc = " \\param kind - Type of transfer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidSymbol,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection,"] - #[doc = " ::cudaErrorNoKernelImageForDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = " \\note_string_api_deprecation"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpy,"] - #[doc = " ::cuMemcpyDtoH,"] - #[doc = " ::cuMemcpyDtoD"] pub fn cudaMemcpyFromSymbol( dst: *mut ::std::os::raw::c_void, symbol: *const ::std::os::raw::c_void, @@ -18574,57 +4443,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the memory area pointed to by \\p src to the"] - #[doc = " memory area pointed to by \\p dst, where \\p kind specifies the"] - #[doc = " direction of the copy, and must be one of ::cudaMemcpyHostToHost,"] - #[doc = " ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = ""] - #[doc = " The memory areas may not overlap. Calling ::cudaMemcpyAsync() with \\p dst and"] - #[doc = " \\p src pointers that do not match the direction of the copy results in an"] - #[doc = " undefined behavior."] - #[doc = ""] - #[doc = " ::cudaMemcpyAsync() is asynchronous with respect to the host, so the call"] - #[doc = " may return before the copy is complete. The copy can optionally be"] - #[doc = " associated to a stream by passing a non-zero \\p stream argument. 
If \\p kind"] - #[doc = " is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and the \\p stream is"] - #[doc = " non-zero, the copy may overlap with operations in other streams."] - #[doc = ""] - #[doc = " The device version of this function only handles device to device copies and"] - #[doc = " cannot be given local or shared pointers."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param kind - Type of transfer"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync"] - #[doc = " ::cuMemcpyAsync,"] - #[doc = " ::cuMemcpyDtoHAsync,"] - #[doc = " ::cuMemcpyHtoDAsync,"] - #[doc = " ::cuMemcpyDtoDAsync"] pub fn cudaMemcpyAsync( dst: *mut ::std::os::raw::c_void, src: *const ::std::os::raw::c_void, @@ -18634,37 +4452,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies memory between two devices asynchronously."] - #[doc = ""] - #[doc = " Copies memory from one device to memory on another device. \\p dst is the"] - #[doc = " base device pointer of the destination memory and \\p dstDevice is the"] - #[doc = " destination device. \\p src is the base device pointer of the source memory"] - #[doc = " and \\p srcDevice is the source device. 
\\p count specifies the number of bytes"] - #[doc = " to copy."] - #[doc = ""] - #[doc = " Note that this function is asynchronous with respect to the host and all work"] - #[doc = " on other devices."] - #[doc = ""] - #[doc = " \\param dst - Destination device pointer"] - #[doc = " \\param dstDevice - Destination device"] - #[doc = " \\param src - Source device pointer"] - #[doc = " \\param srcDevice - Source device"] - #[doc = " \\param count - Size of memory copy in bytes"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync,"] - #[doc = " ::cudaMemcpy3DPeerAsync,"] - #[doc = " ::cuMemcpyPeerAsync"] pub fn cudaMemcpyPeerAsync( dst: *mut ::std::os::raw::c_void, dstDevice: ::std::os::raw::c_int, @@ -18675,64 +4462,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " Copies a matrix (\\p height rows of \\p width bytes each) from the memory"] - #[doc = " area pointed to by \\p src to the memory area pointed to by \\p dst, where"] - #[doc = " \\p kind specifies the direction of the copy, and must be one of"] - #[doc = " ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = " \\p dpitch and \\p spitch are the widths in memory in bytes of the 2D arrays"] - #[doc = " pointed to by \\p dst and \\p src, including any padding added to the end of"] - #[doc = " each row. The memory areas may not overlap. \\p width must not exceed either"] - #[doc = " \\p dpitch or \\p spitch."] - #[doc = ""] - #[doc = " Calling ::cudaMemcpy2DAsync() with \\p dst and \\p src pointers that do not"] - #[doc = " match the direction of the copy results in an undefined behavior."] - #[doc = " ::cudaMemcpy2DAsync() returns an error if \\p dpitch or \\p spitch is greater"] - #[doc = " than the maximum allowed."] - #[doc = ""] - #[doc = " ::cudaMemcpy2DAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the copy is complete. The copy can optionally"] - #[doc = " be associated to a stream by passing a non-zero \\p stream argument. 
If"] - #[doc = " \\p kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and"] - #[doc = " \\p stream is non-zero, the copy may overlap with operations in other"] - #[doc = " streams."] - #[doc = ""] - #[doc = " The device version of this function only handles device to device copies and"] - #[doc = " cannot be given local or shared pointers."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param dpitch - Pitch of destination memory"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param spitch - Pitch of source memory"] - #[doc = " \\param width - Width of matrix transfer (columns in bytes)"] - #[doc = " \\param height - Height of matrix transfer (rows)"] - #[doc = " \\param kind - Type of transfer"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidPitchValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpy2DAsync"] pub fn cudaMemcpy2DAsync( dst: *mut ::std::os::raw::c_void, dpitch: usize, @@ -18745,59 +4474,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " Copies a matrix (\\p height rows of \\p width bytes each) from the memory"] - #[doc = " area pointed to by \\p src to the CUDA array \\p dst starting at the"] - #[doc = " upper left corner (\\p wOffset, \\p hOffset) where \\p kind specifies the"] - #[doc = " direction of the copy, and must be one of ::cudaMemcpyHostToHost,"] - #[doc = " ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = " \\p spitch is the width in memory in bytes of the 2D array pointed to by"] - #[doc = " \\p src, including any padding added to the end of each row. \\p wOffset +"] - #[doc = " \\p width must not exceed the width of the CUDA array \\p dst. \\p width must"] - #[doc = " not exceed \\p spitch. ::cudaMemcpy2DToArrayAsync() returns an error if"] - #[doc = " \\p spitch exceeds the maximum allowed."] - #[doc = ""] - #[doc = " ::cudaMemcpy2DToArrayAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the copy is complete. The copy can optionally"] - #[doc = " be associated to a stream by passing a non-zero \\p stream argument. 
If"] - #[doc = " \\p kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and"] - #[doc = " \\p stream is non-zero, the copy may overlap with operations in other"] - #[doc = " streams."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param wOffset - Destination starting X offset"] - #[doc = " \\param hOffset - Destination starting Y offset"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param spitch - Pitch of source memory"] - #[doc = " \\param width - Width of matrix transfer (columns in bytes)"] - #[doc = " \\param height - Height of matrix transfer (rows)"] - #[doc = " \\param kind - Type of transfer"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidPitchValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = ""] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpy2DAsync"] pub fn cudaMemcpy2DToArrayAsync( dst: cudaArray_t, wOffset: usize, @@ -18811,58 +4487,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " Copies a matrix (\\p height rows of \\p width bytes each) from the CUDA"] - #[doc = " array \\p srcArray starting at the upper left corner"] - #[doc = " (\\p wOffset, \\p hOffset) to the memory area pointed to by \\p dst, where"] - #[doc = " \\p kind specifies the direction of the copy, and must be one of"] - #[doc = " ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = " \\p dpitch is the width in memory in bytes of the 2D"] - #[doc = " array pointed to by \\p dst, including any padding added to the end of each"] - #[doc = " row. \\p wOffset + \\p width must not exceed the width of the CUDA array"] - #[doc = " \\p src. \\p width must not exceed \\p dpitch. ::cudaMemcpy2DFromArrayAsync()"] - #[doc = " returns an error if \\p dpitch exceeds the maximum allowed."] - #[doc = ""] - #[doc = " ::cudaMemcpy2DFromArrayAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the copy is complete. The copy can optionally be"] - #[doc = " associated to a stream by passing a non-zero \\p stream argument. 
If \\p kind"] - #[doc = " is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \\p stream is"] - #[doc = " non-zero, the copy may overlap with operations in other streams."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param dpitch - Pitch of destination memory"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param wOffset - Source starting X offset"] - #[doc = " \\param hOffset - Source starting Y offset"] - #[doc = " \\param width - Width of matrix transfer (columns in bytes)"] - #[doc = " \\param height - Height of matrix transfer (rows)"] - #[doc = " \\param kind - Type of transfer"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidPitchValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = ""] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpy2DAsync"] pub fn cudaMemcpy2DFromArrayAsync( dst: *mut ::std::os::raw::c_void, dpitch: usize, @@ -18876,53 +4500,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data to the given symbol on the device"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the memory area pointed to by \\p src"] - #[doc = " to the memory area pointed to by \\p offset bytes from the start of symbol"] - #[doc = " \\p symbol. The memory areas may not overlap. \\p symbol is a variable that"] - #[doc = " resides in global or constant memory space. \\p kind can be either"] - #[doc = " ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault."] - #[doc = " Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer"] - #[doc = " is inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = ""] - #[doc = " ::cudaMemcpyToSymbolAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the copy is complete. The copy can optionally"] - #[doc = " be associated to a stream by passing a non-zero \\p stream argument. 
If"] - #[doc = " \\p kind is ::cudaMemcpyHostToDevice and \\p stream is non-zero, the copy"] - #[doc = " may overlap with operations in other streams."] - #[doc = ""] - #[doc = " \\param symbol - Device symbol address"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param offset - Offset from start of symbol in bytes"] - #[doc = " \\param kind - Type of transfer"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidSymbol,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection,"] - #[doc = " ::cudaErrorNoKernelImageForDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_string_api_deprecation"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpyAsync,"] - #[doc = " ::cuMemcpyHtoDAsync,"] - #[doc = " ::cuMemcpyDtoDAsync"] pub fn cudaMemcpyToSymbolAsync( symbol: *const ::std::os::raw::c_void, src: *const ::std::os::raw::c_void, @@ -18933,53 +4510,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data from the given symbol on the device"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the memory area pointed to by \\p offset bytes"] - #[doc = " from the start of symbol \\p symbol to the memory area pointed to by \\p dst."] - #[doc = " The memory areas may not overlap. \\p symbol is a variable that resides in"] - #[doc = " global or constant memory space. \\p kind can be either"] - #[doc = " ::cudaMemcpyDeviceToHost, ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault."] - #[doc = " Passing ::cudaMemcpyDefault is recommended, in which case the type of transfer"] - #[doc = " is inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = ""] - #[doc = " ::cudaMemcpyFromSymbolAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the copy is complete. The copy can optionally be"] - #[doc = " associated to a stream by passing a non-zero \\p stream argument. 
If \\p kind"] - #[doc = " is ::cudaMemcpyDeviceToHost and \\p stream is non-zero, the copy may overlap"] - #[doc = " with operations in other streams."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param symbol - Device symbol address"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param offset - Offset from start of symbol in bytes"] - #[doc = " \\param kind - Type of transfer"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidSymbol,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection,"] - #[doc = " ::cudaErrorNoKernelImageForDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_string_api_deprecation"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync,"] - #[doc = " ::cuMemcpyAsync,"] - #[doc = " ::cuMemcpyDtoHAsync,"] - #[doc = " ::cuMemcpyDtoDAsync"] pub fn cudaMemcpyFromSymbolAsync( dst: *mut ::std::os::raw::c_void, symbol: *const ::std::os::raw::c_void, @@ -18990,30 +4520,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Initializes or sets device memory to a value"] - #[doc = ""] - #[doc = " Fills the first \\p count bytes of the memory area pointed to by \\p devPtr"] - #[doc = " with the constant byte value \\p value."] - #[doc = ""] - #[doc = " Note that this function is asynchronous with respect to the host unless"] - #[doc = " \\p devPtr refers to pinned host memory."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to device memory"] - #[doc = " \\param value - Value to set for each byte of specified memory"] - #[doc = " \\param count - Size in bytes to set"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cuMemsetD8,"] - #[doc = " ::cuMemsetD16,"] - #[doc = " ::cuMemsetD32"] pub fn cudaMemset( devPtr: *mut ::std::os::raw::c_void, value: ::std::os::raw::c_int, @@ -19021,36 +4527,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Initializes or sets device memory to a value"] - #[doc = ""] - #[doc = " Sets to the specified value \\p value a matrix (\\p height rows of \\p width"] - #[doc = " bytes each) pointed to by \\p dstPtr. \\p pitch is the width in bytes of the"] - #[doc = " 2D array pointed to by \\p dstPtr, including any padding added to the end"] - #[doc = " of each row. 
This function performs fastest when the pitch is one that has"] - #[doc = " been passed back by ::cudaMallocPitch()."] - #[doc = ""] - #[doc = " Note that this function is asynchronous with respect to the host unless"] - #[doc = " \\p devPtr refers to pinned host memory."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to 2D device memory"] - #[doc = " \\param pitch - Pitch in bytes of 2D device memory"] - #[doc = " \\param value - Value to set for each byte of specified memory"] - #[doc = " \\param width - Width of matrix set (columns in bytes)"] - #[doc = " \\param height - Height of matrix set (rows)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemset, ::cudaMemset3D, ::cudaMemsetAsync,"] - #[doc = " ::cudaMemset2DAsync, ::cudaMemset3DAsync,"] - #[doc = " ::cuMemsetD2D8,"] - #[doc = " ::cuMemsetD2D16,"] - #[doc = " ::cuMemsetD2D32"] pub fn cudaMemset2D( devPtr: *mut ::std::os::raw::c_void, pitch: usize, @@ -19060,46 +4536,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Initializes or sets device memory to a value"] - #[doc = ""] - #[doc = " Initializes each element of a 3D array to the specified value \\p value."] - #[doc = " The object to initialize is defined by \\p pitchedDevPtr. The \\p pitch field"] - #[doc = " of \\p pitchedDevPtr is the width in memory in bytes of the 3D array pointed"] - #[doc = " to by \\p pitchedDevPtr, including any padding added to the end of each row."] - #[doc = " The \\p xsize field specifies the logical width of each row in bytes, while"] - #[doc = " the \\p ysize field specifies the height of each 2D slice in rows."] - #[doc = ""] - #[doc = " The extents of the initialized region are specified as a \\p width in bytes,"] - #[doc = " a \\p height in rows, and a \\p depth in slices."] - #[doc = ""] - #[doc = " Extents with \\p width greater than or equal to the \\p xsize of"] - #[doc = " \\p pitchedDevPtr may perform significantly faster than extents narrower"] - #[doc = " than the \\p xsize. 
Secondarily, extents with \\p height equal to the"] - #[doc = " \\p ysize of \\p pitchedDevPtr will perform faster than when the \\p height is"] - #[doc = " shorter than the \\p ysize."] - #[doc = ""] - #[doc = " This function performs fastest when the \\p pitchedDevPtr has been allocated"] - #[doc = " by ::cudaMalloc3D()."] - #[doc = ""] - #[doc = " Note that this function is asynchronous with respect to the host unless"] - #[doc = " \\p pitchedDevPtr refers to pinned host memory."] - #[doc = ""] - #[doc = " \\param pitchedDevPtr - Pointer to pitched device memory"] - #[doc = " \\param value - Value to set for each byte of specified memory"] - #[doc = " \\param extent - Size parameters for where to set device memory (\\p width field in bytes)"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemset, ::cudaMemset2D,"] - #[doc = " ::cudaMemsetAsync, ::cudaMemset2DAsync, ::cudaMemset3DAsync,"] - #[doc = " ::cudaMalloc3D, ::make_cudaPitchedPtr,"] - #[doc = " ::make_cudaExtent"] pub fn cudaMemset3D( pitchedDevPtr: cudaPitchedPtr, value: ::std::os::raw::c_int, @@ -19107,38 +4543,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Initializes or sets device memory to a value"] - #[doc = ""] - #[doc = " Fills the first \\p count bytes of the memory area pointed to by \\p devPtr"] - #[doc = " with the constant byte value \\p value."] - #[doc = ""] - #[doc = " ::cudaMemsetAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the memset is complete. The operation can optionally"] - #[doc = " be associated to a stream by passing a non-zero \\p stream argument."] - #[doc = " If \\p stream is non-zero, the operation may overlap with operations in other streams."] - #[doc = ""] - #[doc = " The device version of this function only handles device to device copies and"] - #[doc = " cannot be given local or shared pointers."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to device memory"] - #[doc = " \\param value - Value to set for each byte of specified memory"] - #[doc = " \\param count - Size in bytes to set"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemset, ::cudaMemset2D, ::cudaMemset3D,"] - #[doc = " ::cudaMemset2DAsync, ::cudaMemset3DAsync,"] - #[doc = " ::cuMemsetD8Async,"] - #[doc = " ::cuMemsetD16Async,"] - #[doc = " ::cuMemsetD32Async"] pub fn cudaMemsetAsync( devPtr: *mut ::std::os::raw::c_void, value: ::std::os::raw::c_int, @@ -19147,43 +4551,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Initializes or sets device memory to a value"] - #[doc = ""] - #[doc = " Sets to the specified value \\p value a matrix (\\p height rows of \\p width"] - #[doc = " bytes each) pointed to by \\p dstPtr. \\p pitch is the width in bytes of the"] - #[doc = " 2D array pointed to by \\p dstPtr, including any padding added to the end"] - #[doc = " of each row. 
This function performs fastest when the pitch is one that has"] - #[doc = " been passed back by ::cudaMallocPitch()."] - #[doc = ""] - #[doc = " ::cudaMemset2DAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the memset is complete. The operation can optionally"] - #[doc = " be associated to a stream by passing a non-zero \\p stream argument."] - #[doc = " If \\p stream is non-zero, the operation may overlap with operations in other streams."] - #[doc = ""] - #[doc = " The device version of this function only handles device to device copies and"] - #[doc = " cannot be given local or shared pointers."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to 2D device memory"] - #[doc = " \\param pitch - Pitch in bytes of 2D device memory"] - #[doc = " \\param value - Value to set for each byte of specified memory"] - #[doc = " \\param width - Width of matrix set (columns in bytes)"] - #[doc = " \\param height - Height of matrix set (rows)"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemset, ::cudaMemset2D, ::cudaMemset3D,"] - #[doc = " ::cudaMemsetAsync, ::cudaMemset3DAsync,"] - #[doc = " ::cuMemsetD2D8Async,"] - #[doc = " ::cuMemsetD2D16Async,"] - #[doc = " ::cuMemsetD2D32Async"] pub fn cudaMemset2DAsync( devPtr: *mut ::std::os::raw::c_void, pitch: usize, @@ -19194,53 +4561,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Initializes or sets device memory to a value"] - #[doc = ""] - #[doc = " Initializes each element of a 3D array to the specified value \\p value."] - #[doc = " The object to initialize is defined by \\p pitchedDevPtr. The \\p pitch field"] - #[doc = " of \\p pitchedDevPtr is the width in memory in bytes of the 3D array pointed"] - #[doc = " to by \\p pitchedDevPtr, including any padding added to the end of each row."] - #[doc = " The \\p xsize field specifies the logical width of each row in bytes, while"] - #[doc = " the \\p ysize field specifies the height of each 2D slice in rows."] - #[doc = ""] - #[doc = " The extents of the initialized region are specified as a \\p width in bytes,"] - #[doc = " a \\p height in rows, and a \\p depth in slices."] - #[doc = ""] - #[doc = " Extents with \\p width greater than or equal to the \\p xsize of"] - #[doc = " \\p pitchedDevPtr may perform significantly faster than extents narrower"] - #[doc = " than the \\p xsize. Secondarily, extents with \\p height equal to the"] - #[doc = " \\p ysize of \\p pitchedDevPtr will perform faster than when the \\p height is"] - #[doc = " shorter than the \\p ysize."] - #[doc = ""] - #[doc = " This function performs fastest when the \\p pitchedDevPtr has been allocated"] - #[doc = " by ::cudaMalloc3D()."] - #[doc = ""] - #[doc = " ::cudaMemset3DAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the memset is complete. 
The operation can optionally"] - #[doc = " be associated to a stream by passing a non-zero \\p stream argument."] - #[doc = " If \\p stream is non-zero, the operation may overlap with operations in other streams."] - #[doc = ""] - #[doc = " The device version of this function only handles device to device copies and"] - #[doc = " cannot be given local or shared pointers."] - #[doc = ""] - #[doc = " \\param pitchedDevPtr - Pointer to pitched device memory"] - #[doc = " \\param value - Value to set for each byte of specified memory"] - #[doc = " \\param extent - Size parameters for where to set device memory (\\p width field in bytes)"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " \\notefnerr"] - #[doc = " \\note_memset"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemset, ::cudaMemset2D, ::cudaMemset3D,"] - #[doc = " ::cudaMemsetAsync, ::cudaMemset2DAsync,"] - #[doc = " ::cudaMalloc3D, ::make_cudaPitchedPtr,"] - #[doc = " ::make_cudaExtent"] pub fn cudaMemset3DAsync( pitchedDevPtr: cudaPitchedPtr, value: ::std::os::raw::c_int, @@ -19249,131 +4569,18 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Finds the address associated with a CUDA symbol"] - #[doc = ""] - #[doc = " Returns in \\p *devPtr the address of symbol \\p symbol on the device."] - #[doc = " \\p symbol is a variable that resides in global or constant memory space."] - #[doc = " If \\p symbol cannot be found, or if \\p symbol is not declared in the"] - #[doc = " global or constant memory space, \\p *devPtr is unchanged and the error"] - #[doc = " ::cudaErrorInvalidSymbol is returned."] - #[doc = ""] - #[doc = " \\param devPtr - Return device pointer associated with symbol"] - #[doc = " \\param symbol - Device symbol address"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidSymbol,"] - #[doc = " ::cudaErrorNoKernelImageForDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_string_api_deprecation"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " \\ref ::cudaGetSymbolAddress(void**, const T&) \"cudaGetSymbolAddress (C++ API)\","] - #[doc = " \\ref ::cudaGetSymbolSize(size_t*, const void*) \"cudaGetSymbolSize (C API)\","] - #[doc = " ::cuModuleGetGlobal"] pub fn cudaGetSymbolAddress( devPtr: *mut *mut ::std::os::raw::c_void, symbol: *const ::std::os::raw::c_void, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Finds the size of the object associated with a CUDA symbol"] - #[doc = ""] - #[doc = " Returns in \\p *size the size of symbol \\p symbol. \\p symbol is a variable that"] - #[doc = " resides in global or constant memory space. 
If \\p symbol cannot be found, or"] - #[doc = " if \\p symbol is not declared in global or constant memory space, \\p *size is"] - #[doc = " unchanged and the error ::cudaErrorInvalidSymbol is returned."] - #[doc = ""] - #[doc = " \\param size - Size of object associated with symbol"] - #[doc = " \\param symbol - Device symbol address"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidSymbol,"] - #[doc = " ::cudaErrorNoKernelImageForDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_string_api_deprecation"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " \\ref ::cudaGetSymbolAddress(void**, const void*) \"cudaGetSymbolAddress (C API)\","] - #[doc = " \\ref ::cudaGetSymbolSize(size_t*, const T&) \"cudaGetSymbolSize (C++ API)\","] - #[doc = " ::cuModuleGetGlobal"] pub fn cudaGetSymbolSize( size: *mut usize, symbol: *const ::std::os::raw::c_void, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Prefetches memory to the specified destination device"] - #[doc = ""] - #[doc = " Prefetches memory to the specified destination device. \\p devPtr is the"] - #[doc = " base device pointer of the memory to be prefetched and \\p dstDevice is the"] - #[doc = " destination device. \\p count specifies the number of bytes to copy. \\p stream"] - #[doc = " is the stream in which the operation is enqueued. The memory range must refer"] - #[doc = " to managed memory allocated via ::cudaMallocManaged or declared via __managed__ variables."] - #[doc = ""] - #[doc = " Passing in cudaCpuDeviceId for \\p dstDevice will prefetch the data to host memory. If"] - #[doc = " \\p dstDevice is a GPU, then the device attribute ::cudaDevAttrConcurrentManagedAccess"] - #[doc = " must be non-zero. Additionally, \\p stream must be associated with a device that has a"] - #[doc = " non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess."] - #[doc = ""] - #[doc = " The start address and end address of the memory range will be rounded down and rounded up"] - #[doc = " respectively to be aligned to CPU page size before the prefetch operation is enqueued"] - #[doc = " in the stream."] - #[doc = ""] - #[doc = " If no physical memory has been allocated for this region, then this memory region"] - #[doc = " will be populated and mapped on the destination device. If there's insufficient"] - #[doc = " memory to prefetch the desired region, the Unified Memory driver may evict pages from other"] - #[doc = " ::cudaMallocManaged allocations to host memory in order to make room. Device memory"] - #[doc = " allocated using ::cudaMalloc or ::cudaMallocArray will not be evicted."] - #[doc = ""] - #[doc = " By default, any mappings to the previous location of the migrated pages are removed and"] - #[doc = " mappings for the new location are only setup on \\p dstDevice. 
The exact behavior however"] - #[doc = " also depends on the settings applied to this memory range via ::cudaMemAdvise as described"] - #[doc = " below:"] - #[doc = ""] - #[doc = " If ::cudaMemAdviseSetReadMostly was set on any subset of this memory range,"] - #[doc = " then that subset will create a read-only copy of the pages on \\p dstDevice."] - #[doc = ""] - #[doc = " If ::cudaMemAdviseSetPreferredLocation was called on any subset of this memory"] - #[doc = " range, then the pages will be migrated to \\p dstDevice even if \\p dstDevice is not the"] - #[doc = " preferred location of any pages in the memory range."] - #[doc = ""] - #[doc = " If ::cudaMemAdviseSetAccessedBy was called on any subset of this memory range,"] - #[doc = " then mappings to those pages from all the appropriate processors are updated to"] - #[doc = " refer to the new location if establishing such a mapping is possible. Otherwise,"] - #[doc = " those mappings are cleared."] - #[doc = ""] - #[doc = " Note that this API is not required for functionality and only serves to improve performance"] - #[doc = " by allowing the application to migrate data to a suitable location before it is accessed."] - #[doc = " Memory accesses to this range are always coherent and are allowed even when the data is"] - #[doc = " actively being migrated."] - #[doc = ""] - #[doc = " Note that this function is asynchronous with respect to the host and all work"] - #[doc = " on other devices."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to be prefetched"] - #[doc = " \\param count - Size in bytes"] - #[doc = " \\param dstDevice - Destination device to prefetch to"] - #[doc = " \\param stream - Stream to enqueue prefetch operation"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync,"] - #[doc = " ::cudaMemcpy3DPeerAsync, ::cudaMemAdvise,"] - #[doc = " ::cuMemPrefetchAsync"] pub fn cudaMemPrefetchAsync( devPtr: *const ::std::os::raw::c_void, count: usize, @@ -19382,118 +4589,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Advise about the usage of a given memory range"] - #[doc = ""] - #[doc = " Advise the Unified Memory subsystem about the usage pattern for the memory range"] - #[doc = " starting at \\p devPtr with a size of \\p count bytes. The start address and end address of the memory"] - #[doc = " range will be rounded down and rounded up respectively to be aligned to CPU page size before the"] - #[doc = " advice is applied. The memory range must refer to managed memory allocated via ::cudaMallocManaged"] - #[doc = " or declared via __managed__ variables. The memory range could also refer to system-allocated pageable"] - #[doc = " memory provided it represents a valid, host-accessible region of memory and all additional constraints"] - #[doc = " imposed by \\p advice as outlined below are also satisfied. Specifying an invalid system-allocated pageable"] - #[doc = " memory range results in an error being returned."] - #[doc = ""] - #[doc = " The \\p advice parameter can take the following values:"] - #[doc = " - ::cudaMemAdviseSetReadMostly: This implies that the data is mostly going to be read"] - #[doc = " from and only occasionally written to. 
Any read accesses from any processor to this region will create a"] - #[doc = " read-only copy of at least the accessed pages in that processor's memory. Additionally, if ::cudaMemPrefetchAsync"] - #[doc = " is called on this region, it will create a read-only copy of the data on the destination processor."] - #[doc = " If any processor writes to this region, all copies of the corresponding page will be invalidated"] - #[doc = " except for the one where the write occurred. The \\p device argument is ignored for this advice."] - #[doc = " Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU"] - #[doc = " that has a non-zero value for the device attribute ::cudaDevAttrConcurrentManagedAccess."] - #[doc = " Also, if a context is created on a device that does not have the device attribute"] - #[doc = " ::cudaDevAttrConcurrentManagedAccess set, then read-duplication will not occur until"] - #[doc = " all such contexts are destroyed."] - #[doc = " If the memory region refers to valid system-allocated pageable memory, then the accessing device must"] - #[doc = " have a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccess for a read-only"] - #[doc = " copy to be created on that device. Note however that if the accessing device also has a non-zero value for the"] - #[doc = " device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables, then setting this advice"] - #[doc = " will not create a read-only copy when that device accesses this memory region."] - #[doc = ""] - #[doc = " - ::cudaMemAdviceUnsetReadMostly: Undoes the effect of ::cudaMemAdviceReadMostly and also prevents the"] - #[doc = " Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated"] - #[doc = " copies of the data will be collapsed into a single copy. The location for the collapsed"] - #[doc = " copy will be the preferred location if the page has a preferred location and one of the read-duplicated"] - #[doc = " copies was resident at that location. Otherwise, the location chosen is arbitrary."] - #[doc = ""] - #[doc = " - ::cudaMemAdviseSetPreferredLocation: This advice sets the preferred location for the"] - #[doc = " data to be the memory belonging to \\p device. Passing in cudaCpuDeviceId for \\p device sets the"] - #[doc = " preferred location as host memory. If \\p device is a GPU, then it must have a non-zero value for the"] - #[doc = " device attribute ::cudaDevAttrConcurrentManagedAccess. Setting the preferred location"] - #[doc = " does not cause data to migrate to that location immediately. Instead, it guides the migration policy"] - #[doc = " when a fault occurs on that memory region. If the data is already in its preferred location and the"] - #[doc = " faulting processor can establish a mapping without requiring the data to be migrated, then"] - #[doc = " data migration will be avoided. On the other hand, if the data is not in its preferred location"] - #[doc = " or if a direct mapping cannot be established, then it will be migrated to the processor accessing"] - #[doc = " it. It is important to note that setting the preferred location does not prevent data prefetching"] - #[doc = " done using ::cudaMemPrefetchAsync."] - #[doc = " Having a preferred location can override the page thrash detection and resolution logic in the Unified"] - #[doc = " Memory driver. 
Normally, if a page is detected to be constantly thrashing between for example host and device"] - #[doc = " memory, the page may eventually be pinned to host memory by the Unified Memory driver. But"] - #[doc = " if the preferred location is set as device memory, then the page will continue to thrash indefinitely."] - #[doc = " If ::cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the"] - #[doc = " policies associated with that advice will override the policies of this advice, unless read accesses from"] - #[doc = " \\p device will not result in a read-only copy being created on that device as outlined in description for"] - #[doc = " the advice ::cudaMemAdviseSetReadMostly."] - #[doc = " If the memory region refers to valid system-allocated pageable memory, then \\p device must have a non-zero"] - #[doc = " value for the device attribute ::cudaDevAttrPageableMemoryAccess. Additionally, if \\p device has"] - #[doc = " a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables,"] - #[doc = " then this call has no effect. Note however that this behavior may change in the future."] - #[doc = ""] - #[doc = " - ::cudaMemAdviseUnsetPreferredLocation: Undoes the effect of ::cudaMemAdviseSetPreferredLocation"] - #[doc = " and changes the preferred location to none."] - #[doc = ""] - #[doc = " - ::cudaMemAdviseSetAccessedBy: This advice implies that the data will be accessed by \\p device."] - #[doc = " Passing in ::cudaCpuDeviceId for \\p device will set the advice for the CPU. If \\p device is a GPU, then"] - #[doc = " the device attribute ::cudaDevAttrConcurrentManagedAccess must be non-zero."] - #[doc = " This advice does not cause data migration and has no impact on the location of the data per se. Instead,"] - #[doc = " it causes the data to always be mapped in the specified processor's page tables, as long as the"] - #[doc = " location of the data permits a mapping to be established. If the data gets migrated for any reason,"] - #[doc = " the mappings are updated accordingly."] - #[doc = " This advice is recommended in scenarios where data locality is not important, but avoiding faults is."] - #[doc = " Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the"] - #[doc = " data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data"] - #[doc = " over to the other GPUs is not as important because the accesses are infrequent and the overhead of"] - #[doc = " migration may be too high. But preventing faults can still help improve performance, and so having"] - #[doc = " a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated"] - #[doc = " to host memory because the CPU typically cannot access device memory directly. Any GPU that had the"] - #[doc = " ::cudaMemAdviceSetAccessedBy flag set for this data will now have its mapping updated to point to the"] - #[doc = " page in host memory."] - #[doc = " If ::cudaMemAdviseSetReadMostly is also set on this memory region or any subset of it, then the"] - #[doc = " policies associated with that advice will override the policies of this advice. 
Additionally, if the"] - #[doc = " preferred location of this memory region or any subset of it is also \\p device, then the policies"] - #[doc = " associated with ::cudaMemAdviseSetPreferredLocation will override the policies of this advice."] - #[doc = " If the memory region refers to valid system-allocated pageable memory, then \\p device must have a non-zero"] - #[doc = " value for the device attribute ::cudaDevAttrPageableMemoryAccess. Additionally, if \\p device has"] - #[doc = " a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables,"] - #[doc = " then this call has no effect."] - #[doc = ""] - #[doc = " - ::cudaMemAdviseUnsetAccessedBy: Undoes the effect of ::cudaMemAdviseSetAccessedBy. Any mappings to"] - #[doc = " the data from \\p device may be removed at any time causing accesses to result in non-fatal page faults."] - #[doc = " If the memory region refers to valid system-allocated pageable memory, then \\p device must have a non-zero"] - #[doc = " value for the device attribute ::cudaDevAttrPageableMemoryAccess. Additionally, if \\p device has"] - #[doc = " a non-zero value for the device attribute ::cudaDevAttrPageableMemoryAccessUsesHostPageTables,"] - #[doc = " then this call has no effect."] - #[doc = ""] - #[doc = " \\param devPtr - Pointer to memory to set the advice for"] - #[doc = " \\param count - Size in bytes of the memory range"] - #[doc = " \\param advice - Advice to be applied for the specified memory range"] - #[doc = " \\param device - Device to apply the advice for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpyPeer, ::cudaMemcpyAsync,"] - #[doc = " ::cudaMemcpy3DPeerAsync, ::cudaMemPrefetchAsync,"] - #[doc = " ::cuMemAdvise"] pub fn cudaMemAdvise( devPtr: *const ::std::os::raw::c_void, count: usize, @@ -19502,61 +4597,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Query an attribute of a given memory range"] - #[doc = ""] - #[doc = " Query an attribute about the memory range starting at \\p devPtr with a size of \\p count bytes. The"] - #[doc = " memory range must refer to managed memory allocated via ::cudaMallocManaged or declared via"] - #[doc = " __managed__ variables."] - #[doc = ""] - #[doc = " The \\p attribute parameter can take the following values:"] - #[doc = " - ::cudaMemRangeAttributeReadMostly: If this attribute is specified, \\p data will be interpreted"] - #[doc = " as a 32-bit integer, and \\p dataSize must be 4. The result returned will be 1 if all pages in the given"] - #[doc = " memory range have read-duplication enabled, or 0 otherwise."] - #[doc = " - ::cudaMemRangeAttributePreferredLocation: If this attribute is specified, \\p data will be"] - #[doc = " interpreted as a 32-bit integer, and \\p dataSize must be 4. The result returned will be a GPU device"] - #[doc = " id if all pages in the memory range have that GPU as their preferred location, or it will be cudaCpuDeviceId"] - #[doc = " if all pages in the memory range have the CPU as their preferred location, or it will be cudaInvalidDeviceId"] - #[doc = " if either all the pages don't have the same preferred location or some of the pages don't have a"] - #[doc = " preferred location at all. 
Note that the actual location of the pages in the memory range at the time of"] - #[doc = " the query may be different from the preferred location."] - #[doc = " - ::cudaMemRangeAttributeAccessedBy: If this attribute is specified, \\p data will be interpreted"] - #[doc = " as an array of 32-bit integers, and \\p dataSize must be a non-zero multiple of 4. The result returned"] - #[doc = " will be a list of device ids that had ::cudaMemAdviceSetAccessedBy set for that entire memory range."] - #[doc = " If any device does not have that advice set for the entire memory range, that device will not be included."] - #[doc = " If \\p data is larger than the number of devices that have that advice set for that memory range,"] - #[doc = " cudaInvalidDeviceId will be returned in all the extra space provided. For ex., if \\p dataSize is 12"] - #[doc = " (i.e. \\p data has 3 elements) and only device 0 has the advice set, then the result returned will be"] - #[doc = " { 0, cudaInvalidDeviceId, cudaInvalidDeviceId }. If \\p data is smaller than the number of devices that have"] - #[doc = " that advice set, then only as many devices will be returned as can fit in the array. There is no"] - #[doc = " guarantee on which specific devices will be returned, however."] - #[doc = " - ::cudaMemRangeAttributeLastPrefetchLocation: If this attribute is specified, \\p data will be"] - #[doc = " interpreted as a 32-bit integer, and \\p dataSize must be 4. The result returned will be the last location"] - #[doc = " to which all pages in the memory range were prefetched explicitly via ::cudaMemPrefetchAsync. This will either be"] - #[doc = " a GPU id or cudaCpuDeviceId depending on whether the last location for prefetch was a GPU or the CPU"] - #[doc = " respectively. If any page in the memory range was never explicitly prefetched or if all pages were not"] - #[doc = " prefetched to the same location, cudaInvalidDeviceId will be returned. Note that this simply returns the"] - #[doc = " last location that the applicaton requested to prefetch the memory range to. It gives no indication as to"] - #[doc = " whether the prefetch operation to that location has completed or even begun."] - #[doc = ""] - #[doc = " \\param data - A pointers to a memory location where the result"] - #[doc = " of each attribute query will be written to."] - #[doc = " \\param dataSize - Array containing the size of data"] - #[doc = " \\param attribute - The attribute to query"] - #[doc = " \\param devPtr - Start of the range to query"] - #[doc = " \\param count - Size of the range to query"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemRangeGetAttributes, ::cudaMemPrefetchAsync,"] - #[doc = " ::cudaMemAdvise,"] - #[doc = " ::cuMemRangeGetAttribute"] pub fn cudaMemRangeGetAttribute( data: *mut ::std::os::raw::c_void, dataSize: usize, @@ -19566,41 +4606,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Query attributes of a given memory range."] - #[doc = ""] - #[doc = " Query attributes of the memory range starting at \\p devPtr with a size of \\p count bytes. The"] - #[doc = " memory range must refer to managed memory allocated via ::cudaMallocManaged or declared via"] - #[doc = " __managed__ variables. 
The \\p attributes array will be interpreted to have \\p numAttributes"] - #[doc = " entries. The \\p dataSizes array will also be interpreted to have \\p numAttributes entries."] - #[doc = " The results of the query will be stored in \\p data."] - #[doc = ""] - #[doc = " The list of supported attributes are given below. Please refer to ::cudaMemRangeGetAttribute for"] - #[doc = " attribute descriptions and restrictions."] - #[doc = ""] - #[doc = " - ::cudaMemRangeAttributeReadMostly"] - #[doc = " - ::cudaMemRangeAttributePreferredLocation"] - #[doc = " - ::cudaMemRangeAttributeAccessedBy"] - #[doc = " - ::cudaMemRangeAttributeLastPrefetchLocation"] - #[doc = ""] - #[doc = " \\param data - A two-dimensional array containing pointers to memory"] - #[doc = " locations where the result of each attribute query will be written to."] - #[doc = " \\param dataSizes - Array containing the sizes of each result"] - #[doc = " \\param attributes - An array of attributes to query"] - #[doc = " (numAttributes and the number of attributes in this array should match)"] - #[doc = " \\param numAttributes - Number of attributes to query"] - #[doc = " \\param devPtr - Start of the range to query"] - #[doc = " \\param count - Size of the range to query"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemRangeGetAttribute, ::cudaMemAdvise"] - #[doc = " ::cudaMemPrefetchAsync,"] - #[doc = " ::cuMemRangeGetAttributes"] pub fn cudaMemRangeGetAttributes( data: *mut *mut ::std::os::raw::c_void, dataSizes: *mut usize, @@ -19611,45 +4616,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the memory area pointed to by \\p src to the"] - #[doc = " CUDA array \\p dst starting at the upper left corner"] - #[doc = " (\\p wOffset, \\p hOffset), where \\p kind specifies the direction"] - #[doc = " of the copy, and must be one of ::cudaMemcpyHostToHost,"] - #[doc = " ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. 
However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param wOffset - Destination starting X offset"] - #[doc = " \\param hOffset - Destination starting Y offset"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param kind - Type of transfer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpyHtoA,"] - #[doc = " ::cuMemcpyDtoA"] pub fn cudaMemcpyToArray( dst: cudaArray_t, wOffset: usize, @@ -19660,44 +4626,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the CUDA array \\p src starting at the upper"] - #[doc = " left corner (\\p wOffset, hOffset) to the memory area pointed to by \\p dst,"] - #[doc = " where \\p kind specifies the direction of the copy, and must be one of"] - #[doc = " ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. 
However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param wOffset - Source starting X offset"] - #[doc = " \\param hOffset - Source starting Y offset"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param kind - Type of transfer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_sync"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpyAtoH,"] - #[doc = " ::cuMemcpyAtoD"] pub fn cudaMemcpyFromArray( dst: *mut ::std::os::raw::c_void, src: cudaArray_const_t, @@ -19708,45 +4636,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the CUDA array \\p src starting at the upper"] - #[doc = " left corner (\\p wOffsetSrc, \\p hOffsetSrc) to the CUDA array \\p dst"] - #[doc = " starting at the upper left corner (\\p wOffsetDst, \\p hOffsetDst) where"] - #[doc = " \\p kind specifies the direction of the copy, and must be one of"] - #[doc = " ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. 
However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param wOffsetDst - Destination starting X offset"] - #[doc = " \\param hOffsetDst - Destination starting Y offset"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param wOffsetSrc - Source starting X offset"] - #[doc = " \\param hOffsetSrc - Source starting Y offset"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param kind - Type of transfer"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpyAtoA"] pub fn cudaMemcpyArrayToArray( dst: cudaArray_t, wOffsetDst: usize, @@ -19759,53 +4648,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the memory area pointed to by \\p src to the"] - #[doc = " CUDA array \\p dst starting at the upper left corner"] - #[doc = " (\\p wOffset, \\p hOffset), where \\p kind specifies the"] - #[doc = " direction of the copy, and must be one of ::cudaMemcpyHostToHost,"] - #[doc = " ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = ""] - #[doc = " ::cudaMemcpyToArrayAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the copy is complete. The copy can optionally"] - #[doc = " be associated to a stream by passing a non-zero \\p stream argument. 
If \\p"] - #[doc = " kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \\p stream"] - #[doc = " is non-zero, the copy may overlap with operations in other streams."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param wOffset - Destination starting X offset"] - #[doc = " \\param hOffset - Destination starting Y offset"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param kind - Type of transfer"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpyFromArrayAsync, ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpyHtoAAsync,"] - #[doc = " ::cuMemcpy2DAsync"] pub fn cudaMemcpyToArrayAsync( dst: cudaArray_t, wOffset: usize, @@ -19817,52 +4659,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Copies data between host and device"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Copies \\p count bytes from the CUDA array \\p src starting at the upper"] - #[doc = " left corner (\\p wOffset, hOffset) to the memory area pointed to by \\p dst,"] - #[doc = " where \\p kind specifies the direction of the copy, and must be one of"] - #[doc = " ::cudaMemcpyHostToHost, ::cudaMemcpyHostToDevice, ::cudaMemcpyDeviceToHost,"] - #[doc = " ::cudaMemcpyDeviceToDevice, or ::cudaMemcpyDefault. Passing"] - #[doc = " ::cudaMemcpyDefault is recommended, in which case the type of transfer is"] - #[doc = " inferred from the pointer values. However, ::cudaMemcpyDefault is only"] - #[doc = " allowed on systems that support unified virtual addressing."] - #[doc = ""] - #[doc = " ::cudaMemcpyFromArrayAsync() is asynchronous with respect to the host, so"] - #[doc = " the call may return before the copy is complete. The copy can optionally"] - #[doc = " be associated to a stream by passing a non-zero \\p stream argument. 
If \\p"] - #[doc = " kind is ::cudaMemcpyHostToDevice or ::cudaMemcpyDeviceToHost and \\p stream"] - #[doc = " is non-zero, the copy may overlap with operations in other streams."] - #[doc = ""] - #[doc = " \\param dst - Destination memory address"] - #[doc = " \\param src - Source memory address"] - #[doc = " \\param wOffset - Source starting X offset"] - #[doc = " \\param hOffset - Source starting Y offset"] - #[doc = " \\param count - Size in bytes to copy"] - #[doc = " \\param kind - Type of transfer"] - #[doc = " \\param stream - Stream identifier"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidMemcpyDirection"] - #[doc = " \\notefnerr"] - #[doc = " \\note_async"] - #[doc = " \\note_null_stream"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaMemcpy, ::cudaMemcpy2D, ::cudaMemcpyToArray,"] - #[doc = " ::cudaMemcpy2DToArray, ::cudaMemcpyFromArray, ::cudaMemcpy2DFromArray,"] - #[doc = " ::cudaMemcpyArrayToArray, ::cudaMemcpy2DArrayToArray, ::cudaMemcpyToSymbol,"] - #[doc = " ::cudaMemcpyFromSymbol, ::cudaMemcpyAsync, ::cudaMemcpy2DAsync,"] - #[doc = " ::cudaMemcpyToArrayAsync, ::cudaMemcpy2DToArrayAsync,"] - #[doc = " ::cudaMemcpy2DFromArrayAsync,"] - #[doc = " ::cudaMemcpyToSymbolAsync, ::cudaMemcpyFromSymbolAsync,"] - #[doc = " ::cuMemcpyAtoHAsync,"] - #[doc = " ::cuMemcpy2DAsync"] pub fn cudaMemcpyFromArrayAsync( dst: *mut ::std::os::raw::c_void, src: cudaArray_const_t, @@ -19874,104 +4670,12 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns attributes about a specified pointer"] - #[doc = ""] - #[doc = " Returns in \\p *attributes the attributes of the pointer \\p ptr."] - #[doc = " If pointer was not allocated in, mapped by or registered with context"] - #[doc = " supporting unified addressing ::cudaErrorInvalidValue is returned."] - #[doc = ""] - #[doc = " \\note In CUDA 11.0 forward passing host pointer will return ::cudaMemoryTypeUnregistered"] - #[doc = " in ::cudaPointerAttributes::type and call will return ::cudaSuccess."] - #[doc = ""] - #[doc = " The ::cudaPointerAttributes structure is defined as:"] - #[doc = " \\code"] - #[doc = "struct cudaPointerAttributes {"] - #[doc = "enum cudaMemoryType memoryType;"] - #[doc = "enum cudaMemoryType type;"] - #[doc = "int device;"] - #[doc = "void *devicePointer;"] - #[doc = "void *hostPointer;"] - #[doc = "int isManaged;"] - #[doc = "}"] - #[doc = "\\endcode"] - #[doc = " In this structure, the individual fields mean"] - #[doc = ""] - #[doc = " - \\ref ::cudaPointerAttributes::memoryType identifies the"] - #[doc = " location of the memory associated with pointer \\p ptr. It can be"] - #[doc = " ::cudaMemoryTypeHost for host memory or ::cudaMemoryTypeDevice for device"] - #[doc = " and managed memory. It has been deprecated in favour of ::cudaPointerAttributes::type."] - #[doc = ""] - #[doc = " - \\ref ::cudaPointerAttributes::type identifies type of memory. It can be"] - #[doc = " ::cudaMemoryTypeUnregistered for unregistered host memory,"] - #[doc = " ::cudaMemoryTypeHost for registered host memory, ::cudaMemoryTypeDevice for device"] - #[doc = " memory or ::cudaMemoryTypeManaged for managed memory."] - #[doc = ""] - #[doc = " - \\ref ::cudaPointerAttributes::device \"device\" is the device against which"] - #[doc = " \\p ptr was allocated. 
If \\p ptr has memory type ::cudaMemoryTypeDevice"] - #[doc = " then this identifies the device on which the memory referred to by \\p ptr"] - #[doc = " physically resides. If \\p ptr has memory type ::cudaMemoryTypeHost then this"] - #[doc = " identifies the device which was current when the allocation was made"] - #[doc = " (and if that device is deinitialized then this allocation will vanish"] - #[doc = " with that device's state)."] - #[doc = ""] - #[doc = " - \\ref ::cudaPointerAttributes::devicePointer \"devicePointer\" is"] - #[doc = " the device pointer alias through which the memory referred to by \\p ptr"] - #[doc = " may be accessed on the current device."] - #[doc = " If the memory referred to by \\p ptr cannot be accessed directly by the"] - #[doc = " current device then this is NULL."] - #[doc = ""] - #[doc = " - \\ref ::cudaPointerAttributes::hostPointer \"hostPointer\" is"] - #[doc = " the host pointer alias through which the memory referred to by \\p ptr"] - #[doc = " may be accessed on the host."] - #[doc = " If the memory referred to by \\p ptr cannot be accessed directly by the"] - #[doc = " host then this is NULL."] - #[doc = ""] - #[doc = " - \\ref ::cudaPointerAttributes::isManaged \"isManaged\" indicates if"] - #[doc = " the pointer \\p ptr points to managed memory or not. It has been deprecated"] - #[doc = " in favour of ::cudaPointerAttributes::type."] - #[doc = ""] - #[doc = " \\param attributes - Attributes for the specified pointer"] - #[doc = " \\param ptr - Pointer to get attributes for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDevice,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaGetDeviceCount, ::cudaGetDevice, ::cudaSetDevice,"] - #[doc = " ::cudaChooseDevice,"] - #[doc = " ::cuPointerGetAttributes"] pub fn cudaPointerGetAttributes( attributes: *mut cudaPointerAttributes, ptr: *const ::std::os::raw::c_void, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Queries if a device may directly access a peer device's memory."] - #[doc = ""] - #[doc = " Returns in \\p *canAccessPeer a value of 1 if device \\p device is capable of"] - #[doc = " directly accessing memory from \\p peerDevice and 0 otherwise. If direct"] - #[doc = " access of \\p peerDevice from \\p device is possible, then access may be"] - #[doc = " enabled by calling ::cudaDeviceEnablePeerAccess()."] - #[doc = ""] - #[doc = " \\param canAccessPeer - Returned access capability"] - #[doc = " \\param device - Device from which allocations on \\p peerDevice are to"] - #[doc = " be directly accessed."] - #[doc = " \\param peerDevice - Device on which the allocations to be directly accessed"] - #[doc = " by \\p device reside."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceEnablePeerAccess,"] - #[doc = " ::cudaDeviceDisablePeerAccess,"] - #[doc = " ::cuDeviceCanAccessPeer"] pub fn cudaDeviceCanAccessPeer( canAccessPeer: *mut ::std::os::raw::c_int, device: ::std::os::raw::c_int, @@ -19979,175 +4683,28 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Enables direct access to memory allocations on a peer device."] - #[doc = ""] - #[doc = " On success, all allocations from \\p peerDevice will immediately be accessible by"] - #[doc = " the current device. 
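// --- editor's illustrative sketch; not part of this patch or the generated bindings ---
// Queries the attributes of a device allocation through the cudaPointerGetAttributes
// binding above. Assumptions: cudaMalloc / cudaFree are declared elsewhere in this
// wrapper, and cudaPointerAttributes exposes the `device` / `devicePointer` fields
// described in the doc comment being removed.
unsafe fn describe_pointer() {
    let mut dev_ptr: *mut std::os::raw::c_void = std::ptr::null_mut();
    assert_eq!(cudaMalloc(&mut dev_ptr, 1024), 0); // cudaSuccess == 0

    let mut attrs: cudaPointerAttributes = std::mem::zeroed();
    let err = cudaPointerGetAttributes(&mut attrs, dev_ptr);
    if err == 0 {
        println!(
            "allocation lives on device {} (device alias {:p})",
            attrs.device, attrs.devicePointer
        );
    }
    cudaFree(dev_ptr);
}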
They will remain accessible until access is explicitly"] - #[doc = " disabled using ::cudaDeviceDisablePeerAccess() or either device is reset using"] - #[doc = " ::cudaDeviceReset()."] - #[doc = ""] - #[doc = " Note that access granted by this call is unidirectional and that in order to access"] - #[doc = " memory on the current device from \\p peerDevice, a separate symmetric call"] - #[doc = " to ::cudaDeviceEnablePeerAccess() is required."] - #[doc = ""] - #[doc = " Note that there are both device-wide and system-wide limitations per system"] - #[doc = " configuration, as noted in the CUDA Programming Guide under the section"] - #[doc = " \"Peer-to-Peer Memory Access\"."] - #[doc = ""] - #[doc = " Returns ::cudaErrorInvalidDevice if ::cudaDeviceCanAccessPeer() indicates"] - #[doc = " that the current device cannot directly access memory from \\p peerDevice."] - #[doc = ""] - #[doc = " Returns ::cudaErrorPeerAccessAlreadyEnabled if direct access of"] - #[doc = " \\p peerDevice from the current device has already been enabled."] - #[doc = ""] - #[doc = " Returns ::cudaErrorInvalidValue if \\p flags is not 0."] - #[doc = ""] - #[doc = " \\param peerDevice - Peer device to enable direct access to from the current device"] - #[doc = " \\param flags - Reserved for future use and must be set to 0"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidDevice,"] - #[doc = " ::cudaErrorPeerAccessAlreadyEnabled,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceCanAccessPeer,"] - #[doc = " ::cudaDeviceDisablePeerAccess,"] - #[doc = " ::cuCtxEnablePeerAccess"] pub fn cudaDeviceEnablePeerAccess( peerDevice: ::std::os::raw::c_int, flags: ::std::os::raw::c_uint, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Disables direct access to memory allocations on a peer device."] - #[doc = ""] - #[doc = " Returns ::cudaErrorPeerAccessNotEnabled if direct access to memory on"] - #[doc = " \\p peerDevice has not yet been enabled from the current device."] - #[doc = ""] - #[doc = " \\param peerDevice - Peer device to disable direct access to"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorPeerAccessNotEnabled,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa ::cudaDeviceCanAccessPeer,"] - #[doc = " ::cudaDeviceEnablePeerAccess,"] - #[doc = " ::cuCtxDisablePeerAccess"] pub fn cudaDeviceDisablePeerAccess( peerDevice: ::std::os::raw::c_int, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Unregisters a graphics resource for access by CUDA"] - #[doc = ""] - #[doc = " Unregisters the graphics resource \\p resource so it is not accessible by"] - #[doc = " CUDA unless registered again."] - #[doc = ""] - #[doc = " If \\p resource is invalid then ::cudaErrorInvalidResourceHandle is"] - #[doc = " returned."] - #[doc = ""] - #[doc = " \\param resource - Resource to unregister"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorUnknown"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphicsD3D9RegisterResource,"] - #[doc = " ::cudaGraphicsD3D10RegisterResource,"] - #[doc = " ::cudaGraphicsD3D11RegisterResource,"] - #[doc = " 
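// --- editor's illustrative sketch; not part of this patch or the generated bindings ---
// Enables peer access from device 0 to device 1 using the bindings above, but only
// when cudaDeviceCanAccessPeer reports that a P2P path exists. Assumes cudaSetDevice
// is available elsewhere in this wrapper. Access is one-directional, so the
// mirror-image calls are needed for the other direction.
unsafe fn enable_peer_access_0_to_1() -> bool {
    let mut can_access = 0i32;
    if cudaDeviceCanAccessPeer(&mut can_access, 0, 1) != 0 || can_access == 0 {
        return false; // no P2P path between device 0 and device 1
    }
    // Peer access is granted to the *current* device, so select device 0 first.
    if cudaSetDevice(0) != 0 {
        return false;
    }
    // `flags` is reserved for future use and must be 0.
    cudaDeviceEnablePeerAccess(1, 0) == 0
}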
::cudaGraphicsGLRegisterBuffer,"] - #[doc = " ::cudaGraphicsGLRegisterImage,"] - #[doc = " ::cuGraphicsUnregisterResource"] pub fn cudaGraphicsUnregisterResource( resource: cudaGraphicsResource_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Set usage flags for mapping a graphics resource"] - #[doc = ""] - #[doc = " Set \\p flags for mapping the graphics resource \\p resource."] - #[doc = ""] - #[doc = " Changes to \\p flags will take effect the next time \\p resource is mapped."] - #[doc = " The \\p flags argument may be any of the following:"] - #[doc = " - ::cudaGraphicsMapFlagsNone: Specifies no hints about how \\p resource will"] - #[doc = " be used. It is therefore assumed that CUDA may read from or write to \\p resource."] - #[doc = " - ::cudaGraphicsMapFlagsReadOnly: Specifies that CUDA will not write to \\p resource."] - #[doc = " - ::cudaGraphicsMapFlagsWriteDiscard: Specifies CUDA will not read from \\p resource and will"] - #[doc = " write over the entire contents of \\p resource, so none of the data"] - #[doc = " previously stored in \\p resource will be preserved."] - #[doc = ""] - #[doc = " If \\p resource is presently mapped for access by CUDA then ::cudaErrorUnknown is returned."] - #[doc = " If \\p flags is not one of the above values then ::cudaErrorInvalidValue is returned."] - #[doc = ""] - #[doc = " \\param resource - Registered resource to set flags for"] - #[doc = " \\param flags - Parameters for resource mapping"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorUnknown,"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphicsMapResources,"] - #[doc = " ::cuGraphicsResourceSetMapFlags"] pub fn cudaGraphicsResourceSetMapFlags( resource: cudaGraphicsResource_t, flags: ::std::os::raw::c_uint, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Map graphics resources for access by CUDA"] - #[doc = ""] - #[doc = " Maps the \\p count graphics resources in \\p resources for access by CUDA."] - #[doc = ""] - #[doc = " The resources in \\p resources may be accessed by CUDA until they"] - #[doc = " are unmapped. The graphics API from which \\p resources were registered"] - #[doc = " should not access any resources while they are mapped by CUDA. If an"] - #[doc = " application does so, the results are undefined."] - #[doc = ""] - #[doc = " This function provides the synchronization guarantee that any graphics calls"] - #[doc = " issued before ::cudaGraphicsMapResources() will complete before any subsequent CUDA"] - #[doc = " work issued in \\p stream begins."] - #[doc = ""] - #[doc = " If \\p resources contains any duplicate entries then ::cudaErrorInvalidResourceHandle"] - #[doc = " is returned. 
If any of \\p resources are presently mapped for access by"] - #[doc = " CUDA then ::cudaErrorUnknown is returned."] - #[doc = ""] - #[doc = " \\param count - Number of resources to map"] - #[doc = " \\param resources - Resources to map for CUDA"] - #[doc = " \\param stream - Stream for synchronization"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorUnknown"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphicsResourceGetMappedPointer,"] - #[doc = " ::cudaGraphicsSubResourceGetMappedArray,"] - #[doc = " ::cudaGraphicsUnmapResources,"] - #[doc = " ::cuGraphicsMapResources"] pub fn cudaGraphicsMapResources( count: ::std::os::raw::c_int, resources: *mut cudaGraphicsResource_t, @@ -20155,37 +4712,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Unmap graphics resources."] - #[doc = ""] - #[doc = " Unmaps the \\p count graphics resources in \\p resources."] - #[doc = ""] - #[doc = " Once unmapped, the resources in \\p resources may not be accessed by CUDA"] - #[doc = " until they are mapped again."] - #[doc = ""] - #[doc = " This function provides the synchronization guarantee that any CUDA work issued"] - #[doc = " in \\p stream before ::cudaGraphicsUnmapResources() will complete before any"] - #[doc = " subsequently issued graphics work begins."] - #[doc = ""] - #[doc = " If \\p resources contains any duplicate entries then ::cudaErrorInvalidResourceHandle"] - #[doc = " is returned. If any of \\p resources are not presently mapped for access by"] - #[doc = " CUDA then ::cudaErrorUnknown is returned."] - #[doc = ""] - #[doc = " \\param count - Number of resources to unmap"] - #[doc = " \\param resources - Resources to unmap"] - #[doc = " \\param stream - Stream for synchronization"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorUnknown"] - #[doc = " \\note_null_stream"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphicsMapResources,"] - #[doc = " ::cuGraphicsUnmapResources"] pub fn cudaGraphicsUnmapResources( count: ::std::os::raw::c_int, resources: *mut cudaGraphicsResource_t, @@ -20193,34 +4719,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Get an device pointer through which to access a mapped graphics resource."] - #[doc = ""] - #[doc = " Returns in \\p *devPtr a pointer through which the mapped graphics resource"] - #[doc = " \\p resource may be accessed."] - #[doc = " Returns in \\p *size the size of the memory in bytes which may be accessed from that pointer."] - #[doc = " The value set in \\p devPtr may change every time that \\p resource is mapped."] - #[doc = ""] - #[doc = " If \\p resource is not a buffer then it cannot be accessed via a pointer and"] - #[doc = " ::cudaErrorUnknown is returned."] - #[doc = " If \\p resource is not mapped then ::cudaErrorUnknown is returned."] - #[doc = " *"] - #[doc = " \\param devPtr - Returned pointer through which \\p resource may be accessed"] - #[doc = " \\param size - Returned size of the buffer accessible starting at \\p *devPtr"] - #[doc = " \\param resource - Mapped resource to access"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " 
::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorUnknown"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphicsMapResources,"] - #[doc = " ::cudaGraphicsSubResourceGetMappedArray,"] - #[doc = " ::cuGraphicsResourceGetMappedPointer"] pub fn cudaGraphicsResourceGetMappedPointer( devPtr: *mut *mut ::std::os::raw::c_void, size: *mut usize, @@ -20228,40 +4726,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Get an array through which to access a subresource of a mapped graphics resource."] - #[doc = ""] - #[doc = " Returns in \\p *array an array through which the subresource of the mapped"] - #[doc = " graphics resource \\p resource which corresponds to array index \\p arrayIndex"] - #[doc = " and mipmap level \\p mipLevel may be accessed. The value set in \\p array may"] - #[doc = " change every time that \\p resource is mapped."] - #[doc = ""] - #[doc = " If \\p resource is not a texture then it cannot be accessed via an array and"] - #[doc = " ::cudaErrorUnknown is returned."] - #[doc = " If \\p arrayIndex is not a valid array index for \\p resource then"] - #[doc = " ::cudaErrorInvalidValue is returned."] - #[doc = " If \\p mipLevel is not a valid mipmap level for \\p resource then"] - #[doc = " ::cudaErrorInvalidValue is returned."] - #[doc = " If \\p resource is not mapped then ::cudaErrorUnknown is returned."] - #[doc = ""] - #[doc = " \\param array - Returned array through which a subresource of \\p resource may be accessed"] - #[doc = " \\param resource - Mapped resource to access"] - #[doc = " \\param arrayIndex - Array index for array textures or cubemap face"] - #[doc = " index as defined by ::cudaGraphicsCubeFace for"] - #[doc = " cubemap textures for the subresource to access"] - #[doc = " \\param mipLevel - Mipmap level for the subresource to access"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorUnknown"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphicsResourceGetMappedPointer,"] - #[doc = " ::cuGraphicsSubResourceGetMappedArray"] pub fn cudaGraphicsSubResourceGetMappedArray( array: *mut cudaArray_t, resource: cudaGraphicsResource_t, @@ -20270,87 +4734,12 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Get a mipmapped array through which to access a mapped graphics resource."] - #[doc = ""] - #[doc = " Returns in \\p *mipmappedArray a mipmapped array through which the mapped"] - #[doc = " graphics resource \\p resource may be accessed. 
The value set in \\p mipmappedArray may"] - #[doc = " change every time that \\p resource is mapped."] - #[doc = ""] - #[doc = " If \\p resource is not a texture then it cannot be accessed via an array and"] - #[doc = " ::cudaErrorUnknown is returned."] - #[doc = " If \\p resource is not mapped then ::cudaErrorUnknown is returned."] - #[doc = ""] - #[doc = " \\param mipmappedArray - Returned mipmapped array through which \\p resource may be accessed"] - #[doc = " \\param resource - Mapped resource to access"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorUnknown"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphicsResourceGetMappedPointer,"] - #[doc = " ::cuGraphicsResourceGetMappedMipmappedArray"] pub fn cudaGraphicsResourceGetMappedMipmappedArray( mipmappedArray: *mut cudaMipmappedArray_t, resource: cudaGraphicsResource_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Binds a memory area to a texture"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Binds \\p size bytes of the memory area pointed to by \\p devPtr to the"] - #[doc = " texture reference \\p texref. \\p desc describes how the memory is interpreted"] - #[doc = " when fetching values from the texture. Any memory previously bound to"] - #[doc = " \\p texref is unbound."] - #[doc = ""] - #[doc = " Since the hardware enforces an alignment requirement on texture base"] - #[doc = " addresses,"] - #[doc = " \\ref ::cudaBindTexture(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t) \"cudaBindTexture()\""] - #[doc = " returns in \\p *offset a byte offset that"] - #[doc = " must be applied to texture fetches in order to read from the desired memory."] - #[doc = " This offset must be divided by the texel size and passed to kernels that"] - #[doc = " read from the texture so they can be applied to the ::tex1Dfetch() function."] - #[doc = " If the device memory pointer was returned from ::cudaMalloc(), the offset is"] - #[doc = " guaranteed to be 0 and NULL may be passed as the \\p offset parameter."] - #[doc = ""] - #[doc = " The total number of elements (or texels) in the linear address range"] - #[doc = " cannot exceed ::cudaDeviceProp::maxTexture1DLinear[0]."] - #[doc = " The number of elements is computed as (\\p size / elementSize),"] - #[doc = " where elementSize is determined from \\p desc."] - #[doc = ""] - #[doc = " \\param offset - Offset in bytes"] - #[doc = " \\param texref - Texture to bind"] - #[doc = " \\param devPtr - Memory area on device"] - #[doc = " \\param desc - Channel format"] - #[doc = " \\param size - Size of the memory area pointed to by devPtr"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidTexture"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) \"cudaCreateChannelDesc (C API)\","] - #[doc = " ::cudaGetChannelDesc, ::cudaGetTextureReference,"] - #[doc = " \\ref ::cudaBindTexture(size_t*, const struct texture< T, dim, readMode>&, const void*, const struct cudaChannelFormatDesc&, size_t) \"cudaBindTexture (C++ API)\","] - #[doc = " \\ref ::cudaBindTexture2D(size_t*, const struct 
textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t, size_t, size_t) \"cudaBindTexture2D (C API)\","] - #[doc = " \\ref ::cudaBindTextureToArray(const struct textureReference*, cudaArray_const_t, const struct cudaChannelFormatDesc*) \"cudaBindTextureToArray (C API)\","] - #[doc = " \\ref ::cudaUnbindTexture(const struct textureReference*) \"cudaUnbindTexture (C API)\","] - #[doc = " \\ref ::cudaGetTextureAlignmentOffset(size_t*, const struct textureReference*) \"cudaGetTextureAlignmentOffset (C API)\","] - #[doc = " ::cuTexRefSetAddress,"] - #[doc = " ::cuTexRefSetAddressMode,"] - #[doc = " ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefSetFlags,"] - #[doc = " ::cuTexRefSetBorderColor"] pub fn cudaBindTexture( offset: *mut usize, texref: *const textureReference, @@ -20360,61 +4749,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Binds a 2D memory area to a texture"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Binds the 2D memory area pointed to by \\p devPtr to the"] - #[doc = " texture reference \\p texref. The size of the area is constrained by"] - #[doc = " \\p width in texel units, \\p height in texel units, and \\p pitch in byte"] - #[doc = " units. \\p desc describes how the memory is interpreted when fetching values"] - #[doc = " from the texture. Any memory previously bound to \\p texref is unbound."] - #[doc = ""] - #[doc = " Since the hardware enforces an alignment requirement on texture base"] - #[doc = " addresses, ::cudaBindTexture2D() returns in \\p *offset a byte offset that"] - #[doc = " must be applied to texture fetches in order to read from the desired memory."] - #[doc = " This offset must be divided by the texel size and passed to kernels that"] - #[doc = " read from the texture so they can be applied to the ::tex2D() function."] - #[doc = " If the device memory pointer was returned from ::cudaMalloc(), the offset is"] - #[doc = " guaranteed to be 0 and NULL may be passed as the \\p offset parameter."] - #[doc = ""] - #[doc = " \\p width and \\p height, which are specified in elements (or texels), cannot"] - #[doc = " exceed ::cudaDeviceProp::maxTexture2DLinear[0] and ::cudaDeviceProp::maxTexture2DLinear[1]"] - #[doc = " respectively. 
\\p pitch, which is specified in bytes, cannot exceed"] - #[doc = " ::cudaDeviceProp::maxTexture2DLinear[2]."] - #[doc = ""] - #[doc = " The driver returns ::cudaErrorInvalidValue if \\p pitch is not a multiple of"] - #[doc = " ::cudaDeviceProp::texturePitchAlignment."] - #[doc = ""] - #[doc = " \\param offset - Offset in bytes"] - #[doc = " \\param texref - Texture reference to bind"] - #[doc = " \\param devPtr - 2D memory area on device"] - #[doc = " \\param desc - Channel format"] - #[doc = " \\param width - Width in texel units"] - #[doc = " \\param height - Height in texel units"] - #[doc = " \\param pitch - Pitch in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidTexture"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) \"cudaCreateChannelDesc (C API)\","] - #[doc = " ::cudaGetChannelDesc, ::cudaGetTextureReference,"] - #[doc = " \\ref ::cudaBindTexture(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t) \"cudaBindTexture (C API)\","] - #[doc = " \\ref ::cudaBindTexture2D(size_t*, const struct texture< T, dim, readMode>&, const void*, const struct cudaChannelFormatDesc&, size_t, size_t, size_t) \"cudaBindTexture2D (C++ API)\","] - #[doc = " \\ref ::cudaBindTexture2D(size_t*, const struct texture&, const void*, size_t, size_t, size_t) \"cudaBindTexture2D (C++ API, inherited channel descriptor)\","] - #[doc = " \\ref ::cudaBindTextureToArray(const struct textureReference*, cudaArray_const_t, const struct cudaChannelFormatDesc*) \"cudaBindTextureToArray (C API)\","] - #[doc = " \\ref ::cudaUnbindTexture(const struct textureReference*) \"cudaBindTextureToArray (C API)\","] - #[doc = " \\ref ::cudaGetTextureAlignmentOffset(size_t*, const struct textureReference*) \"cudaGetTextureAlignmentOffset (C API)\","] - #[doc = " ::cuTexRefSetAddress2D,"] - #[doc = " ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefSetFlags,"] - #[doc = " ::cuTexRefSetAddressMode,"] - #[doc = " ::cuTexRefSetBorderColor"] pub fn cudaBindTexture2D( offset: *mut usize, texref: *const textureReference, @@ -20426,40 +4760,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Binds an array to a texture"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Binds the CUDA array \\p array to the texture reference \\p texref."] - #[doc = " \\p desc describes how the memory is interpreted when fetching values from"] - #[doc = " the texture. 
Any CUDA array previously bound to \\p texref is unbound."] - #[doc = ""] - #[doc = " \\param texref - Texture to bind"] - #[doc = " \\param array - Memory array on device"] - #[doc = " \\param desc - Channel format"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidTexture"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) \"cudaCreateChannelDesc (C API)\","] - #[doc = " ::cudaGetChannelDesc, ::cudaGetTextureReference,"] - #[doc = " \\ref ::cudaBindTexture(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t) \"cudaBindTexture (C API)\","] - #[doc = " \\ref ::cudaBindTexture2D(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t, size_t, size_t) \"cudaBindTexture2D (C API)\","] - #[doc = " \\ref ::cudaBindTextureToArray(const struct texture< T, dim, readMode>&, cudaArray_const_t, const struct cudaChannelFormatDesc&) \"cudaBindTextureToArray (C++ API)\","] - #[doc = " \\ref ::cudaUnbindTexture(const struct textureReference*) \"cudaUnbindTexture (C API)\","] - #[doc = " \\ref ::cudaGetTextureAlignmentOffset(size_t*, const struct textureReference*) \"cudaGetTextureAlignmentOffset (C API)\","] - #[doc = " ::cuTexRefSetArray,"] - #[doc = " ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefSetFlags,"] - #[doc = " ::cuTexRefSetAddressMode,"] - #[doc = " ::cuTexRefSetFilterMode,"] - #[doc = " ::cuTexRefSetBorderColor,"] - #[doc = " ::cuTexRefSetMaxAnisotropy"] pub fn cudaBindTextureToArray( texref: *const textureReference, array: cudaArray_const_t, @@ -20467,42 +4767,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Binds a mipmapped array to a texture"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Binds the CUDA mipmapped array \\p mipmappedArray to the texture reference \\p texref."] - #[doc = " \\p desc describes how the memory is interpreted when fetching values from"] - #[doc = " the texture. 
Any CUDA mipmapped array previously bound to \\p texref is unbound."] - #[doc = ""] - #[doc = " \\param texref - Texture to bind"] - #[doc = " \\param mipmappedArray - Memory mipmapped array on device"] - #[doc = " \\param desc - Channel format"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidTexture"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) \"cudaCreateChannelDesc (C API)\","] - #[doc = " ::cudaGetChannelDesc, ::cudaGetTextureReference,"] - #[doc = " \\ref ::cudaBindTexture(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t) \"cudaBindTexture (C API)\","] - #[doc = " \\ref ::cudaBindTexture2D(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t, size_t, size_t) \"cudaBindTexture2D (C API)\","] - #[doc = " \\ref ::cudaBindTextureToArray(const struct texture< T, dim, readMode>&, cudaArray_const_t, const struct cudaChannelFormatDesc&) \"cudaBindTextureToArray (C++ API)\","] - #[doc = " \\ref ::cudaUnbindTexture(const struct textureReference*) \"cudaUnbindTexture (C API)\","] - #[doc = " \\ref ::cudaGetTextureAlignmentOffset(size_t*, const struct textureReference*) \"cudaGetTextureAlignmentOffset (C API)\","] - #[doc = " ::cuTexRefSetMipmappedArray,"] - #[doc = " ::cuTexRefSetMipmapFilterMode"] - #[doc = " ::cuTexRefSetMipmapLevelClamp,"] - #[doc = " ::cuTexRefSetMipmapLevelBias,"] - #[doc = " ::cuTexRefSetFormat,"] - #[doc = " ::cuTexRefSetFlags,"] - #[doc = " ::cuTexRefSetAddressMode,"] - #[doc = " ::cuTexRefSetBorderColor,"] - #[doc = " ::cuTexRefSetMaxAnisotropy"] pub fn cudaBindTextureToMipmappedArray( texref: *const textureReference, mipmappedArray: cudaMipmappedArray_const_t, @@ -20510,118 +4774,21 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Unbinds a texture"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Unbinds the texture bound to \\p texref. 
If \\p texref is not currently bound, no operation is performed."] - #[doc = ""] - #[doc = " \\param texref - Texture to unbind"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidTexture"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) \"cudaCreateChannelDesc (C API)\","] - #[doc = " ::cudaGetChannelDesc, ::cudaGetTextureReference,"] - #[doc = " \\ref ::cudaBindTexture(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t) \"cudaBindTexture (C API)\","] - #[doc = " \\ref ::cudaBindTexture2D(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t, size_t, size_t) \"cudaBindTexture2D (C API)\","] - #[doc = " \\ref ::cudaBindTextureToArray(const struct textureReference*, cudaArray_const_t, const struct cudaChannelFormatDesc*) \"cudaBindTextureToArray (C API)\","] - #[doc = " \\ref ::cudaUnbindTexture(const struct texture< T, dim, readMode>&) \"cudaUnbindTexture (C++ API)\","] - #[doc = " \\ref ::cudaGetTextureAlignmentOffset(size_t*, const struct textureReference*) \"cudaGetTextureAlignmentOffset (C API)\""] pub fn cudaUnbindTexture(texref: *const textureReference) -> cudaError_t; } extern "C" { - #[doc = " \\brief Get the alignment offset of a texture"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p *offset the offset that was returned when texture reference"] - #[doc = " \\p texref was bound."] - #[doc = ""] - #[doc = " \\param offset - Offset of texture reference in bytes"] - #[doc = " \\param texref - Texture to get offset of"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidTexture,"] - #[doc = " ::cudaErrorInvalidTextureBinding"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) \"cudaCreateChannelDesc (C API)\","] - #[doc = " ::cudaGetChannelDesc, ::cudaGetTextureReference,"] - #[doc = " \\ref ::cudaBindTexture(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t) \"cudaBindTexture (C API)\","] - #[doc = " \\ref ::cudaBindTexture2D(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t, size_t, size_t) \"cudaBindTexture2D (C API)\","] - #[doc = " \\ref ::cudaBindTextureToArray(const struct textureReference*, cudaArray_const_t, const struct cudaChannelFormatDesc*) \"cudaBindTextureToArray (C API)\","] - #[doc = " \\ref ::cudaUnbindTexture(const struct textureReference*) \"cudaUnbindTexture (C API)\","] - #[doc = " \\ref ::cudaGetTextureAlignmentOffset(size_t*, const struct texture< T, dim, readMode>&) \"cudaGetTextureAlignmentOffset (C++ API)\""] pub fn cudaGetTextureAlignmentOffset( offset: *mut usize, texref: *const textureReference, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Get the texture reference associated with a symbol"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p *texref the structure associated to the texture reference"] - #[doc = " defined by symbol \\p symbol."] - #[doc = ""] - #[doc = " \\param texref - Texture reference associated with symbol"] - #[doc = " \\param symbol - Texture to get reference for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " 
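// --- editor's illustrative sketch; not part of this patch or the generated bindings ---
// Binds a CUDA array to a (deprecated) texture reference for the duration of some
// device work and unbinds it afterwards, using the bindings above. Obtaining the
// `textureReference` itself (via cudaGetTextureReference and a module texture symbol)
// is outside this sketch and is assumed to happen elsewhere.
unsafe fn bind_array_temporarily(
    texref: *const textureReference,
    array: cudaArray_const_t,
    desc: *const cudaChannelFormatDesc,
) -> cudaError_t {
    let err = cudaBindTextureToArray(texref, array, desc);
    if err != 0 {
        return err; // cudaSuccess == 0
    }
    // ... launch kernels that read through the texture reference here ...
    cudaUnbindTexture(texref)
}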
::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidTexture"] - #[doc = " \\notefnerr"] - #[doc = " \\note_string_api_deprecation_50"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) \"cudaCreateChannelDesc (C API)\","] - #[doc = " ::cudaGetChannelDesc,"] - #[doc = " \\ref ::cudaGetTextureAlignmentOffset(size_t*, const struct textureReference*) \"cudaGetTextureAlignmentOffset (C API)\","] - #[doc = " \\ref ::cudaBindTexture(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t) \"cudaBindTexture (C API)\","] - #[doc = " \\ref ::cudaBindTexture2D(size_t*, const struct textureReference*, const void*, const struct cudaChannelFormatDesc*, size_t, size_t, size_t) \"cudaBindTexture2D (C API)\","] - #[doc = " \\ref ::cudaBindTextureToArray(const struct textureReference*, cudaArray_const_t, const struct cudaChannelFormatDesc*) \"cudaBindTextureToArray (C API)\","] - #[doc = " \\ref ::cudaUnbindTexture(const struct textureReference*) \"cudaUnbindTexture (C API)\","] - #[doc = " ::cuModuleGetTexRef"] pub fn cudaGetTextureReference( texref: *mut *const textureReference, symbol: *const ::std::os::raw::c_void, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Binds an array to a surface"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Binds the CUDA array \\p array to the surface reference \\p surfref."] - #[doc = " \\p desc describes how the memory is interpreted when fetching values from"] - #[doc = " the surface. Any CUDA array previously bound to \\p surfref is unbound."] - #[doc = ""] - #[doc = " \\param surfref - Surface to bind"] - #[doc = " \\param array - Memory array on device"] - #[doc = " \\param desc - Channel format"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidSurface"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaBindSurfaceToArray(const struct surface< T, dim>&, cudaArray_const_t, const struct cudaChannelFormatDesc&) \"cudaBindSurfaceToArray (C++ API)\","] - #[doc = " \\ref ::cudaBindSurfaceToArray(const struct surface< T, dim>&, cudaArray_const_t) \"cudaBindSurfaceToArray (C++ API, inherited channel descriptor)\","] - #[doc = " ::cudaGetSurfaceReference,"] - #[doc = " ::cuSurfRefSetArray"] pub fn cudaBindSurfaceToArray( surfref: *const surfaceReference, array: cudaArray_const_t, @@ -20629,81 +4796,18 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Get the surface reference associated with a symbol"] - #[doc = ""] - #[doc = " \\deprecated"] - #[doc = ""] - #[doc = " Returns in \\p *surfref the structure associated to the surface reference"] - #[doc = " defined by symbol \\p symbol."] - #[doc = ""] - #[doc = " \\param surfref - Surface reference associated with symbol"] - #[doc = " \\param symbol - Surface to get reference for"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidSurface"] - #[doc = " \\notefnerr"] - #[doc = " \\note_string_api_deprecation_50"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " \\ref ::cudaBindSurfaceToArray(const struct surfaceReference*, cudaArray_const_t, const struct cudaChannelFormatDesc*) \"cudaBindSurfaceToArray (C API)\","] - #[doc = " ::cuModuleGetSurfRef"] pub fn 
cudaGetSurfaceReference( surfref: *mut *const surfaceReference, symbol: *const ::std::os::raw::c_void, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Get the channel descriptor of an array"] - #[doc = ""] - #[doc = " Returns in \\p *desc the channel descriptor of the CUDA array \\p array."] - #[doc = ""] - #[doc = " \\param desc - Channel format"] - #[doc = " \\param array - Memory array on device"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaCreateChannelDesc(int, int, int, int, cudaChannelFormatKind) \"cudaCreateChannelDesc (C API)\","] - #[doc = " ::cudaCreateTextureObject, ::cudaCreateSurfaceObject"] pub fn cudaGetChannelDesc( desc: *mut cudaChannelFormatDesc, array: cudaArray_const_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a channel descriptor using the specified format"] - #[doc = ""] - #[doc = " Returns a channel descriptor with format \\p f and number of bits of each"] - #[doc = " component \\p x, \\p y, \\p z, and \\p w. The ::cudaChannelFormatDesc is"] - #[doc = " defined as:"] - #[doc = " \\code"] - #[doc = "struct cudaChannelFormatDesc {"] - #[doc = "int x, y, z, w;"] - #[doc = "enum cudaChannelFormatKind f;"] - #[doc = "};"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " where ::cudaChannelFormatKind is one of ::cudaChannelFormatKindSigned,"] - #[doc = " ::cudaChannelFormatKindUnsigned, or ::cudaChannelFormatKindFloat."] - #[doc = ""] - #[doc = " \\param x - X component"] - #[doc = " \\param y - Y component"] - #[doc = " \\param z - Z component"] - #[doc = " \\param w - W component"] - #[doc = " \\param f - Channel format"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " Channel descriptor with format \\p f"] - #[doc = ""] - #[doc = " \\sa \\ref ::cudaCreateChannelDesc(void) \"cudaCreateChannelDesc (C++ API)\","] - #[doc = " ::cudaGetChannelDesc, ::cudaCreateTextureObject, ::cudaCreateSurfaceObject"] pub fn cudaCreateChannelDesc( x: ::std::os::raw::c_int, y: ::std::os::raw::c_int, @@ -20713,216 +4817,6 @@ extern "C" { ) -> cudaChannelFormatDesc; } extern "C" { - #[doc = " \\brief Creates a texture object"] - #[doc = ""] - #[doc = " Creates a texture object and returns it in \\p pTexObject. \\p pResDesc describes"] - #[doc = " the data to texture from. \\p pTexDesc describes how the data should be sampled."] - #[doc = " \\p pResViewDesc is an optional argument that specifies an alternate format for"] - #[doc = " the data described by \\p pResDesc, and also describes the subresource region"] - #[doc = " to restrict access to when texturing. 
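// --- editor's illustrative sketch; not part of this patch or the generated bindings ---
// Builds an RGBA8 channel descriptor with cudaCreateChannelDesc and reads back the
// descriptor of an existing array with cudaGetChannelDesc, as bound above. The
// cudaChannelFormatKind_* constant name is an assumption following the
// EnumName_Constant pattern bindgen uses throughout this file.
unsafe fn channel_descriptors(array: cudaArray_const_t) -> cudaChannelFormatDesc {
    // 8 bits per component, 4 unsigned components (e.g. RGBA8 texture data).
    let _rgba8 = cudaCreateChannelDesc(
        8, 8, 8, 8,
        cudaChannelFormatKind_cudaChannelFormatKindUnsigned,
    );

    // Descriptor of whatever format `array` was actually created with.
    let mut existing: cudaChannelFormatDesc = std::mem::zeroed();
    assert_eq!(cudaGetChannelDesc(&mut existing, array), 0); // cudaSuccess == 0
    existing
}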
\\p pResViewDesc can only be specified if"] - #[doc = " the type of resource is a CUDA array or a CUDA mipmapped array."] - #[doc = ""] - #[doc = " Texture objects are only supported on devices of compute capability 3.0 or higher."] - #[doc = " Additionally, a texture object is an opaque value, and, as such, should only be"] - #[doc = " accessed through CUDA API calls."] - #[doc = ""] - #[doc = " The ::cudaResourceDesc structure is defined as:"] - #[doc = " \\code"] - #[doc = "struct cudaResourceDesc {"] - #[doc = "enum cudaResourceType resType;"] - #[doc = ""] - #[doc = "union {"] - #[doc = "struct {"] - #[doc = "cudaArray_t array;"] - #[doc = "} array;"] - #[doc = "struct {"] - #[doc = "cudaMipmappedArray_t mipmap;"] - #[doc = "} mipmap;"] - #[doc = "struct {"] - #[doc = "void *devPtr;"] - #[doc = "struct cudaChannelFormatDesc desc;"] - #[doc = "size_t sizeInBytes;"] - #[doc = "} linear;"] - #[doc = "struct {"] - #[doc = "void *devPtr;"] - #[doc = "struct cudaChannelFormatDesc desc;"] - #[doc = "size_t width;"] - #[doc = "size_t height;"] - #[doc = "size_t pitchInBytes;"] - #[doc = "} pitch2D;"] - #[doc = "} res;"] - #[doc = "};"] - #[doc = " \\endcode"] - #[doc = " where:"] - #[doc = " - ::cudaResourceDesc::resType specifies the type of resource to texture from."] - #[doc = " CUresourceType is defined as:"] - #[doc = " \\code"] - #[doc = "enum cudaResourceType {"] - #[doc = "cudaResourceTypeArray = 0x00,"] - #[doc = "cudaResourceTypeMipmappedArray = 0x01,"] - #[doc = "cudaResourceTypeLinear = 0x02,"] - #[doc = "cudaResourceTypePitch2D = 0x03"] - #[doc = "};"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " \\par"] - #[doc = " If ::cudaResourceDesc::resType is set to ::cudaResourceTypeArray, ::cudaResourceDesc::res::array::array"] - #[doc = " must be set to a valid CUDA array handle."] - #[doc = ""] - #[doc = " \\par"] - #[doc = " If ::cudaResourceDesc::resType is set to ::cudaResourceTypeMipmappedArray, ::cudaResourceDesc::res::mipmap::mipmap"] - #[doc = " must be set to a valid CUDA mipmapped array handle and ::cudaTextureDesc::normalizedCoords must be set to true."] - #[doc = ""] - #[doc = " \\par"] - #[doc = " If ::cudaResourceDesc::resType is set to ::cudaResourceTypeLinear, ::cudaResourceDesc::res::linear::devPtr"] - #[doc = " must be set to a valid device pointer, that is aligned to ::cudaDeviceProp::textureAlignment."] - #[doc = " ::cudaResourceDesc::res::linear::desc describes the format and the number of components per array element. ::cudaResourceDesc::res::linear::sizeInBytes"] - #[doc = " specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed"] - #[doc = " ::cudaDeviceProp::maxTexture1DLinear. The number of elements is computed as (sizeInBytes / sizeof(desc))."] - #[doc = ""] - #[doc = " \\par"] - #[doc = " If ::cudaResourceDesc::resType is set to ::cudaResourceTypePitch2D, ::cudaResourceDesc::res::pitch2D::devPtr"] - #[doc = " must be set to a valid device pointer, that is aligned to ::cudaDeviceProp::textureAlignment."] - #[doc = " ::cudaResourceDesc::res::pitch2D::desc describes the format and the number of components per array element. 
::cudaResourceDesc::res::pitch2D::width"] - #[doc = " and ::cudaResourceDesc::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed"] - #[doc = " ::cudaDeviceProp::maxTexture2DLinear[0] and ::cudaDeviceProp::maxTexture2DLinear[1] respectively."] - #[doc = " ::cudaResourceDesc::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to"] - #[doc = " ::cudaDeviceProp::texturePitchAlignment. Pitch cannot exceed ::cudaDeviceProp::maxTexture2DLinear[2]."] - #[doc = ""] - #[doc = ""] - #[doc = " The ::cudaTextureDesc struct is defined as"] - #[doc = " \\code"] - #[doc = "struct cudaTextureDesc {"] - #[doc = "enum cudaTextureAddressMode addressMode[3];"] - #[doc = "enum cudaTextureFilterMode filterMode;"] - #[doc = "enum cudaTextureReadMode readMode;"] - #[doc = "int sRGB;"] - #[doc = "float borderColor[4];"] - #[doc = "int normalizedCoords;"] - #[doc = "unsigned int maxAnisotropy;"] - #[doc = "enum cudaTextureFilterMode mipmapFilterMode;"] - #[doc = "float mipmapLevelBias;"] - #[doc = "float minMipmapLevelClamp;"] - #[doc = "float maxMipmapLevelClamp;"] - #[doc = "};"] - #[doc = " \\endcode"] - #[doc = " where"] - #[doc = " - ::cudaTextureDesc::addressMode specifies the addressing mode for each dimension of the texture data. ::cudaTextureAddressMode is defined as:"] - #[doc = " \\code"] - #[doc = "enum cudaTextureAddressMode {"] - #[doc = "cudaAddressModeWrap = 0,"] - #[doc = "cudaAddressModeClamp = 1,"] - #[doc = "cudaAddressModeMirror = 2,"] - #[doc = "cudaAddressModeBorder = 3"] - #[doc = "};"] - #[doc = " \\endcode"] - #[doc = " This is ignored if ::cudaResourceDesc::resType is ::cudaResourceTypeLinear. Also, if ::cudaTextureDesc::normalizedCoords"] - #[doc = " is set to zero, ::cudaAddressModeWrap and ::cudaAddressModeMirror won't be supported and will be switched to ::cudaAddressModeClamp."] - #[doc = ""] - #[doc = " - ::cudaTextureDesc::filterMode specifies the filtering mode to be used when fetching from the texture. ::cudaTextureFilterMode is defined as:"] - #[doc = " \\code"] - #[doc = "enum cudaTextureFilterMode {"] - #[doc = "cudaFilterModePoint = 0,"] - #[doc = "cudaFilterModeLinear = 1"] - #[doc = "};"] - #[doc = " \\endcode"] - #[doc = " This is ignored if ::cudaResourceDesc::resType is ::cudaResourceTypeLinear."] - #[doc = ""] - #[doc = " - ::cudaTextureDesc::readMode specifies whether integer data should be converted to floating point or not. ::cudaTextureReadMode is defined as:"] - #[doc = " \\code"] - #[doc = "enum cudaTextureReadMode {"] - #[doc = "cudaReadModeElementType = 0,"] - #[doc = "cudaReadModeNormalizedFloat = 1"] - #[doc = "};"] - #[doc = " \\endcode"] - #[doc = " Note that this applies only to 8-bit and 16-bit integer formats. 32-bit integer format would not be promoted, regardless of"] - #[doc = " whether or not this ::cudaTextureDesc::readMode is set ::cudaReadModeNormalizedFloat is specified."] - #[doc = ""] - #[doc = " - ::cudaTextureDesc::sRGB specifies whether sRGB to linear conversion should be performed during texture fetch."] - #[doc = ""] - #[doc = " - ::cudaTextureDesc::borderColor specifies the float values of color. 
where:"] - #[doc = " ::cudaTextureDesc::borderColor[0] contains value of 'R',"] - #[doc = " ::cudaTextureDesc::borderColor[1] contains value of 'G',"] - #[doc = " ::cudaTextureDesc::borderColor[2] contains value of 'B',"] - #[doc = " ::cudaTextureDesc::borderColor[3] contains value of 'A'"] - #[doc = " Note that application using integer border color values will need to these values to float."] - #[doc = " The values are set only when the addressing mode specified by ::cudaTextureDesc::addressMode is cudaAddressModeBorder."] - #[doc = ""] - #[doc = " - ::cudaTextureDesc::normalizedCoords specifies whether the texture coordinates will be normalized or not."] - #[doc = ""] - #[doc = " - ::cudaTextureDesc::maxAnisotropy specifies the maximum anistropy ratio to be used when doing anisotropic filtering. This value will be"] - #[doc = " clamped to the range [1,16]."] - #[doc = ""] - #[doc = " - ::cudaTextureDesc::mipmapFilterMode specifies the filter mode when the calculated mipmap level lies between two defined mipmap levels."] - #[doc = ""] - #[doc = " - ::cudaTextureDesc::mipmapLevelBias specifies the offset to be applied to the calculated mipmap level."] - #[doc = ""] - #[doc = " - ::cudaTextureDesc::minMipmapLevelClamp specifies the lower end of the mipmap level range to clamp access to."] - #[doc = ""] - #[doc = " - ::cudaTextureDesc::maxMipmapLevelClamp specifies the upper end of the mipmap level range to clamp access to."] - #[doc = ""] - #[doc = ""] - #[doc = " The ::cudaResourceViewDesc struct is defined as"] - #[doc = " \\code"] - #[doc = "struct cudaResourceViewDesc {"] - #[doc = "enum cudaResourceViewFormat format;"] - #[doc = "size_t width;"] - #[doc = "size_t height;"] - #[doc = "size_t depth;"] - #[doc = "unsigned int firstMipmapLevel;"] - #[doc = "unsigned int lastMipmapLevel;"] - #[doc = "unsigned int firstLayer;"] - #[doc = "unsigned int lastLayer;"] - #[doc = "};"] - #[doc = " \\endcode"] - #[doc = " where:"] - #[doc = " - ::cudaResourceViewDesc::format specifies how the data contained in the CUDA array or CUDA mipmapped array should"] - #[doc = " be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block"] - #[doc = " compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a 32-bit unsigned integer format"] - #[doc = " with 2 or 4 channels, depending on the block compressed format. For ex., BC1 and BC4 require the underlying CUDA array to have"] - #[doc = " a 32-bit unsigned int with 2 channels. The other BC formats require the underlying resource to have the same 32-bit unsigned int"] - #[doc = " format but with 4 channels."] - #[doc = ""] - #[doc = " - ::cudaResourceViewDesc::width specifies the new width of the texture data. If the resource view format is a block"] - #[doc = " compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats,"] - #[doc = " this value has to be equal to that of the original resource."] - #[doc = ""] - #[doc = " - ::cudaResourceViewDesc::height specifies the new height of the texture data. If the resource view format is a block"] - #[doc = " compressed format, this value has to be 4 times the original height of the resource. For non block compressed formats,"] - #[doc = " this value has to be equal to that of the original resource."] - #[doc = ""] - #[doc = " - ::cudaResourceViewDesc::depth specifies the new depth of the texture data. 
This value has to be equal to that of the"] - #[doc = " original resource."] - #[doc = ""] - #[doc = " - ::cudaResourceViewDesc::firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero."] - #[doc = " For non-mipmapped resources, this value has to be zero.::cudaTextureDesc::minMipmapLevelClamp and ::cudaTextureDesc::maxMipmapLevelClamp"] - #[doc = " will be relative to this value. For ex., if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified,"] - #[doc = " then the actual minimum mipmap level clamp will be 3.2."] - #[doc = ""] - #[doc = " - ::cudaResourceViewDesc::lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value"] - #[doc = " has to be zero."] - #[doc = ""] - #[doc = " - ::cudaResourceViewDesc::firstLayer specifies the first layer index for layered textures. This will be the new layer zero."] - #[doc = " For non-layered resources, this value has to be zero."] - #[doc = ""] - #[doc = " - ::cudaResourceViewDesc::lastLayer specifies the last layer index for layered textures. For non-layered resources,"] - #[doc = " this value has to be zero."] - #[doc = ""] - #[doc = ""] - #[doc = " \\param pTexObject - Texture object to create"] - #[doc = " \\param pResDesc - Resource descriptor"] - #[doc = " \\param pTexDesc - Texture descriptor"] - #[doc = " \\param pResViewDesc - Resource view descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaDestroyTextureObject,"] - #[doc = " ::cuTexObjectCreate"] pub fn cudaCreateTextureObject( pTexObject: *mut cudaTextureObject_t, pResDesc: *const cudaResourceDesc, @@ -20931,346 +4825,62 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Destroys a texture object"] - #[doc = ""] - #[doc = " Destroys the texture object specified by \\p texObject."] - #[doc = ""] - #[doc = " \\param texObject - Texture object to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaCreateTextureObject,"] - #[doc = " ::cuTexObjectDestroy"] pub fn cudaDestroyTextureObject( texObject: cudaTextureObject_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a texture object's resource descriptor"] - #[doc = ""] - #[doc = " Returns the resource descriptor for the texture object specified by \\p texObject."] - #[doc = ""] - #[doc = " \\param pResDesc - Resource descriptor"] - #[doc = " \\param texObject - Texture object"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaCreateTextureObject,"] - #[doc = " ::cuTexObjectGetResourceDesc"] pub fn cudaGetTextureObjectResourceDesc( pResDesc: *mut cudaResourceDesc, texObject: cudaTextureObject_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a texture object's texture descriptor"] - #[doc = ""] - #[doc = " Returns the texture descriptor for the texture object specified by \\p texObject."] - #[doc = ""] - #[doc = " \\param pTexDesc - Texture descriptor"] - #[doc = " \\param texObject - Texture object"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " 
::cudaErrorInvalidValue"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaCreateTextureObject,"] - #[doc = " ::cuTexObjectGetTextureDesc"] pub fn cudaGetTextureObjectTextureDesc( pTexDesc: *mut cudaTextureDesc, texObject: cudaTextureObject_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a texture object's resource view descriptor"] - #[doc = ""] - #[doc = " Returns the resource view descriptor for the texture object specified by \\p texObject."] - #[doc = " If no resource view was specified, ::cudaErrorInvalidValue is returned."] - #[doc = ""] - #[doc = " \\param pResViewDesc - Resource view descriptor"] - #[doc = " \\param texObject - Texture object"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaCreateTextureObject,"] - #[doc = " ::cuTexObjectGetResourceViewDesc"] pub fn cudaGetTextureObjectResourceViewDesc( pResViewDesc: *mut cudaResourceViewDesc, texObject: cudaTextureObject_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates a surface object"] - #[doc = ""] - #[doc = " Creates a surface object and returns it in \\p pSurfObject. \\p pResDesc describes"] - #[doc = " the data to perform surface load/stores on. ::cudaResourceDesc::resType must be"] - #[doc = " ::cudaResourceTypeArray and ::cudaResourceDesc::res::array::array"] - #[doc = " must be set to a valid CUDA array handle."] - #[doc = ""] - #[doc = " Surface objects are only supported on devices of compute capability 3.0 or higher."] - #[doc = " Additionally, a surface object is an opaque value, and, as such, should only be"] - #[doc = " accessed through CUDA API calls."] - #[doc = ""] - #[doc = " \\param pSurfObject - Surface object to create"] - #[doc = " \\param pResDesc - Resource descriptor"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidChannelDescriptor,"] - #[doc = " ::cudaErrorInvalidResourceHandle"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaDestroySurfaceObject,"] - #[doc = " ::cuSurfObjectCreate"] pub fn cudaCreateSurfaceObject( pSurfObject: *mut cudaSurfaceObject_t, pResDesc: *const cudaResourceDesc, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Destroys a surface object"] - #[doc = ""] - #[doc = " Destroys the surface object specified by \\p surfObject."] - #[doc = ""] - #[doc = " \\param surfObject - Surface object to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaCreateSurfaceObject,"] - #[doc = " ::cuSurfObjectDestroy"] pub fn cudaDestroySurfaceObject( surfObject: cudaSurfaceObject_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a surface object's resource descriptor"] - #[doc = " Returns the resource descriptor for the surface object specified by \\p surfObject."] - #[doc = ""] - #[doc = " \\param pResDesc - Resource descriptor"] - #[doc = " \\param surfObject - Surface object"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaCreateSurfaceObject,"] 
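// Illustrative sketch (editorial, not part of the generated bindings or the patch):
// reading back the descriptors of an existing texture object through the query
// functions declared above. `tex` is assumed to have been created earlier with
// cudaCreateTextureObject; the descriptor structs are zero-initialised here only so
// the calls have somewhere to write, and a real caller would check the returned
// cudaError_t against cudaSuccess.
pub unsafe fn describe_texture(
    tex: cudaTextureObject_t,
) -> (cudaResourceDesc, cudaTextureDesc) {
    let mut res_desc: cudaResourceDesc = ::std::mem::zeroed();
    let mut tex_desc: cudaTextureDesc = ::std::mem::zeroed();
    let _ = cudaGetTextureObjectResourceDesc(&mut res_desc, tex);
    let _ = cudaGetTextureObjectTextureDesc(&mut tex_desc, tex);
    (res_desc, tex_desc)
}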
- #[doc = " ::cuSurfObjectGetResourceDesc"] pub fn cudaGetSurfaceObjectResourceDesc( pResDesc: *mut cudaResourceDesc, surfObject: cudaSurfaceObject_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns the latest version of CUDA supported by the driver"] - #[doc = ""] - #[doc = " Returns in \\p *driverVersion the latest version of CUDA supported by"] - #[doc = " the driver. The version is returned as (1000 × major + 10 × minor)."] - #[doc = " For example, CUDA 9.2 would be represented by 9020. If no driver is installed,"] - #[doc = " then 0 is returned as the driver version."] - #[doc = ""] - #[doc = " This function automatically returns ::cudaErrorInvalidValue"] - #[doc = " if \\p driverVersion is NULL."] - #[doc = ""] - #[doc = " \\param driverVersion - Returns the CUDA driver version."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaRuntimeGetVersion,"] - #[doc = " ::cuDriverGetVersion"] pub fn cudaDriverGetVersion( driverVersion: *mut ::std::os::raw::c_int, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns the CUDA Runtime version"] - #[doc = ""] - #[doc = " Returns in \\p *runtimeVersion the version number of the current CUDA"] - #[doc = " Runtime instance. The version is returned as"] - #[doc = " (1000 × major + 10 × minor). For example,"] - #[doc = " CUDA 9.2 would be represented by 9020."] - #[doc = ""] - #[doc = " This function automatically returns ::cudaErrorInvalidValue if"] - #[doc = " the \\p runtimeVersion argument is NULL."] - #[doc = ""] - #[doc = " \\param runtimeVersion - Returns the CUDA Runtime version."] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaDriverGetVersion,"] - #[doc = " ::cuDriverGetVersion"] pub fn cudaRuntimeGetVersion( runtimeVersion: *mut ::std::os::raw::c_int, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates a graph"] - #[doc = ""] - #[doc = " Creates an empty graph, which is returned via \\p pGraph."] - #[doc = ""] - #[doc = " \\param pGraph - Returns newly created graph"] - #[doc = " \\param flags - Graph creation flags, must be 0"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphAddChildGraphNode,"] - #[doc = " ::cudaGraphAddEmptyNode,"] - #[doc = " ::cudaGraphAddKernelNode,"] - #[doc = " ::cudaGraphAddHostNode,"] - #[doc = " ::cudaGraphAddMemcpyNode,"] - #[doc = " ::cudaGraphAddMemsetNode,"] - #[doc = " ::cudaGraphInstantiate,"] - #[doc = " ::cudaGraphDestroy,"] - #[doc = " ::cudaGraphGetNodes,"] - #[doc = " ::cudaGraphGetRootNodes,"] - #[doc = " ::cudaGraphGetEdges,"] - #[doc = " ::cudaGraphClone"] pub fn cudaGraphCreate( pGraph: *mut cudaGraph_t, flags: ::std::os::raw::c_uint, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates a kernel execution node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new kernel execution node and adds it to \\p graph with \\p numDependencies"] - #[doc = " dependencies specified via \\p pDependencies and arguments specified in \\p 
pNodeParams."] - #[doc = " It is possible for \\p numDependencies to be 0, in which case the node will be placed"] - #[doc = " at the root of the graph. \\p pDependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p pGraphNode."] - #[doc = ""] - #[doc = " The cudaKernelNodeParams structure is defined as:"] - #[doc = ""] - #[doc = " \\code"] - #[doc = " struct cudaKernelNodeParams"] - #[doc = " {"] - #[doc = " void* func;"] - #[doc = " dim3 gridDim;"] - #[doc = " dim3 blockDim;"] - #[doc = " unsigned int sharedMemBytes;"] - #[doc = " void **kernelParams;"] - #[doc = " void **extra;"] - #[doc = " };"] - #[doc = " \\endcode"] - #[doc = ""] - #[doc = " When the graph is launched, the node will invoke kernel \\p func on a (\\p gridDim.x x"] - #[doc = " \\p gridDim.y x \\p gridDim.z) grid of blocks. Each block contains"] - #[doc = " (\\p blockDim.x x \\p blockDim.y x \\p blockDim.z) threads."] - #[doc = ""] - #[doc = " \\p sharedMem sets the amount of dynamic shared memory that will be"] - #[doc = " available to each thread block."] - #[doc = ""] - #[doc = " Kernel parameters to \\p func can be specified in one of two ways:"] - #[doc = ""] - #[doc = " 1) Kernel parameters can be specified via \\p kernelParams. If the kernel has N"] - #[doc = " parameters, then \\p kernelParams needs to be an array of N pointers. Each pointer,"] - #[doc = " from \\p kernelParams[0] to \\p kernelParams[N-1], points to the region of memory from which the actual"] - #[doc = " parameter will be copied. The number of kernel parameters and their offsets and sizes do not need"] - #[doc = " to be specified as that information is retrieved directly from the kernel's image."] - #[doc = ""] - #[doc = " 2) Kernel parameters can also be packaged by the application into a single buffer that is passed in"] - #[doc = " via \\p extra. This places the burden on the application of knowing each kernel"] - #[doc = " parameter's size and alignment/padding within the buffer. The \\p extra parameter exists"] - #[doc = " to allow this function to take additional less commonly used arguments. \\p extra specifies"] - #[doc = " a list of names of extra settings and their corresponding values. Each extra setting name is"] - #[doc = " immediately followed by the corresponding value. The list must be terminated with either NULL or"] - #[doc = " CU_LAUNCH_PARAM_END."] - #[doc = ""] - #[doc = " - ::CU_LAUNCH_PARAM_END, which indicates the end of the \\p extra"] - #[doc = " array;"] - #[doc = " - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next"] - #[doc = " value in \\p extra will be a pointer to a buffer"] - #[doc = " containing all the kernel parameters for launching kernel"] - #[doc = " \\p func;"] - #[doc = " - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next"] - #[doc = " value in \\p extra will be a pointer to a size_t"] - #[doc = " containing the size of the buffer specified with"] - #[doc = " ::CU_LAUNCH_PARAM_BUFFER_POINTER;"] - #[doc = ""] - #[doc = " The error ::cudaErrorInvalidValue will be returned if kernel parameters are specified with both"] - #[doc = " \\p kernelParams and \\p extra (i.e. both \\p kernelParams and"] - #[doc = " \\p extra are non-NULL)."] - #[doc = ""] - #[doc = " The \\p kernelParams or \\p extra array, as well as the argument values it points to,"] - #[doc = " are copied during this call."] - #[doc = ""] - #[doc = " \\note Kernels launched using graphs must not use texture and surface references. 
Reading or"] - #[doc = " writing through any texture or surface reference is undefined behavior."] - #[doc = " This restriction does not apply to texture and surface objects."] - #[doc = ""] - #[doc = " \\param pGraphNode - Returns newly created node"] - #[doc = " \\param graph - Graph to which to add the node"] - #[doc = " \\param pDependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = " \\param pNodeParams - Parameters for the GPU execution node"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDeviceFunction"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaLaunchKernel,"] - #[doc = " ::cudaGraphKernelNodeGetParams,"] - #[doc = " ::cudaGraphKernelNodeSetParams,"] - #[doc = " ::cudaGraphCreate,"] - #[doc = " ::cudaGraphDestroyNode,"] - #[doc = " ::cudaGraphAddChildGraphNode,"] - #[doc = " ::cudaGraphAddEmptyNode,"] - #[doc = " ::cudaGraphAddHostNode,"] - #[doc = " ::cudaGraphAddMemcpyNode,"] - #[doc = " ::cudaGraphAddMemsetNode"] pub fn cudaGraphAddKernelNode( pGraphNode: *mut cudaGraphNode_t, graph: cudaGraph_t, @@ -21280,108 +4890,18 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a kernel node's parameters"] - #[doc = ""] - #[doc = " Returns the parameters of kernel node \\p node in \\p pNodeParams."] - #[doc = " The \\p kernelParams or \\p extra array returned in \\p pNodeParams,"] - #[doc = " as well as the argument values it points to, are owned by the node."] - #[doc = " This memory remains valid until the node is destroyed or its"] - #[doc = " parameters are modified, and should not be modified"] - #[doc = " directly. 
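// Illustrative sketch (editorial, not part of the generated bindings or the patch):
// how the kernelParams convention described above maps to Rust for a hypothetical
// kernel `__global__ void scale(int n, float* data)` — one pointer per argument, each
// pointing at the storage the runtime copies the value from. The field names
// (func, gridDim, blockDim, sharedMemBytes, kernelParams, extra) follow the
// cudaKernelNodeParams struct quoted in the removed documentation; the generated Rust
// types (dim3 with x/y/z fields, the trailing parameters of cudaGraphAddKernelNode)
// are assumptions about bindgen's output, not something this diff shows in full.
pub unsafe fn add_scale_kernel_node(
    graph: cudaGraph_t,
    func: *mut ::std::os::raw::c_void, // handle to the compiled __global__ function
    n: &mut ::std::os::raw::c_int,
    data: &mut *mut f32, // device pointer, passed by address like any other argument
) -> cudaGraphNode_t {
    // One pointer per kernel argument, in declaration order.
    let mut args: [*mut ::std::os::raw::c_void; 2] = [
        n as *mut _ as *mut ::std::os::raw::c_void,
        data as *mut _ as *mut ::std::os::raw::c_void,
    ];
    let params = cudaKernelNodeParams {
        func,
        gridDim: dim3 { x: 1, y: 1, z: 1 },
        blockDim: dim3 { x: 256, y: 1, z: 1 },
        sharedMemBytes: 0,
        kernelParams: args.as_mut_ptr(),
        // Only one of kernelParams / extra may be non-NULL, per the documentation.
        extra: ::std::ptr::null_mut(),
    };
    let mut node: cudaGraphNode_t = ::std::mem::zeroed();
    // No dependencies, so the node becomes a root of the graph.
    let _err = cudaGraphAddKernelNode(&mut node, graph, ::std::ptr::null(), 0, &params);
    node
}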
Use ::cudaGraphKernelNodeSetParams to update the"] - #[doc = " parameters of this node."] - #[doc = ""] - #[doc = " The params will contain either \\p kernelParams or \\p extra,"] - #[doc = " according to which of these was most recently set on the node."] - #[doc = ""] - #[doc = " \\param node - Node to get the parameters for"] - #[doc = " \\param pNodeParams - Pointer to return the parameters"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDeviceFunction"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaLaunchKernel,"] - #[doc = " ::cudaGraphAddKernelNode,"] - #[doc = " ::cudaGraphKernelNodeSetParams"] pub fn cudaGraphKernelNodeGetParams( node: cudaGraphNode_t, pNodeParams: *mut cudaKernelNodeParams, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets a kernel node's parameters"] - #[doc = ""] - #[doc = " Sets the parameters of kernel node \\p node to \\p pNodeParams."] - #[doc = ""] - #[doc = " \\param node - Node to set the parameters for"] - #[doc = " \\param pNodeParams - Parameters to copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidResourceHandle,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaLaunchKernel,"] - #[doc = " ::cudaGraphAddKernelNode,"] - #[doc = " ::cudaGraphKernelNodeGetParams"] pub fn cudaGraphKernelNodeSetParams( node: cudaGraphNode_t, pNodeParams: *const cudaKernelNodeParams, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates a memcpy node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new memcpy node and adds it to \\p graph with \\p numDependencies"] - #[doc = " dependencies specified via \\p pDependencies."] - #[doc = " It is possible for \\p numDependencies to be 0, in which case the node will be placed"] - #[doc = " at the root of the graph. 
\\p pDependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p pGraphNode."] - #[doc = ""] - #[doc = " When the graph is launched, the node will perform the memcpy described by \\p pCopyParams."] - #[doc = " See ::cudaMemcpy3D() for a description of the structure and its restrictions."] - #[doc = ""] - #[doc = " Memcpy nodes have some additional restrictions with regards to managed memory, if the"] - #[doc = " system contains at least one device which has a zero value for the device attribute"] - #[doc = " ::cudaDevAttrConcurrentManagedAccess."] - #[doc = ""] - #[doc = " \\param pGraphNode - Returns newly created node"] - #[doc = " \\param graph - Graph to which to add the node"] - #[doc = " \\param pDependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = " \\param pCopyParams - Parameters for the memory copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaMemcpy3D,"] - #[doc = " ::cudaGraphMemcpyNodeGetParams,"] - #[doc = " ::cudaGraphMemcpyNodeSetParams,"] - #[doc = " ::cudaGraphCreate,"] - #[doc = " ::cudaGraphDestroyNode,"] - #[doc = " ::cudaGraphAddChildGraphNode,"] - #[doc = " ::cudaGraphAddEmptyNode,"] - #[doc = " ::cudaGraphAddKernelNode,"] - #[doc = " ::cudaGraphAddHostNode,"] - #[doc = " ::cudaGraphAddMemsetNode"] pub fn cudaGraphAddMemcpyNode( pGraphNode: *mut cudaGraphNode_t, graph: cudaGraph_t, @@ -21391,93 +4911,18 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a memcpy node's parameters"] - #[doc = ""] - #[doc = " Returns the parameters of memcpy node \\p node in \\p pNodeParams."] - #[doc = ""] - #[doc = " \\param node - Node to get the parameters for"] - #[doc = " \\param pNodeParams - Pointer to return the parameters"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaMemcpy3D,"] - #[doc = " ::cudaGraphAddMemcpyNode,"] - #[doc = " ::cudaGraphMemcpyNodeSetParams"] pub fn cudaGraphMemcpyNodeGetParams( node: cudaGraphNode_t, pNodeParams: *mut cudaMemcpy3DParms, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets a memcpy node's parameters"] - #[doc = ""] - #[doc = " Sets the parameters of memcpy node \\p node to \\p pNodeParams."] - #[doc = ""] - #[doc = " \\param node - Node to set the parameters for"] - #[doc = " \\param pNodeParams - Parameters to copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaMemcpy3D,"] - #[doc = " ::cudaGraphAddMemcpyNode,"] - #[doc = " ::cudaGraphMemcpyNodeGetParams"] pub fn cudaGraphMemcpyNodeSetParams( node: cudaGraphNode_t, pNodeParams: *const cudaMemcpy3DParms, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates a memset node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new memset node and adds it to \\p graph with \\p numDependencies"] - #[doc = " dependencies specified via \\p 
pDependencies."] - #[doc = " It is possible for \\p numDependencies to be 0, in which case the node will be placed"] - #[doc = " at the root of the graph. \\p pDependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p pGraphNode."] - #[doc = ""] - #[doc = " The element size must be 1, 2, or 4 bytes."] - #[doc = " When the graph is launched, the node will perform the memset described by \\p pMemsetParams."] - #[doc = ""] - #[doc = " \\param pGraphNode - Returns newly created node"] - #[doc = " \\param graph - Graph to which to add the node"] - #[doc = " \\param pDependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = " \\param pMemsetParams - Parameters for the memory set"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorInvalidDevice"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaMemset2D,"] - #[doc = " ::cudaGraphMemsetNodeGetParams,"] - #[doc = " ::cudaGraphMemsetNodeSetParams,"] - #[doc = " ::cudaGraphCreate,"] - #[doc = " ::cudaGraphDestroyNode,"] - #[doc = " ::cudaGraphAddChildGraphNode,"] - #[doc = " ::cudaGraphAddEmptyNode,"] - #[doc = " ::cudaGraphAddKernelNode,"] - #[doc = " ::cudaGraphAddHostNode,"] - #[doc = " ::cudaGraphAddMemcpyNode"] pub fn cudaGraphAddMemsetNode( pGraphNode: *mut cudaGraphNode_t, graph: cudaGraph_t, @@ -21487,93 +4932,18 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a memset node's parameters"] - #[doc = ""] - #[doc = " Returns the parameters of memset node \\p node in \\p pNodeParams."] - #[doc = ""] - #[doc = " \\param node - Node to get the parameters for"] - #[doc = " \\param pNodeParams - Pointer to return the parameters"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaMemset2D,"] - #[doc = " ::cudaGraphAddMemsetNode,"] - #[doc = " ::cudaGraphMemsetNodeSetParams"] pub fn cudaGraphMemsetNodeGetParams( node: cudaGraphNode_t, pNodeParams: *mut cudaMemsetParams, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets a memset node's parameters"] - #[doc = ""] - #[doc = " Sets the parameters of memset node \\p node to \\p pNodeParams."] - #[doc = ""] - #[doc = " \\param node - Node to set the parameters for"] - #[doc = " \\param pNodeParams - Parameters to copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaMemset2D,"] - #[doc = " ::cudaGraphAddMemsetNode,"] - #[doc = " ::cudaGraphMemsetNodeGetParams"] pub fn cudaGraphMemsetNodeSetParams( node: cudaGraphNode_t, pNodeParams: *const cudaMemsetParams, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates a host execution node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new CPU execution node and adds it to \\p graph with \\p numDependencies"] - #[doc = " dependencies specified via \\p pDependencies and arguments specified in \\p pNodeParams."] - #[doc = " It is possible for 
\\p numDependencies to be 0, in which case the node will be placed"] - #[doc = " at the root of the graph. \\p pDependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p pGraphNode."] - #[doc = ""] - #[doc = " When the graph is launched, the node will invoke the specified CPU function."] - #[doc = " Host nodes are not supported under MPS with pre-Volta GPUs."] - #[doc = ""] - #[doc = " \\param pGraphNode - Returns newly created node"] - #[doc = " \\param graph - Graph to which to add the node"] - #[doc = " \\param pDependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = " \\param pNodeParams - Parameters for the host node"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorNotSupported,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaLaunchHostFunc,"] - #[doc = " ::cudaGraphHostNodeGetParams,"] - #[doc = " ::cudaGraphHostNodeSetParams,"] - #[doc = " ::cudaGraphCreate,"] - #[doc = " ::cudaGraphDestroyNode,"] - #[doc = " ::cudaGraphAddChildGraphNode,"] - #[doc = " ::cudaGraphAddEmptyNode,"] - #[doc = " ::cudaGraphAddKernelNode,"] - #[doc = " ::cudaGraphAddMemcpyNode,"] - #[doc = " ::cudaGraphAddMemsetNode"] pub fn cudaGraphAddHostNode( pGraphNode: *mut cudaGraphNode_t, graph: cudaGraph_t, @@ -21583,90 +4953,18 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a host node's parameters"] - #[doc = ""] - #[doc = " Returns the parameters of host node \\p node in \\p pNodeParams."] - #[doc = ""] - #[doc = " \\param node - Node to get the parameters for"] - #[doc = " \\param pNodeParams - Pointer to return the parameters"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaLaunchHostFunc,"] - #[doc = " ::cudaGraphAddHostNode,"] - #[doc = " ::cudaGraphHostNodeSetParams"] pub fn cudaGraphHostNodeGetParams( node: cudaGraphNode_t, pNodeParams: *mut cudaHostNodeParams, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets a host node's parameters"] - #[doc = ""] - #[doc = " Sets the parameters of host node \\p node to \\p nodeParams."] - #[doc = ""] - #[doc = " \\param node - Node to set the parameters for"] - #[doc = " \\param pNodeParams - Parameters to copy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaLaunchHostFunc,"] - #[doc = " ::cudaGraphAddHostNode,"] - #[doc = " ::cudaGraphHostNodeGetParams"] pub fn cudaGraphHostNodeSetParams( node: cudaGraphNode_t, pNodeParams: *const cudaHostNodeParams, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates a child graph node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new node which executes an embedded graph, and adds it to \\p graph with"] - #[doc = " \\p numDependencies dependencies specified via \\p pDependencies."] - #[doc = " It is possible for \\p numDependencies to be 0, in which case the node will be placed"] - #[doc = " 
at the root of the graph. \\p pDependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p pGraphNode."] - #[doc = ""] - #[doc = " The node executes an embedded child graph. The child graph is cloned in this call."] - #[doc = ""] - #[doc = " \\param pGraphNode - Returns newly created node"] - #[doc = " \\param graph - Graph to which to add the node"] - #[doc = " \\param pDependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = " \\param childGraph - The graph to clone into this node"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphChildGraphNodeGetGraph,"] - #[doc = " ::cudaGraphCreate,"] - #[doc = " ::cudaGraphDestroyNode,"] - #[doc = " ::cudaGraphAddEmptyNode,"] - #[doc = " ::cudaGraphAddKernelNode,"] - #[doc = " ::cudaGraphAddHostNode,"] - #[doc = " ::cudaGraphAddMemcpyNode,"] - #[doc = " ::cudaGraphAddMemsetNode,"] - #[doc = " ::cudaGraphClone"] pub fn cudaGraphAddChildGraphNode( pGraphNode: *mut cudaGraphNode_t, graph: cudaGraph_t, @@ -21676,65 +4974,12 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Gets a handle to the embedded graph of a child graph node"] - #[doc = ""] - #[doc = " Gets a handle to the embedded graph in a child graph node. This call"] - #[doc = " does not clone the graph. Changes to the graph will be reflected in"] - #[doc = " the node, and the node retains ownership of the graph."] - #[doc = ""] - #[doc = " \\param node - Node to get the embedded graph for"] - #[doc = " \\param pGraph - Location to store a handle to the graph"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphAddChildGraphNode,"] - #[doc = " ::cudaGraphNodeFindInClone"] pub fn cudaGraphChildGraphNodeGetGraph( node: cudaGraphNode_t, pGraph: *mut cudaGraph_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates an empty node and adds it to a graph"] - #[doc = ""] - #[doc = " Creates a new node which performs no operation, and adds it to \\p graph with"] - #[doc = " \\p numDependencies dependencies specified via \\p pDependencies."] - #[doc = " It is possible for \\p numDependencies to be 0, in which case the node will be placed"] - #[doc = " at the root of the graph. \\p pDependencies may not have any duplicate entries."] - #[doc = " A handle to the new node will be returned in \\p pGraphNode."] - #[doc = ""] - #[doc = " An empty node performs no operation during execution, but can be used for"] - #[doc = " transitive ordering. 
For example, a phased execution graph with 2 groups of n"] - #[doc = " nodes with a barrier between them can be represented using an empty node and"] - #[doc = " 2*n dependency edges, rather than no empty node and n^2 dependency edges."] - #[doc = ""] - #[doc = " \\param pGraphNode - Returns newly created node"] - #[doc = " \\param graph - Graph to which to add the node"] - #[doc = " \\param pDependencies - Dependencies of the node"] - #[doc = " \\param numDependencies - Number of dependencies"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphCreate,"] - #[doc = " ::cudaGraphDestroyNode,"] - #[doc = " ::cudaGraphAddChildGraphNode,"] - #[doc = " ::cudaGraphAddKernelNode,"] - #[doc = " ::cudaGraphAddHostNode,"] - #[doc = " ::cudaGraphAddMemcpyNode,"] - #[doc = " ::cudaGraphAddMemsetNode"] pub fn cudaGraphAddEmptyNode( pGraphNode: *mut cudaGraphNode_t, graph: cudaGraph_t, @@ -21743,59 +4988,12 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Clones a graph"] - #[doc = ""] - #[doc = " This function creates a copy of \\p originalGraph and returns it in \\p pGraphClone."] - #[doc = " All parameters are copied into the cloned graph. The original graph may be modified"] - #[doc = " after this call without affecting the clone."] - #[doc = ""] - #[doc = " Child graph nodes in the original graph are recursively copied into the clone."] - #[doc = ""] - #[doc = " \\param pGraphClone - Returns newly created cloned graph"] - #[doc = " \\param originalGraph - Graph to clone"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " ::cudaErrorMemoryAllocation"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphCreate,"] - #[doc = " ::cudaGraphNodeFindInClone"] pub fn cudaGraphClone( pGraphClone: *mut cudaGraph_t, originalGraph: cudaGraph_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Finds a cloned version of a node"] - #[doc = ""] - #[doc = " This function returns the node in \\p clonedGraph corresponding to \\p originalNode"] - #[doc = " in the original graph."] - #[doc = ""] - #[doc = " \\p clonedGraph must have been cloned from \\p originalGraph via ::cudaGraphClone."] - #[doc = " \\p originalNode must have been in \\p originalGraph at the time of the call to"] - #[doc = " ::cudaGraphClone, and the corresponding cloned node in \\p clonedGraph must not have"] - #[doc = " been removed. 
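// Illustrative sketch (editorial, not part of the generated bindings or the patch):
// the "barrier" pattern described above — an empty node placed between two groups of
// nodes so the second group depends on the first through 2*n edges instead of n^2.
// The dependency parameters of cudaGraphAddEmptyNode are elided between diff hunks
// here; they are assumed to be (*const cudaGraphNode_t, usize), matching the style of
// the other node-creation bindings.
pub unsafe fn add_barrier(
    graph: cudaGraph_t,
    first_group: &[cudaGraphNode_t],
) -> cudaGraphNode_t {
    let mut barrier: cudaGraphNode_t = ::std::mem::zeroed();
    // The empty node depends on every node of the first group; nodes added later list
    // only `barrier` in their own dependency arrays.
    let _ = cudaGraphAddEmptyNode(&mut barrier, graph, first_group.as_ptr(), first_group.len());
    barrier
}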
The cloned node is then returned via \\p pClonedNode."] - #[doc = ""] - #[doc = " \\param pNode - Returns handle to the cloned node"] - #[doc = " \\param originalNode - Handle to the original node"] - #[doc = " \\param clonedGraph - Cloned graph to query"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphClone"] pub fn cudaGraphNodeFindInClone( pNode: *mut cudaGraphNode_t, originalNode: cudaGraphNode_t, @@ -21803,66 +5001,12 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a node's type"] - #[doc = ""] - #[doc = " Returns the node type of \\p node in \\p pType."] - #[doc = ""] - #[doc = " \\param node - Node to query"] - #[doc = " \\param pType - Pointer to return the node type"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphGetNodes,"] - #[doc = " ::cudaGraphGetRootNodes,"] - #[doc = " ::cudaGraphChildGraphNodeGetGraph,"] - #[doc = " ::cudaGraphKernelNodeGetParams,"] - #[doc = " ::cudaGraphKernelNodeSetParams,"] - #[doc = " ::cudaGraphHostNodeGetParams,"] - #[doc = " ::cudaGraphHostNodeSetParams,"] - #[doc = " ::cudaGraphMemcpyNodeGetParams,"] - #[doc = " ::cudaGraphMemcpyNodeSetParams,"] - #[doc = " ::cudaGraphMemsetNodeGetParams,"] - #[doc = " ::cudaGraphMemsetNodeSetParams"] pub fn cudaGraphNodeGetType( node: cudaGraphNode_t, pType: *mut cudaGraphNodeType, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a graph's nodes"] - #[doc = ""] - #[doc = " Returns a list of \\p graph's nodes. \\p nodes may be NULL, in which case this"] - #[doc = " function will return the number of nodes in \\p numNodes. Otherwise,"] - #[doc = " \\p numNodes entries will be filled in. If \\p numNodes is higher than the actual"] - #[doc = " number of nodes, the remaining entries in \\p nodes will be set to NULL, and the"] - #[doc = " number of nodes actually obtained will be returned in \\p numNodes."] - #[doc = ""] - #[doc = " \\param graph - Graph to query"] - #[doc = " \\param nodes - Pointer to return the nodes"] - #[doc = " \\param numNodes - See description"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphCreate,"] - #[doc = " ::cudaGraphGetRootNodes,"] - #[doc = " ::cudaGraphGetEdges,"] - #[doc = " ::cudaGraphNodeGetType,"] - #[doc = " ::cudaGraphNodeGetDependencies,"] - #[doc = " ::cudaGraphNodeGetDependentNodes"] pub fn cudaGraphGetNodes( graph: cudaGraph_t, nodes: *mut cudaGraphNode_t, @@ -21870,33 +5014,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a graph's root nodes"] - #[doc = ""] - #[doc = " Returns a list of \\p graph's root nodes. \\p pRootNodes may be NULL, in which case this"] - #[doc = " function will return the number of root nodes in \\p pNumRootNodes. Otherwise,"] - #[doc = " \\p pNumRootNodes entries will be filled in. 
If \\p pNumRootNodes is higher than the actual"] - #[doc = " number of root nodes, the remaining entries in \\p pRootNodes will be set to NULL, and the"] - #[doc = " number of nodes actually obtained will be returned in \\p pNumRootNodes."] - #[doc = ""] - #[doc = " \\param graph - Graph to query"] - #[doc = " \\param pRootNodes - Pointer to return the root nodes"] - #[doc = " \\param pNumRootNodes - See description"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphCreate,"] - #[doc = " ::cudaGraphGetNodes,"] - #[doc = " ::cudaGraphGetEdges,"] - #[doc = " ::cudaGraphNodeGetType,"] - #[doc = " ::cudaGraphNodeGetDependencies,"] - #[doc = " ::cudaGraphNodeGetDependentNodes"] pub fn cudaGraphGetRootNodes( graph: cudaGraph_t, pRootNodes: *mut cudaGraphNode_t, @@ -21904,36 +5021,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a graph's dependency edges"] - #[doc = ""] - #[doc = " Returns a list of \\p graph's dependency edges. Edges are returned via corresponding"] - #[doc = " indices in \\p from and \\p to; that is, the node in \\p to[i] has a dependency on the"] - #[doc = " node in \\p from[i]. \\p from and \\p to may both be NULL, in which"] - #[doc = " case this function only returns the number of edges in \\p numEdges. Otherwise,"] - #[doc = " \\p numEdges entries will be filled in. If \\p numEdges is higher than the actual"] - #[doc = " number of edges, the remaining entries in \\p from and \\p to will be set to NULL, and"] - #[doc = " the number of edges actually returned will be written to \\p numEdges."] - #[doc = ""] - #[doc = " \\param graph - Graph to get the edges from"] - #[doc = " \\param from - Location to return edge endpoints"] - #[doc = " \\param to - Location to return edge endpoints"] - #[doc = " \\param numEdges - See description"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphGetNodes,"] - #[doc = " ::cudaGraphGetRootNodes,"] - #[doc = " ::cudaGraphAddDependencies,"] - #[doc = " ::cudaGraphRemoveDependencies,"] - #[doc = " ::cudaGraphNodeGetDependencies,"] - #[doc = " ::cudaGraphNodeGetDependentNodes"] pub fn cudaGraphGetEdges( graph: cudaGraph_t, from: *mut cudaGraphNode_t, @@ -21942,33 +5029,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a node's dependencies"] - #[doc = ""] - #[doc = " Returns a list of \\p node's dependencies. \\p pDependencies may be NULL, in which case this"] - #[doc = " function will return the number of dependencies in \\p pNumDependencies. Otherwise,"] - #[doc = " \\p pNumDependencies entries will be filled in. 
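// Illustrative sketch (editorial, not part of the generated bindings or the patch):
// the query-then-fill pattern described above for cudaGraphGetNodes — pass a NULL
// `nodes` pointer first to learn the count, then call again with a buffer of that
// size. The count parameter is elided between diff hunks here; it is assumed to be
// generated as *mut usize (size_t), like the size parameters elsewhere in this file.
pub unsafe fn collect_graph_nodes(graph: cudaGraph_t) -> Vec<cudaGraphNode_t> {
    let mut count: usize = 0;
    // First call: nodes == NULL, so only the node count is written to `count`.
    let _ = cudaGraphGetNodes(graph, ::std::ptr::null_mut(), &mut count);
    let mut nodes: Vec<cudaGraphNode_t> = Vec::with_capacity(count);
    // Second call: fill the buffer; `count` is updated to the number actually written.
    let _ = cudaGraphGetNodes(graph, nodes.as_mut_ptr(), &mut count);
    nodes.set_len(count);
    nodes
}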
If \\p pNumDependencies is higher than the actual"] - #[doc = " number of dependencies, the remaining entries in \\p pDependencies will be set to NULL, and the"] - #[doc = " number of nodes actually obtained will be returned in \\p pNumDependencies."] - #[doc = ""] - #[doc = " \\param node - Node to query"] - #[doc = " \\param pDependencies - Pointer to return the dependencies"] - #[doc = " \\param pNumDependencies - See description"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphNodeGetDependentNodes,"] - #[doc = " ::cudaGraphGetNodes,"] - #[doc = " ::cudaGraphGetRootNodes,"] - #[doc = " ::cudaGraphGetEdges,"] - #[doc = " ::cudaGraphAddDependencies,"] - #[doc = " ::cudaGraphRemoveDependencies"] pub fn cudaGraphNodeGetDependencies( node: cudaGraphNode_t, pDependencies: *mut cudaGraphNode_t, @@ -21976,34 +5036,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Returns a node's dependent nodes"] - #[doc = ""] - #[doc = " Returns a list of \\p node's dependent nodes. \\p pDependentNodes may be NULL, in which"] - #[doc = " case this function will return the number of dependent nodes in \\p pNumDependentNodes."] - #[doc = " Otherwise, \\p pNumDependentNodes entries will be filled in. If \\p pNumDependentNodes is"] - #[doc = " higher than the actual number of dependent nodes, the remaining entries in"] - #[doc = " \\p pDependentNodes will be set to NULL, and the number of nodes actually obtained will"] - #[doc = " be returned in \\p pNumDependentNodes."] - #[doc = ""] - #[doc = " \\param node - Node to query"] - #[doc = " \\param pDependentNodes - Pointer to return the dependent nodes"] - #[doc = " \\param pNumDependentNodes - See description"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphNodeGetDependencies,"] - #[doc = " ::cudaGraphGetNodes,"] - #[doc = " ::cudaGraphGetRootNodes,"] - #[doc = " ::cudaGraphGetEdges,"] - #[doc = " ::cudaGraphAddDependencies,"] - #[doc = " ::cudaGraphRemoveDependencies"] pub fn cudaGraphNodeGetDependentNodes( node: cudaGraphNode_t, pDependentNodes: *mut cudaGraphNode_t, @@ -22011,33 +5043,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Adds dependency edges to a graph."] - #[doc = ""] - #[doc = " The number of dependencies to be added is defined by \\p numDependencies"] - #[doc = " Elements in \\p pFrom and \\p pTo at corresponding indices define a dependency."] - #[doc = " Each node in \\p pFrom and \\p pTo must belong to \\p graph."] - #[doc = ""] - #[doc = " If \\p numDependencies is 0, elements in \\p pFrom and \\p pTo will be ignored."] - #[doc = " Specifying an existing dependency will return an error."] - #[doc = ""] - #[doc = " \\param graph - Graph to which dependencies are added"] - #[doc = " \\param from - Array of nodes that provide the dependencies"] - #[doc = " \\param to - Array of dependent nodes"] - #[doc = " \\param numDependencies - Number of dependencies to be added"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " 
\\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphRemoveDependencies,"] - #[doc = " ::cudaGraphGetEdges,"] - #[doc = " ::cudaGraphNodeGetDependencies,"] - #[doc = " ::cudaGraphNodeGetDependentNodes"] pub fn cudaGraphAddDependencies( graph: cudaGraph_t, from: *const cudaGraphNode_t, @@ -22046,33 +5051,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Removes dependency edges from a graph."] - #[doc = ""] - #[doc = " The number of \\p pDependencies to be removed is defined by \\p numDependencies."] - #[doc = " Elements in \\p pFrom and \\p pTo at corresponding indices define a dependency."] - #[doc = " Each node in \\p pFrom and \\p pTo must belong to \\p graph."] - #[doc = ""] - #[doc = " If \\p numDependencies is 0, elements in \\p pFrom and \\p pTo will be ignored."] - #[doc = " Specifying a non-existing dependency will return an error."] - #[doc = ""] - #[doc = " \\param graph - Graph from which to remove dependencies"] - #[doc = " \\param from - Array of nodes that provide the dependencies"] - #[doc = " \\param to - Array of dependent nodes"] - #[doc = " \\param numDependencies - Number of dependencies to be removed"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphAddDependencies,"] - #[doc = " ::cudaGraphGetEdges,"] - #[doc = " ::cudaGraphNodeGetDependencies,"] - #[doc = " ::cudaGraphNodeGetDependentNodes"] pub fn cudaGraphRemoveDependencies( graph: cudaGraph_t, from: *const cudaGraphNode_t, @@ -22081,63 +5059,9 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Remove a node from the graph"] - #[doc = ""] - #[doc = " Removes \\p node from its graph. This operation also severs any dependencies of other nodes"] - #[doc = " on \\p node and vice versa."] - #[doc = ""] - #[doc = " \\param node - Node to remove"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphAddChildGraphNode,"] - #[doc = " ::cudaGraphAddEmptyNode,"] - #[doc = " ::cudaGraphAddKernelNode,"] - #[doc = " ::cudaGraphAddHostNode,"] - #[doc = " ::cudaGraphAddMemcpyNode,"] - #[doc = " ::cudaGraphAddMemsetNode"] pub fn cudaGraphDestroyNode(node: cudaGraphNode_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Creates an executable graph from a graph"] - #[doc = ""] - #[doc = " Instantiates \\p graph as an executable graph. The graph is validated for any"] - #[doc = " structural constraints or intra-node constraints which were not previously"] - #[doc = " validated. If instantiation is successful, a handle to the instantiated graph"] - #[doc = " is returned in \\p pGraphExec."] - #[doc = ""] - #[doc = " If there are any errors, diagnostic information may be returned in \\p pErrorNode and"] - #[doc = " \\p pLogBuffer. This is the primary way to inspect instantiation errors. The output"] - #[doc = " will be null terminated unless the diagnostics overflow"] - #[doc = " the buffer. 
In this case, they will be truncated, and the last byte can be"] - #[doc = " inspected to determine if truncation occurred."] - #[doc = ""] - #[doc = " \\param pGraphExec - Returns instantiated graph"] - #[doc = " \\param graph - Graph to instantiate"] - #[doc = " \\param pErrorNode - In case of an instantiation error, this may be modified to"] - #[doc = " indicate a node contributing to the error"] - #[doc = " \\param pLogBuffer - A character buffer to store diagnostic messages"] - #[doc = " \\param bufferSize - Size of the log buffer in bytes"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphCreate,"] - #[doc = " ::cudaGraphLaunch,"] - #[doc = " ::cudaGraphExecDestroy"] pub fn cudaGraphInstantiate( pGraphExec: *mut cudaGraphExec_t, graph: cudaGraph_t, @@ -22147,36 +5071,6 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Sets the parameters for a kernel node in the given graphExec"] - #[doc = ""] - #[doc = " Sets the parameters of a kernel node in an executable graph \\p hGraphExec."] - #[doc = " The node is identified by the corresponding node \\p node in the"] - #[doc = " non-executable graph, from which the executable graph was instantiated."] - #[doc = ""] - #[doc = " \\p node must not have been removed from the original graph. The \\p func field"] - #[doc = " of \\p nodeParams cannot be modified and must match the original value."] - #[doc = " All other values can be modified."] - #[doc = ""] - #[doc = " The modifications take effect at the next launch of \\p hGraphExec. Already"] - #[doc = " enqueued or running launches of \\p hGraphExec are not affected by this call."] - #[doc = " \\p node is also not modified by this call."] - #[doc = ""] - #[doc = " \\param hGraphExec - The executable graph in which to set the specified node"] - #[doc = " \\param node - kernel node from the graph from which graphExec was instantiated"] - #[doc = " \\param pNodeParams - Updated Parameters to set"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue,"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphAddKernelNode,"] - #[doc = " ::cudaGraphKernelNodeSetParams,"] - #[doc = " ::cudaGraphInstantiate"] pub fn cudaGraphExecKernelNodeSetParams( hGraphExec: cudaGraphExec_t, node: cudaGraphNode_t, @@ -22184,83 +5078,24 @@ extern "C" { ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Launches an executable graph in a stream"] - #[doc = ""] - #[doc = " Executes \\p graphExec in \\p stream. Only one instance of \\p graphExec may be executing"] - #[doc = " at a time. Each launch is ordered behind both any previous work in \\p stream"] - #[doc = " and any previous launches of \\p graphExec. 
To execute a graph concurrently, it must be"] - #[doc = " instantiated multiple times into multiple executable graphs."] - #[doc = ""] - #[doc = " \\param graphExec - Executable graph to launch"] - #[doc = " \\param stream - Stream in which to launch the graph"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphInstantiate,"] - #[doc = " ::cudaGraphExecDestroy"] pub fn cudaGraphLaunch( graphExec: cudaGraphExec_t, stream: cudaStream_t, ) -> cudaError_t; } extern "C" { - #[doc = " \\brief Destroys an executable graph"] - #[doc = ""] - #[doc = " Destroys the executable graph specified by \\p graphExec."] - #[doc = ""] - #[doc = " \\param graphExec - Executable graph to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphInstantiate,"] - #[doc = " ::cudaGraphLaunch"] pub fn cudaGraphExecDestroy(graphExec: cudaGraphExec_t) -> cudaError_t; } extern "C" { - #[doc = " \\brief Destroys a graph"] - #[doc = ""] - #[doc = " Destroys the graph specified by \\p graph, as well as all of its nodes."] - #[doc = ""] - #[doc = " \\param graph - Graph to destroy"] - #[doc = ""] - #[doc = " \\return"] - #[doc = " ::cudaSuccess,"] - #[doc = " ::cudaErrorInvalidValue"] - #[doc = " \\note_graph_thread_safety"] - #[doc = " \\notefnerr"] - #[doc = " \\note_init_rt"] - #[doc = " \\note_callback"] - #[doc = ""] - #[doc = " \\sa"] - #[doc = " ::cudaGraphCreate"] pub fn cudaGraphDestroy(graph: cudaGraph_t) -> cudaError_t; } extern "C" { - #[doc = " \\cond impl_private"] pub fn cudaGetExportTable( ppExportTable: *mut *const ::std::os::raw::c_void, pExportTableId: *const cudaUUID_t, ) -> cudaError_t; } pub mod nvrtcResult { - #[doc = " \\ingroup error"] - #[doc = " \\brief The enumerated type nvrtcResult defines API call result codes."] - #[doc = " NVRTC API functions return nvrtcResult to indicate the call"] - #[doc = " result."] pub type Type = u32; pub const NVRTC_SUCCESS: Type = 0; pub const NVRTC_ERROR_OUT_OF_MEMORY: Type = 1; @@ -22276,30 +5111,11 @@ pub mod nvrtcResult { pub const NVRTC_ERROR_INTERNAL_ERROR: Type = 11; } extern "C" { - #[doc = " \\ingroup error"] - #[doc = " \\brief nvrtcGetErrorString is a helper function that returns a string"] - #[doc = " describing the given nvrtcResult code, e.g., NVRTC_SUCCESS to"] - #[doc = " \\c \"NVRTC_SUCCESS\"."] - #[doc = " For unrecognized enumeration values, it returns"] - #[doc = " \\c \"NVRTC_ERROR unknown\"."] - #[doc = ""] - #[doc = " \\param [in] result CUDA Runtime Compilation API result code."] - #[doc = " \\return Message string for the given #nvrtcResult code."] pub fn nvrtcGetErrorString( result: nvrtcResult::Type, ) -> *const ::std::os::raw::c_char; } extern "C" { - #[doc = " \\ingroup query"] - #[doc = " \\brief nvrtcVersion sets the output parameters \\p major and \\p minor"] - #[doc = " with the CUDA Runtime Compilation version number."] - #[doc = ""] - #[doc = " \\param [out] major CUDA Runtime Compilation major version number."] - #[doc = " \\param [out] minor CUDA Runtime Compilation minor version number."] - #[doc = " \\return"] - #[doc = " - \\link #nvrtcResult NVRTC_SUCCESS 
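// Illustrative sketch (editorial, not part of the generated bindings or the patch):
// the executable-graph lifecycle described above — build a graph, instantiate it,
// launch the instance on a stream, then tear both down. cudaGraphInstantiate's
// trailing parameters (pErrorNode, pLogBuffer, bufferSize) are taken from the removed
// documentation; their generated Rust types are assumed to be *mut cudaGraphNode_t,
// *mut c_char and usize.
pub unsafe fn run_empty_graph(stream: cudaStream_t) {
    let mut graph: cudaGraph_t = ::std::mem::zeroed();
    let _ = cudaGraphCreate(&mut graph, 0); // flags must be 0

    let mut exec: cudaGraphExec_t = ::std::mem::zeroed();
    let mut error_node: cudaGraphNode_t = ::std::mem::zeroed();
    let mut log = [0 as ::std::os::raw::c_char; 256];
    // Instantiation validates the graph; diagnostics, if any, land in `log`.
    let _ = cudaGraphInstantiate(&mut exec, graph, &mut error_node, log.as_mut_ptr(), log.len());

    // Only one launch of `exec` may be in flight at a time; each launch is ordered
    // behind prior work submitted to `stream`.
    let _ = cudaGraphLaunch(exec, stream);

    let _ = cudaGraphExecDestroy(exec);
    let _ = cudaGraphDestroy(graph);
}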
\\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \\endlink"] - #[doc = ""] pub fn nvrtcVersion( major: *mut ::std::os::raw::c_int, minor: *mut ::std::os::raw::c_int, @@ -22310,42 +5126,8 @@ extern "C" { pub struct _nvrtcProgram { _unused: [u8; 0], } -#[doc = " \\ingroup compilation"] -#[doc = " \\brief nvrtcProgram is the unit of compilation, and an opaque handle for"] -#[doc = " a program."] -#[doc = ""] -#[doc = " To compile a CUDA program string, an instance of nvrtcProgram must be"] -#[doc = " created first with ::nvrtcCreateProgram, then compiled with"] -#[doc = " ::nvrtcCompileProgram."] pub type nvrtcProgram = *mut _nvrtcProgram; extern "C" { - #[doc = " \\ingroup compilation"] - #[doc = " \\brief nvrtcCreateProgram creates an instance of nvrtcProgram with the"] - #[doc = " given input parameters, and sets the output parameter \\p prog with"] - #[doc = " it."] - #[doc = ""] - #[doc = " \\param [out] prog CUDA Runtime Compilation program."] - #[doc = " \\param [in] src CUDA program source."] - #[doc = " \\param [in] name CUDA program name.\\n"] - #[doc = " \\p name can be \\c NULL; \\c \"default_program\" is"] - #[doc = " used when \\p name is \\c NULL."] - #[doc = " \\param [in] numHeaders Number of headers used.\\n"] - #[doc = " \\p numHeaders must be greater than or equal to 0."] - #[doc = " \\param [in] headers Sources of the headers.\\n"] - #[doc = " \\p headers can be \\c NULL when \\p numHeaders is"] - #[doc = " 0."] - #[doc = " \\param [in] includeNames Name of each header by which they can be"] - #[doc = " included in the CUDA program source.\\n"] - #[doc = " \\p includeNames can be \\c NULL when \\p numHeaders"] - #[doc = " is 0."] - #[doc = " \\return"] - #[doc = " - \\link #nvrtcResult NVRTC_SUCCESS \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_OUT_OF_MEMORY \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_PROGRAM_CREATION_FAILURE \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \\endlink"] - #[doc = ""] - #[doc = " \\see ::nvrtcDestroyProgram"] pub fn nvrtcCreateProgram( prog: *mut nvrtcProgram, src: *const ::std::os::raw::c_char, @@ -22356,22 +5138,9 @@ extern "C" { ) -> nvrtcResult::Type; } extern "C" { - #[doc = " \\ingroup compilation"] - #[doc = " \\brief nvrtcDestroyProgram destroys the given program."] - #[doc = ""] - #[doc = " \\param [in] prog CUDA Runtime Compilation program."] - #[doc = " \\return"] - #[doc = " - \\link #nvrtcResult NVRTC_SUCCESS \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \\endlink"] - #[doc = ""] - #[doc = " \\see ::nvrtcCreateProgram"] pub fn nvrtcDestroyProgram(prog: *mut nvrtcProgram) -> nvrtcResult::Type; } extern "C" { - #[doc = " \\ingroup compilation"] - #[doc = " \\brief nvrtcCompileProgram compiles the given program."] - #[doc = ""] - #[doc = " It supports compile options listed in \\ref options."] pub fn nvrtcCompileProgram( prog: nvrtcProgram, numOptions: ::std::os::raw::c_int, @@ -22379,127 +5148,36 @@ extern "C" { ) -> nvrtcResult::Type; } extern "C" { - #[doc = " \\ingroup compilation"] - #[doc = " \\brief nvrtcGetPTXSize sets \\p ptxSizeRet with the size of the PTX"] - #[doc = " generated by the previous compilation of \\p prog (including the"] - #[doc = " trailing \\c NULL)."] - #[doc = ""] - #[doc = " \\param [in] prog CUDA Runtime Compilation program."] - #[doc = " \\param [out] ptxSizeRet Size of the generated PTX (including the trailing"] - #[doc = " \\c 
NULL)."] - #[doc = " \\return"] - #[doc = " - \\link #nvrtcResult NVRTC_SUCCESS \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \\endlink"] - #[doc = ""] - #[doc = " \\see ::nvrtcGetPTX"] pub fn nvrtcGetPTXSize( prog: nvrtcProgram, ptxSizeRet: *mut usize, ) -> nvrtcResult::Type; } extern "C" { - #[doc = " \\ingroup compilation"] - #[doc = " \\brief nvrtcGetPTX stores the PTX generated by the previous compilation"] - #[doc = " of \\p prog in the memory pointed by \\p ptx."] - #[doc = ""] - #[doc = " \\param [in] prog CUDA Runtime Compilation program."] - #[doc = " \\param [out] ptx Compiled result."] - #[doc = " \\return"] - #[doc = " - \\link #nvrtcResult NVRTC_SUCCESS \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \\endlink"] - #[doc = ""] - #[doc = " \\see ::nvrtcGetPTXSize"] pub fn nvrtcGetPTX( prog: nvrtcProgram, ptx: *mut ::std::os::raw::c_char, ) -> nvrtcResult::Type; } extern "C" { - #[doc = " \\ingroup compilation"] - #[doc = " \\brief nvrtcGetProgramLogSize sets \\p logSizeRet with the size of the"] - #[doc = " log generated by the previous compilation of \\p prog (including the"] - #[doc = " trailing \\c NULL)."] - #[doc = ""] - #[doc = " Note that compilation log may be generated with warnings and informative"] - #[doc = " messages, even when the compilation of \\p prog succeeds."] - #[doc = ""] - #[doc = " \\param [in] prog CUDA Runtime Compilation program."] - #[doc = " \\param [out] logSizeRet Size of the compilation log"] - #[doc = " (including the trailing \\c NULL)."] - #[doc = " \\return"] - #[doc = " - \\link #nvrtcResult NVRTC_SUCCESS \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \\endlink"] - #[doc = ""] - #[doc = " \\see ::nvrtcGetProgramLog"] pub fn nvrtcGetProgramLogSize( prog: nvrtcProgram, logSizeRet: *mut usize, ) -> nvrtcResult::Type; } extern "C" { - #[doc = " \\ingroup compilation"] - #[doc = " \\brief nvrtcGetProgramLog stores the log generated by the previous"] - #[doc = " compilation of \\p prog in the memory pointed by \\p log."] - #[doc = ""] - #[doc = " \\param [in] prog CUDA Runtime Compilation program."] - #[doc = " \\param [out] log Compilation log."] - #[doc = " \\return"] - #[doc = " - \\link #nvrtcResult NVRTC_SUCCESS \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_INPUT \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_INVALID_PROGRAM \\endlink"] - #[doc = ""] - #[doc = " \\see ::nvrtcGetProgramLogSize"] pub fn nvrtcGetProgramLog( prog: nvrtcProgram, log: *mut ::std::os::raw::c_char, ) -> nvrtcResult::Type; } extern "C" { - #[doc = " \\ingroup compilation"] - #[doc = " \\brief nvrtcAddNameExpression notes the given name expression"] - #[doc = " denoting the address of a __global__ function"] - #[doc = " or __device__/__constant__ variable."] - #[doc = ""] - #[doc = " The identical name expression string must be provided on a subsequent"] - #[doc = " call to nvrtcGetLoweredName to extract the lowered name."] - #[doc = " \\param [in] prog CUDA Runtime Compilation program."] - #[doc = " \\param [in] name_expression constant expression denoting the address of"] - #[doc = " a __global__ function or __device__/__constant__ variable."] - #[doc = " \\return"] - #[doc = " - \\link #nvrtcResult NVRTC_SUCCESS \\endlink"] - #[doc = " - \\link 
#nvrtcResult NVRTC_ERROR_NO_NAME_EXPRESSIONS_AFTER_COMPILATION \\endlink"] - #[doc = ""] - #[doc = " \\see ::nvrtcGetLoweredName"] pub fn nvrtcAddNameExpression( prog: nvrtcProgram, name_expression: *const ::std::os::raw::c_char, ) -> nvrtcResult::Type; } extern "C" { - #[doc = " \\ingroup compilation"] - #[doc = " \\brief nvrtcGetLoweredName extracts the lowered (mangled) name"] - #[doc = " for a __global__ function or __device__/__constant__ variable,"] - #[doc = " and updates *lowered_name to point to it. The memory containing"] - #[doc = " the name is released when the NVRTC program is destroyed by"] - #[doc = " nvrtcDestroyProgram."] - #[doc = " The identical name expression must have been previously"] - #[doc = " provided to nvrtcAddNameExpression."] - #[doc = ""] - #[doc = " \\param [in] prog CUDA Runtime Compilation program."] - #[doc = " \\param [in] name_expression constant expression denoting the address of"] - #[doc = " a __global__ function or __device__/__constant__ variable."] - #[doc = " \\param [out] lowered_name initialized by the function to point to a"] - #[doc = " C string containing the lowered (mangled)"] - #[doc = " name corresponding to the provided name expression."] - #[doc = " \\return"] - #[doc = " - \\link #nvrtcResult NVRTC_SUCCESS \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_NO_LOWERED_NAMES_BEFORE_COMPILATION \\endlink"] - #[doc = " - \\link #nvrtcResult NVRTC_ERROR_NAME_EXPRESSION_NOT_VALID \\endlink"] - #[doc = ""] - #[doc = " \\see ::nvrtcAddNameExpression"] pub fn nvrtcGetLoweredName( prog: nvrtcProgram, name_expression: *const ::std::os::raw::c_char, diff --git a/optix-sys/optix_wrapper.rs b/optix-sys/optix_wrapper.rs index a68d704..7251e5e 100644 --- a/optix-sys/optix_wrapper.rs +++ b/optix-sys/optix_wrapper.rs @@ -517,8 +517,8 @@ pub const OptixExceptionCodes_OPTIX_EXCEPTION_CODE_TRACE_DEPTH_EXCEEDED: OptixExceptionCodes = -2; pub const OptixExceptionCodes_OPTIX_EXCEPTION_CODE_TRAVERSAL_DEPTH_EXCEEDED: OptixExceptionCodes = -3; -pub const OptixExceptionCodes_OPTIX_EXCEPTION_CODE_TRAVERSAL_INVALID_TRAVERSABLE : OptixExceptionCodes = -5 ; -pub const OptixExceptionCodes_OPTIX_EXCEPTION_CODE_TRAVERSAL_INVALID_MISS_SBT : OptixExceptionCodes = -6 ; +pub const OptixExceptionCodes_OPTIX_EXCEPTION_CODE_TRAVERSAL_INVALID_TRAVERSABLE : OptixExceptionCodes = - 5 ; +pub const OptixExceptionCodes_OPTIX_EXCEPTION_CODE_TRAVERSAL_INVALID_MISS_SBT : OptixExceptionCodes = - 6 ; pub const OptixExceptionCodes_OPTIX_EXCEPTION_CODE_TRAVERSAL_INVALID_HIT_SBT: OptixExceptionCodes = -7; pub type OptixExceptionCodes = i32; diff --git a/optix/Cargo.toml b/optix/Cargo.toml index 88afe85..4c35cfb 100644 --- a/optix/Cargo.toml +++ b/optix/Cargo.toml @@ -8,24 +8,25 @@ edition = "2018" [dependencies] optix-sys = {path="../optix-sys"} -bitflags = "1.1.0" -gl = "0.13.0" -imath = {path="../../imath-rs", optional=true} -cfg-if = "0.1.10" -nalgebra-glm = {version = "0.5.0", optional=true} -nalgebra = {version = "0.19.0", optional=true} -log = "0.4.8" +bitflags = "1.3.2" +gl = "0.14.0" +imath = {version = "0.1.0", optional=true} +cfg-if = "1.0.0" +nalgebra-glm = {version = "0.16.0", optional=true} # features=["cuda"], +nalgebra = {version = "0.30.1", optional=true} +#ultraviolet = {version = "0.9.0", optional=true} +log = "0.4.14" thiserror = "1.0" bitfield = "0.13.2" -ustr = "0.2" +ustr = "0.8.1" [dev-dependencies] optix-derive = {path="../optix-derive"} -glfw = "0.32.0" -tobj = "0.1.10" -image = "0.22.2" +glfw = "0.44.0" +tobj = "3.2.0" +image = "0.24.1" enum_primitive = 
"0.1.1" -num = "0.2.0" +num = "0.4.0" [features] default=["math-nalgebra"] diff --git a/optix/examples/02_pipeline/LaunchParams.h b/optix/examples/02_pipeline/LaunchParams.h index d980e42..a24b8b6 100644 --- a/optix/examples/02_pipeline/LaunchParams.h +++ b/optix/examples/02_pipeline/LaunchParams.h @@ -24,7 +24,7 @@ typedef unsigned int uint32_t; struct LaunchParams { int frameID{0}; uint32_t* colorBuffer; - V2i32 fbSize; + i32x2 fbSize; }; } // namespace osc diff --git a/optix/examples/07_obj/main.rs b/optix/examples/07_obj/main.rs index c5d49b5..94cf862 100644 --- a/optix/examples/07_obj/main.rs +++ b/optix/examples/07_obj/main.rs @@ -13,9 +13,11 @@ use optix::cuda::TaggedMallocator; use optix::math::*; fn load_model(path: &std::path::Path) -> Model { - let (models, materials) = tobj::load_obj(path).unwrap(); + let (models, materials) = tobj::load_obj(path, + &tobj::LoadOptions::default()).unwrap(); let mut bounds = Box3f32::make_empty(); + let materials = materials.expect("Failed to load MTL file"); let meshes = models .into_iter() .map(|model| { diff --git a/optix/examples/08_texture/main.rs b/optix/examples/08_texture/main.rs index 1d136ce..254e028 100644 --- a/optix/examples/08_texture/main.rs +++ b/optix/examples/08_texture/main.rs @@ -115,7 +115,7 @@ fn handle_window_event(window: &mut glfw::Window, event: glfw::WindowEvent) { fn load_texture(path: &std::path::Path) -> Option> { let im = match image::open(path) { - Ok(im) => im.to_rgba(), + Ok(im) => im.to_rgba8(), Err(e) => { println!("{}", e); return None; @@ -131,11 +131,13 @@ fn load_texture(path: &std::path::Path) -> Option> { } fn load_model(path: &std::path::Path) -> Model { - let (models, materials) = tobj::load_obj(path).unwrap(); + let (models, materials) = tobj::load_obj(path, + &tobj::LoadOptions::default()).unwrap(); let mut bounds = Box3f32::make_empty(); let mut loaded_texture_ids = std::collections::HashMap::new(); let mut textures = Vec::new(); + let materials = materials.expect("Failed to load MTL file"); let meshes = models .into_iter() .map(|model| { diff --git a/optix/examples/09_shadow/devicePrograms.cu b/optix/examples/09_shadow/devicePrograms.cu index d2556a8..c93fffe 100644 --- a/optix/examples/09_shadow/devicePrograms.cu +++ b/optix/examples/09_shadow/devicePrograms.cu @@ -72,7 +72,7 @@ extern "C" __global__ void __closesthit__radiance() { // gather some basic hit information // ------------------------------------------------------------------ const int primID = optixGetPrimitiveIndex(); - const V3i32 index = sbtData.index[primID]; + const i32x3 index = sbtData.index[primID]; const f32 u = optixGetTriangleBarycentrics().x; const f32 v = optixGetTriangleBarycentrics().y; @@ -80,11 +80,11 @@ extern "C" __global__ void __closesthit__radiance() { // compute normal, using either shading normal (if avail), or // geometry normal (fallback) // ------------------------------------------------------------------ - const V3f32& A = sbtData.vertex[index.x]; - const V3f32& B = sbtData.vertex[index.y]; - const V3f32& C = sbtData.vertex[index.z]; - V3f32 Ng = cross(B - A, C - A); - V3f32 Ns = (!sbtData.normal.is_null()) + const f32x3& A = sbtData.vertex[index.x]; + const f32x3& B = sbtData.vertex[index.y]; + const f32x3& C = sbtData.vertex[index.z]; + f32x3 Ng = cross(B - A, C - A); + f32x3 Ns = (!sbtData.normal.is_null()) ? 
((1.f - u - v) * sbtData.normal[index.x] + u * sbtData.normal[index.y] + v * sbtData.normal[index.z]) : Ng; @@ -92,7 +92,7 @@ extern "C" __global__ void __closesthit__radiance() { // ------------------------------------------------------------------ // face-forward and normalize normals // ------------------------------------------------------------------ - const V3f32 rayDir = optixGetWorldRayDirection(); + const f32x3 rayDir = optixGetWorldRayDirection(); if (dot(rayDir, Ng) > 0.f) Ng = -Ng; @@ -106,27 +106,28 @@ extern "C" __global__ void __closesthit__radiance() { // compute diffuse material color, including diffuse texture, if // available // ------------------------------------------------------------------ - V3f32 diffuseColor = sbtData.color; + f32x3 diffuseColor = sbtData.color; if (sbtData.has_texture && !sbtData.texcoord.is_null()) { - const V2f32 tc = (1.f - u - v) * sbtData.texcoord[index.x] + + const f32x2 tc = (1.f - u - v) * sbtData.texcoord[index.x] + u * sbtData.texcoord[index.y] + v * sbtData.texcoord[index.z]; - V4f32 fromTexture = tex2D(sbtData.texture, tc.x, tc.y); - diffuseColor = diffuseColor * fromTexture.xyz(); + f32x4 fromTexture = tex2D(sbtData.texture, tc.x, tc.y); + //diffuseColor = diffuseColor * fromTexture.xyz(); + diffuseColor = diffuseColor * make_f32x3(fromTexture); } // ------------------------------------------------------------------ // compute shadow // ------------------------------------------------------------------ - const V3f32 surfPos = (1.f - u - v) * sbtData.vertex[index.x] + + const f32x3 surfPos = (1.f - u - v) * sbtData.vertex[index.x] + u * sbtData.vertex[index.y] + v * sbtData.vertex[index.z]; - const V3f32 lightPos(-907.108f, 2205.875f, -400.0267f); - const V3f32 lightDir = lightPos - surfPos; + const f32x3 lightPos = make_f32x3(-907.108f, 2205.875f, -400.0267f); + const f32x3 lightDir = lightPos - surfPos; // trace shadow ray: - V3f32 lightVisibility(1.f); + f32x3 lightVisibility=make_f32x3(1.f,1.f,1.f); // the values we store the PRD pointer in: u32 u0, u1; packPointer(&lightVisibility, u0, u1); @@ -148,7 +149,7 @@ extern "C" __global__ void __closesthit__radiance() { // ------------------------------------------------------------------ const float cosDN = 0.1f + .8f * fabsf(dot(rayDir, Ns)); - V3f32& prd = *(V3f32*)getPRD(); + f32x3& prd = *(f32x3*)getPRD(); prd = (.1f + (.2f + .8f * lightVisibility) * cosDN) * diffuseColor; } @@ -158,8 +159,8 @@ __anyhit__radiance() { /*! for this simple example, this will remain empty */ extern "C" __global__ void __anyhit__shadow() { // in this simple example, we terminate on ANY hit - V3f32& prd = *(V3f32*)getPRD(); - prd = V3f32(0.f); + f32x3& prd = *(f32x3*)getPRD(); + prd = make_f32x3(0.f,0.f,0.f); optixTerminateRay(); } @@ -172,9 +173,9 @@ extern "C" __global__ void __anyhit__shadow() { // ------------------------------------------------------------------------------ extern "C" __global__ void __miss__radiance() { - V3f32& prd = *(V3f32*)getPRD(); + f32x3& prd = *(f32x3*)getPRD(); // set to constant white as background color - prd = V3f32(1.f); + prd = make_f32x3(1.f,1.f,1.f); } extern "C" __global__ void __miss__shadow() { @@ -194,19 +195,19 @@ extern "C" __global__ void __raygen__renderFrame() { // our per-ray data for this example. 
what we initialize it to // won't matter, since this value will be overwritten by either // the miss or hit program, anyway - V3f32 pixelColorPRD = V3f32(0.f, 0.0f, 0.0f); + f32x3 pixelColorPRD = make_f32x3(0.f, 0.0f, 0.0f); // the values we store the PRD pointer in: u32 u0, u1; packPointer(&pixelColorPRD, u0, u1); // normalized screen plane position, in [0,1]^2 - const V2f32 screen = - V2f32(f32(ix) + .5f, f32(iy) + .5f) / - V2f32(optixLaunchParams.frame.size.x, optixLaunchParams.frame.size.y); + const f32x2 screen = + make_f32x2(f32(ix) + .5f, f32(iy) + .5f) / + make_f32x2(optixLaunchParams.frame.size.x, optixLaunchParams.frame.size.y); // generate ray direction - V3f32 rayDir = + f32x3 rayDir = normalize(camera.direction + (screen.x - 0.5f) * camera.horizontal + (screen.y - 0.5f) * camera.vertical); diff --git a/optix/examples/09_shadow/main.rs b/optix/examples/09_shadow/main.rs index 3b62814..6527768 100644 --- a/optix/examples/09_shadow/main.rs +++ b/optix/examples/09_shadow/main.rs @@ -36,8 +36,10 @@ fn main() { up: v3f32(0.0, 1.0, 0.0), }; + let alloc = TaggedMallocator::new(); let mut sample = - SampleRenderer::new(v2i32(width as i32, height as i32), camera, model) + SampleRenderer::new(v2i32(width as i32, height as i32), camera, model, + &alloc) .unwrap(); let (mut window, events) = glfw @@ -110,7 +112,7 @@ fn handle_window_event(window: &mut glfw::Window, event: glfw::WindowEvent) { fn load_texture(path: &std::path::Path) -> Option> { let im = match image::open(path) { - Ok(im) => im.to_rgba(), + Ok(im) => im.to_rgba8(), Err(e) => { println!("{}", e); return None; @@ -126,11 +128,13 @@ fn load_texture(path: &std::path::Path) -> Option> { } fn load_model(path: &std::path::Path) -> Model { - let (models, materials) = tobj::load_obj(path).unwrap(); + let (models, materials) = tobj::load_obj(path, + &tobj::LoadOptions::default()).unwrap(); let mut bounds = Box3f32::make_empty(); let mut loaded_texture_ids = std::collections::HashMap::new(); let mut textures = Vec::new(); + let materials = materials.expect("Failed to load MTL file"); let meshes = models .into_iter() .map(|model| { diff --git a/optix/examples/09_shadow/sample_renderer.rs b/optix/examples/09_shadow/sample_renderer.rs index e6486b1..c1fd392 100644 --- a/optix/examples/09_shadow/sample_renderer.rs +++ b/optix/examples/09_shadow/sample_renderer.rs @@ -9,14 +9,33 @@ use optix_derive::device_shared; use std::rc::Rc; use std::sync::Arc; +use optix::cuda::Allocator; + +enum_from_primitive! 
{ +#[repr(u64)] +#[derive(Debug, PartialEq)] +pub enum MemTags { + OutputBuffer = 1001, + SBT = 2001, + MissRecords = 2002, + HgRecords = 2003, + LaunchParams = 3001, + VertexBuffer = 4001, + IndexBuffer = 4002, + NormalBuffer = 4003, + TexcoordBuffer = 4004, + Accel = 5001, +} +} #[device_shared] -struct TriangleMeshSBTData { +struct TriangleMeshSBTData<'a, AllocT> + where AllocT: 'a + Allocator { color: V3f32, - vertex: Rc>, - normal: Rc>, - texcoord: Rc>, - index: Rc>, + vertex: Rc>, + normal: Rc>, + texcoord: Rc>, + index: Rc>, has_texture: bool, texture: Option>, } @@ -41,7 +60,9 @@ pub struct Model { pub bounds: Box3f32, } -pub struct SampleRenderer { +pub struct SampleRenderer<'a, AllocT> + where AllocT: 'a + Allocator { + alloc: &'a AllocT, cuda_context: cuda::ContextRef, stream: cuda::Stream, device_prop: cuda::DeviceProp, @@ -51,9 +72,9 @@ pub struct SampleRenderer { module: optix::ModuleRef, program_groups: Vec, - sbt: optix::ShaderBindingTable, + sbt: optix::ShaderBindingTable<'a, 'a, AllocT>, - launch_params: SharedVariable, + launch_params: SharedVariable<'a, AllocT, LaunchParams<'a, AllocT>>, last_set_camera: Camera, @@ -62,12 +83,15 @@ pub struct SampleRenderer { ctx: optix::DeviceContext, } -impl SampleRenderer { +impl<'a, AllocT> SampleRenderer<'a, AllocT> + where AllocT: 'a + Allocator +{ pub fn new( fb_size: V2i32, camera: Camera, model: Model, - ) -> Result { + alloc: &'a AllocT, + ) -> Result> { // Make sure CUDA context is initialized cuda::init(); // Check that we've got available devices @@ -125,11 +149,11 @@ impl SampleRenderer { name: "launch_params.h".into(), contents: format!( "{} {} {} {} {}", - optix::Buffer::::cuda_decl(), - Frame::cuda_decl(), + optix::Buffer::<'a, AllocT, i32>::cuda_decl(), + Frame::<'a, AllocT>::cuda_decl(), RenderCamera::cuda_decl(), - LaunchParams::cuda_decl(), - TriangleMeshSBTData::cuda_decl(), + LaunchParams::<'a, AllocT>::cuda_decl(), + TriangleMeshSBTData::<'a, AllocT>::cuda_decl(), ), }; @@ -282,13 +306,17 @@ impl SampleRenderer { for mesh in &model.meshes { let vertex_buffer = - Rc::new(optix::Buffer::new(&mesh.vertex).unwrap()); + Rc::new(optix::Buffer::new(&mesh.vertex,MemTags::VertexBuffer as u64, + alloc,).unwrap()); let index_buffer = - Rc::new(optix::Buffer::new(&mesh.index).unwrap()); + Rc::new(optix::Buffer::new(&mesh.index, MemTags::IndexBuffer as u64, + alloc,).unwrap()); let normal_buffer = - Rc::new(optix::Buffer::new(&mesh.normal).unwrap()); + Rc::new(optix::Buffer::new(&mesh.normal,MemTags::NormalBuffer as u64, + alloc,).unwrap()); let texcoord_buffer = - Rc::new(optix::Buffer::new(&mesh.texcoord).unwrap()); + Rc::new(optix::Buffer::new(&mesh.texcoord,MemTags::TexcoordBuffer as u64, + alloc,).unwrap()); for pg in &hitgroup_pgs { let (has_texture, texture) = @@ -343,7 +371,10 @@ impl SampleRenderer { .accel_compute_memory_usage(&accel_build_options, &build_inputs)?; let compacted_size_buffer = - cuda::Buffer::new(std::mem::size_of::())?; + cuda::Buffer::new(std::mem::size_of::(), + std::mem::align_of::(), + MemTags::Accel as u64, + alloc,)?; let compacted_size_desc = optix::AccelEmitDesc::new( &compacted_size_buffer, @@ -352,9 +383,15 @@ impl SampleRenderer { // allocate and execute build let temp_buffer = - cuda::Buffer::new(blas_buffer_sizes[0].temp_size_in_bytes)?; + cuda::Buffer::new(blas_buffer_sizes[0].temp_size_in_bytes, + optix_sys::OptixAccelBufferByteAlignment, + MemTags::Accel as u64, + alloc,)?; let output_buffer = - cuda::Buffer::new(blas_buffer_sizes[0].output_size_in_bytes)?; + 
cuda::Buffer::new(blas_buffer_sizes[0].output_size_in_bytes, + optix_sys::OptixAccelBufferByteAlignment, + MemTags::Accel as u64, + alloc,)?; let as_handle = ctx.accel_build( &cuda::Stream::default(), @@ -379,10 +416,16 @@ impl SampleRenderer { let compacted_size = compacted_size_buffer.download_primitive::()?; - let as_buffer = cuda::Buffer::new(compacted_size)?; + let mut as_buffer = cuda::Buffer::new( + compacted_size, + optix_sys::OptixAccelBufferByteAlignment, + MemTags::Accel as u64, + alloc, + )?; let as_handle = ctx.accel_compact(&cuda::Stream::default(), as_handle, as_buffer)?; + // sync again match cuda::device_synchronize() { Ok(_) => (), @@ -392,8 +435,10 @@ impl SampleRenderer { } }; - let color_buffer = optix::Buffer::::uninitialized( + let color_buffer = optix::Buffer::<'a, AllocT, V4f32>::uninitialized( (fb_size.x * fb_size.y) as usize, + MemTags::OutputBuffer as u64, + alloc, )?; let cos_fovy = 0.66f32; @@ -416,14 +461,20 @@ impl SampleRenderer { traversable: as_handle, }; - let sbt = optix::ShaderBindingTableBuilder::new(rg_rec) - .miss_records(miss_recs) - .hitgroup_records(hg_recs) + let sbt = optix::ShaderBindingTableBuilder::new(rg_rec, + MemTags::SBT as u64, + alloc,) + .miss_records(miss_recs, MemTags::SBT as u64, alloc) + .hitgroup_records(hg_recs, MemTags::SBT as u64, alloc) .build(); - let launch_params = SharedVariable::::new(launch_params)?; + let launch_params = + SharedVariable::<'a, AllocT, LaunchParams<'a, AllocT>>::new(launch_params, + MemTags::LaunchParams as u64, + alloc,)?; Ok(SampleRenderer { + alloc, cuda_context, stream, device_prop, @@ -461,7 +512,9 @@ impl SampleRenderer { pub fn resize(&mut self, size: V2i32) { self.launch_params.frame.size = size.into(); self.launch_params.frame.color_buffer = - optix::Buffer::::uninitialized((size.x * size.y) as usize) + optix::Buffer::<'a, AllocT, V4f32>::uninitialized((size.x * size.y) as usize, + MemTags::OutputBuffer as u64, + self.alloc,) .unwrap(); } @@ -595,14 +648,16 @@ struct RenderCamera { } #[device_shared] -struct Frame { - color_buffer: optix::Buffer, +struct Frame<'a, AllocT> + where AllocT: 'a + Allocator, { + color_buffer: optix::Buffer<'a, AllocT, V4f32>, size: V2i32, } #[device_shared] -pub struct LaunchParams { - frame: Frame, +pub struct LaunchParams<'a, AllocT> + where AllocT: 'a + Allocator { + frame: Frame<'a, AllocT>, camera: RenderCamera, - traversable: optix::TraversableHandle, + traversable: optix::TraversableHandle<'a, AllocT>, } diff --git a/optix/examples/10_softshadow/devicePrograms.cu b/optix/examples/10_softshadow/devicePrograms.cu index 76a00ea..e29f77a 100644 --- a/optix/examples/10_softshadow/devicePrograms.cu +++ b/optix/examples/10_softshadow/devicePrograms.cu @@ -44,7 +44,7 @@ extern "C" __constant__ LaunchParams optixLaunchParams; can access RNG state */ struct PRD { Random random; - V3f32 pixelColor; + f32x3 pixelColor; }; static __forceinline__ DEVICE void* unpackPointer(u32 i0, u32 i1) { @@ -88,7 +88,7 @@ extern "C" __global__ void __closesthit__radiance() { // gather some basic hit information // ------------------------------------------------------------------ const i32 primID = optixGetPrimitiveIndex(); - const V3i32 index = sbtData.index[primID]; + const i32x3 index = sbtData.index[primID]; const f32 u = optixGetTriangleBarycentrics().x; const f32 v = optixGetTriangleBarycentrics().y; @@ -96,11 +96,11 @@ extern "C" __global__ void __closesthit__radiance() { // compute normal, using either shading normal (if avail), or // geometry normal (fallback) // 
------------------------------------------------------------------ - const V3f32& A = sbtData.vertex[index.x]; - const V3f32& B = sbtData.vertex[index.y]; - const V3f32& C = sbtData.vertex[index.z]; - V3f32 Ng = cross(B - A, C - A); - V3f32 Ns = + const f32x3& A = sbtData.vertex[index.x]; + const f32x3& B = sbtData.vertex[index.y]; + const f32x3& C = sbtData.vertex[index.z]; + f32x3 Ng = cross(B - A, C - A); + f32x3 Ns = (sbtData.normal.is_null()) ? Ng : ((1.f - u - v) * sbtData.normal[index.x] + @@ -109,7 +109,7 @@ extern "C" __global__ void __closesthit__radiance() { // ------------------------------------------------------------------ // face-forward and normalize normals // ------------------------------------------------------------------ - const V3f32 rayDir = optixGetWorldRayDirection(); + const f32x3 rayDir = optixGetWorldRayDirection(); if (dot(rayDir, Ng) > 0.f) Ng = -Ng; @@ -123,23 +123,23 @@ extern "C" __global__ void __closesthit__radiance() { // compute diffuse material color, including diffuse texture, if // available // ------------------------------------------------------------------ - V3f32 diffuseColor = sbtData.color; + f32x3 diffuseColor = sbtData.color; if (sbtData.has_texture && !sbtData.texcoord.is_null()) { - const V2f32 tc = (1.f - u - v) * sbtData.texcoord[index.x] + + const f32x2 tc = (1.f - u - v) * sbtData.texcoord[index.x] + u * sbtData.texcoord[index.y] + v * sbtData.texcoord[index.z]; - V4f32 fromTexture = tex2D(sbtData.texture, tc.x, tc.y); - diffuseColor = diffuseColor * fromTexture.xyz(); + f32x4 fromTexture = tex2D(sbtData.texture, tc.x, tc.y); + diffuseColor = diffuseColor * make_f32x3(fromTexture); } // start with some ambient term - V3f32 pixelColor = (0.01f + 0.1f * fabsf(dot(Ns, rayDir))) * diffuseColor; + f32x3 pixelColor = (0.01f + 0.1f * fabsf(dot(Ns, rayDir))) * diffuseColor; // ------------------------------------------------------------------ // compute shadow // ------------------------------------------------------------------ - const V3f32 surfPos = (1.f - u - v) * sbtData.vertex[index.x] + + const f32x3 surfPos = (1.f - u - v) * sbtData.vertex[index.x] + u * sbtData.vertex[index.y] + v * sbtData.vertex[index.z]; @@ -147,17 +147,17 @@ extern "C" __global__ void __closesthit__radiance() { for (i32 lightSampleID = 0; lightSampleID < numLightSamples; lightSampleID++) { // produce random light sample - const V3f32 lightPos = optixLaunchParams.light.origin + + const f32x3 lightPos = optixLaunchParams.light.origin + prd.random() * optixLaunchParams.light.du + prd.random() * optixLaunchParams.light.dv; - V3f32 lightDir = lightPos - surfPos; - f32 lightDist = lightDir.length(); + f32x3 lightDir = lightPos - surfPos; + f32 lightDist = length(lightDir); lightDir = normalize(lightDir); // trace shadow ray: const f32 NdotL = dot(lightDir, Ns); if (NdotL >= 0.f) { - V3f32 lightVisibility(1.f); + f32x3 lightVisibility=make_float3(1.f,1.f,1.f); // the values we store the PRD poi32er in: u32 u0, u1; packPointer(&lightVisibility, u0, u1); @@ -189,8 +189,8 @@ __anyhit__radiance() { /*! 
for this simple example, this will remain empty */ extern "C" __global__ void __anyhit__shadow() { // in this simple example, we terminate on ANY hit - V3f32& prd = *getPRD(); - prd = V3f32(0.f); + f32x3& prd = *getPRD(); + prd = make_float3(0.f,0.f,0.f); optixTerminateRay(); } @@ -205,7 +205,7 @@ extern "C" __global__ void __anyhit__shadow() { extern "C" __global__ void __miss__radiance() { PRD& prd = *getPRD(); // set to constant white as background color - prd.pixelColor = V3f32(1.f); + prd.pixelColor = make_float3(1.f,1.f,1.f); } extern "C" __global__ void __miss__shadow() { @@ -225,7 +225,7 @@ extern "C" __global__ void __raygen__renderFrame() { PRD prd; prd.random.init(ix + accum_id * optixLaunchParams.frame.size.x, iy + accum_id * optixLaunchParams.frame.size.y); - prd.pixelColor = V3f32(0.f); + prd.pixelColor = make_float3(0.f,0.f,0.f); // the values we store the PRD poi32er in: u32 u0, u1; @@ -233,15 +233,15 @@ extern "C" __global__ void __raygen__renderFrame() { i32 numPixelSamples = NUM_PIXEL_SAMPLES; - V3f32 pixelColor(0.f); + f32x3 pixelColor=make_float3(0.f,0.f,0.f); for (i32 sampleID = 0; sampleID < numPixelSamples; sampleID++) { // normalized screen plane position, in [0,1]^2 - const V2f32 screen(V2f32(ix + prd.random(), iy + prd.random()) / - V2f32(optixLaunchParams.frame.size.x, + const f32x2 screen(make_float2(ix + prd.random(), iy + prd.random()) / + make_float2(optixLaunchParams.frame.size.x, optixLaunchParams.frame.size.y)); // generate ray direction - V3f32 rayDir = + f32x3 rayDir = normalize(camera.direction + (screen.x - 0.5f) * camera.horizontal + (screen.y - 0.5f) * camera.vertical); diff --git a/optix/examples/10_softshadow/main.rs b/optix/examples/10_softshadow/main.rs index bd02a1c..a81b18e 100644 --- a/optix/examples/10_softshadow/main.rs +++ b/optix/examples/10_softshadow/main.rs @@ -8,6 +8,7 @@ use crate::gl_util::*; use optix::math::*; use std::rc::Rc; +use optix::cuda::TaggedMallocator; fn main() { let mut glfw = glfw::init(glfw::FAIL_ON_ERRORS).unwrap(); @@ -39,11 +40,13 @@ fn main() { power: v3f32(3000000.0, 3000000.0, 3000000.0), }; + let alloc = TaggedMallocator::new(); let mut sample = SampleRenderer::new( v2i32(width as i32, height as i32), camera, model, light, + &alloc ) .unwrap(); @@ -86,6 +89,7 @@ fn main() { let w = w as u32; let h = h as u32; if w != width || h != height { + println!("Resize"); fsq.resize(w, h); sample.resize(v2i32(w as i32, h as i32)); width = w; @@ -119,7 +123,7 @@ fn handle_window_event(window: &mut glfw::Window, event: glfw::WindowEvent) { fn load_texture(path: &std::path::Path) -> Option> { let im = match image::open(path) { - Ok(im) => im.to_rgba(), + Ok(im) => im.to_rgba8(), Err(e) => { println!("{}", e); return None; @@ -135,11 +139,13 @@ fn load_texture(path: &std::path::Path) -> Option> { } fn load_model(path: &std::path::Path) -> Model { - let (models, materials) = tobj::load_obj(path).unwrap(); + let (models, materials) = tobj::load_obj(path, + &tobj::LoadOptions::default()).unwrap(); let mut bounds = Box3f32::make_empty(); let mut loaded_texture_ids = std::collections::HashMap::new(); let mut textures = Vec::new(); + let materials = materials.expect("Failed to load MTL file"); let meshes = models .into_iter() .map(|model| { diff --git a/optix/examples/10_softshadow/sample_renderer.rs b/optix/examples/10_softshadow/sample_renderer.rs index 9905fe5..3c2de9d 100644 --- a/optix/examples/10_softshadow/sample_renderer.rs +++ b/optix/examples/10_softshadow/sample_renderer.rs @@ -10,24 +10,49 @@ use 
optix_derive::device_shared; use std::rc::Rc; use std::sync::Arc; +use optix::cuda::Allocator; + + +// enum_from_primitive! { +#[repr(u64)] +#[derive(Debug, PartialEq)] +pub enum MemTags { + OutputBuffer = 1001, + SBT = 2001, + MissRecords = 2002, + HgRecords = 2003, + LaunchParams = 3001, + VertexBuffer = 4001, + IndexBuffer = 4002, + NormalBuffer = 4003, + TexcoordBuffer = 4004, + Accel = 5001, +} +// } -pub struct SampleRenderer { +pub struct SampleRenderer <'a, AllocT> + where + AllocT: 'a + Allocator, { + alloc: &'a AllocT, cuda_context: cuda::ContextRef, stream: cuda::Stream, ctx: optix::DeviceContext, pipeline: optix::PipelineRef, - sbt: optix::ShaderBindingTable, - launch_params: SharedVariable, + sbt: optix::ShaderBindingTable<'a, 'a, AllocT>, + launch_params: SharedVariable<'a, AllocT, LaunchParams<'a, AllocT>>, last_set_camera: Camera, } -impl SampleRenderer { +impl <'a, AllocT> SampleRenderer<'a, AllocT> + where + AllocT: 'a + Allocator, { pub fn new( fb_size: V2i32, camera: Camera, model: Model, light: QuadLight, - ) -> Result { + alloc: &'a AllocT, + ) -> Result> { // Make sure CUDA context is initialized cuda::init(); // Check that we've got available devices @@ -86,12 +111,12 @@ impl SampleRenderer { name: "launch_params.h".into(), contents: format!( "{} {} {} {} {} {}", - optix::Buffer::::cuda_decl(), - Frame::cuda_decl(), + optix::Buffer::<'a, AllocT, i32>::cuda_decl(), + Frame::<'a, AllocT>::cuda_decl(), RenderCamera::cuda_decl(), RenderLight::cuda_decl(), - LaunchParams::cuda_decl(), - TriangleMeshSBTData::cuda_decl(), + LaunchParams::<'a, AllocT>::cuda_decl(), + TriangleMeshSBTData::<'a, AllocT>::cuda_decl(), ), }; let cuda_source = include_str!("devicePrograms.cu"); @@ -246,13 +271,20 @@ impl SampleRenderer { let mut build_inputs = Vec::with_capacity(model.meshes.len()); for mesh in &model.meshes { let vertex_buffer = - Rc::new(optix::Buffer::new(&mesh.vertex).unwrap()); + Rc::new(optix::Buffer::new(&mesh.vertex,MemTags::VertexBuffer as u64, + alloc,).unwrap()); let index_buffer = - Rc::new(optix::Buffer::new(&mesh.index).unwrap()); + Rc::new(optix::Buffer::new(&mesh.index, + MemTags::IndexBuffer as u64, + alloc,).unwrap()); let normal_buffer = - Rc::new(optix::Buffer::new(&mesh.normal).unwrap()); + Rc::new(optix::Buffer::new(&mesh.normal, + MemTags::NormalBuffer as u64, + alloc,).unwrap()); let texcoord_buffer = - Rc::new(optix::Buffer::new(&mesh.texcoord).unwrap()); + Rc::new(optix::Buffer::new(&mesh.texcoord, + MemTags::TexcoordBuffer as u64, + alloc,).unwrap()); for pg in &hitgroup_pgs { let (has_texture, texture) = @@ -304,7 +336,10 @@ impl SampleRenderer { let blas_buffer_sizes = ctx .accel_compute_memory_usage(&accel_build_options, &build_inputs)?; let compacted_size_buffer = - cuda::Buffer::new(std::mem::size_of::())?; + cuda::Buffer::new(std::mem::size_of::(), + std::mem::align_of::(), + MemTags::Accel as u64, + alloc)?; let compacted_size_desc = optix::AccelEmitDesc::new( &compacted_size_buffer, optix::AccelPropertyType::CompactedSize, @@ -312,9 +347,15 @@ impl SampleRenderer { // Allocate and build acceleration structure let temp_buffer = - cuda::Buffer::new(blas_buffer_sizes[0].temp_size_in_bytes)?; + cuda::Buffer::new(blas_buffer_sizes[0].temp_size_in_bytes, + optix_sys::OptixAccelBufferByteAlignment, + MemTags::Accel as u64, + alloc)?; let output_buffer = - cuda::Buffer::new(blas_buffer_sizes[0].output_size_in_bytes)?; + cuda::Buffer::new(blas_buffer_sizes[0].output_size_in_bytes, + optix_sys::OptixAccelBufferByteAlignment, + MemTags::Accel as u64, + 
alloc,)?; let as_handle = ctx.accel_build( &cuda::Stream::default(), @@ -333,7 +374,10 @@ impl SampleRenderer { let compacted_size = compacted_size_buffer.download_primitive::()?; - let as_buffer = cuda::Buffer::new(compacted_size)?; + let as_buffer = cuda::Buffer::new(compacted_size, + optix_sys::OptixAccelBufferByteAlignment, + MemTags::Accel as u64, + alloc,)?; let as_handle = ctx.accel_compact(&cuda::Stream::default(), as_handle, as_buffer)?; @@ -341,8 +385,10 @@ impl SampleRenderer { cuda::device_synchronize()?; // allocate the output buffer - let color_buffer = optix::Buffer::::uninitialized( + let color_buffer = optix::Buffer::<'a, AllocT, V4f32>::uninitialized( (fb_size.x * fb_size.y) as usize, + MemTags::OutputBuffer as u64, + alloc, )?; // set up the camera @@ -377,18 +423,24 @@ impl SampleRenderer { }; // Build the ShaderBindingTable with the records we created earlier - let sbt = optix::ShaderBindingTable::new(rg_rec) - .miss_records(miss_recs) - .hitgroup_records(hg_recs) + let sbt = optix::ShaderBindingTable::new(rg_rec, + MemTags::SBT as u64, + alloc,) + .miss_records(miss_recs,MemTags::SBT as u64, alloc) + .hitgroup_records(hg_recs,MemTags::SBT as u64, alloc) .build(); // Create the SharedVariable that wraps the LaunchParams. This manages // the device-side storage for us. When we want to sync to the device // we just call upload() - let launch_params = SharedVariable::::new(launch_params)?; + let launch_params = + SharedVariable::<'a, AllocT, LaunchParams<'a, AllocT>>::new(launch_params, + MemTags::LaunchParams as u64, + alloc,)?; // Store the pieces we need to persist in the SampleRenderer Ok(SampleRenderer { + alloc, cuda_context, stream, pipeline, @@ -429,7 +481,9 @@ impl SampleRenderer { pub fn resize(&mut self, size: V2i32) { self.launch_params.frame.size = size; self.launch_params.frame.color_buffer = - optix::Buffer::::uninitialized((size.x * size.y) as usize) + optix::Buffer::<'a, AllocT, V4f32>::uninitialized((size.x * size.y) as usize, + MemTags::OutputBuffer as u64, + self.alloc,) .unwrap(); } @@ -559,8 +613,10 @@ struct RenderCamera { } #[device_shared] -struct Frame { - color_buffer: optix::Buffer, +struct Frame<'a, AllocT> + where + AllocT: 'a + Allocator, { + color_buffer: optix::Buffer<'a, AllocT, V4f32>, size: V2i32, accum_id: i32, } @@ -574,20 +630,24 @@ struct RenderLight { } #[device_shared] -pub struct LaunchParams { - frame: Frame, +pub struct LaunchParams<'a, AllocT> + where + AllocT: 'a + Allocator, { + frame: Frame<'a, AllocT>, camera: RenderCamera, light: RenderLight, - traversable: optix::TraversableHandle, + traversable: optix::TraversableHandle<'a, AllocT>, } #[device_shared] -struct TriangleMeshSBTData { +struct TriangleMeshSBTData<'a, AllocT> + where + AllocT: 'a + Allocator, { color: V3f32, - vertex: Rc>, - normal: Rc>, - texcoord: Rc>, - index: Rc>, + vertex: Rc>, + normal: Rc>, + texcoord: Rc>, + index: Rc>, has_texture: bool, texture: Option>, } diff --git a/optix/src/math.rs b/optix/src/math.rs index 433c0a8..22670b4 100644 --- a/optix/src/math.rs +++ b/optix/src/math.rs @@ -114,7 +114,8 @@ cfg_if::cfg_if! { zero, }; - pub use nalgebra_glm::{Dimension, Scalar, Number, RealField}; + pub use nalgebra_glm::{Scalar, Number}; + pub use nalgebra::{RealField}; pub fn cast_slice_v4u8(s: &[u8]) -> &[V4u8] { if s.len() % 4 != 0 { @@ -193,14 +194,14 @@ cfg_if::cfg_if! 
{ pub max: TVec3<T>, } - impl<T> Box3<T> where T: RealField { + impl<T> Box3<T> where T: RealField+Number { pub fn new(min: TVec3<T>, max: TVec3<T>) -> Box3<T> { Box3{min, max} } pub fn make_empty() -> Box3<T> { - let max = T::min_value(); - let min = T::max_value(); + let max = <T as RealField>::min_value().unwrap(); + let min = <T as RealField>::max_value().unwrap(); Box3 { min: vec3(min, min, min), max: vec3(max, max, max),