All Downloads are FREE. Search and download functionalities are using the official Maven repository.

org.lwjgl.vulkan.NVRayTracing Maven / Gradle / Ivy

Go to download

A new generation graphics and compute API that provides high-efficiency, cross-platform access to modern GPUs used in a wide variety of devices from PCs and consoles to mobile phones and embedded platforms.

There is a newer version: 3.3.4
Show newest version
/*
 * Copyright LWJGL. All rights reserved.
 * License terms: https://www.lwjgl.org/license
 * MACHINE GENERATED FILE, DO NOT EDIT
 */
package org.lwjgl.vulkan;

import javax.annotation.*;

import java.nio.*;

import org.lwjgl.system.*;

import static org.lwjgl.system.Checks.*;
import static org.lwjgl.system.JNI.*;
import static org.lwjgl.system.MemoryUtil.*;

/**
 * Rasterization has been the dominant method to produce interactive graphics, but increasing performance of graphics hardware has made ray tracing a viable option for interactive rendering. Being able to integrate ray tracing with traditional rasterization makes it easier for applications to incrementally add ray traced effects to existing applications or to do hybrid approaches with rasterization for primary visibility and ray tracing for secondary queries.
 * 
 * 

To enable ray tracing, this extension adds a few different categories of new functionality:

 * <ul>
 * <li>Acceleration structure objects and build commands</li>
 * <li>A new pipeline type with new shader domains</li>
 * <li>An indirection table to link shader groups with acceleration structure items</li>
 * </ul>

This extension adds support for the following SPIR-V extension in Vulkan:

 * <ul>
 * <li>{@code SPV_NV_ray_tracing}</li>
 * </ul>
Sample Code
* *

Example ray generation GLSL shader

* *

 * #version 450 core
 * #extension GL_NV_ray_tracing : require
 * layout(set = 0, binding = 0, rgba8) uniform image2D image;
 * layout(set = 0, binding = 1) uniform accelerationStructureNV as;
 * layout(location = 0) rayPayloadNV float payload;
 * 
 * void main()
 * {
 *    vec4 col = vec4(0, 0, 0, 1);
 * 
 *    vec3 origin = vec3(float(gl_LaunchIDNV.x)/float(gl_LaunchSizeNV.x), float(gl_LaunchIDNV.y)/float(gl_LaunchSizeNV.y), 1.0);
 *    vec3 dir = vec3(0.0, 0.0, -1.0);
 * 
 *    traceNV(as, 0, 0xff, 0, 1, 0, origin, 0.0, dir, 1000.0, 0);
 * 
 *    col.y = payload;
 * 
 *    imageStore(image, ivec2(gl_LaunchIDNV.xy), col);
 * }
* *
VK_NV_ray_tracing
* *
*
Name String
*
{@code VK_NV_ray_tracing}
*
Extension Type
*
Device extension
*
Registered Extension Number
*
166
*
Revision
*
3
*
Extension and Version Dependencies
*
    *
  • Requires Vulkan 1.0
  • *
  • Requires {@link KHRGetPhysicalDeviceProperties2 VK_KHR_get_physical_device_properties2}
  • *
  • Requires {@link KHRGetMemoryRequirements2 VK_KHR_get_memory_requirements2}
  • *
*
Contact
*
*
* *
Other Extension Metadata
* *
*
Last Modified Date
*
2018-11-20
*
Interactions and External Dependencies
*
*
Contributors
*
    *
  • Eric Werness, NVIDIA
  • *
  • Ashwin Lele, NVIDIA
  • *
  • Robert Stepinski, NVIDIA
  • *
  • Nuno Subtil, NVIDIA
  • *
  • Christoph Kubisch, NVIDIA
  • *
  • Martin Stich, NVIDIA
  • *
  • Daniel Koch, NVIDIA
  • *
  • Jeff Bolz, NVIDIA
  • *
  • Joshua Barczak, Intel
  • *
  • Tobias Hector, AMD
  • *
  • Henrik Rydgard, NVIDIA
  • *
  • Pascal Gautron, NVIDIA
  • *
*
*/
public class NVRayTracing {

    /** The extension specification version. */
    public static final int VK_NV_RAY_TRACING_SPEC_VERSION = 3;

    /** The extension name. */
    public static final String VK_NV_RAY_TRACING_EXTENSION_NAME = "VK_NV_ray_tracing";

    /** VK_SHADER_UNUSED_NV */
    public static final int VK_SHADER_UNUSED_NV = (~0);

    /**
     * Extends {@code VkStructureType}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV}</li>
     * <li>{@link #VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV}</li>
     * <li>{@link #VK_STRUCTURE_TYPE_GEOMETRY_NV STRUCTURE_TYPE_GEOMETRY_NV}</li>
     * <li>{@link #VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV}</li>
     * <li>{@link #VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV STRUCTURE_TYPE_GEOMETRY_AABB_NV}</li>
     * <li>{@link #VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV}</li>
     * <li>{@link #VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV}</li>
     * <li>{@link #VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV}</li>
     * <li>{@link #VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV}</li>
     * <li>{@link #VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV}</li>
     * <li>{@link #VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV}</li>
     * </ul>
     */
    public static final int
        VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV                = 1000165000,
        VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV              = 1000165001,
        VK_STRUCTURE_TYPE_GEOMETRY_NV                                        = 1000165003,
        VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV                              = 1000165004,
        VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV                                   = 1000165005,
        VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV         = 1000165006,
        VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV     = 1000165007,
        VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008,
        VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV          = 1000165009,
        VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV            = 1000165011,
        VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV                     = 1000165012;

    /**
     * Extends {@code VkShaderStageFlagBits}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_SHADER_STAGE_RAYGEN_BIT_NV SHADER_STAGE_RAYGEN_BIT_NV}</li>
     * <li>{@link #VK_SHADER_STAGE_ANY_HIT_BIT_NV SHADER_STAGE_ANY_HIT_BIT_NV}</li>
     * <li>{@link #VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV SHADER_STAGE_CLOSEST_HIT_BIT_NV}</li>
     * <li>{@link #VK_SHADER_STAGE_MISS_BIT_NV SHADER_STAGE_MISS_BIT_NV}</li>
     * <li>{@link #VK_SHADER_STAGE_INTERSECTION_BIT_NV SHADER_STAGE_INTERSECTION_BIT_NV}</li>
     * <li>{@link #VK_SHADER_STAGE_CALLABLE_BIT_NV SHADER_STAGE_CALLABLE_BIT_NV}</li>
     * </ul>
     */
    public static final int
        VK_SHADER_STAGE_RAYGEN_BIT_NV       = 0x100,
        VK_SHADER_STAGE_ANY_HIT_BIT_NV      = 0x200,
        VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV  = 0x400,
        VK_SHADER_STAGE_MISS_BIT_NV         = 0x800,
        VK_SHADER_STAGE_INTERSECTION_BIT_NV = 0x1000,
        VK_SHADER_STAGE_CALLABLE_BIT_NV     = 0x2000;

    /**
     * Extends {@code VkPipelineStageFlagBits}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV}</li>
     * <li>{@link #VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV}</li>
     * </ul>
     */
    public static final int
        VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV           = 0x200000,
        VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x2000000;

    /** Extends {@code VkBufferUsageFlagBits}. */
    public static final int VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = 0x400;

    /** Extends {@code VkPipelineBindPoint}. */
    public static final int VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = 1000165000;

    /** Extends {@code VkDescriptorType}. */
    public static final int VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000;

    /**
     * Extends {@code VkAccessFlagBits}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV}</li>
     * <li>{@link #VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV}</li>
     * </ul>
     */
    public static final int
        VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV  = 0x200000,
        VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x400000;

    /** Extends {@code VkQueryType}. */
    public static final int VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000;

    /** Extends {@code VkPipelineCreateFlagBits}. */
    public static final int VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x20;

    /** Extends {@code VkObjectType}. */
    public static final int VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000;

    /** Extends {@code VkDebugReportObjectTypeEXT}. */
    public static final int VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000;

    /** Extends {@code VkIndexType}. */
    public static final int VK_INDEX_TYPE_NONE_NV = 1000165000;

    /**
     * Extends {@code VkRayTracingShaderGroupTypeKHR}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV}</li>
     * <li>{@link #VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV}</li>
     * <li>{@link #VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV}</li>
     * </ul>
     */
    public static final int
        VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV              = 0,
        VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV  = 1,
        VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = 2;

    /**
     * Extends {@code VkGeometryTypeKHR}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_GEOMETRY_TYPE_TRIANGLES_NV GEOMETRY_TYPE_TRIANGLES_NV}</li>
     * <li>{@link #VK_GEOMETRY_TYPE_AABBS_NV GEOMETRY_TYPE_AABBS_NV}</li>
     * </ul>
     */
    public static final int
        VK_GEOMETRY_TYPE_TRIANGLES_NV = 0,
        VK_GEOMETRY_TYPE_AABBS_NV     = 1;

    /**
     * Extends {@code VkAccelerationStructureTypeKHR}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV}</li>
     * <li>{@link #VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV}</li>
     * </ul>
     */
    public static final int
        VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV    = 0,
        VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = 1;

    /**
     * Extends {@code VkGeometryFlagBitsKHR}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_GEOMETRY_OPAQUE_BIT_NV GEOMETRY_OPAQUE_BIT_NV}</li>
     * <li>{@link #VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV}</li>
     * </ul>
     */
    public static final int
        VK_GEOMETRY_OPAQUE_BIT_NV                          = 0x1,
        VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = 0x2;

    /**
     * Extends {@code VkGeometryInstanceFlagBitsKHR}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV}</li>
     * <li>{@link #VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV}</li>
     * <li>{@link #VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV}</li>
     * <li>{@link #VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV}</li>
     * </ul>
     */
    public static final int
        VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV           = 0x1,
        VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = 0x2,
        VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV                    = 0x4,
        VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV                 = 0x8;

    /**
     * Extends {@code VkBuildAccelerationStructureFlagBitsKHR}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV}</li>
     * <li>{@link #VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV}</li>
     * <li>{@link #VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV}</li>
     * <li>{@link #VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV}</li>
     * <li>{@link #VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV}</li>
     * </ul>
     */
    public static final int
        VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV      = 0x1,
        VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV  = 0x2,
        VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = 0x4,
        VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = 0x8,
        VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV        = 0x10;

    /**
     * Extends {@code VkCopyAccelerationStructureModeKHR}.
     *
     * <h5>Enum values:</h5>
     *
     * <ul>
     * <li>{@link #VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV}</li>
     * <li>{@link #VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV}</li>
     * </ul>
     */
    public static final int
        VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV   = 0,
        VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = 1;

    /**
     * VkAccelerationStructureMemoryRequirementsTypeNV - Acceleration structure memory requirement type
     *
     * <h5>Description</h5>
     *
     * <ul>
     * <li>{@link #VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV} requests the memory requirement for the {@code VkAccelerationStructureNV} backing store.</li>
     * <li>{@link #VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV} requests the memory requirement for scratch space during the initial build.</li>
     * <li>{@link #VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV} requests the memory requirement for scratch space during an update.</li>
     * </ul>
     *
     * <h5>See Also</h5>
     *
     * <p>{@link VkAccelerationStructureMemoryRequirementsInfoNV}</p>
     */
    public static final int
        VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV         = 0,
        VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV  = 1,
        VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2;

    // Static utility holder for the extension's constants and function wrappers — never instantiated.
    protected NVRayTracing() { throw new UnsupportedOperationException(); }

    // --- [ vkCreateAccelerationStructureNV ] ---

    /** Unsafe version of: {@link #vkCreateAccelerationStructureNV CreateAccelerationStructureNV} */
    public static int nvkCreateAccelerationStructureNV(VkDevice device, long pCreateInfo, long pAllocator, long pAccelerationStructure) {
        long __functionAddress = device.getCapabilities().vkCreateAccelerationStructureNV;
        if (CHECKS) {
            check(__functionAddress);
            VkAccelerationStructureCreateInfoNV.validate(pCreateInfo);
            // pAllocator is optional; only validate when the caller supplied one
            if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); }
        }
        return callPPPPI(device.address(), pCreateInfo, pAllocator, pAccelerationStructure, __functionAddress);
    }

    /**
     * Create a new acceleration structure object.
     *
     * <h5>C Specification</h5>
     *
     * <p>To create acceleration structures, call:</p>
     *
     * <pre><code>
     * VkResult vkCreateAccelerationStructureNV(
     *     VkDevice                                    device,
     *     const VkAccelerationStructureCreateInfoNV*  pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkAccelerationStructureNV*                  pAccelerationStructure);</code></pre>
     *
     * <h5>Description</h5>
     *
     * <p>Similarly to other objects in Vulkan, the acceleration structure creation merely creates an object with a specific &#8220;{@code shape}&#8221; as specified by
     * the information in {@link VkAccelerationStructureInfoNV} and {@code compactedSize} in {@code pCreateInfo}. Populating the data in the object after
     * allocating and binding memory is done with {@link #vkCmdBuildAccelerationStructureNV CmdBuildAccelerationStructureNV} and {@link #vkCmdCopyAccelerationStructureNV CmdCopyAccelerationStructureNV}.</p>
     *
     * <p>Acceleration structure creation uses the count and type information from the geometries, but does not use the data references in the structures.</p>
     *
     * <h5>Valid Usage (Implicit)</h5>
     *
     * <ul>
     * <li>{@code device} must be a valid {@code VkDevice} handle</li>
     * <li>{@code pCreateInfo} must be a valid pointer to a valid {@link VkAccelerationStructureCreateInfoNV} structure</li>
     * <li>If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure</li>
     * <li>{@code pAccelerationStructure} must be a valid pointer to a {@code VkAccelerationStructureNV} handle</li>
     * </ul>
     *
     * <h5>Return Codes</h5>
     *
     * <p>On success, this command returns: {@link VK10#VK_SUCCESS SUCCESS}</p>
     * <p>On failure, this command returns: {@link VK10#VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}</p>
     *
     * <h5>See Also</h5>
     *
     * <p>{@link VkAccelerationStructureCreateInfoNV}, {@link VkAllocationCallbacks}</p>
     *
     * @param device                 the logical device that creates the buffer object.
     * @param pCreateInfo            a pointer to a {@link VkAccelerationStructureCreateInfoNV} structure containing parameters affecting creation of the acceleration structure.
     * @param pAllocator             controls host memory allocation as described in the Memory Allocation chapter.
     * @param pAccelerationStructure a pointer to a {@code VkAccelerationStructureNV} handle in which the resulting acceleration structure object is returned.
     */
    @NativeType("VkResult")
    public static int vkCreateAccelerationStructureNV(VkDevice device, @NativeType("VkAccelerationStructureCreateInfoNV const *") VkAccelerationStructureCreateInfoNV pCreateInfo, @Nullable @NativeType("VkAllocationCallbacks const *") VkAllocationCallbacks pAllocator, @NativeType("VkAccelerationStructureNV *") LongBuffer pAccelerationStructure) {
        if (CHECKS) {
            // exactly one handle is written back
            check(pAccelerationStructure, 1);
        }
        return nvkCreateAccelerationStructureNV(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pAccelerationStructure));
    }

    // --- [ vkDestroyAccelerationStructureNV ] ---

    /** Unsafe version of: {@link #vkDestroyAccelerationStructureNV DestroyAccelerationStructureNV} */
    public static void nvkDestroyAccelerationStructureNV(VkDevice device, long accelerationStructure, long pAllocator) {
        long __functionAddress = device.getCapabilities().vkDestroyAccelerationStructureNV;
        if (CHECKS) {
            check(__functionAddress);
            // pAllocator is optional; only validate when the caller supplied one
            if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); }
        }
        callPJPV(device.address(), accelerationStructure, pAllocator, __functionAddress);
    }

    /**
     * Destroy an acceleration structure object.
     *
     * <h5>C Specification</h5>
     *
     * <p>To destroy an acceleration structure, call:</p>
     *
     * <pre><code>
     * void vkDestroyAccelerationStructureNV(
     *     VkDevice                                    device,
     *     VkAccelerationStructureNV                   accelerationStructure,
     *     const VkAllocationCallbacks*                pAllocator);</code></pre>
     *
     * <h5>Valid Usage</h5>
     *
     * <ul>
     * <li>All submitted commands that refer to {@code accelerationStructure} must have completed execution</li>
     * <li>If {@link VkAllocationCallbacks} were provided when {@code accelerationStructure} was created, a compatible set of callbacks must be provided here</li>
     * <li>If no {@link VkAllocationCallbacks} were provided when {@code accelerationStructure} was created, {@code pAllocator} must be {@code NULL}</li>
     * </ul>
     *
     * <h5>Valid Usage (Implicit)</h5>
     *
     * <ul>
     * <li>{@code device} must be a valid {@code VkDevice} handle</li>
     * <li>If {@code accelerationStructure} is not {@link VK10#VK_NULL_HANDLE NULL_HANDLE}, {@code accelerationStructure} must be a valid {@code VkAccelerationStructureNV} handle</li>
     * <li>If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure</li>
     * <li>If {@code accelerationStructure} is a valid handle, it must have been created, allocated, or retrieved from {@code device}</li>
     * </ul>
     *
     * <h5>Host Synchronization</h5>
     *
     * <ul>
     * <li>Host access to {@code accelerationStructure} must be externally synchronized</li>
     * </ul>
     *
     * <h5>See Also</h5>
     *
     * <p>{@link VkAllocationCallbacks}</p>
     *
     * @param device                the logical device that destroys the buffer.
     * @param accelerationStructure the acceleration structure to destroy.
     * @param pAllocator            controls host memory allocation as described in the Memory Allocation chapter.
     */
    public static void vkDestroyAccelerationStructureNV(VkDevice device, @NativeType("VkAccelerationStructureNV") long accelerationStructure, @Nullable @NativeType("VkAllocationCallbacks const *") VkAllocationCallbacks pAllocator) {
        nvkDestroyAccelerationStructureNV(device, accelerationStructure, memAddressSafe(pAllocator));
    }

    // --- [ vkGetAccelerationStructureMemoryRequirementsNV ] ---

    /** Unsafe version of: {@link #vkGetAccelerationStructureMemoryRequirementsNV GetAccelerationStructureMemoryRequirementsNV} */
    public static void nvkGetAccelerationStructureMemoryRequirementsNV(VkDevice device, long pInfo, long pMemoryRequirements) {
        long __functionAddress = device.getCapabilities().vkGetAccelerationStructureMemoryRequirementsNV;
        if (CHECKS) {
            check(__functionAddress);
        }
        callPPPV(device.address(), pInfo, pMemoryRequirements, __functionAddress);
    }

    /**
     * Get acceleration structure memory requirements.
     *
     * <h5>C Specification</h5>
     *
     * <p>An acceleration structure has memory requirements for the structure object itself, scratch space for the build, and scratch space for the update.</p>
     *
     * <p>Scratch space is allocated as a {@code VkBuffer}, so for {@link #VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV} and
     * {@link #VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV} the {@code pMemoryRequirements→alignment} and
     * {@code pMemoryRequirements→memoryTypeBits} values returned by this call must be filled with zero, and should be ignored by the application.</p>
     *
     * <p>To query the memory requirements, call:</p>
     *
     * <pre><code>
     * void vkGetAccelerationStructureMemoryRequirementsNV(
     *     VkDevice                                    device,
     *     const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo,
     *     VkMemoryRequirements2KHR*                   pMemoryRequirements);</code></pre>
     *
     * <h5>Valid Usage (Implicit)</h5>
     *
     * <ul>
     * <li>{@code device} must be a valid {@code VkDevice} handle</li>
     * <li>{@code pInfo} must be a valid pointer to a valid {@link VkAccelerationStructureMemoryRequirementsInfoNV} structure</li>
     * <li>{@code pMemoryRequirements} must be a valid pointer to a {@link VkMemoryRequirements2KHR} structure</li>
     * </ul>
     *
     * <h5>See Also</h5>
     *
     * <p>{@link VkAccelerationStructureMemoryRequirementsInfoNV}, {@link VkMemoryRequirements2KHR}</p>
     *
     * @param device              the logical device on which the acceleration structure was created.
     * @param pInfo               a pointer to a {@link VkAccelerationStructureMemoryRequirementsInfoNV} structure specifying the acceleration structure to get memory requirements for.
     * @param pMemoryRequirements a pointer to a {@link VkMemoryRequirements2KHR} structure in which the requested acceleration structure memory requirements are returned.
     */
    public static void vkGetAccelerationStructureMemoryRequirementsNV(VkDevice device, @NativeType("VkAccelerationStructureMemoryRequirementsInfoNV const *") VkAccelerationStructureMemoryRequirementsInfoNV pInfo, @NativeType("VkMemoryRequirements2KHR *") VkMemoryRequirements2KHR pMemoryRequirements) {
        nvkGetAccelerationStructureMemoryRequirementsNV(device, pInfo.address(), pMemoryRequirements.address());
    }

    // --- [ vkBindAccelerationStructureMemoryNV ] ---

    /**
     * Unsafe version of: {@link #vkBindAccelerationStructureMemoryNV BindAccelerationStructureMemoryNV}
     *
     * @param bindInfoCount the number of elements in {@code pBindInfos}.
     */
    public static int nvkBindAccelerationStructureMemoryNV(VkDevice device, int bindInfoCount, long pBindInfos) {
        long __functionAddress = device.getCapabilities().vkBindAccelerationStructureMemoryNV;
        if (CHECKS) {
            check(__functionAddress);
            // validate every element of the struct array before crossing into native code
            Struct.validate(pBindInfos, bindInfoCount, VkBindAccelerationStructureMemoryInfoNV.SIZEOF, VkBindAccelerationStructureMemoryInfoNV::validate);
        }
        return callPPI(device.address(), bindInfoCount, pBindInfos, __functionAddress);
    }

    /**
     * Bind acceleration structure memory.
     *
     * <h5>C Specification</h5>
     *
     * <p>To attach memory to one or more acceleration structures at a time, call:</p>
     *
     * <pre><code>
     * VkResult vkBindAccelerationStructureMemoryNV(
     *     VkDevice                                    device,
     *     uint32_t                                    bindInfoCount,
     *     const VkBindAccelerationStructureMemoryInfoNV* pBindInfos);</code></pre>
     *
     * <h5>Valid Usage (Implicit)</h5>
     *
     * <ul>
     * <li>{@code device} must be a valid {@code VkDevice} handle</li>
     * <li>{@code pBindInfos} must be a valid pointer to an array of {@code bindInfoCount} valid {@link VkBindAccelerationStructureMemoryInfoNV} structures</li>
     * <li>{@code bindInfoCount} must be greater than 0</li>
     * </ul>
     *
     * <h5>Return Codes</h5>
     *
     * <p>On success, this command returns: {@link VK10#VK_SUCCESS SUCCESS}</p>
     * <p>On failure, this command returns: {@link VK10#VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}, {@link VK10#VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}</p>
     *
     * <h5>See Also</h5>
     *
     * <p>{@link VkBindAccelerationStructureMemoryInfoNV}</p>
     *
     * @param device     the logical device that owns the acceleration structures and memory.
     * @param pBindInfos a pointer to an array of {@link VkBindAccelerationStructureMemoryInfoNV} structures describing acceleration structures and memory to bind.
     */
    @NativeType("VkResult")
    public static int vkBindAccelerationStructureMemoryNV(VkDevice device, @NativeType("VkBindAccelerationStructureMemoryInfoNV const *") VkBindAccelerationStructureMemoryInfoNV.Buffer pBindInfos) {
        return nvkBindAccelerationStructureMemoryNV(device, pBindInfos.remaining(), pBindInfos.address());
    }

    // --- [ vkCmdBuildAccelerationStructureNV ] ---

    /** Unsafe version of: {@link #vkCmdBuildAccelerationStructureNV CmdBuildAccelerationStructureNV} */
    public static void nvkCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, long pInfo, long instanceData, long instanceOffset, int update, long dst, long src, long scratch, long scratchOffset) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdBuildAccelerationStructureNV;
        if (CHECKS) {
            check(__functionAddress);
            VkAccelerationStructureInfoNV.validate(pInfo);
        }
        callPPJJJJJJV(commandBuffer.address(), pInfo, instanceData, instanceOffset, update, dst, src, scratch, scratchOffset, __functionAddress);
    }

    /**
     * Build an acceleration structure.
     *
     * <h5>C Specification</h5>
     *
     * <p>To build an acceleration structure call:</p>
     *
     * <pre><code>
     * void vkCmdBuildAccelerationStructureNV(
     *     VkCommandBuffer                             commandBuffer,
     *     const VkAccelerationStructureInfoNV*        pInfo,
     *     VkBuffer                                    instanceData,
     *     VkDeviceSize                                instanceOffset,
     *     VkBool32                                    update,
     *     VkAccelerationStructureNV                   dst,
     *     VkAccelerationStructureNV                   src,
     *     VkBuffer                                    scratch,
     *     VkDeviceSize                                scratchOffset);</code></pre>
     *
     * <h5>Description</h5>
     *
     * <p>Accesses to {@code dst}, {@code src}, and {@code scratch} must be synchronized with the
     * {@link KHRAccelerationStructure#VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR} pipeline stage and an access type of
     * {@link KHRAccelerationStructure#VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR} or
     * {@link KHRAccelerationStructure#VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR}.</p>
     *
     * <h5>Valid Usage</h5>
     *
     * <ul>
     * <li>{@code geometryCount} must be less than or equal to {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::maxGeometryCount}</li>
     * <li>{@code dst} must have been created with compatible {@link VkAccelerationStructureInfoNV} where {@link VkAccelerationStructureInfoNV}{@code ::type} and {@link VkAccelerationStructureInfoNV}{@code ::flags} are identical, {@link VkAccelerationStructureInfoNV}{@code ::instanceCount} and {@link VkAccelerationStructureInfoNV}{@code ::geometryCount} for {@code dst} are greater than or equal to the build size and each geometry in {@link VkAccelerationStructureInfoNV}{@code ::pGeometries} for {@code dst} has greater than or equal to the number of vertices, indices, and AABBs</li>
     * <li>If {@code update} is {@link VK10#VK_TRUE TRUE}, {@code src} must not be {@link VK10#VK_NULL_HANDLE NULL_HANDLE}</li>
     * <li>If {@code update} is {@link VK10#VK_TRUE TRUE}, {@code src} must have previously been constructed with {@link #VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV} set in {@link VkAccelerationStructureInfoNV}{@code ::flags} in the original build</li>
     * <li>If {@code update} is {@link VK10#VK_FALSE FALSE}, the {@code size} member of the {@link VkMemoryRequirements} structure returned from a call to {@link #vkGetAccelerationStructureMemoryRequirementsNV GetAccelerationStructureMemoryRequirementsNV} with {@link VkAccelerationStructureMemoryRequirementsInfoNV}{@code ::accelerationStructure} set to {@code dst} and {@link VkAccelerationStructureMemoryRequirementsInfoNV}{@code ::type} set to {@link #VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV} must be less than or equal to the size of {@code scratch} minus {@code scratchOffset}</li>
     * <li>If {@code update} is {@link VK10#VK_TRUE TRUE}, the {@code size} member of the {@link VkMemoryRequirements} structure returned from a call to {@link #vkGetAccelerationStructureMemoryRequirementsNV GetAccelerationStructureMemoryRequirementsNV} with {@link VkAccelerationStructureMemoryRequirementsInfoNV}{@code ::accelerationStructure} set to {@code dst} and {@link VkAccelerationStructureMemoryRequirementsInfoNV}{@code ::type} set to {@link #VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV} must be less than or equal to the size of {@code scratch} minus {@code scratchOffset}</li>
     * <li>{@code scratch} must have been created with {@link #VK_BUFFER_USAGE_RAY_TRACING_BIT_NV BUFFER_USAGE_RAY_TRACING_BIT_NV} usage flag</li>
     * <li>If {@code instanceData} is not {@link VK10#VK_NULL_HANDLE NULL_HANDLE}, {@code instanceData} must have been created with {@link #VK_BUFFER_USAGE_RAY_TRACING_BIT_NV BUFFER_USAGE_RAY_TRACING_BIT_NV} usage flag</li>
     * <li>Each {@link VkAccelerationStructureInstanceKHR}{@code ::accelerationStructureReference} value in {@code instanceData} must be a valid device address containing a value obtained from {@link #vkGetAccelerationStructureHandleNV GetAccelerationStructureHandleNV}</li>
     * <li>If {@code update} is {@link VK10#VK_TRUE TRUE}, then objects that were previously active must not be made inactive as per Inactive Primitives and Instances</li>
     * <li>If {@code update} is {@link VK10#VK_TRUE TRUE}, then objects that were previously inactive must not be made active as per Inactive Primitives and Instances</li>
     * <li>If {@code update} is {@link VK10#VK_TRUE TRUE}, the {@code src} and {@code dst} objects must either be the same object or not have any memory aliasing</li>
     * </ul>
     *
     * <h5>Valid Usage (Implicit)</h5>
     *
     * <ul>
     * <li>{@code commandBuffer} must be a valid {@code VkCommandBuffer} handle</li>
     * <li>{@code pInfo} must be a valid pointer to a valid {@link VkAccelerationStructureInfoNV} structure</li>
     * <li>If {@code instanceData} is not {@link VK10#VK_NULL_HANDLE NULL_HANDLE}, {@code instanceData} must be a valid {@code VkBuffer} handle</li>
     * <li>{@code dst} must be a valid {@code VkAccelerationStructureNV} handle</li>
     * <li>If {@code src} is not {@link VK10#VK_NULL_HANDLE NULL_HANDLE}, {@code src} must be a valid {@code VkAccelerationStructureNV} handle</li>
     * <li>{@code scratch} must be a valid {@code VkBuffer} handle</li>
     * <li>{@code commandBuffer} must be in the recording state</li>
     * <li>The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support compute operations</li>
     * <li>This command must only be called outside of a render pass instance</li>
     * <li>Each of {@code commandBuffer}, {@code dst}, {@code instanceData}, {@code scratch}, and {@code src} that are valid handles of non-ignored parameters must have been created, allocated, or retrieved from the same {@code VkDevice}</li>
     * </ul>
     *
     * <h5>Host Synchronization</h5>
     *
     * <ul>
     * <li>Host access to {@code commandBuffer} must be externally synchronized</li>
     * <li>Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized</li>
     * </ul>
     *
     * <h5>Command Properties</h5>
     *
     * <table class="lwjgl"><thead><tr><th>Command Buffer Levels</th><th>Render Pass Scope</th><th>Supported Queue Types</th></tr></thead><tbody><tr><td>Primary Secondary</td><td>Outside</td><td>Compute</td></tr></tbody></table>
     *
     * <h5>See Also</h5>
     *
     * <p>{@link VkAccelerationStructureInfoNV}</p>
* @param commandBuffer  the command buffer into which the command will be recorded.
* @param pInfo          contains the shared information for the acceleration structure’s structure.
* @param instanceData   the buffer containing an array of {@link VkAccelerationStructureInstanceKHR} structures defining acceleration structures. This parameter
*                       must be {@code NULL} for bottom level acceleration structures.
* @param instanceOffset the offset in bytes (relative to the start of {@code instanceData}) at which the instance data is located.
* @param update         specifies whether to update the {@code dst} acceleration structure with the data in {@code src}.
* @param dst            a pointer to the target acceleration structure for the build.
* @param src            a pointer to an existing acceleration structure that is to be used to update the {@code dst} acceleration structure.
* @param scratch        the {@code VkBuffer} that will be used as scratch memory for the build.
* @param scratchOffset  the offset in bytes relative to the start of {@code scratch} that will be used as a scratch memory.
*/
public static void vkCmdBuildAccelerationStructureNV(VkCommandBuffer commandBuffer, @NativeType("VkAccelerationStructureInfoNV const *") VkAccelerationStructureInfoNV pInfo, @NativeType("VkBuffer") long instanceData, @NativeType("VkDeviceSize") long instanceOffset, @NativeType("VkBool32") boolean update, @NativeType("VkAccelerationStructureNV") long dst, @NativeType("VkAccelerationStructureNV") long src, @NativeType("VkBuffer") long scratch, @NativeType("VkDeviceSize") long scratchOffset) {
    // Safe public wrapper: widens the Java boolean 'update' to a VkBool32 (1 or 0) and passes
    // the struct's off-heap address to the unsafe variant, which performs the JNI dispatch.
    nvkCmdBuildAccelerationStructureNV(commandBuffer, pInfo.address(), instanceData, instanceOffset, update ? 1 : 0, dst, src, scratch, scratchOffset);
}

// --- [ vkCmdCopyAccelerationStructureNV ] ---

/**
 * Copy an acceleration structure.
 *
 *
C Specification
* *

To copy an acceleration structure call:

* *

     * void vkCmdCopyAccelerationStructureNV(
     *     VkCommandBuffer                             commandBuffer,
     *     VkAccelerationStructureNV                   dst,
     *     VkAccelerationStructureNV                   src,
     *     VkCopyAccelerationStructureModeKHR          mode);
* *
Description
* *

Accesses to {@code src} and {@code dst} must be synchronized with the {@link KHRAccelerationStructure#VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR} pipeline stage and an access type of {@link KHRAccelerationStructure#VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR} or {@link KHRAccelerationStructure#VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR} as appropriate.

* *
Valid Usage
* *
    *
  • {@code mode} must be {@link KHRAccelerationStructure#VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR} or {@link KHRAccelerationStructure#VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR}
  • *
  • The source acceleration structure {@code src} must have been constructed prior to the execution of this command
  • *
  • If {@code mode} is {@link KHRAccelerationStructure#VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR}, {@code src} must have been constructed with {@link KHRAccelerationStructure#VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR} in the build
  • *
  • The {@code buffer} used to create {@code src} must be bound to device memory
  • *
  • The {@code buffer} used to create {@code dst} must be bound to device memory
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code dst} must be a valid {@code VkAccelerationStructureNV} handle
  • *
  • {@code src} must be a valid {@code VkAccelerationStructureNV} handle
  • *
  • {@code mode} must be a valid {@code VkCopyAccelerationStructureModeKHR} value
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • Each of {@code commandBuffer}, {@code dst}, and {@code src} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer LevelsRender Pass ScopeSupported Queue Types
Primary SecondaryOutsideCompute
* @param commandBuffer the command buffer into which the command will be recorded.
* @param dst           the target acceleration structure for the copy.
* @param src           the source acceleration structure for the copy.
* @param mode          a {@code VkCopyAccelerationStructureModeKHR} value specifying additional operations to perform during the copy.
*/
public static void vkCmdCopyAccelerationStructureNV(VkCommandBuffer commandBuffer, @NativeType("VkAccelerationStructureNV") long dst, @NativeType("VkAccelerationStructureNV") long src, @NativeType("VkCopyAccelerationStructureModeKHR") int mode) {
    // Function pointer is looked up on the command buffer's capabilities; presumably non-NULL
    // only when VK_NV_ray_tracing was enabled at device creation — check() fails fast on NULL
    // (when runtime checks are enabled) instead of crashing inside native code.
    long __functionAddress = commandBuffer.getCapabilities().vkCmdCopyAccelerationStructureNV;
    if (CHECKS) {
        check(__functionAddress);
    }
    callPJJV(commandBuffer.address(), dst, src, mode, __functionAddress);
}

// --- [ vkCmdTraceRaysNV ] ---

/**
 * Initialize a ray tracing dispatch.
 *
 *
C Specification
* *

To dispatch ray tracing use:

* *

     * void vkCmdTraceRaysNV(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    raygenShaderBindingTableBuffer,
     *     VkDeviceSize                                raygenShaderBindingOffset,
     *     VkBuffer                                    missShaderBindingTableBuffer,
     *     VkDeviceSize                                missShaderBindingOffset,
     *     VkDeviceSize                                missShaderBindingStride,
     *     VkBuffer                                    hitShaderBindingTableBuffer,
     *     VkDeviceSize                                hitShaderBindingOffset,
     *     VkDeviceSize                                hitShaderBindingStride,
     *     VkBuffer                                    callableShaderBindingTableBuffer,
     *     VkDeviceSize                                callableShaderBindingOffset,
     *     VkDeviceSize                                callableShaderBindingStride,
     *     uint32_t                                    width,
     *     uint32_t                                    height,
     *     uint32_t                                    depth);
* *
Description
* *

When the command is executed, a ray generation group of width × height × depth rays is assembled.

* *
Valid Usage
* *
    *
  • If a {@code VkSampler} created with {@code magFilter} or {@code minFilter} equal to {@link VK10#VK_FILTER_LINEAR FILTER_LINEAR} and {@code compareEnable} equal to {@link VK10#VK_FALSE FALSE} is used to sample a {@code VkImageView} as a result of this command, then the image view’s format features must contain {@link VK10#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT}
  • *
  • If a {@code VkSampler} created with {@code mipmapMode} equal to {@link VK10#VK_SAMPLER_MIPMAP_MODE_LINEAR SAMPLER_MIPMAP_MODE_LINEAR} and {@code compareEnable} equal to {@link VK10#VK_FALSE FALSE} is used to sample a {@code VkImageView} as a result of this command, then the image view’s format features must contain {@link VK10#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT}
  • *
  • If a {@code VkImageView} is accessed using atomic operations as a result of this command, then the image view’s format features must contain {@link VK10#VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT}
  • *
  • If a {@code VkImageView} is sampled with {@link EXTFilterCubic#VK_FILTER_CUBIC_EXT FILTER_CUBIC_EXT} as a result of this command, then the image view’s format features must contain {@link EXTFilterCubic#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT}
  • *
  • Any {@code VkImageView} being sampled with {@link EXTFilterCubic#VK_FILTER_CUBIC_EXT FILTER_CUBIC_EXT} as a result of this command must have a {@code VkImageViewType} and format that supports cubic filtering, as specified by {@link VkFilterCubicImageViewImageFormatPropertiesEXT}{@code ::filterCubic} returned by {@code vkGetPhysicalDeviceImageFormatProperties2}
  • *
  • Any {@code VkImageView} being sampled with {@link EXTFilterCubic#VK_FILTER_CUBIC_EXT FILTER_CUBIC_EXT} with a reduction mode of either {@link VK12#VK_SAMPLER_REDUCTION_MODE_MIN SAMPLER_REDUCTION_MODE_MIN} or {@link VK12#VK_SAMPLER_REDUCTION_MODE_MAX SAMPLER_REDUCTION_MODE_MAX} as a result of this command must have a {@code VkImageViewType} and format that supports cubic filtering together with minmax filtering, as specified by {@link VkFilterCubicImageViewImageFormatPropertiesEXT}{@code ::filterCubicMinmax} returned by {@code vkGetPhysicalDeviceImageFormatProperties2}
  • *
  • Any {@code VkImage} created with a {@link VkImageCreateInfo}{@code ::flags} containing {@link NVCornerSampledImage#VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV IMAGE_CREATE_CORNER_SAMPLED_BIT_NV} sampled as a result of this command must only be sampled using a {@code VkSamplerAddressMode} of {@link VK10#VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE}
  • *
  • Any {@code VkImageView} or {@code VkBufferView} being written as a storage image or storage texel buffer where the image format field of the {@code OpTypeImage} is {@code Unknown} must have image format features that support {@link KHRFormatFeatureFlags2#VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR}
  • *
  • Any {@code VkImageView} or {@code VkBufferView} being read as a storage image or storage texel buffer where the image format field of the {@code OpTypeImage} is {@code Unknown} must have image format features that support {@link KHRFormatFeatureFlags2#VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR}
  • *
  • For each set n that is statically used by the {@code VkPipeline} bound to the pipeline bind point used by this command, a descriptor set must have been bound to n at the same pipeline bind point, with a {@code VkPipelineLayout} that is compatible for set n, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in Pipeline Layout Compatibility
  • *
  • If the {@code maintenance4} feature is not enabled, then for each push constant that is statically used by the {@code VkPipeline} bound to the pipeline bind point used by this command, a push constant value must have been set for the same pipeline bind point, with a {@code VkPipelineLayout} that is compatible for push constants, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in Pipeline Layout Compatibility
  • *
  • Descriptors in each bound descriptor set, specified via {@code vkCmdBindDescriptorSets}, must be valid if they are statically used by the {@code VkPipeline} bound to the pipeline bind point used by this command
  • *
  • A valid pipeline must be bound to the pipeline bind point used by this command
  • *
  • If the {@code VkPipeline} object bound to the pipeline bind point used by this command requires any dynamic state, that state must have been set or inherited (if the {@link NVInheritedViewportScissor VK_NV_inherited_viewport_scissor} extension is enabled) for {@code commandBuffer}, and done so after any previously bound pipeline with the corresponding state not specified as dynamic
  • *
  • There must not have been any calls to dynamic state setting commands for any state not specified as dynamic in the {@code VkPipeline} object bound to the pipeline bind point used by this command, since that pipeline was bound
  • *
  • If the {@code VkPipeline} object bound to the pipeline bind point used by this command accesses a {@code VkSampler} object that uses unnormalized coordinates, that sampler must not be used to sample from any {@code VkImage} with a {@code VkImageView} of the type {@link VK10#VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link VK10#VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, {@link VK10#VK_IMAGE_VIEW_TYPE_1D_ARRAY IMAGE_VIEW_TYPE_1D_ARRAY}, {@link VK10#VK_IMAGE_VIEW_TYPE_2D_ARRAY IMAGE_VIEW_TYPE_2D_ARRAY} or {@link VK10#VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}, in any shader stage
  • *
  • If the {@code VkPipeline} object bound to the pipeline bind point used by this command accesses a {@code VkSampler} object that uses unnormalized coordinates, that sampler must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions with {@code ImplicitLod}, {@code Dref} or {@code Proj} in their name, in any shader stage
  • *
  • If the {@code VkPipeline} object bound to the pipeline bind point used by this command accesses a {@code VkSampler} object that uses unnormalized coordinates, that sampler must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions that includes a LOD bias or any offset values, in any shader stage
  • *
  • If the robust buffer access feature is not enabled, and if the {@code VkPipeline} object bound to the pipeline bind point used by this command accesses a uniform buffer, it must not access values outside of the range of the buffer as specified in the descriptor set bound to the same pipeline bind point
  • *
  • If the robust buffer access feature is not enabled, and if the {@code VkPipeline} object bound to the pipeline bind point used by this command accesses a storage buffer, it must not access values outside of the range of the buffer as specified in the descriptor set bound to the same pipeline bind point
  • *
  • If {@code commandBuffer} is an unprotected command buffer and {@code protectedNoFault} is not supported, any resource accessed by the {@code VkPipeline} object bound to the pipeline bind point used by this command must not be a protected resource
  • *
  • If a {@code VkImageView} is accessed using {@code OpImageWrite} as a result of this command, then the {@code Type} of the {@code Texel} operand of that instruction must have at least as many components as the image view’s format
  • *
  • If a {@code VkBufferView} is accessed using {@code OpImageWrite} as a result of this command, then the {@code Type} of the {@code Texel} operand of that instruction must have at least as many components as the buffer view’s format
  • *
  • If a {@code VkImageView} with a {@code VkFormat} that has a 64-bit component width is accessed as a result of this command, the {@code SampledType} of the {@code OpTypeImage} operand of that instruction must have a {@code Width} of 64
  • *
  • If a {@code VkImageView} with a {@code VkFormat} that has a component width less than 64-bit is accessed as a result of this command, the {@code SampledType} of the {@code OpTypeImage} operand of that instruction must have a {@code Width} of 32
  • *
  • If a {@code VkBufferView} with a {@code VkFormat} that has a 64-bit component width is accessed as a result of this command, the {@code SampledType} of the {@code OpTypeImage} operand of that instruction must have a {@code Width} of 64
  • *
  • If a {@code VkBufferView} with a {@code VkFormat} that has a component width less than 64-bit is accessed as a result of this command, the {@code SampledType} of the {@code OpTypeImage} operand of that instruction must have a {@code Width} of 32
  • *
  • If the {@code sparseImageInt64Atomics} feature is not enabled, {@code VkImage} objects created with the {@link VK10#VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT IMAGE_CREATE_SPARSE_RESIDENCY_BIT} flag must not be accessed by atomic instructions through an {@code OpTypeImage} with a {@code SampledType} with a {@code Width} of 64 by this command
  • *
  • If the {@code sparseImageInt64Atomics} feature is not enabled, {@code VkBuffer} objects created with the {@link VK10#VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT BUFFER_CREATE_SPARSE_RESIDENCY_BIT} flag must not be accessed by atomic instructions through an {@code OpTypeImage} with a {@code SampledType} with a {@code Width} of 64 by this command
  • *
  • Any shader group handle referenced by this call must have been queried from the currently bound ray tracing pipeline
  • *
* *
    *
  • {@code commandBuffer} must not be a protected command buffer
  • *
  • This command must not cause a pipeline trace ray instruction to be executed from a shader invocation with a recursion depth greater than the value of {@code maxRecursionDepth} used to create the bound ray tracing pipeline
  • *
  • If {@code raygenShaderBindingTableBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code raygenShaderBindingOffset} must be less than the size of {@code raygenShaderBindingTableBuffer}
  • *
  • {@code raygenShaderBindingOffset} must be a multiple of {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::shaderGroupBaseAlignment}
  • *
  • If {@code missShaderBindingTableBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code missShaderBindingOffset} must be less than the size of {@code missShaderBindingTableBuffer}
  • *
  • {@code missShaderBindingOffset} must be a multiple of {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::shaderGroupBaseAlignment}
  • *
  • If {@code hitShaderBindingTableBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code hitShaderBindingOffset} must be less than the size of {@code hitShaderBindingTableBuffer}
  • *
  • {@code hitShaderBindingOffset} must be a multiple of {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::shaderGroupBaseAlignment}
  • *
  • If {@code callableShaderBindingTableBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code callableShaderBindingOffset} must be less than the size of {@code callableShaderBindingTableBuffer}
  • *
  • {@code callableShaderBindingOffset} must be a multiple of {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::shaderGroupBaseAlignment}
  • *
  • {@code missShaderBindingStride} must be a multiple of {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::shaderGroupHandleSize}
  • *
  • {@code hitShaderBindingStride} must be a multiple of {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::shaderGroupHandleSize}
  • *
  • {@code callableShaderBindingStride} must be a multiple of {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::shaderGroupHandleSize}
  • *
  • {@code missShaderBindingStride} must be less than or equal to {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::maxShaderGroupStride}
  • *
  • {@code hitShaderBindingStride} must be less than or equal to {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::maxShaderGroupStride}
  • *
  • {@code callableShaderBindingStride} must be less than or equal to {@link VkPhysicalDeviceRayTracingPropertiesNV}{@code ::maxShaderGroupStride}
  • *
  • {@code width} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxComputeWorkGroupCount}[0]
  • *
  • {@code height} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxComputeWorkGroupCount}[1]
  • *
  • {@code depth} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxComputeWorkGroupCount}[2]
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code raygenShaderBindingTableBuffer} must be a valid {@code VkBuffer} handle
  • *
  • If {@code missShaderBindingTableBuffer} is not {@link VK10#VK_NULL_HANDLE NULL_HANDLE}, {@code missShaderBindingTableBuffer} must be a valid {@code VkBuffer} handle
  • *
  • If {@code hitShaderBindingTableBuffer} is not {@link VK10#VK_NULL_HANDLE NULL_HANDLE}, {@code hitShaderBindingTableBuffer} must be a valid {@code VkBuffer} handle
  • *
  • If {@code callableShaderBindingTableBuffer} is not {@link VK10#VK_NULL_HANDLE NULL_HANDLE}, {@code callableShaderBindingTableBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • Each of {@code callableShaderBindingTableBuffer}, {@code commandBuffer}, {@code hitShaderBindingTableBuffer}, {@code missShaderBindingTableBuffer}, and {@code raygenShaderBindingTableBuffer} that are valid handles of non-ignored parameters must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer LevelsRender Pass ScopeSupported Queue Types
Primary SecondaryOutsideCompute
* * @param commandBuffer the command buffer into which the command will be recorded. * @param raygenShaderBindingTableBuffer the buffer object that holds the shader binding table data for the ray generation shader stage. * @param raygenShaderBindingOffset the offset in bytes (relative to {@code raygenShaderBindingTableBuffer}) of the ray generation shader being used for the trace. * @param missShaderBindingTableBuffer the buffer object that holds the shader binding table data for the miss shader stage. * @param missShaderBindingOffset the offset in bytes (relative to {@code missShaderBindingTableBuffer}) of the miss shader being used for the trace. * @param missShaderBindingStride the size in bytes of each shader binding table record in {@code missShaderBindingTableBuffer}. * @param hitShaderBindingTableBuffer the buffer object that holds the shader binding table data for the hit shader stages. * @param hitShaderBindingOffset the offset in bytes (relative to {@code hitShaderBindingTableBuffer}) of the hit shader group being used for the trace. * @param hitShaderBindingStride the size in bytes of each shader binding table record in {@code hitShaderBindingTableBuffer}. * @param callableShaderBindingTableBuffer the buffer object that holds the shader binding table data for the callable shader stage. * @param callableShaderBindingOffset the offset in bytes (relative to {@code callableShaderBindingTableBuffer}) of the callable shader being used for the trace. * @param callableShaderBindingStride the size in bytes of each shader binding table record in {@code callableShaderBindingTableBuffer}. * @param width the width of the ray trace query dimensions. * @param height height of the ray trace query dimensions. * @param depth depth of the ray trace query dimensions. 
*/
public static void vkCmdTraceRaysNV(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long raygenShaderBindingTableBuffer, @NativeType("VkDeviceSize") long raygenShaderBindingOffset, @NativeType("VkBuffer") long missShaderBindingTableBuffer, @NativeType("VkDeviceSize") long missShaderBindingOffset, @NativeType("VkDeviceSize") long missShaderBindingStride, @NativeType("VkBuffer") long hitShaderBindingTableBuffer, @NativeType("VkDeviceSize") long hitShaderBindingOffset, @NativeType("VkDeviceSize") long hitShaderBindingStride, @NativeType("VkBuffer") long callableShaderBindingTableBuffer, @NativeType("VkDeviceSize") long callableShaderBindingOffset, @NativeType("VkDeviceSize") long callableShaderBindingStride, @NativeType("uint32_t") int width, @NativeType("uint32_t") int height, @NativeType("uint32_t") int depth) {
    // Direct JNI dispatch: all shader-binding-table buffer handles, offsets and strides are
    // forwarded unchanged, followed by the width/height/depth dispatch dimensions.
    long __functionAddress = commandBuffer.getCapabilities().vkCmdTraceRaysNV;
    if (CHECKS) {
        check(__functionAddress); // fail fast if the extension function pointer is NULL
    }
    callPJJJJJJJJJJJV(commandBuffer.address(), raygenShaderBindingTableBuffer, raygenShaderBindingOffset, missShaderBindingTableBuffer, missShaderBindingOffset, missShaderBindingStride, hitShaderBindingTableBuffer, hitShaderBindingOffset, hitShaderBindingStride, callableShaderBindingTableBuffer, callableShaderBindingOffset, callableShaderBindingStride, width, height, depth, __functionAddress);
}

// --- [ vkCreateRayTracingPipelinesNV ] ---

/**
 * Unsafe version of: {@link #vkCreateRayTracingPipelinesNV CreateRayTracingPipelinesNV}
 *
 * @param createInfoCount the length of the {@code pCreateInfos} and {@code pPipelines} arrays.
*/
public static int nvkCreateRayTracingPipelinesNV(VkDevice device, long pipelineCache, int createInfoCount, long pCreateInfos, long pAllocator, long pPipelines) {
    long __functionAddress = device.getCapabilities().vkCreateRayTracingPipelinesNV;
    if (CHECKS) {
        check(__functionAddress);
        // Validate every one of the createInfoCount structs in the native array before the
        // call; pAllocator is optional, so it is validated only when actually provided.
        Struct.validate(pCreateInfos, createInfoCount, VkRayTracingPipelineCreateInfoNV.SIZEOF, VkRayTracingPipelineCreateInfoNV::validate);
        if (pAllocator != NULL) {
            VkAllocationCallbacks.validate(pAllocator);
        }
    }
    return callPJPPPI(device.address(), pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines, __functionAddress);
}

/**
 * Creates a new ray tracing pipeline object.
 *
 *
C Specification
* *

To create ray tracing pipelines, call:

* *

     * VkResult vkCreateRayTracingPipelinesNV(
     *     VkDevice                                    device,
     *     VkPipelineCache                             pipelineCache,
     *     uint32_t                                    createInfoCount,
     *     const VkRayTracingPipelineCreateInfoNV*     pCreateInfos,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkPipeline*                                 pPipelines);
* *
Valid Usage
* *
    *
  • If the {@code flags} member of any element of {@code pCreateInfos} contains the {@link VK10#VK_PIPELINE_CREATE_DERIVATIVE_BIT PIPELINE_CREATE_DERIVATIVE_BIT} flag, and the {@code basePipelineIndex} member of that same element is not {@code -1}, {@code basePipelineIndex} must be less than the index into {@code pCreateInfos} that corresponds to that element
  • *
  • If the {@code flags} member of any element of {@code pCreateInfos} contains the {@link VK10#VK_PIPELINE_CREATE_DERIVATIVE_BIT PIPELINE_CREATE_DERIVATIVE_BIT} flag, the base pipeline must have been created with the {@link VK10#VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT} flag set
  • *
  • {@code flags} must not contain the {@link VK11#VK_PIPELINE_CREATE_DISPATCH_BASE PIPELINE_CREATE_DISPATCH_BASE} flag
  • *
  • If {@code pipelineCache} was created with {@link EXTPipelineCreationCacheControl#VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT}, host access to {@code pipelineCache} must be externally synchronized
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code pipelineCache} is not {@link VK10#VK_NULL_HANDLE NULL_HANDLE}, {@code pipelineCache} must be a valid {@code VkPipelineCache} handle
  • *
  • {@code pCreateInfos} must be a valid pointer to an array of {@code createInfoCount} valid {@link VkRayTracingPipelineCreateInfoNV} structures
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pPipelines} must be a valid pointer to an array of {@code createInfoCount} {@code VkPipeline} handles
  • *
  • {@code createInfoCount} must be greater than 0
  • *
  • If {@code pipelineCache} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link VK10#VK_SUCCESS SUCCESS}
  • *
  • {@link EXTPipelineCreationCacheControl#VK_PIPELINE_COMPILE_REQUIRED_EXT PIPELINE_COMPILE_REQUIRED_EXT}
  • *
*
On failure, this command returns
*
    *
  • {@link VK10#VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link VK10#VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link NVGLSLShader#VK_ERROR_INVALID_SHADER_NV ERROR_INVALID_SHADER_NV}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkRayTracingPipelineCreateInfoNV}

* @param device        the logical device that creates the ray tracing pipelines.
* @param pipelineCache either {@link VK10#VK_NULL_HANDLE NULL_HANDLE}, indicating that pipeline caching is disabled, or the handle of a valid pipeline cache
*                      object, in which case use of that cache is enabled for the duration of the command.
* @param pCreateInfos  a pointer to an array of {@link VkRayTracingPipelineCreateInfoNV} structures.
* @param pAllocator    controls host memory allocation as described in the Memory Allocation chapter.
* @param pPipelines    a pointer to an array in which the resulting ray tracing pipeline objects are returned.
*/
@NativeType("VkResult")
public static int vkCreateRayTracingPipelinesNV(VkDevice device, @NativeType("VkPipelineCache") long pipelineCache, @NativeType("VkRayTracingPipelineCreateInfoNV const *") VkRayTracingPipelineCreateInfoNV.Buffer pCreateInfos, @Nullable @NativeType("VkAllocationCallbacks const *") VkAllocationCallbacks pAllocator, @NativeType("VkPipeline *") LongBuffer pPipelines) {
    if (CHECKS) {
        // pPipelines must have room for one VkPipeline handle per create-info struct.
        check(pPipelines, pCreateInfos.remaining());
    }
    // Element count is derived from the struct buffer; the unsafe variant does the JNI call.
    return nvkCreateRayTracingPipelinesNV(device, pipelineCache, pCreateInfos.remaining(), pCreateInfos.address(), memAddressSafe(pAllocator), memAddress(pPipelines));
}

// --- [ vkGetRayTracingShaderGroupHandlesNV ] ---

/**
 * Unsafe version of: {@link #vkGetRayTracingShaderGroupHandlesNV GetRayTracingShaderGroupHandlesNV}
 *
 * @param dataSize the size in bytes of the buffer pointed to by {@code pData}.
 */
public static int nvkGetRayTracingShaderGroupHandlesNV(VkDevice device, long pipeline, int firstGroup, int groupCount, long dataSize, long pData) {
    long __functionAddress = device.getCapabilities().vkGetRayTracingShaderGroupHandlesNV;
    if (CHECKS) {
        check(__functionAddress); // fail fast if the extension function pointer is NULL
    }
    return callPJPPI(device.address(), pipeline, firstGroup, groupCount, dataSize, pData, __functionAddress);
}

/**
 * See {@link KHRRayTracingPipeline#vkGetRayTracingShaderGroupHandlesKHR GetRayTracingShaderGroupHandlesKHR}.
* @param device     the logical device containing the ray tracing pipeline.
* @param pipeline   the ray tracing pipeline object containing the shaders.
* @param firstGroup the index of the first group to retrieve a handle for from the {@link VkRayTracingPipelineCreateInfoKHR}{@code ::pGroups} or
*                   {@link VkRayTracingPipelineCreateInfoNV}{@code ::pGroups} array.
* @param groupCount the number of shader handles to retrieve.
* @param pData      a pointer to a user-allocated buffer where the results will be written.
*/
@NativeType("VkResult")
public static int vkGetRayTracingShaderGroupHandlesNV(VkDevice device, @NativeType("VkPipeline") long pipeline, @NativeType("uint32_t") int firstGroup, @NativeType("uint32_t") int groupCount, @NativeType("void *") ByteBuffer pData) {
    // dataSize is inferred from the buffer's remaining bytes; the unsafe variant does the JNI call.
    return nvkGetRayTracingShaderGroupHandlesNV(device, pipeline, firstGroup, groupCount, pData.remaining(), memAddress(pData));
}

// --- [ vkGetAccelerationStructureHandleNV ] ---

/**
 * Unsafe version of: {@link #vkGetAccelerationStructureHandleNV GetAccelerationStructureHandleNV}
 *
 * @param dataSize the size in bytes of the buffer pointed to by {@code pData}.
 */
public static int nvkGetAccelerationStructureHandleNV(VkDevice device, long accelerationStructure, long dataSize, long pData) {
    long __functionAddress = device.getCapabilities().vkGetAccelerationStructureHandleNV;
    if (CHECKS) {
        check(__functionAddress); // fail fast if the extension function pointer is NULL
    }
    return callPJPPI(device.address(), accelerationStructure, dataSize, pData, __functionAddress);
}

/**
 * Get opaque acceleration structure handle.
 *
 *
C Specification
* *

To allow constructing geometry instances with device code if desired, we need to be able to query a opaque handle for an acceleration structure. This handle is a value of 8 bytes. To get this handle, call:

* *

     * VkResult vkGetAccelerationStructureHandleNV(
     *     VkDevice                                    device,
     *     VkAccelerationStructureNV                   accelerationStructure,
     *     size_t                                      dataSize,
     *     void*                                       pData);
* *
Valid Usage
* *
    *
  • {@code dataSize} must be large enough to contain the result of the query, as described above
  • *
  • {@code accelerationStructure} must be bound completely and contiguously to a single {@code VkDeviceMemory} object via {@link #vkBindAccelerationStructureMemoryNV BindAccelerationStructureMemoryNV}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code accelerationStructure} must be a valid {@code VkAccelerationStructureNV} handle
  • *
  • {@code pData} must be a valid pointer to an array of {@code dataSize} bytes
  • *
  • {@code dataSize} must be greater than 0
  • *
  • {@code accelerationStructure} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link VK10#VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link VK10#VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link VK10#VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* * @param device the logical device that owns the acceleration structures. * @param accelerationStructure the acceleration structure. * @param pData a pointer to a user-allocated buffer where the results will be written. */ @NativeType("VkResult") public static int vkGetAccelerationStructureHandleNV(VkDevice device, @NativeType("VkAccelerationStructureNV") long accelerationStructure, @NativeType("void *") ByteBuffer pData) { return nvkGetAccelerationStructureHandleNV(device, accelerationStructure, pData.remaining(), memAddress(pData)); } /** * Get opaque acceleration structure handle. * *
     * <h5>C Specification</h5>
     *
     * <p>To allow constructing geometry instances with device code if desired, we need to be able to query an opaque handle for an acceleration structure.
     * This handle is a value of 8 bytes. To get this handle, call:</p>
     *
     * <pre><code>
     * VkResult vkGetAccelerationStructureHandleNV(
     *     VkDevice                                    device,
     *     VkAccelerationStructureNV                   accelerationStructure,
     *     size_t                                      dataSize,
     *     void*                                       pData);</code></pre>
     *
     * <h5>Valid Usage</h5>
     *
     * <ul>
     * <li>{@code dataSize} must be large enough to contain the result of the query, as described above</li>
     * <li>{@code accelerationStructure} must be bound completely and contiguously to a single {@code VkDeviceMemory} object via
     * {@link #vkBindAccelerationStructureMemoryNV BindAccelerationStructureMemoryNV}</li>
     * </ul>
     *
     * <h5>Valid Usage (Implicit)</h5>
     *
     * <ul>
     * <li>{@code device} must be a valid {@code VkDevice} handle</li>
     * <li>{@code accelerationStructure} must be a valid {@code VkAccelerationStructureNV} handle</li>
     * <li>{@code pData} must be a valid pointer to an array of {@code dataSize} bytes</li>
     * <li>{@code dataSize} must be greater than 0</li>
     * <li>{@code accelerationStructure} must have been created, allocated, or retrieved from {@code device}</li>
     * </ul>
     *
     * <h5>Return Codes</h5>
     *
     * <ul>
     * <li>On success, this command returns {@link VK10#VK_SUCCESS SUCCESS}</li>
     * <li>On failure, this command returns {@link VK10#VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY} or
     * {@link VK10#VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}</li>
     * </ul>
     *
     * @param device                the logical device that owns the acceleration structures.
     * @param accelerationStructure the acceleration structure.
     * @param pData                 a pointer to a user-allocated buffer where the results will be written.
     */
    @NativeType("VkResult")
    public static int vkGetAccelerationStructureHandleNV(VkDevice device, @NativeType("VkAccelerationStructureNV") long accelerationStructure, @NativeType("void *") LongBuffer pData) {
        // The handle is 8 bytes: the shift converts the buffer's remaining long count into a byte size for dataSize.
        return nvkGetAccelerationStructureHandleNV(device, accelerationStructure, Integer.toUnsignedLong(pData.remaining()) << 3, memAddress(pData));
    }

    // --- [ vkCmdWriteAccelerationStructuresPropertiesNV ] ---

    /**
     * Unsafe version of: {@link #vkCmdWriteAccelerationStructuresPropertiesNV CmdWriteAccelerationStructuresPropertiesNV}
     *
     * @param accelerationStructureCount the count of acceleration structures for which to query the property.
     */
    public static void nvkCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, int accelerationStructureCount, long pAccelerationStructures, int queryType, long queryPool, int firstQuery) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdWriteAccelerationStructuresPropertiesNV;
        if (CHECKS) {
            check(__functionAddress);
        }
        callPPJV(commandBuffer.address(), accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery, __functionAddress);
    }

    /**
     * Write acceleration structure result parameters to query results.
     *
     * <h5>C Specification</h5>
     *
     * <p>To query acceleration structure size parameters call:</p>
     *
     * <pre><code>
     * void vkCmdWriteAccelerationStructuresPropertiesNV(
     *     VkCommandBuffer                             commandBuffer,
     *     uint32_t                                    accelerationStructureCount,
     *     const VkAccelerationStructureNV*            pAccelerationStructures,
     *     VkQueryType                                 queryType,
     *     VkQueryPool                                 queryPool,
     *     uint32_t                                    firstQuery);</code></pre>
     *
     * <h5>Description</h5>
     *
     * <p>Accesses to any of the acceleration structures listed in {@code pAccelerationStructures} must be synchronized with the
     * {@link KHRAccelerationStructure#VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR} pipeline
     * stage and an access type of {@link KHRAccelerationStructure#VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR}.</p>
     *
     * <h5>Valid Usage</h5>
     *
     * <ul>
     * <li>{@code queryPool} must have been created with a {@code queryType} matching {@code queryType}</li>
     * <li>The queries identified by {@code queryPool} and {@code firstQuery} must be unavailable</li>
     * <li>{@code accelerationStructure} must be bound completely and contiguously to a single {@code VkDeviceMemory} object via
     * {@link #vkBindAccelerationStructureMemoryNV BindAccelerationStructureMemoryNV}</li>
     * <li>All acceleration structures in {@code pAccelerationStructures} must have been built prior to the execution of this command</li>
     * <li>All acceleration structures in {@code pAccelerationStructures} must have been built with
     * {@link KHRAccelerationStructure#VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR} if
     * {@code queryType} is {@link #VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV}</li>
     * <li>{@code queryType} must be {@link #VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV}</li>
     * </ul>
     *
     * <h5>Valid Usage (Implicit)</h5>
     *
     * <ul>
     * <li>{@code commandBuffer} must be a valid {@code VkCommandBuffer} handle</li>
     * <li>{@code pAccelerationStructures} must be a valid pointer to an array of {@code accelerationStructureCount} valid
     * {@code VkAccelerationStructureNV} handles</li>
     * <li>{@code queryType} must be a valid {@code VkQueryType} value</li>
     * <li>{@code queryPool} must be a valid {@code VkQueryPool} handle</li>
     * <li>{@code commandBuffer} must be in the recording state</li>
     * <li>The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support compute operations</li>
     * <li>This command must only be called outside of a render pass instance</li>
     * <li>{@code accelerationStructureCount} must be greater than 0</li>
     * <li>Each of {@code commandBuffer}, {@code queryPool}, and the elements of {@code pAccelerationStructures} must have been created, allocated, or
     * retrieved from the same {@code VkDevice}</li>
     * </ul>
     *
     * <h5>Host Synchronization</h5>
     *
     * <ul>
     * <li>Host access to {@code commandBuffer} must be externally synchronized</li>
     * <li>Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized</li>
     * </ul>
     *
     * <h5>Command Properties</h5>
     *
     * <p>Command buffer levels: primary and secondary. Render pass scope: outside. Supported queue types: compute.</p>
     *
     * @param commandBuffer           the command buffer into which the command will be recorded.
     * @param pAccelerationStructures a pointer to an array of existing previously built acceleration structures.
     * @param queryType               a {@code VkQueryType} value specifying the type of queries managed by the pool.
     * @param queryPool               the query pool that will manage the results of the query.
     * @param firstQuery              the first query index within the query pool that will contain the {@code accelerationStructureCount} number of results.
     */
    public static void vkCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, @NativeType("VkAccelerationStructureNV const *") LongBuffer pAccelerationStructures, @NativeType("VkQueryType") int queryType, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int firstQuery) {
        nvkCmdWriteAccelerationStructuresPropertiesNV(commandBuffer, pAccelerationStructures.remaining(), memAddress(pAccelerationStructures), queryType, queryPool, firstQuery);
    }

    // --- [ vkCompileDeferredNV ] ---

    /**
     * Deferred compilation of shaders.
     *
     * <h5>C Specification</h5>
     *
     * <p>To compile a deferred shader in a pipeline call:</p>
     *
     * <pre><code>
     * VkResult vkCompileDeferredNV(
     *     VkDevice                                    device,
     *     VkPipeline                                  pipeline,
     *     uint32_t                                    shader);</code></pre>
     *
     * <h5>Valid Usage</h5>
     *
     * <ul>
     * <li>{@code pipeline} must be a ray tracing pipeline</li>
     * <li>{@code pipeline} must have been created with {@link #VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV PIPELINE_CREATE_DEFER_COMPILE_BIT_NV}</li>
     * <li>{@code shader} must not have been called as a deferred compile before</li>
     * </ul>
     *
     * <h5>Valid Usage (Implicit)</h5>
     *
     * <ul>
     * <li>{@code device} must be a valid {@code VkDevice} handle</li>
     * <li>{@code pipeline} must be a valid {@code VkPipeline} handle</li>
     * <li>{@code pipeline} must have been created, allocated, or retrieved from {@code device}</li>
     * </ul>
     *
     * <h5>Return Codes</h5>
     *
     * <ul>
     * <li>On success, this command returns {@link VK10#VK_SUCCESS SUCCESS}</li>
     * <li>On failure, this command returns {@link VK10#VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY} or
     * {@link VK10#VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}</li>
     * </ul>
     *
     * @param device   the logical device containing the ray tracing pipeline.
     * @param pipeline the ray tracing pipeline object containing the shaders.
     * @param shader   the index of the shader to compile.
     */
    @NativeType("VkResult")
    public static int vkCompileDeferredNV(VkDevice device, @NativeType("VkPipeline") long pipeline, @NativeType("uint32_t") int shader) {
        long __functionAddress = device.getCapabilities().vkCompileDeferredNV;
        if (CHECKS) {
            check(__functionAddress);
        }
        return callPJI(device.address(), pipeline, shader, __functionAddress);
    }

    /** Array version of: {@link #vkCreateAccelerationStructureNV CreateAccelerationStructureNV} */
    @NativeType("VkResult")
    public static int vkCreateAccelerationStructureNV(VkDevice device, @NativeType("VkAccelerationStructureCreateInfoNV const *") VkAccelerationStructureCreateInfoNV pCreateInfo, @Nullable @NativeType("VkAllocationCallbacks const *") VkAllocationCallbacks pAllocator, @NativeType("VkAccelerationStructureNV *") long[] pAccelerationStructure) {
        long __functionAddress = device.getCapabilities().vkCreateAccelerationStructureNV;
        if (CHECKS) {
            check(__functionAddress);
            // Exactly one acceleration structure handle is written out.
            check(pAccelerationStructure, 1);
            VkAccelerationStructureCreateInfoNV.validate(pCreateInfo.address());
            if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); }
        }
        return callPPPPI(device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pAccelerationStructure, __functionAddress);
    }

    /** Array version of: {@link #vkCreateRayTracingPipelinesNV CreateRayTracingPipelinesNV} */
    @NativeType("VkResult")
    public static int vkCreateRayTracingPipelinesNV(VkDevice device, @NativeType("VkPipelineCache") long pipelineCache, @NativeType("VkRayTracingPipelineCreateInfoNV const *") VkRayTracingPipelineCreateInfoNV.Buffer pCreateInfos, @Nullable @NativeType("VkAllocationCallbacks const *") VkAllocationCallbacks pAllocator, @NativeType("VkPipeline *") long[] pPipelines) {
        long __functionAddress = device.getCapabilities().vkCreateRayTracingPipelinesNV;
        if (CHECKS) {
            check(__functionAddress);
            // One pipeline handle is written per create-info structure.
            check(pPipelines, pCreateInfos.remaining());
            Struct.validate(pCreateInfos.address(), pCreateInfos.remaining(), VkRayTracingPipelineCreateInfoNV.SIZEOF, VkRayTracingPipelineCreateInfoNV::validate);
            if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); }
        }
        return callPJPPPI(device.address(), pipelineCache, pCreateInfos.remaining(), pCreateInfos.address(), memAddressSafe(pAllocator), pPipelines, __functionAddress);
    }

    /** Array version of: {@link #vkGetAccelerationStructureHandleNV GetAccelerationStructureHandleNV} */
    @NativeType("VkResult")
    public static int vkGetAccelerationStructureHandleNV(VkDevice device, @NativeType("VkAccelerationStructureNV") long accelerationStructure, @NativeType("void *") long[] pData) {
        long __functionAddress = device.getCapabilities().vkGetAccelerationStructureHandleNV;
        if (CHECKS) {
            check(__functionAddress);
        }
        // The handle is 8 bytes: the shift converts the array length in longs into a byte size for dataSize.
        return callPJPPI(device.address(), accelerationStructure, Integer.toUnsignedLong(pData.length) << 3, pData, __functionAddress);
    }

    /** Array version of: {@link #vkCmdWriteAccelerationStructuresPropertiesNV CmdWriteAccelerationStructuresPropertiesNV} */
    public static void vkCmdWriteAccelerationStructuresPropertiesNV(VkCommandBuffer commandBuffer, @NativeType("VkAccelerationStructureNV const *") long[] pAccelerationStructures, @NativeType("VkQueryType") int queryType, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int firstQuery) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdWriteAccelerationStructuresPropertiesNV;
        if (CHECKS) {
            check(__functionAddress);
        }
        callPPJV(commandBuffer.address(), pAccelerationStructures.length, pAccelerationStructures, queryType, queryPool, firstQuery, __functionAddress);
    }

}




© 2015 - 2024 Weber Informatics LLC | Privacy Policy