/*
 * Copyright LWJGL. All rights reserved.
 * License terms: https://www.lwjgl.org/license
 * MACHINE GENERATED FILE, DO NOT EDIT
 */
package org.lwjgl.vulkan;

import javax.annotation.*;

import java.nio.*;

import org.lwjgl.*;

import org.lwjgl.system.*;

import static org.lwjgl.system.Checks.*;
import static org.lwjgl.system.JNI.*;
import static org.lwjgl.system.MemoryStack.*;
import static org.lwjgl.system.MemoryUtil.*;

/** The core Vulkan 1.0 functionality. */
public class VK10 {

    /** API Constants */
    public static final int
        VK_MAX_PHYSICAL_DEVICE_NAME_SIZE = 256,
        VK_UUID_SIZE                     = 16,
        VK_LUID_SIZE_KHR                 = 8,
        VK_MAX_EXTENSION_NAME_SIZE       = 256,
        VK_MAX_DESCRIPTION_SIZE          = 256,
        VK_MAX_MEMORY_TYPES              = 32,
        VK_MAX_MEMORY_HEAPS              = 16,
        VK_REMAINING_MIP_LEVELS          = (~0),
        VK_REMAINING_ARRAY_LAYERS        = (~0),
        VK_ATTACHMENT_UNUSED             = (~0),
        VK_TRUE                          = 1,
        VK_FALSE                         = 0,
        VK_QUEUE_FAMILY_IGNORED          = (~0),
        VK_QUEUE_FAMILY_EXTERNAL_KHR     = (~0-1),
        VK_QUEUE_FAMILY_FOREIGN_EXT      = (~0-2),
        VK_SUBPASS_EXTERNAL              = (~0);

    /** API Constants */
    public static final float VK_LOD_CLAMP_NONE = 1000.0f;

    /** API Constants */
    public static final long VK_WHOLE_SIZE = (~0L);
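
    // The (~0) and (~0L) values above are the Vulkan "all bits set" sentinels (0xFFFFFFFF as an
    // int, 0xFFFFFFFFFFFFFFFFL as a long). A minimal, illustrative sketch of how they are
    // typically consumed through the LWJGL struct setters (the snippet is not part of this
    // generated file; VkImageSubresourceRange and VK_IMAGE_ASPECT_COLOR_BIT are the standard
    // VK10 bindings):
    //
    //     VkImageSubresourceRange.calloc()
    //         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
    //         .baseMipLevel(0)
    //         .levelCount(VK_REMAINING_MIP_LEVELS)     // all remaining mip levels
    //         .baseArrayLayer(0)
    //         .layerCount(VK_REMAINING_ARRAY_LAYERS);  // all remaining array layers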

    /**
     * VkPipelineCacheHeaderVersion - Encode pipeline cache version
     *
     * <h5>Description</h5>
     *
     * <ul>
     * <li>{@link #VK_PIPELINE_CACHE_HEADER_VERSION_ONE PIPELINE_CACHE_HEADER_VERSION_ONE} specifies version one of the pipeline cache.</li>
     * </ul>
     *
     * <h5>See Also</h5>
     *
     * <p>{@link #vkCreatePipelineCache CreatePipelineCache}, {@link #vkGetPipelineCacheData GetPipelineCacheData}</p>
     */
    public static final int VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1;

    /**
     * VkResult - Vulkan command return codes
     *
     * <h5>Description</h5>
     *
     * <p>Success codes:</p>
     *
     * <ul>
     * <li>{@link #VK_SUCCESS SUCCESS} Command successfully completed.</li>
     * <li>{@link #VK_NOT_READY NOT_READY} A fence or query has not yet completed.</li>
     * <li>{@link #VK_TIMEOUT TIMEOUT} A wait operation has not completed in the specified time.</li>
     * <li>{@link #VK_EVENT_SET EVENT_SET} An event is signaled.</li>
     * <li>{@link #VK_EVENT_RESET EVENT_RESET} An event is unsignaled.</li>
     * <li>{@link #VK_INCOMPLETE INCOMPLETE} A return array was too small for the result.</li>
     * <li>{@link KHRSwapchain#VK_SUBOPTIMAL_KHR SUBOPTIMAL_KHR} A swapchain no longer matches the surface properties exactly, but can still be used to present to the surface successfully.</li>
     * </ul>
     *
     * <p>Error codes:</p>
     *
     * <ul>
     * <li>{@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY} A host memory allocation has failed.</li>
     * <li>{@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY} A device memory allocation has failed.</li>
     * <li>{@link #VK_ERROR_INITIALIZATION_FAILED ERROR_INITIALIZATION_FAILED} Initialization of an object could not be completed for implementation-specific reasons.</li>
     * <li>{@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST} The logical or physical device has been lost. See Lost Device.</li>
     * <li>{@link #VK_ERROR_MEMORY_MAP_FAILED ERROR_MEMORY_MAP_FAILED} Mapping of a memory object has failed.</li>
     * <li>{@link #VK_ERROR_LAYER_NOT_PRESENT ERROR_LAYER_NOT_PRESENT} A requested layer is not present or could not be loaded.</li>
     * <li>{@link #VK_ERROR_EXTENSION_NOT_PRESENT ERROR_EXTENSION_NOT_PRESENT} A requested extension is not supported.</li>
     * <li>{@link #VK_ERROR_FEATURE_NOT_PRESENT ERROR_FEATURE_NOT_PRESENT} A requested feature is not supported.</li>
     * <li>{@link #VK_ERROR_INCOMPATIBLE_DRIVER ERROR_INCOMPATIBLE_DRIVER} The requested version of Vulkan is not supported by the driver or is otherwise incompatible for implementation-specific reasons.</li>
     * <li>{@link #VK_ERROR_TOO_MANY_OBJECTS ERROR_TOO_MANY_OBJECTS} Too many objects of the type have already been created.</li>
     * <li>{@link #VK_ERROR_FORMAT_NOT_SUPPORTED ERROR_FORMAT_NOT_SUPPORTED} A requested format is not supported on this device.</li>
     * <li>{@link #VK_ERROR_FRAGMENTED_POOL ERROR_FRAGMENTED_POOL} A pool allocation has failed due to fragmentation of the pool's memory. This must only be returned if no attempt to allocate host or device memory was made to accommodate the new allocation. This should be returned in preference to {@link KHRMaintenance1#VK_ERROR_OUT_OF_POOL_MEMORY_KHR ERROR_OUT_OF_POOL_MEMORY_KHR}, but only if the implementation is certain that the pool allocation failure was due to fragmentation.</li>
     * <li>{@link KHRSurface#VK_ERROR_SURFACE_LOST_KHR ERROR_SURFACE_LOST_KHR} A surface is no longer available.</li>
     * <li>{@link KHRSurface#VK_ERROR_NATIVE_WINDOW_IN_USE_KHR ERROR_NATIVE_WINDOW_IN_USE_KHR} The requested window is already in use by Vulkan or another API in a manner which prevents it from being used again.</li>
     * <li>{@link KHRSwapchain#VK_ERROR_OUT_OF_DATE_KHR ERROR_OUT_OF_DATE_KHR} A surface has changed in such a way that it is no longer compatible with the swapchain, and further presentation requests using the swapchain will fail. Applications must query the new surface properties and recreate their swapchain if they wish to continue presenting to the surface.</li>
     * <li>{@link KHRDisplaySwapchain#VK_ERROR_INCOMPATIBLE_DISPLAY_KHR ERROR_INCOMPATIBLE_DISPLAY_KHR} The display used by a swapchain does not use the same presentable image layout, or is incompatible in a way that prevents sharing an image.</li>
     * <li>{@link NVGLSLShader#VK_ERROR_INVALID_SHADER_NV ERROR_INVALID_SHADER_NV} One or more shaders failed to compile or link. More details are reported back to the application via {@link EXTDebugReport VK_EXT_debug_report} if enabled.</li>
     * <li>{@link KHRMaintenance1#VK_ERROR_OUT_OF_POOL_MEMORY_KHR ERROR_OUT_OF_POOL_MEMORY_KHR} A pool memory allocation has failed. This must only be returned if no attempt to allocate host or device memory was made to accommodate the new allocation. If the failure was definitely due to fragmentation of the pool, {@link #VK_ERROR_FRAGMENTED_POOL ERROR_FRAGMENTED_POOL} should be returned instead.</li>
     * <li>{@link KHRExternalMemory#VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR ERROR_INVALID_EXTERNAL_HANDLE_KHR} An external handle is not a valid handle of the specified type.</li>
     * </ul>
     *
     * <p>If a command returns a run time error, unless otherwise specified any output parameters will have undefined contents, except that if the output parameter is a structure with {@code sType} and {@code pNext} fields, those fields will be unmodified. Any structures chained from {@code pNext} will also have undefined contents, except that {@code sType} and {@code pNext} will be unmodified.</p>
     *
     * <p>Out of memory errors do not damage any currently existing Vulkan objects. Objects that have already been successfully created can still be used by the application.</p>
     *
     * <p>Performance-critical commands generally do not have return codes. If a run time error occurs in such commands, the implementation will defer reporting the error until a specified point. For commands that record into command buffers ({@code vkCmd*}), run time errors are reported by {@link #vkEndCommandBuffer EndCommandBuffer}.</p>
     *
     * <h5>See Also</h5>
     *
     * <p>{@link VkPresentInfoKHR}</p>
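     *
     * <h5>Example</h5>
     *
     * <p>An illustrative error check only, not part of the generated bindings ({@code vkCreateInstance} is the standard VK10 binding; {@code createInfo} and {@code pInstance} are assumed to have been prepared by the caller, and the exception is just one possible policy):</p>
     *
     * <pre>{@code
     * int err = vkCreateInstance(createInfo, null, pInstance);
     * if (err != VK_SUCCESS) {
     *     throw new IllegalStateException("vkCreateInstance failed: VkResult = " + err);
     * }
     * }</pre>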
     */
    public static final int
        VK_SUCCESS                     = 0,
        VK_NOT_READY                   = 1,
        VK_TIMEOUT                     = 2,
        VK_EVENT_SET                   = 3,
        VK_EVENT_RESET                 = 4,
        VK_INCOMPLETE                  = 5,
        VK_ERROR_OUT_OF_HOST_MEMORY    = -1,
        VK_ERROR_OUT_OF_DEVICE_MEMORY  = -2,
        VK_ERROR_INITIALIZATION_FAILED = -3,
        VK_ERROR_DEVICE_LOST           = -4,
        VK_ERROR_MEMORY_MAP_FAILED     = -5,
        VK_ERROR_LAYER_NOT_PRESENT     = -6,
        VK_ERROR_EXTENSION_NOT_PRESENT = -7,
        VK_ERROR_FEATURE_NOT_PRESENT   = -8,
        VK_ERROR_INCOMPATIBLE_DRIVER   = -9,
        VK_ERROR_TOO_MANY_OBJECTS      = -10,
        VK_ERROR_FORMAT_NOT_SUPPORTED  = -11,
        VK_ERROR_FRAGMENTED_POOL       = -12;

    /**
     * VkStructureType - Vulkan structure types ({@code sType})
     *
     * <h5>Description</h5>
     *
     * <p>Each value corresponds to a particular structure with a {@code sType} member with a matching name. As a general rule, the name of each {@code VkStructureType} value is obtained by taking the name of the structure, stripping the leading {@code Vk}, prefixing each capital letter with {@code _}, converting the entire resulting string to upper case, and prefixing it with {@code VK_STRUCTURE_TYPE_}. For example, structures of type {@link VkImageCreateInfo} correspond to a {@code VkStructureType} of {@link #VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO STRUCTURE_TYPE_IMAGE_CREATE_INFO}, and thus their {@code sType} member must equal that value when the structure is passed to the API.</p>
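     *
     * <p>An illustrative sketch of the rule above using the LWJGL struct classes ({@code VkApplicationInfo} and its chained {@code sType} setter are the standard bindings; the allocation style is just one option, not part of this generated file):</p>
     *
     * <pre>{@code
     * VkApplicationInfo appInfo = VkApplicationInfo.calloc()
     *     .sType(VK_STRUCTURE_TYPE_APPLICATION_INFO); // must match the structure type
     * }</pre>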
     *
     * <p>The values {@link #VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO} and {@link #VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO} are reserved for internal use by the loader, and do not have corresponding Vulkan structures in this specification.</p>
     *
     * <h5>See Also</h5>
     *
{@link VkAcquireNextImageInfoKHX}, {@link VkAndroidSurfaceCreateInfoKHR}, {@link VkApplicationInfo}, {@link VkBindBufferMemoryDeviceGroupInfoKHX}, {@link VkBindBufferMemoryInfoKHR}, {@link VkBindImageMemoryDeviceGroupInfoKHX}, {@link VkBindImageMemoryInfoKHR}, {@link VkBindImageMemorySwapchainInfoKHX}, {@link VkBindImagePlaneMemoryInfoKHR}, {@link VkBindSparseInfo}, {@link VkBufferCreateInfo}, {@link VkBufferMemoryBarrier}, {@link VkBufferMemoryRequirementsInfo2KHR}, {@link VkBufferViewCreateInfo}, {@link VkCmdProcessCommandsInfoNVX}, {@link VkCmdReserveSpaceForCommandsInfoNVX}, {@link VkCommandBufferAllocateInfo}, {@link VkCommandBufferBeginInfo}, {@link VkCommandBufferInheritanceInfo}, {@link VkCommandPoolCreateInfo}, {@link VkComputePipelineCreateInfo}, {@link VkCopyDescriptorSet}, {@link VkD3D12FenceSubmitInfoKHR}, {@link VkDebugMarkerMarkerInfoEXT}, {@link VkDebugMarkerObjectNameInfoEXT}, {@link VkDebugMarkerObjectTagInfoEXT}, {@link VkDebugReportCallbackCreateInfoEXT}, {@link VkDedicatedAllocationBufferCreateInfoNV}, {@link VkDedicatedAllocationImageCreateInfoNV}, {@link VkDedicatedAllocationMemoryAllocateInfoNV}, {@link VkDescriptorPoolCreateInfo}, {@link VkDescriptorSetAllocateInfo}, {@link VkDescriptorSetLayoutCreateInfo}, {@link VkDescriptorUpdateTemplateCreateInfoKHR}, {@link VkDeviceCreateInfo}, {@link VkDeviceEventInfoEXT}, {@link VkDeviceGeneratedCommandsFeaturesNVX}, {@link VkDeviceGeneratedCommandsLimitsNVX}, {@link VkDeviceGroupBindSparseInfoKHX}, {@link VkDeviceGroupCommandBufferBeginInfoKHX}, {@link VkDeviceGroupDeviceCreateInfoKHX}, {@link VkDeviceGroupPresentCapabilitiesKHX}, {@link VkDeviceGroupPresentInfoKHX}, {@link VkDeviceGroupRenderPassBeginInfoKHX}, {@link VkDeviceGroupSubmitInfoKHX}, {@link VkDeviceGroupSwapchainCreateInfoKHX}, {@link VkDeviceQueueCreateInfo}, {@link VkDeviceQueueGlobalPriorityCreateInfoEXT}, {@link VkDisplayEventInfoEXT}, {@link VkDisplayModeCreateInfoKHR}, {@link VkDisplayPowerInfoEXT}, {@link VkDisplayPresentInfoKHR}, {@link VkDisplaySurfaceCreateInfoKHR}, {@link VkEventCreateInfo}, {@link VkExportFenceCreateInfoKHR}, {@link VkExportFenceWin32HandleInfoKHR}, {@link VkExportMemoryAllocateInfoKHR}, {@link VkExportMemoryAllocateInfoNV}, {@link VkExportMemoryWin32HandleInfoKHR}, {@link VkExportMemoryWin32HandleInfoNV}, {@link VkExportSemaphoreCreateInfoKHR}, {@link VkExportSemaphoreWin32HandleInfoKHR}, {@link VkExternalBufferPropertiesKHR}, {@link VkExternalFencePropertiesKHR}, {@link VkExternalImageFormatPropertiesKHR}, {@link VkExternalMemoryBufferCreateInfoKHR}, {@link VkExternalMemoryImageCreateInfoKHR}, {@link VkExternalMemoryImageCreateInfoNV}, {@link VkExternalSemaphorePropertiesKHR}, {@link VkFenceCreateInfo}, {@link VkFenceGetFdInfoKHR}, {@link VkFenceGetWin32HandleInfoKHR}, {@link VkFormatProperties2KHR}, {@link VkFramebufferCreateInfo}, {@link VkGraphicsPipelineCreateInfo}, {@link VkHdrMetadataEXT}, {@link VkIOSSurfaceCreateInfoMVK}, {@link VkImageCreateInfo}, {@link VkImageFormatListCreateInfoKHR}, {@link VkImageFormatProperties2KHR}, {@link VkImageMemoryBarrier}, {@link VkImageMemoryRequirementsInfo2KHR}, {@link VkImagePlaneMemoryRequirementsInfoKHR}, {@link VkImageSparseMemoryRequirementsInfo2KHR}, {@link VkImageSwapchainCreateInfoKHX}, {@link VkImageViewCreateInfo}, {@link VkImageViewUsageCreateInfoKHR}, {@link VkImportFenceFdInfoKHR}, {@link VkImportFenceWin32HandleInfoKHR}, {@link VkImportMemoryFdInfoKHR}, {@link VkImportMemoryHostPointerInfoEXT}, {@link VkImportMemoryWin32HandleInfoKHR}, {@link VkImportMemoryWin32HandleInfoNV}, 
{@link VkImportSemaphoreFdInfoKHR}, {@link VkImportSemaphoreWin32HandleInfoKHR}, {@link VkIndirectCommandsLayoutCreateInfoNVX}, {@link VkInstanceCreateInfo}, {@link VkMacOSSurfaceCreateInfoMVK}, {@link VkMappedMemoryRange}, {@link VkMemoryAllocateFlagsInfoKHX}, {@link VkMemoryAllocateInfo}, {@link VkMemoryBarrier}, {@link VkMemoryDedicatedAllocateInfoKHR}, {@link VkMemoryDedicatedRequirementsKHR}, {@link VkMemoryFdPropertiesKHR}, {@link VkMemoryGetFdInfoKHR}, {@link VkMemoryGetWin32HandleInfoKHR}, {@link VkMemoryHostPointerPropertiesEXT}, {@link VkMemoryRequirements2KHR}, {@link VkMemoryWin32HandlePropertiesKHR}, {@link VkMirSurfaceCreateInfoKHR}, {@link VkMultisamplePropertiesEXT}, {@link VkObjectTableCreateInfoNVX}, {@link VkPhysicalDevice16BitStorageFeaturesKHR}, {@link VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT}, {@link VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT}, {@link VkPhysicalDeviceConservativeRasterizationPropertiesEXT}, {@link VkPhysicalDeviceDiscardRectanglePropertiesEXT}, {@link VkPhysicalDeviceExternalBufferInfoKHR}, {@link VkPhysicalDeviceExternalFenceInfoKHR}, {@link VkPhysicalDeviceExternalImageFormatInfoKHR}, {@link VkPhysicalDeviceExternalMemoryHostPropertiesEXT}, {@link VkPhysicalDeviceExternalSemaphoreInfoKHR}, {@link VkPhysicalDeviceFeatures2KHR}, {@link VkPhysicalDeviceGroupPropertiesKHX}, {@link VkPhysicalDeviceIDPropertiesKHR}, {@link VkPhysicalDeviceImageFormatInfo2KHR}, {@link VkPhysicalDeviceMemoryProperties2KHR}, {@link VkPhysicalDeviceMultiviewFeaturesKHX}, {@link VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX}, {@link VkPhysicalDeviceMultiviewPropertiesKHX}, {@link VkPhysicalDevicePointClippingPropertiesKHR}, {@link VkPhysicalDeviceProperties2KHR}, {@link VkPhysicalDevicePushDescriptorPropertiesKHR}, {@link VkPhysicalDeviceSampleLocationsPropertiesEXT}, {@link VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT}, {@link VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR}, {@link VkPhysicalDeviceSparseImageFormatInfo2KHR}, {@link VkPhysicalDeviceSurfaceInfo2KHR}, {@link VkPhysicalDeviceVariablePointerFeaturesKHR}, {@link VkPipelineCacheCreateInfo}, {@link VkPipelineColorBlendAdvancedStateCreateInfoEXT}, {@link VkPipelineColorBlendStateCreateInfo}, {@link VkPipelineCoverageModulationStateCreateInfoNV}, {@link VkPipelineCoverageToColorStateCreateInfoNV}, {@link VkPipelineDepthStencilStateCreateInfo}, {@link VkPipelineDiscardRectangleStateCreateInfoEXT}, {@link VkPipelineDynamicStateCreateInfo}, {@link VkPipelineInputAssemblyStateCreateInfo}, {@link VkPipelineLayoutCreateInfo}, {@link VkPipelineMultisampleStateCreateInfo}, {@link VkPipelineRasterizationConservativeStateCreateInfoEXT}, {@link VkPipelineRasterizationStateCreateInfo}, {@link VkPipelineRasterizationStateRasterizationOrderAMD}, {@link VkPipelineSampleLocationsStateCreateInfoEXT}, {@link VkPipelineShaderStageCreateInfo}, {@link VkPipelineTessellationDomainOriginStateCreateInfoKHR}, {@link VkPipelineTessellationStateCreateInfo}, {@link VkPipelineVertexInputStateCreateInfo}, {@link VkPipelineViewportStateCreateInfo}, {@link VkPipelineViewportSwizzleStateCreateInfoNV}, {@link VkPipelineViewportWScalingStateCreateInfoNV}, {@link VkPresentInfoKHR}, {@link VkPresentRegionsKHR}, {@link VkPresentTimesInfoGOOGLE}, {@link VkQueryPoolCreateInfo}, {@link VkQueueFamilyProperties2KHR}, {@link VkRenderPassBeginInfo}, {@link VkRenderPassCreateInfo}, {@link VkRenderPassInputAttachmentAspectCreateInfoKHR}, {@link VkRenderPassMultiviewCreateInfoKHX}, {@link VkRenderPassSampleLocationsBeginInfoEXT}, 
{@link VkSampleLocationsInfoEXT}, {@link VkSamplerCreateInfo}, {@link VkSamplerReductionModeCreateInfoEXT}, {@link VkSamplerYcbcrConversionCreateInfoKHR}, {@link VkSamplerYcbcrConversionImageFormatPropertiesKHR}, {@link VkSamplerYcbcrConversionInfoKHR}, {@link VkSemaphoreCreateInfo}, {@link VkSemaphoreGetFdInfoKHR}, {@link VkSemaphoreGetWin32HandleInfoKHR}, {@link VkShaderModuleCreateInfo}, {@link VkShaderModuleValidationCacheCreateInfoEXT}, {@link VkSharedPresentSurfaceCapabilitiesKHR}, {@link VkSparseImageFormatProperties2KHR}, {@link VkSparseImageMemoryRequirements2KHR}, {@link VkSubmitInfo}, {@link VkSurfaceCapabilities2EXT}, {@link VkSurfaceCapabilities2KHR}, {@link VkSurfaceFormat2KHR}, {@link VkSwapchainCounterCreateInfoEXT}, {@link VkSwapchainCreateInfoKHR}, {@link VkTextureLODGatherFormatPropertiesAMD}, {@link VkValidationCacheCreateInfoEXT}, {@link VkValidationFlagsEXT}, {@link VkViSurfaceCreateInfoNN}, {@link VkWaylandSurfaceCreateInfoKHR}, {@link VkWin32KeyedMutexAcquireReleaseInfoKHR}, {@link VkWin32KeyedMutexAcquireReleaseInfoNV}, {@link VkWin32SurfaceCreateInfoKHR}, {@link VkWriteDescriptorSet}, {@link VkXcbSurfaceCreateInfoKHR}, {@link VkXlibSurfaceCreateInfoKHR}
     *
     * <h5>Enum values:</h5>
     *
  • {@link #VK_STRUCTURE_TYPE_APPLICATION_INFO STRUCTURE_TYPE_APPLICATION_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO STRUCTURE_TYPE_INSTANCE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO STRUCTURE_TYPE_DEVICE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_SUBMIT_INFO STRUCTURE_TYPE_SUBMIT_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE STRUCTURE_TYPE_MAPPED_MEMORY_RANGE}
  • *
  • {@link #VK_STRUCTURE_TYPE_BIND_SPARSE_INFO STRUCTURE_TYPE_BIND_SPARSE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_FENCE_CREATE_INFO STRUCTURE_TYPE_FENCE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_EVENT_CREATE_INFO STRUCTURE_TYPE_EVENT_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO STRUCTURE_TYPE_BUFFER_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO STRUCTURE_TYPE_SAMPLER_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET}
  • *
  • {@link #VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET STRUCTURE_TYPE_COPY_DESCRIPTOR_SET}
  • *
  • {@link #VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO}
  • *
  • {@link #VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER}
  • *
  • {@link #VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER}
  • *
  • {@link #VK_STRUCTURE_TYPE_MEMORY_BARRIER STRUCTURE_TYPE_MEMORY_BARRIER}
  • *
     */
    public static final int
        VK_STRUCTURE_TYPE_APPLICATION_INFO                          = 0,
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO                      = 1,
        VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO                  = 2,
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO                        = 3,
        VK_STRUCTURE_TYPE_SUBMIT_INFO                               = 4,
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO                      = 5,
        VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE                       = 6,
        VK_STRUCTURE_TYPE_BIND_SPARSE_INFO                          = 7,
        VK_STRUCTURE_TYPE_FENCE_CREATE_INFO                         = 8,
        VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO                     = 9,
        VK_STRUCTURE_TYPE_EVENT_CREATE_INFO                         = 10,
        VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO                    = 11,
        VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO                        = 12,
        VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO                   = 13,
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO                         = 14,
        VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO                    = 15,
        VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO                 = 16,
        VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO                = 17,
        VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO         = 18,
        VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO   = 19,
        VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20,
        VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO   = 21,
        VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO       = 22,
        VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO  = 23,
        VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO    = 24,
        VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO  = 25,
        VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO    = 26,
        VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO        = 27,
        VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO             = 28,
        VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO              = 29,
        VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO               = 30,
        VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO                       = 31,
        VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO         = 32,
        VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO               = 33,
        VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO              = 34,
        VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET                      = 35,
        VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET                       = 36,
        VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO                   = 37,
        VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO                   = 38,
        VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO                  = 39,
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO              = 40,
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO           = 41,
        VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO                 = 42,
        VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO                    = 43,
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER                     = 44,
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER                      = 45,
        VK_STRUCTURE_TYPE_MEMORY_BARRIER                            = 46,
        VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO               = 47,
        VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO                 = 48;

    /**
     * VkSystemAllocationScope - Allocation scope
     *
     * <h5>Description</h5>
     *
     * <ul>
     * <li>{@link #VK_SYSTEM_ALLOCATION_SCOPE_COMMAND SYSTEM_ALLOCATION_SCOPE_COMMAND} specifies that the allocation is scoped to the duration of the Vulkan command.</li>
     * <li>{@link #VK_SYSTEM_ALLOCATION_SCOPE_OBJECT SYSTEM_ALLOCATION_SCOPE_OBJECT} specifies that the allocation is scoped to the lifetime of the Vulkan object that is being created or used.</li>
     * <li>{@link #VK_SYSTEM_ALLOCATION_SCOPE_CACHE SYSTEM_ALLOCATION_SCOPE_CACHE} specifies that the allocation is scoped to the lifetime of a {@code VkPipelineCache} or {@code VkValidationCacheEXT} object.</li>
     * <li>{@link #VK_SYSTEM_ALLOCATION_SCOPE_DEVICE SYSTEM_ALLOCATION_SCOPE_DEVICE} specifies that the allocation is scoped to the lifetime of the Vulkan device.</li>
     * <li>{@link #VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE SYSTEM_ALLOCATION_SCOPE_INSTANCE} specifies that the allocation is scoped to the lifetime of the Vulkan instance.</li>
     * </ul>
     *
     * <p>Most Vulkan commands operate on a single object, or there is a sole object that is being created or manipulated. When an allocation uses an allocation scope of {@link #VK_SYSTEM_ALLOCATION_SCOPE_OBJECT SYSTEM_ALLOCATION_SCOPE_OBJECT} or {@link #VK_SYSTEM_ALLOCATION_SCOPE_CACHE SYSTEM_ALLOCATION_SCOPE_CACHE}, the allocation is scoped to the object being created or manipulated.</p>
     *
     * <p>When an implementation requires host memory, it will make callbacks to the application using the most specific allocator and allocation scope available (an illustrative sketch follows the list below):</p>
     *
     * <ul>
     * <li>If an allocation is scoped to the duration of a command, the allocator will use the {@link #VK_SYSTEM_ALLOCATION_SCOPE_COMMAND SYSTEM_ALLOCATION_SCOPE_COMMAND} allocation scope. The most specific allocator available is used: if the object being created or manipulated has an allocator, that object's allocator will be used, else if the parent {@code VkDevice} has an allocator it will be used, else if the parent {@code VkInstance} has an allocator it will be used. Else,</li>
     * <li>If an allocation is associated with an object of type {@code VkValidationCacheEXT} or {@code VkPipelineCache}, the allocator will use the {@link #VK_SYSTEM_ALLOCATION_SCOPE_CACHE SYSTEM_ALLOCATION_SCOPE_CACHE} allocation scope. The most specific allocator available is used (cache, else device, else instance). Else,</li>
     * <li>If an allocation is scoped to the lifetime of an object, that object is being created or manipulated by the command, and that object's type is not {@code VkDevice} or {@code VkInstance}, the allocator will use an allocation scope of {@link #VK_SYSTEM_ALLOCATION_SCOPE_OBJECT SYSTEM_ALLOCATION_SCOPE_OBJECT}. The most specific allocator available is used (object, else device, else instance). Else,</li>
     * <li>If an allocation is scoped to the lifetime of a device, the allocator will use an allocation scope of {@link #VK_SYSTEM_ALLOCATION_SCOPE_DEVICE SYSTEM_ALLOCATION_SCOPE_DEVICE}. The most specific allocator available is used (device, else instance). Else,</li>
     * <li>If the allocation is scoped to the lifetime of an instance and the instance has an allocator, its allocator will be used with an allocation scope of {@link #VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE SYSTEM_ALLOCATION_SCOPE_INSTANCE}.</li>
     * <li>Otherwise an implementation will allocate memory through an alternative mechanism that is unspecified.</li>
     * </ul>
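     *
     * <p>A minimal sketch of a host-allocation callback that only inspects the reported scope. The method shape mirrors {@code PFN_vkAllocationFunction}, and {@code nmemAlignedAlloc} comes from {@code org.lwjgl.system.MemoryUtil}; the helper name and the branching are illustrative only, not part of these bindings:</p>
     *
     * <pre>{@code
     * static long onHostAllocation(long pUserData, long size, long alignment, int allocationScope) {
     *     if (allocationScope == VK_SYSTEM_ALLOCATION_SCOPE_COMMAND) {
     *         // short-lived allocation, released before the command returns
     *     } else if (allocationScope == VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE) {
     *         // lives for the lifetime of the VkInstance
     *     }
     *     return nmemAlignedAlloc(alignment, size);
     * }
     * }</pre>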
     *
     * <h5>See Also</h5>
     *
     * <p>{@link VkAllocationCallbacks}</p>
     */
    public static final int
        VK_SYSTEM_ALLOCATION_SCOPE_COMMAND  = 0,
        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT   = 1,
        VK_SYSTEM_ALLOCATION_SCOPE_CACHE    = 2,
        VK_SYSTEM_ALLOCATION_SCOPE_DEVICE   = 3,
        VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4;

    /**
     * VkInternalAllocationType - Allocation type
     *
     * <h5>Description</h5>
     *
     * <ul>
     * <li>{@link #VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE INTERNAL_ALLOCATION_TYPE_EXECUTABLE} specifies that the allocation is intended for execution by the host.</li>
     * </ul>
     *
     * <h5>See Also</h5>
     *
     * <p>{@link VkInternalAllocationNotification}, {@link VkInternalFreeNotification}</p>
     */
    public static final int VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0;

    /**
     * VkFormat - Available image formats
     *
     * <h5>Description</h5>
     *
  • {@link #VK_FORMAT_UNDEFINED FORMAT_UNDEFINED} indicates that the format is not specified.
  • *
  • {@link #VK_FORMAT_R4G4_UNORM_PACK8 FORMAT_R4G4_UNORM_PACK8} specifies a two-component, 8-bit packed unsigned normalized format that has a 4-bit R component in bits 4..7, and a 4-bit G component in bits 0..3.
  • *
  • {@link #VK_FORMAT_R4G4B4A4_UNORM_PACK16 FORMAT_R4G4B4A4_UNORM_PACK16} specifies a four-component, 16-bit packed unsigned normalized format that has a 4-bit R component in bits 12..15, a 4-bit G component in bits 8..11, a 4-bit B component in bits 4..7, and a 4-bit A component in bits 0..3.
  • *
  • {@link #VK_FORMAT_B4G4R4A4_UNORM_PACK16 FORMAT_B4G4R4A4_UNORM_PACK16} specifies a four-component, 16-bit packed unsigned normalized format that has a 4-bit B component in bits 12..15, a 4-bit G component in bits 8..11, a 4-bit R component in bits 4..7, and a 4-bit A component in bits 0..3.
  • *
  • {@link #VK_FORMAT_R5G6B5_UNORM_PACK16 FORMAT_R5G6B5_UNORM_PACK16} specifies a three-component, 16-bit packed unsigned normalized format that has a 5-bit R component in bits 11..15, a 6-bit G component in bits 5..10, and a 5-bit B component in bits 0..4.
  • *
  • {@link #VK_FORMAT_B5G6R5_UNORM_PACK16 FORMAT_B5G6R5_UNORM_PACK16} specifies a three-component, 16-bit packed unsigned normalized format that has a 5-bit B component in bits 11..15, a 6-bit G component in bits 5..10, and a 5-bit R component in bits 0..4.
  • *
  • {@link #VK_FORMAT_R5G5B5A1_UNORM_PACK16 FORMAT_R5G5B5A1_UNORM_PACK16} specifies a four-component, 16-bit packed unsigned normalized format that has a 5-bit R component in bits 11..15, a 5-bit G component in bits 6..10, a 5-bit B component in bits 1..5, and a 1-bit A component in bit 0.
  • *
  • {@link #VK_FORMAT_B5G5R5A1_UNORM_PACK16 FORMAT_B5G5R5A1_UNORM_PACK16} specifies a four-component, 16-bit packed unsigned normalized format that has a 5-bit B component in bits 11..15, a 5-bit G component in bits 6..10, a 5-bit R component in bits 1..5, and a 1-bit A component in bit 0.
  • *
  • {@link #VK_FORMAT_A1R5G5B5_UNORM_PACK16 FORMAT_A1R5G5B5_UNORM_PACK16} specifies a four-component, 16-bit packed unsigned normalized format that has a 1-bit A component in bit 15, a 5-bit R component in bits 10..14, a 5-bit G component in bits 5..9, and a 5-bit B component in bits 0..4.
  • *
  • {@link #VK_FORMAT_R8_UNORM FORMAT_R8_UNORM} specifies a one-component, 8-bit unsigned normalized format that has a single 8-bit R component.
  • *
  • {@link #VK_FORMAT_R8_SNORM FORMAT_R8_SNORM} specifies a one-component, 8-bit signed normalized format that has a single 8-bit R component.
  • *
  • {@link #VK_FORMAT_R8_USCALED FORMAT_R8_USCALED} specifies a one-component, 8-bit unsigned scaled integer format that has a single 8-bit R component.
  • *
  • {@link #VK_FORMAT_R8_SSCALED FORMAT_R8_SSCALED} specifies a one-component, 8-bit signed scaled integer format that has a single 8-bit R component.
  • *
  • {@link #VK_FORMAT_R8_UINT FORMAT_R8_UINT} specifies a one-component, 8-bit unsigned integer format that has a single 8-bit R component.
  • *
  • {@link #VK_FORMAT_R8_SINT FORMAT_R8_SINT} specifies a one-component, 8-bit signed integer format that has a single 8-bit R component.
  • *
  • {@link #VK_FORMAT_R8_SRGB FORMAT_R8_SRGB} specifies a one-component, 8-bit unsigned normalized format that has a single 8-bit R component stored with sRGB nonlinear encoding.
  • *
  • {@link #VK_FORMAT_R8G8_UNORM FORMAT_R8G8_UNORM} specifies a two-component, 16-bit unsigned normalized format that has an 8-bit R component in byte 0, and an 8-bit G component in byte 1.
  • *
  • {@link #VK_FORMAT_R8G8_SNORM FORMAT_R8G8_SNORM} specifies a two-component, 16-bit signed normalized format that has an 8-bit R component in byte 0, and an 8-bit G component in byte 1.
  • *
  • {@link #VK_FORMAT_R8G8_USCALED FORMAT_R8G8_USCALED} specifies a two-component, 16-bit unsigned scaled integer format that has an 8-bit R component in byte 0, and an 8-bit G component in byte 1.
  • *
  • {@link #VK_FORMAT_R8G8_SSCALED FORMAT_R8G8_SSCALED} specifies a two-component, 16-bit signed scaled integer format that has an 8-bit R component in byte 0, and an 8-bit G component in byte 1.
  • *
  • {@link #VK_FORMAT_R8G8_UINT FORMAT_R8G8_UINT} specifies a two-component, 16-bit unsigned integer format that has an 8-bit R component in byte 0, and an 8-bit G component in byte 1.
  • *
  • {@link #VK_FORMAT_R8G8_SINT FORMAT_R8G8_SINT} specifies a two-component, 16-bit signed integer format that has an 8-bit R component in byte 0, and an 8-bit G component in byte 1.
  • *
  • {@link #VK_FORMAT_R8G8_SRGB FORMAT_R8G8_SRGB} specifies a two-component, 16-bit unsigned normalized format that has an 8-bit R component stored with sRGB nonlinear encoding in byte 0, and an 8-bit G component stored with sRGB nonlinear encoding in byte 1.
  • *
  • {@link #VK_FORMAT_R8G8B8_UNORM FORMAT_R8G8B8_UNORM} specifies a three-component, 24-bit unsigned normalized format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, and an 8-bit B component in byte 2.
  • *
  • {@link #VK_FORMAT_R8G8B8_SNORM FORMAT_R8G8B8_SNORM} specifies a three-component, 24-bit signed normalized format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, and an 8-bit B component in byte 2.
  • *
  • {@link #VK_FORMAT_R8G8B8_USCALED FORMAT_R8G8B8_USCALED} specifies a three-component, 24-bit unsigned scaled format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, and an 8-bit B component in byte 2.
  • *
  • {@link #VK_FORMAT_R8G8B8_SSCALED FORMAT_R8G8B8_SSCALED} specifies a three-component, 24-bit signed scaled format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, and an 8-bit B component in byte 2.
  • *
  • {@link #VK_FORMAT_R8G8B8_UINT FORMAT_R8G8B8_UINT} specifies a three-component, 24-bit unsigned integer format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, and an 8-bit B component in byte 2.
  • *
  • {@link #VK_FORMAT_R8G8B8_SINT FORMAT_R8G8B8_SINT} specifies a three-component, 24-bit signed integer format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, and an 8-bit B component in byte 2.
  • *
  • {@link #VK_FORMAT_R8G8B8_SRGB FORMAT_R8G8B8_SRGB} specifies a three-component, 24-bit unsigned normalized format that has an 8-bit R component stored with sRGB nonlinear encoding in byte 0, an 8-bit G component stored with sRGB nonlinear encoding in byte 1, and an 8-bit B component stored with sRGB nonlinear encoding in byte 2.
  • *
  • {@link #VK_FORMAT_B8G8R8_UNORM FORMAT_B8G8R8_UNORM} specifies a three-component, 24-bit unsigned normalized format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, and an 8-bit R component in byte 2.
  • *
  • {@link #VK_FORMAT_B8G8R8_SNORM FORMAT_B8G8R8_SNORM} specifies a three-component, 24-bit signed normalized format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, and an 8-bit R component in byte 2.
  • *
  • {@link #VK_FORMAT_B8G8R8_USCALED FORMAT_B8G8R8_USCALED} specifies a three-component, 24-bit unsigned scaled format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, and an 8-bit R component in byte 2.
  • *
  • {@link #VK_FORMAT_B8G8R8_SSCALED FORMAT_B8G8R8_SSCALED} specifies a three-component, 24-bit signed scaled format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, and an 8-bit R component in byte 2.
  • *
  • {@link #VK_FORMAT_B8G8R8_UINT FORMAT_B8G8R8_UINT} specifies a three-component, 24-bit unsigned integer format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, and an 8-bit R component in byte 2.
  • *
  • {@link #VK_FORMAT_B8G8R8_SINT FORMAT_B8G8R8_SINT} specifies a three-component, 24-bit signed integer format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, and an 8-bit R component in byte 2.
  • *
  • {@link #VK_FORMAT_B8G8R8_SRGB FORMAT_B8G8R8_SRGB} specifies a three-component, 24-bit unsigned normalized format that has an 8-bit B component stored with sRGB nonlinear encoding in byte 0, an 8-bit G component stored with sRGB nonlinear encoding in byte 1, and an 8-bit R component stored with sRGB nonlinear encoding in byte 2.
  • *
  • {@link #VK_FORMAT_R8G8B8A8_UNORM FORMAT_R8G8B8A8_UNORM} specifies a four-component, 32-bit unsigned normalized format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, an 8-bit B component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_R8G8B8A8_SNORM FORMAT_R8G8B8A8_SNORM} specifies a four-component, 32-bit signed normalized format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, an 8-bit B component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_R8G8B8A8_USCALED FORMAT_R8G8B8A8_USCALED} specifies a four-component, 32-bit unsigned scaled format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, an 8-bit B component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_R8G8B8A8_SSCALED FORMAT_R8G8B8A8_SSCALED} specifies a four-component, 32-bit signed scaled format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, an 8-bit B component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_R8G8B8A8_UINT FORMAT_R8G8B8A8_UINT} specifies a four-component, 32-bit unsigned integer format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, an 8-bit B component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_R8G8B8A8_SINT FORMAT_R8G8B8A8_SINT} specifies a four-component, 32-bit signed integer format that has an 8-bit R component in byte 0, an 8-bit G component in byte 1, an 8-bit B component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_R8G8B8A8_SRGB FORMAT_R8G8B8A8_SRGB} specifies a four-component, 32-bit unsigned normalized format that has an 8-bit R component stored with sRGB nonlinear encoding in byte 0, an 8-bit G component stored with sRGB nonlinear encoding in byte 1, an 8-bit B component stored with sRGB nonlinear encoding in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_B8G8R8A8_UNORM FORMAT_B8G8R8A8_UNORM} specifies a four-component, 32-bit unsigned normalized format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, an 8-bit R component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_B8G8R8A8_SNORM FORMAT_B8G8R8A8_SNORM} specifies a four-component, 32-bit signed normalized format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, an 8-bit R component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_B8G8R8A8_USCALED FORMAT_B8G8R8A8_USCALED} specifies a four-component, 32-bit unsigned scaled format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, an 8-bit R component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_B8G8R8A8_SSCALED FORMAT_B8G8R8A8_SSCALED} specifies a four-component, 32-bit signed scaled format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, an 8-bit R component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_B8G8R8A8_UINT FORMAT_B8G8R8A8_UINT} specifies a four-component, 32-bit unsigned integer format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, an 8-bit R component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_B8G8R8A8_SINT FORMAT_B8G8R8A8_SINT} specifies a four-component, 32-bit signed integer format that has an 8-bit B component in byte 0, an 8-bit G component in byte 1, an 8-bit R component in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_B8G8R8A8_SRGB FORMAT_B8G8R8A8_SRGB} specifies a four-component, 32-bit unsigned normalized format that has an 8-bit B component stored with sRGB nonlinear encoding in byte 0, an 8-bit G component stored with sRGB nonlinear encoding in byte 1, an 8-bit R component stored with sRGB nonlinear encoding in byte 2, and an 8-bit A component in byte 3.
  • *
  • {@link #VK_FORMAT_A8B8G8R8_UNORM_PACK32 FORMAT_A8B8G8R8_UNORM_PACK32} specifies a four-component, 32-bit packed unsigned normalized format that has an 8-bit A component in bits 24..31, an 8-bit B component in bits 16..23, an 8-bit G component in bits 8..15, and an 8-bit R component in bits 0..7.
  • *
  • {@link #VK_FORMAT_A8B8G8R8_SNORM_PACK32 FORMAT_A8B8G8R8_SNORM_PACK32} specifies a four-component, 32-bit packed signed normalized format that has an 8-bit A component in bits 24..31, an 8-bit B component in bits 16..23, an 8-bit G component in bits 8..15, and an 8-bit R component in bits 0..7.
  • *
  • {@link #VK_FORMAT_A8B8G8R8_USCALED_PACK32 FORMAT_A8B8G8R8_USCALED_PACK32} specifies a four-component, 32-bit packed unsigned scaled integer format that has an 8-bit A component in bits 24..31, an 8-bit B component in bits 16..23, an 8-bit G component in bits 8..15, and an 8-bit R component in bits 0..7.
  • *
  • {@link #VK_FORMAT_A8B8G8R8_SSCALED_PACK32 FORMAT_A8B8G8R8_SSCALED_PACK32} specifies a four-component, 32-bit packed signed scaled integer format that has an 8-bit A component in bits 24..31, an 8-bit B component in bits 16..23, an 8-bit G component in bits 8..15, and an 8-bit R component in bits 0..7.
  • *
  • {@link #VK_FORMAT_A8B8G8R8_UINT_PACK32 FORMAT_A8B8G8R8_UINT_PACK32} specifies a four-component, 32-bit packed unsigned integer format that has an 8-bit A component in bits 24..31, an 8-bit B component in bits 16..23, an 8-bit G component in bits 8..15, and an 8-bit R component in bits 0..7.
  • *
  • {@link #VK_FORMAT_A8B8G8R8_SINT_PACK32 FORMAT_A8B8G8R8_SINT_PACK32} specifies a four-component, 32-bit packed signed integer format that has an 8-bit A component in bits 24..31, an 8-bit B component in bits 16..23, an 8-bit G component in bits 8..15, and an 8-bit R component in bits 0..7.
  • *
  • {@link #VK_FORMAT_A8B8G8R8_SRGB_PACK32 FORMAT_A8B8G8R8_SRGB_PACK32} specifies a four-component, 32-bit packed unsigned normalized format that has an 8-bit A component in bits 24..31, an 8-bit B component stored with sRGB nonlinear encoding in bits 16..23, an 8-bit G component stored with sRGB nonlinear encoding in bits 8..15, and an 8-bit R component stored with sRGB nonlinear encoding in bits 0..7.
  • *
  • {@link #VK_FORMAT_A2R10G10B10_UNORM_PACK32 FORMAT_A2R10G10B10_UNORM_PACK32} specifies a four-component, 32-bit packed unsigned normalized format that has a 2-bit A component in bits 30..31, a 10-bit R component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit B component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2R10G10B10_SNORM_PACK32 FORMAT_A2R10G10B10_SNORM_PACK32} specifies a four-component, 32-bit packed signed normalized format that has a 2-bit A component in bits 30..31, a 10-bit R component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit B component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2R10G10B10_USCALED_PACK32 FORMAT_A2R10G10B10_USCALED_PACK32} specifies a four-component, 32-bit packed unsigned scaled integer format that has a 2-bit A component in bits 30..31, a 10-bit R component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit B component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2R10G10B10_SSCALED_PACK32 FORMAT_A2R10G10B10_SSCALED_PACK32} specifies a four-component, 32-bit packed signed scaled integer format that has a 2-bit A component in bits 30..31, a 10-bit R component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit B component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2R10G10B10_UINT_PACK32 FORMAT_A2R10G10B10_UINT_PACK32} specifies a four-component, 32-bit packed unsigned integer format that has a 2-bit A component in bits 30..31, a 10-bit R component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit B component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2R10G10B10_SINT_PACK32 FORMAT_A2R10G10B10_SINT_PACK32} specifies a four-component, 32-bit packed signed integer format that has a 2-bit A component in bits 30..31, a 10-bit R component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit B component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2B10G10R10_UNORM_PACK32 FORMAT_A2B10G10R10_UNORM_PACK32} specifies a four-component, 32-bit packed unsigned normalized format that has a 2-bit A component in bits 30..31, a 10-bit B component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit R component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2B10G10R10_SNORM_PACK32 FORMAT_A2B10G10R10_SNORM_PACK32} specifies a four-component, 32-bit packed signed normalized format that has a 2-bit A component in bits 30..31, a 10-bit B component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit R component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2B10G10R10_USCALED_PACK32 FORMAT_A2B10G10R10_USCALED_PACK32} specifies a four-component, 32-bit packed unsigned scaled integer format that has a 2-bit A component in bits 30..31, a 10-bit B component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit R component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2B10G10R10_SSCALED_PACK32 FORMAT_A2B10G10R10_SSCALED_PACK32} specifies a four-component, 32-bit packed signed scaled integer format that has a 2-bit A component in bits 30..31, a 10-bit B component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit R component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2B10G10R10_UINT_PACK32 FORMAT_A2B10G10R10_UINT_PACK32} specifies a four-component, 32-bit packed unsigned integer format that has a 2-bit A component in bits 30..31, a 10-bit B component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit R component in bits 0..9.
  • *
  • {@link #VK_FORMAT_A2B10G10R10_SINT_PACK32 FORMAT_A2B10G10R10_SINT_PACK32} specifies a four-component, 32-bit packed signed integer format that has a 2-bit A component in bits 30..31, a 10-bit B component in bits 20..29, a 10-bit G component in bits 10..19, and a 10-bit R component in bits 0..9.
  • *
  • {@link #VK_FORMAT_R16_UNORM FORMAT_R16_UNORM} specifies a one-component, 16-bit unsigned normalized format that has a single 16-bit R component.
  • *
  • {@link #VK_FORMAT_R16_SNORM FORMAT_R16_SNORM} specifies a one-component, 16-bit signed normalized format that has a single 16-bit R component.
  • *
  • {@link #VK_FORMAT_R16_USCALED FORMAT_R16_USCALED} specifies a one-component, 16-bit unsigned scaled integer format that has a single 16-bit R component.
  • *
  • {@link #VK_FORMAT_R16_SSCALED FORMAT_R16_SSCALED} specifies a one-component, 16-bit signed scaled integer format that has a single 16-bit R component.
  • *
  • {@link #VK_FORMAT_R16_UINT FORMAT_R16_UINT} specifies a one-component, 16-bit unsigned integer format that has a single 16-bit R component.
  • *
  • {@link #VK_FORMAT_R16_SINT FORMAT_R16_SINT} specifies a one-component, 16-bit signed integer format that has a single 16-bit R component.
  • *
  • {@link #VK_FORMAT_R16_SFLOAT FORMAT_R16_SFLOAT} specifies a one-component, 16-bit signed floating-point format that has a single 16-bit R component.
  • *
  • {@link #VK_FORMAT_R16G16_UNORM FORMAT_R16G16_UNORM} specifies a two-component, 32-bit unsigned normalized format that has a 16-bit R component in bytes 0..1, and a 16-bit G component in bytes 2..3.
  • *
  • {@link #VK_FORMAT_R16G16_SNORM FORMAT_R16G16_SNORM} specifies a two-component, 32-bit signed normalized format that has a 16-bit R component in bytes 0..1, and a 16-bit G component in bytes 2..3.
  • *
  • {@link #VK_FORMAT_R16G16_USCALED FORMAT_R16G16_USCALED} specifies a two-component, 32-bit unsigned scaled integer format that has a 16-bit R component in bytes 0..1, and a 16-bit G component in bytes 2..3.
  • *
  • {@link #VK_FORMAT_R16G16_SSCALED FORMAT_R16G16_SSCALED} specifies a two-component, 32-bit signed scaled integer format that has a 16-bit R component in bytes 0..1, and a 16-bit G component in bytes 2..3.
  • *
  • {@link #VK_FORMAT_R16G16_UINT FORMAT_R16G16_UINT} specifies a two-component, 32-bit unsigned integer format that has a 16-bit R component in bytes 0..1, and a 16-bit G component in bytes 2..3.
  • *
  • {@link #VK_FORMAT_R16G16_SINT FORMAT_R16G16_SINT} specifies a two-component, 32-bit signed integer format that has a 16-bit R component in bytes 0..1, and a 16-bit G component in bytes 2..3.
  • *
  • {@link #VK_FORMAT_R16G16_SFLOAT FORMAT_R16G16_SFLOAT} specifies a two-component, 32-bit signed floating-point format that has a 16-bit R component in bytes 0..1, and a 16-bit G component in bytes 2..3.
  • *
  • {@link #VK_FORMAT_R16G16B16_UNORM FORMAT_R16G16B16_UNORM} specifies a three-component, 48-bit unsigned normalized format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, and a 16-bit B component in bytes 4..5.
  • *
  • {@link #VK_FORMAT_R16G16B16_SNORM FORMAT_R16G16B16_SNORM} specifies a three-component, 48-bit signed normalized format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, and a 16-bit B component in bytes 4..5.
  • *
  • {@link #VK_FORMAT_R16G16B16_USCALED FORMAT_R16G16B16_USCALED} specifies a three-component, 48-bit unsigned scaled integer format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, and a 16-bit B component in bytes 4..5.
  • *
  • {@link #VK_FORMAT_R16G16B16_SSCALED FORMAT_R16G16B16_SSCALED} specifies a three-component, 48-bit signed scaled integer format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, and a 16-bit B component in bytes 4..5.
  • *
  • {@link #VK_FORMAT_R16G16B16_UINT FORMAT_R16G16B16_UINT} specifies a three-component, 48-bit unsigned integer format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, and a 16-bit B component in bytes 4..5.
  • *
  • {@link #VK_FORMAT_R16G16B16_SINT FORMAT_R16G16B16_SINT} specifies a three-component, 48-bit signed integer format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, and a 16-bit B component in bytes 4..5.
  • *
  • {@link #VK_FORMAT_R16G16B16_SFLOAT FORMAT_R16G16B16_SFLOAT} specifies a three-component, 48-bit signed floating-point format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, and a 16-bit B component in bytes 4..5.
  • *
  • {@link #VK_FORMAT_R16G16B16A16_UNORM FORMAT_R16G16B16A16_UNORM} specifies a four-component, 64-bit unsigned normalized format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, a 16-bit B component in bytes 4..5, and a 16-bit A component in bytes 6..7.
  • *
  • {@link #VK_FORMAT_R16G16B16A16_SNORM FORMAT_R16G16B16A16_SNORM} specifies a four-component, 64-bit signed normalized format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, a 16-bit B component in bytes 4..5, and a 16-bit A component in bytes 6..7.
  • *
  • {@link #VK_FORMAT_R16G16B16A16_USCALED FORMAT_R16G16B16A16_USCALED} specifies a four-component, 64-bit unsigned scaled integer format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, a 16-bit B component in bytes 4..5, and a 16-bit A component in bytes 6..7.
  • *
  • {@link #VK_FORMAT_R16G16B16A16_SSCALED FORMAT_R16G16B16A16_SSCALED} specifies a four-component, 64-bit signed scaled integer format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, a 16-bit B component in bytes 4..5, and a 16-bit A component in bytes 6..7.
  • *
  • {@link #VK_FORMAT_R16G16B16A16_UINT FORMAT_R16G16B16A16_UINT} specifies a four-component, 64-bit unsigned integer format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, a 16-bit B component in bytes 4..5, and a 16-bit A component in bytes 6..7.
  • *
  • {@link #VK_FORMAT_R16G16B16A16_SINT FORMAT_R16G16B16A16_SINT} specifies a four-component, 64-bit signed integer format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, a 16-bit B component in bytes 4..5, and a 16-bit A component in bytes 6..7.
  • *
  • {@link #VK_FORMAT_R16G16B16A16_SFLOAT FORMAT_R16G16B16A16_SFLOAT} specifies a four-component, 64-bit signed floating-point format that has a 16-bit R component in bytes 0..1, a 16-bit G component in bytes 2..3, a 16-bit B component in bytes 4..5, and a 16-bit A component in bytes 6..7.
  • *
  • {@link #VK_FORMAT_R32_UINT FORMAT_R32_UINT} specifies a one-component, 32-bit unsigned integer format that has a single 32-bit R component.
  • *
  • {@link #VK_FORMAT_R32_SINT FORMAT_R32_SINT} specifies a one-component, 32-bit signed integer format that has a single 32-bit R component.
  • *
  • {@link #VK_FORMAT_R32_SFLOAT FORMAT_R32_SFLOAT} specifies a one-component, 32-bit signed floating-point format that has a single 32-bit R component.
  • *
  • {@link #VK_FORMAT_R32G32_UINT FORMAT_R32G32_UINT} specifies a two-component, 64-bit unsigned integer format that has a 32-bit R component in bytes 0..3, and a 32-bit G component in bytes 4..7.
  • *
  • {@link #VK_FORMAT_R32G32_SINT FORMAT_R32G32_SINT} specifies a two-component, 64-bit signed integer format that has a 32-bit R component in bytes 0..3, and a 32-bit G component in bytes 4..7.
  • *
  • {@link #VK_FORMAT_R32G32_SFLOAT FORMAT_R32G32_SFLOAT} specifies a two-component, 64-bit signed floating-point format that has a 32-bit R component in bytes 0..3, and a 32-bit G component in bytes 4..7.
  • *
  • {@link #VK_FORMAT_R32G32B32_UINT FORMAT_R32G32B32_UINT} specifies a three-component, 96-bit unsigned integer format that has a 32-bit R component in bytes 0..3, a 32-bit G component in bytes 4..7, and a 32-bit B component in bytes 8..11.
  • *
  • {@link #VK_FORMAT_R32G32B32_SINT FORMAT_R32G32B32_SINT} specifies a three-component, 96-bit signed integer format that has a 32-bit R component in bytes 0..3, a 32-bit G component in bytes 4..7, and a 32-bit B component in bytes 8..11.
  • *
  • {@link #VK_FORMAT_R32G32B32_SFLOAT FORMAT_R32G32B32_SFLOAT} specifies a three-component, 96-bit signed floating-point format that has a 32-bit R component in bytes 0..3, a 32-bit G component in bytes 4..7, and a 32-bit B component in bytes 8..11.
  • *
  • {@link #VK_FORMAT_R32G32B32A32_UINT FORMAT_R32G32B32A32_UINT} specifies a four-component, 128-bit unsigned integer format that has a 32-bit R component in bytes 0..3, a 32-bit G component in bytes 4..7, a 32-bit B component in bytes 8..11, and a 32-bit A component in bytes 12..15.
  • *
  • {@link #VK_FORMAT_R32G32B32A32_SINT FORMAT_R32G32B32A32_SINT} specifies a four-component, 128-bit signed integer format that has a 32-bit R component in bytes 0..3, a 32-bit G component in bytes 4..7, a 32-bit B component in bytes 8..11, and a 32-bit A component in bytes 12..15.
  • *
  • {@link #VK_FORMAT_R32G32B32A32_SFLOAT FORMAT_R32G32B32A32_SFLOAT} specifies a four-component, 128-bit signed floating-point format that has a 32-bit R component in bytes 0..3, a 32-bit G component in bytes 4..7, a 32-bit B component in bytes 8..11, and a 32-bit A component in bytes 12..15.
  • *
  • {@link #VK_FORMAT_R64_UINT FORMAT_R64_UINT} specifies a one-component, 64-bit unsigned integer format that has a single 64-bit R component.
  • *
  • {@link #VK_FORMAT_R64_SINT FORMAT_R64_SINT} specifies a one-component, 64-bit signed integer format that has a single 64-bit R component.
  • *
  • {@link #VK_FORMAT_R64_SFLOAT FORMAT_R64_SFLOAT} specifies a one-component, 64-bit signed floating-point format that has a single 64-bit R component.
  • *
  • {@link #VK_FORMAT_R64G64_UINT FORMAT_R64G64_UINT} specifies a two-component, 128-bit unsigned integer format that has a 64-bit R component in bytes 0..7, and a 64-bit G component in bytes 8..15.
  • *
  • {@link #VK_FORMAT_R64G64_SINT FORMAT_R64G64_SINT} specifies a two-component, 128-bit signed integer format that has a 64-bit R component in bytes 0..7, and a 64-bit G component in bytes 8..15.
  • *
  • {@link #VK_FORMAT_R64G64_SFLOAT FORMAT_R64G64_SFLOAT} specifies a two-component, 128-bit signed floating-point format that has a 64-bit R component in bytes 0..7, and a 64-bit G component in bytes 8..15.
  • *
  • {@link #VK_FORMAT_R64G64B64_UINT FORMAT_R64G64B64_UINT} specifies a three-component, 192-bit unsigned integer format that has a 64-bit R component in bytes 0..7, a 64-bit G component in bytes 8..15, and a 64-bit B component in bytes 16..23.
  • *
  • {@link #VK_FORMAT_R64G64B64_SINT FORMAT_R64G64B64_SINT} specifies a three-component, 192-bit signed integer format that has a 64-bit R component in bytes 0..7, a 64-bit G component in bytes 8..15, and a 64-bit B component in bytes 16..23.
  • *
  • {@link #VK_FORMAT_R64G64B64_SFLOAT FORMAT_R64G64B64_SFLOAT} specifies a three-component, 192-bit signed floating-point format that has a 64-bit R component in bytes 0..7, a 64-bit G component in bytes 8..15, and a 64-bit B component in bytes 16..23.
  • *
  • {@link #VK_FORMAT_R64G64B64A64_UINT FORMAT_R64G64B64A64_UINT} specifies a four-component, 256-bit unsigned integer format that has a 64-bit R component in bytes 0..7, a 64-bit G component in bytes 8..15, a 64-bit B component in bytes 16..23, and a 64-bit A component in bytes 24..31.
  • *
  • {@link #VK_FORMAT_R64G64B64A64_SINT FORMAT_R64G64B64A64_SINT} specifies a four-component, 256-bit signed integer format that has a 64-bit R component in bytes 0..7, a 64-bit G component in bytes 8..15, a 64-bit B component in bytes 16..23, and a 64-bit A component in bytes 24..31.
  • *
  • {@link #VK_FORMAT_R64G64B64A64_SFLOAT FORMAT_R64G64B64A64_SFLOAT} specifies a four-component, 256-bit signed floating-point format that has a 64-bit R component in bytes 0..7, a 64-bit G component in bytes 8..15, a 64-bit B component in bytes 16..23, and a 64-bit A component in bytes 24..31.
  • *
  • {@link #VK_FORMAT_B10G11R11_UFLOAT_PACK32 FORMAT_B10G11R11_UFLOAT_PACK32} specifies a three-component, 32-bit packed unsigned floating-point format that has a 10-bit B component in bits 22..31, an 11-bit G component in bits 11..21, an 11-bit R component in bits 0..10. See the “Unsigned 10-Bit Floating-Point Numbers” section and the “Unsigned 11-Bit Floating-Point Numbers” section.
  • *
  • {@link #VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 FORMAT_E5B9G9R9_UFLOAT_PACK32} specifies a three-component, 32-bit packed unsigned floating-point format that has a 5-bit shared exponent in bits 27..31, a 9-bit B component mantissa in bits 18..26, a 9-bit G component mantissa in bits 9..17, and a 9-bit R component mantissa in bits 0..8.
  • *
  • {@link #VK_FORMAT_D16_UNORM FORMAT_D16_UNORM} specifies a one-component, 16-bit unsigned normalized format that has a single 16-bit depth component.
  • *
  • {@link #VK_FORMAT_X8_D24_UNORM_PACK32 FORMAT_X8_D24_UNORM_PACK32} specifies a two-component, 32-bit format that has 24 unsigned normalized bits in the depth component and, optionally, 8 bits that are unused.
  • *
  • {@link #VK_FORMAT_D32_SFLOAT FORMAT_D32_SFLOAT} specifies a one-component, 32-bit signed floating-point format that has 32 bits in the depth component.
  • *
  • {@link #VK_FORMAT_S8_UINT FORMAT_S8_UINT} specifies a one-component, 8-bit unsigned integer format that has 8 bits in the stencil component.
  • *
  • {@link #VK_FORMAT_D16_UNORM_S8_UINT FORMAT_D16_UNORM_S8_UINT} specifies a two-component, 24-bit format that has 16 unsigned normalized bits in the depth component and 8 unsigned integer bits in the stencil component.
  • *
  • {@link #VK_FORMAT_D24_UNORM_S8_UINT FORMAT_D24_UNORM_S8_UINT} specifies a two-component, 32-bit packed format that has 8 unsigned integer bits in the stencil component, and 24 unsigned normalized bits in the depth component.
  • *
  • {@link #VK_FORMAT_D32_SFLOAT_S8_UINT FORMAT_D32_SFLOAT_S8_UINT} specifies a two-component format that has 32 signed float bits in the depth component and 8 unsigned integer bits in the stencil component. There are optionally 24 bits that are unused. (Together with {@link #VK_FORMAT_D32_SFLOAT FORMAT_D32_SFLOAT} and {@link #VK_FORMAT_D24_UNORM_S8_UINT FORMAT_D24_UNORM_S8_UINT}, this format is a candidate in the depth-format selection sketch below.)
  • *
  • {@link #VK_FORMAT_BC1_RGB_UNORM_BLOCK FORMAT_BC1_RGB_UNORM_BLOCK} specifies a three-component, block-compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGB texel data. This format has no alpha and is considered opaque.
  • *
  • {@link #VK_FORMAT_BC1_RGB_SRGB_BLOCK FORMAT_BC1_RGB_SRGB_BLOCK} specifies a three-component, block-compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGB texel data with sRGB nonlinear encoding. This format has no alpha and is considered opaque.
  • *
  • {@link #VK_FORMAT_BC1_RGBA_UNORM_BLOCK FORMAT_BC1_RGBA_UNORM_BLOCK} specifies a four-component, block-compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGB texel data, and provides 1 bit of alpha.
  • *
  • {@link #VK_FORMAT_BC1_RGBA_SRGB_BLOCK FORMAT_BC1_RGBA_SRGB_BLOCK} specifies a four-component, block-compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGB texel data with sRGB nonlinear encoding, and provides 1 bit of alpha.
  • *
  • {@link #VK_FORMAT_BC2_UNORM_BLOCK FORMAT_BC2_UNORM_BLOCK} specifies a four-component, block-compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGBA texel data with the first 64 bits encoding alpha values followed by 64 bits encoding RGB values.
  • *
  • {@link #VK_FORMAT_BC2_SRGB_BLOCK FORMAT_BC2_SRGB_BLOCK} specifies a four-component, block-compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGBA texel data with the first 64 bits encoding alpha values followed by 64 bits encoding RGB values with sRGB nonlinear encoding.
  • *
  • {@link #VK_FORMAT_BC3_UNORM_BLOCK FORMAT_BC3_UNORM_BLOCK} specifies a four-component, block-compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGBA texel data with the first 64 bits encoding alpha values followed by 64 bits encoding RGB values.
  • *
  • {@link #VK_FORMAT_BC3_SRGB_BLOCK FORMAT_BC3_SRGB_BLOCK} specifies a four-component, block-compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGBA texel data with the first 64 bits encoding alpha values followed by 64 bits encoding RGB values with sRGB nonlinear encoding.
  • *
  • {@link #VK_FORMAT_BC4_UNORM_BLOCK FORMAT_BC4_UNORM_BLOCK} specifies a one-component, block-compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized red texel data.
  • *
  • {@link #VK_FORMAT_BC4_SNORM_BLOCK FORMAT_BC4_SNORM_BLOCK} specifies a one-component, block-compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of signed normalized red texel data.
  • *
  • {@link #VK_FORMAT_BC5_UNORM_BLOCK FORMAT_BC5_UNORM_BLOCK} specifies a two-component, block-compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RG texel data with the first 64 bits encoding red values followed by 64 bits encoding green values.
  • *
  • {@link #VK_FORMAT_BC5_SNORM_BLOCK FORMAT_BC5_SNORM_BLOCK} specifies a two-component, block-compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of signed normalized RG texel data with the first 64 bits encoding red values followed by 64 bits encoding green values.
  • *
  • {@link #VK_FORMAT_BC6H_UFLOAT_BLOCK FORMAT_BC6H_UFLOAT_BLOCK} specifies a three-component, block-compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned floating-point RGB texel data.
  • *
  • {@link #VK_FORMAT_BC6H_SFLOAT_BLOCK FORMAT_BC6H_SFLOAT_BLOCK} specifies a three-component, block-compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of signed floating-point RGB texel data.
  • *
  • {@link #VK_FORMAT_BC7_UNORM_BLOCK FORMAT_BC7_UNORM_BLOCK} specifies a four-component, block-compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_BC7_SRGB_BLOCK FORMAT_BC7_SRGB_BLOCK} specifies a four-component, block-compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK FORMAT_ETC2_R8G8B8_UNORM_BLOCK} specifies a three-component, ETC2 compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGB texel data. This format has no alpha and is considered opaque.
  • *
  • {@link #VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK FORMAT_ETC2_R8G8B8_SRGB_BLOCK} specifies a three-component, ETC2 compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGB texel data with sRGB nonlinear encoding. This format has no alpha and is considered opaque.
  • *
  • {@link #VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK} specifies a four-component, ETC2 compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGB texel data, and provides 1 bit of alpha.
  • *
  • {@link #VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK} specifies a four-component, ETC2 compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGB texel data with sRGB nonlinear encoding, and provides 1 bit of alpha.
  • *
  • {@link #VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK} specifies a four-component, ETC2 compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGBA texel data with the first 64 bits encoding alpha values followed by 64 bits encoding RGB values.
  • *
  • {@link #VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK} specifies a four-component, ETC2 compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGBA texel data with the first 64 bits encoding alpha values followed by 64 bits encoding RGB values with sRGB nonlinear encoding applied.
  • *
  • {@link #VK_FORMAT_EAC_R11_UNORM_BLOCK FORMAT_EAC_R11_UNORM_BLOCK} specifies a one-component, ETC2 compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized red texel data.
  • *
  • {@link #VK_FORMAT_EAC_R11_SNORM_BLOCK FORMAT_EAC_R11_SNORM_BLOCK} specifies a one-component, ETC2 compressed format where each 64-bit compressed texel block encodes a 4×4 rectangle of signed normalized red texel data.
  • *
  • {@link #VK_FORMAT_EAC_R11G11_UNORM_BLOCK FORMAT_EAC_R11G11_UNORM_BLOCK} specifies a two-component, ETC2 compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RG texel data with the first 64 bits encoding red values followed by 64 bits encoding green values.
  • *
  • {@link #VK_FORMAT_EAC_R11G11_SNORM_BLOCK FORMAT_EAC_R11G11_SNORM_BLOCK} specifies a two-component, ETC2 compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of signed normalized RG texel data with the first 64 bits encoding red values followed by 64 bits encoding green values.
  • *
  • {@link #VK_FORMAT_ASTC_4x4_UNORM_BLOCK FORMAT_ASTC_4x4_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_4x4_SRGB_BLOCK FORMAT_ASTC_4x4_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 4×4 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_5x4_UNORM_BLOCK FORMAT_ASTC_5x4_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 5×4 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_5x4_SRGB_BLOCK FORMAT_ASTC_5x4_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 5×4 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_5x5_UNORM_BLOCK FORMAT_ASTC_5x5_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 5×5 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_5x5_SRGB_BLOCK FORMAT_ASTC_5x5_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 5×5 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_6x5_UNORM_BLOCK FORMAT_ASTC_6x5_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 6×5 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_6x5_SRGB_BLOCK FORMAT_ASTC_6x5_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 6×5 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_6x6_UNORM_BLOCK FORMAT_ASTC_6x6_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 6×6 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_6x6_SRGB_BLOCK FORMAT_ASTC_6x6_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 6×6 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_8x5_UNORM_BLOCK FORMAT_ASTC_8x5_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes an 8×5 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_8x5_SRGB_BLOCK FORMAT_ASTC_8x5_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes an 8×5 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_8x6_UNORM_BLOCK FORMAT_ASTC_8x6_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes an 8×6 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_8x6_SRGB_BLOCK FORMAT_ASTC_8x6_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes an 8×6 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_8x8_UNORM_BLOCK FORMAT_ASTC_8x8_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes an 8×8 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_8x8_SRGB_BLOCK FORMAT_ASTC_8x8_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes an 8×8 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_10x5_UNORM_BLOCK FORMAT_ASTC_10x5_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 10×5 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_10x5_SRGB_BLOCK FORMAT_ASTC_10x5_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 10×5 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_10x6_UNORM_BLOCK FORMAT_ASTC_10x6_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 10×6 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_10x6_SRGB_BLOCK FORMAT_ASTC_10x6_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 10×6 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_10x8_UNORM_BLOCK FORMAT_ASTC_10x8_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 10×8 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_10x8_SRGB_BLOCK FORMAT_ASTC_10x8_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 10×8 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_10x10_UNORM_BLOCK FORMAT_ASTC_10x10_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 10×10 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_10x10_SRGB_BLOCK FORMAT_ASTC_10x10_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 10×10 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_12x10_UNORM_BLOCK FORMAT_ASTC_12x10_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 12×10 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_12x10_SRGB_BLOCK FORMAT_ASTC_12x10_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 12×10 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link #VK_FORMAT_ASTC_12x12_UNORM_BLOCK FORMAT_ASTC_12x12_UNORM_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 12×12 rectangle of unsigned normalized RGBA texel data.
  • *
  • {@link #VK_FORMAT_ASTC_12x12_SRGB_BLOCK FORMAT_ASTC_12x12_SRGB_BLOCK} specifies a four-component, ASTC compressed format where each 128-bit compressed texel block encodes a 12×12 rectangle of unsigned normalized RGBA texel data with sRGB nonlinear encoding applied to the RGB components.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G8B8G8R8_422_UNORM_KHR FORMAT_G8B8G8R8_422_UNORM_KHR} specifies a four-component, 32-bit format containing a pair of G components, an R component, and a B component, collectively encoding a 2×1 rectangle of unsigned normalized RGB texel data. One G value is present at each i coordinate, with the B and R values shared across both G values and thus recorded at half the horizontal resolution of the image. This format has an 8-bit G component for the even i coordinate in byte 0, an 8-bit B component in byte 1, an 8-bit G component for the odd i coordinate in byte 2, and an 8-bit R component in byte 3. Images in this format must be defined with a width that is a multiple of two. For the purposes of the constraints on copy extents, this format is treated as a compressed format with a 2×1 compressed texel block.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_B8G8R8G8_422_UNORM_KHR FORMAT_B8G8R8G8_422_UNORM_KHR} specifies a four-component, 32-bit format containing a pair of G components, an R component, and a B component, collectively encoding a 2×1 rectangle of unsigned normalized RGB texel data. One G value is present at each i coordinate, with the B and R values shared across both G values and thus recorded at half the horizontal resolution of the image. This format has an 8-bit B component in byte 0, an 8-bit G component for the even i coordinate in byte 1, an 8-bit R component in byte 2, and an 8-bit G component for the odd i coordinate in byte 3. Images in this format must be defined with a width that is a multiple of two. For the purposes of the constraints on copy extents, this format is treated as a compressed format with a 2×1 compressed texel block.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR} specifies a unsigned normalized multi-planar format that has an 8-bit G component in plane 0, an 8-bit B component in plane 1, and an 8-bit R component in plane 2. The horizontal and vertical dimensions of the R and B planes are halved relative to the image dimensions, and each R and B component is shared with the G components for which floor(iG × 0.5) = iB = iR and floor(jG × 0.5) = jB = jR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane. Images in this format must be defined with a width and height that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR} specifies an unsigned normalized multi-planar format that has an 8-bit G component in plane 0, and a two-component, 16-bit BR plane 1 consisting of an 8-bit B component in byte 0 and an 8-bit R component in byte 1. The horizontal and vertical dimensions of the BR plane are halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR and floor(jG × 0.5) = jB = jR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the BR plane (see the plane-layout sketch after this list). Images in this format must be defined with a width and height that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR} specifies a unsigned normalized multi-planar format that has an 8-bit G component in plane 0, an 8-bit B component in plane 1, and an 8-bit R component in plane 2. The horizontal dimension of the R and B plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane. Images in this format must be defined with a width that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR} specifies a unsigned normalized multi-planar format that has an 8-bit G component in plane 0, and a two-component, 16-bit BR plane 1 consisting of an 8-bit B component in byte 0 and an 8-bit R component in byte 1. The horizontal dimensions of the BR plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the BR plane. Images in this format must be defined with a width that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR} specifies a unsigned normalized multi-planar format that has an 8-bit G component in plane 0, an 8-bit B component in plane 1, and an 8-bit R component in plane 2. Each plane has the same dimensions and each R, G and B component contributes to a single texel. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_R10X6_UNORM_PACK16_KHR FORMAT_R10X6_UNORM_PACK16_KHR} specifies a one-component, 16-bit unsigned normalized format that has a single 10-bit R component in the top 10 bits of a 16-bit word, with the bottom 6 bits set to 0.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR FORMAT_R10X6G10X6_UNORM_2PACK16_KHR} specifies a two-component, 32-bit unsigned normalized format that has a 10-bit R component in the top 10 bits of the word in bytes 0..1, and a 10-bit G component in the top 10 bits of the word in bytes 2..3, with the bottom 6 bits of each word set to 0.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR} specifies a four-component, 64-bit unsigned normalized format that has a 10-bit R component in the top 10 bits of the word in bytes 0..1, a 10-bit G component in the top 10 bits of the word in bytes 2..3, a 10-bit B component in the top 10 bits of the word in bytes 4..5, and a 10-bit A component in the top 10 bits of the word in bytes 6..7, with the bottom 6 bits of each word set to 0.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR} specifies a four-component, 64-bit format containing a pair of G components, an R component, and a B component, collectively encoding a 2×1 rectangle of unsigned normalized RGB texel data. One G value is present at each i coordinate, with the B and R values shared across both G values and thus recorded at half the horizontal resolution of the image. This format has a 10-bit G component for the even i coordinate in the top 10 bits of the word in bytes 0..1, a 10-bit B component in the top 10 bits of the word in bytes 2..3, a 10-bit G component for the odd i coordinate in the top 10 bits of the word in bytes 4..5, and a 10-bit R component in the top 10 bits of the word in bytes 6..7, with the bottom 6 bits of each word set to 0. Images in this format must be defined with a width that is a multiple of two. For the purposes of the constraints on copy extents, this format is treated as a compressed format with a 2×1 compressed texel block.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR} specifies a four-component, 64-bit format containing a pair of G components, an R component, and a B component, collectively encoding a 2×1 rectangle of unsigned normalized RGB texel data. One G value is present at each i coordinate, with the B and R values shared across both G values and thus recorded at half the horizontal resolution of the image. This format has a 10-bit B component in the top 10 bits of the word in bytes 0..1, a 10-bit G component for the even i coordinate in the top 10 bits of the word in bytes 2..3, a 10-bit R component in the top 10 bits of the word in bytes 4..5, and a 10-bit G component for the odd i coordinate in the top 10 bits of the word in bytes 6..7, with the bottom 6 bits of each word set to 0. Images in this format must be defined with a width that is a multiple of two. For the purposes of the constraints on copy extents, this format is treated as a compressed format with a 2×1 compressed texel block.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR} specifies a unsigned normalized multi-planar format that has a 10-bit G component in the top 10 bits of each 16-bit word of plane 0, a 10-bit B component in the top 10 bits of each 16-bit word of plane 1, and a 10-bit R component in the top 10 bits of each 16-bit word of plane 2, with the bottom 6 bits of each word set to 0. The horizontal and vertical dimensions of the R and B planes are halved relative to the image dimensions, and each R and B component is shared with the G components for which floor(iG × 0.5) = iB = iR and floor(jG × 0.5) = jB = jR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane. Images in this format must be defined with a width and height that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR} specifies a unsigned normalized multi-planar format that has a 10-bit G component in the top 10 bits of each 16-bit word of plane 0, and a two-component, 32-bit BR plane 1 consisting of a 10-bit B component in the top 10 bits of the word in bytes 0..1, and a 10-bit R component in the top 10 bits of the word in bytes 2..3, the bottom 6 bits of each word set to 0. The horizontal and vertical dimensions of the BR plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR and floor(jG × 0.5) = jB = jR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the BR plane. Images in this format must be defined with a width and height that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR} specifies a unsigned normalized multi-planar format that has a 10-bit G component in the top 10 bits of each 16-bit word of plane 0, a 10-bit B component in the top 10 bits of each 16-bit word of plane 1, and a 10-bit R component in the top 10 bits of each 16-bit word of plane 2, with the bottom 6 bits of each word set to 0. The horizontal dimension of the R and B plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane. Images in this format must be defined with a width that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR} specifies a unsigned normalized multi-planar format that has a 10-bit G component in the top 10 bits of each 16-bit word of plane 0, and a two-component, 32-bit BR plane 1 consisting of a 10-bit B component in the top 10 bits of the word in bytes 0..1, and a 10-bit R component in the top 10 bits of the word in bytes 2..3, the bottom 6 bits of each word set to 0. The horizontal dimensions of the BR plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the BR plane. Images in this format must be defined with a width that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR} specifies a unsigned normalized multi-planar format that has a 10-bit G component in the top 10 bits of each 16-bit word of plane 0, a 10-bit B component in the top 10 bits of each 16-bit word of plane 1, and a 10-bit R component in the top 10 bits of each 16-bit word of plane 2, with the bottom 6 bits of each word set to 0. Each plane has the same dimensions and each R, G and B component contributes to a single texel. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_R12X4_UNORM_PACK16_KHR FORMAT_R12X4_UNORM_PACK16_KHR} specifies a one-component, 16-bit unsigned normalized format that has a single 12-bit R component in the top 12 bits of a 16-bit word, with the bottom 4 bits set to 0.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR FORMAT_R12X4G12X4_UNORM_2PACK16_KHR} specifies a two-component, 32-bit unsigned normalized format that has a 12-bit R component in the top 12 bits of the word in bytes 0..1, and a 12-bit G component in the top 12 bits of the word in bytes 2..3, with the bottom 4 bits of each word set to 0.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR} specifies a four-component, 64-bit unsigned normalized format that has a 12-bit R component in the top 12 bits of the word in bytes 0..1, a 12-bit G component in the top 12 bits of the word in bytes 2..3, a 12-bit B component in the top 12 bits of the word in bytes 4..5, and a 12-bit A component in the top 12 bits of the word in bytes 6..7, with the bottom 4 bits of each word set to 0.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR} specifies a four-component, 64-bit format containing a pair of G components, an R component, and a B component, collectively encoding a 2×1 rectangle of unsigned normalized RGB texel data. One G value is present at each i coordinate, with the B and R values shared across both G values and thus recorded at half the horizontal resolution of the image. This format has a 12-bit G component for the even i coordinate in the top 12 bits of the word in bytes 0..1, a 12-bit B component in the top 12 bits of the word in bytes 2..3, a 12-bit G component for the odd i coordinate in the top 12 bits of the word in bytes 4..5, and a 12-bit R component in the top 12 bits of the word in bytes 6..7, with the bottom 4 bits of each word set to 0. Images in this format must be defined with a width that is a multiple of two. For the purposes of the constraints on copy extents, this format is treated as a compressed format with a 2×1 compressed texel block.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR} specifies a four-component, 64-bit format containing a pair of G components, an R component, and a B component, collectively encoding a 2×1 rectangle of unsigned normalized RGB texel data. One G value is present at each i coordinate, with the B and R values shared across both G values and thus recorded at half the horizontal resolution of the image. This format has a 12-bit B component in the top 12 bits of the word in bytes 0..1, a 12-bit G component for the even i coordinate in the top 12 bits of the word in bytes 2..3, a 12-bit R component in the top 12 bits of the word in bytes 4..5, and a 12-bit G component for the odd i coordinate in the top 12 bits of the word in bytes 6..7, with the bottom 4 bits of each word set to 0. Images in this format must be defined with a width that is a multiple of two. For the purposes of the constraints on copy extents, this format is treated as a compressed format with a 2×1 compressed texel block.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR} specifies a unsigned normalized multi-planar format that has a 12-bit G component in the top 12 bits of each 16-bit word of plane 0, a 12-bit B component in the top 12 bits of each 16-bit word of plane 1, and a 12-bit R component in the top 12 bits of each 16-bit word of plane 2, with the bottom 4 bits of each word set to 0. The horizontal and vertical dimensions of the R and B planes are halved relative to the image dimensions, and each R and B component is shared with the G components for which floor(iG × 0.5) = iB = iR and floor(jG × 0.5) = jB = jR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane. Images in this format must be defined with a width and height that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR} specifies a unsigned normalized multi-planar format that has a 12-bit G component in the top 12 bits of each 16-bit word of plane 0, and a two-component, 32-bit BR plane 1 consisting of a 12-bit B component in the top 12 bits of the word in bytes 0..1, and a 12-bit R component in the top 12 bits of the word in bytes 2..3, the bottom 4 bits of each word set to 0. The horizontal and vertical dimensions of the BR plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR and floor(jG × 0.5) = jB = jR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the BR plane. Images in this format must be defined with a width and height that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR} specifies a unsigned normalized multi-planar format that has a 12-bit G component in the top 12 bits of each 16-bit word of plane 0, a 12-bit B component in the top 12 bits of each 16-bit word of plane 1, and a 12-bit R component in the top 12 bits of each 16-bit word of plane 2, with the bottom 4 bits of each word set to 0. The horizontal dimension of the R and B plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane. Images in this format must be defined with a width that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR} specifies a unsigned normalized multi-planar format that has a 12-bit G component in the top 12 bits of each 16-bit word of plane 0, and a two-component, 32-bit BR plane 1 consisting of a 12-bit B component in the top 12 bits of the word in bytes 0..1, and a 12-bit R component in the top 12 bits of the word in bytes 2..3, the bottom 4 bits of each word set to 0. The horizontal dimensions of the BR plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the BR plane. Images in this format must be defined with a width that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR} specifies a unsigned normalized multi-planar format that has a 12-bit G component in the top 12 bits of each 16-bit word of plane 0, a 12-bit B component in the top 12 bits of each 16-bit word of plane 1, and a 12-bit R component in the top 12 bits of each 16-bit word of plane 2, with the bottom 4 bits of each word set to 0. Each plane has the same dimensions and each R, G and B component contributes to a single texel. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G16B16G16R16_422_UNORM_KHR FORMAT_G16B16G16R16_422_UNORM_KHR} specifies a four-component, 64-bit format containing a pair of G components, an R component, and a B component, collectively encoding a 2×1 rectangle of unsigned normalized RGB texel data. One G value is present at each i coordinate, with the B and R values shared across both G values and thus recorded at half the horizontal resolution of the image. This format has a 16-bit G component for the even i coordinate in the word in bytes 0..1, a 16-bit B component in the word in bytes 2..3, a 16-bit G component for the odd i coordinate in the word in bytes 4..5, and a 16-bit R component in the word in bytes 6..7. Images in this format must be defined with a width that is a multiple of two. For the purposes of the constraints on copy extents, this format is treated as a compressed format with a 2×1 compressed texel block.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_B16G16R16G16_422_UNORM_KHR FORMAT_B16G16R16G16_422_UNORM_KHR} specifies a four-component, 64-bit format containing a pair of G components, an R component, and a B component, collectively encoding a 2×1 rectangle of unsigned normalized RGB texel data. One G value is present at each i coordinate, with the B and R values shared across both G values and thus recorded at half the horizontal resolution of the image. This format has a 16-bit B component in the word in bytes 0..1, a 16-bit G component for the even i coordinate in the word in bytes 2..3, a 16-bit R component in the word in bytes 4..5, and a 16-bit G component for the odd i coordinate in the word in bytes 6..7. Images in this format must be defined with a width that is a multiple of two. For the purposes of the constraints on copy extents, this format is treated as a compressed format with a 2×1 compressed texel block.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR} specifies a unsigned normalized multi-planar format that has a 16-bit G component in each 16-bit word of plane 0, a 16-bit B component in each 16-bit word of plane 1, and a 16-bit R component in each 16-bit word of plane 2. The horizontal and vertical dimensions of the R and B planes are halved relative to the image dimensions, and each R and B component is shared with the G components for which floor(iG × 0.5) = iB = iR and floor(jG × 0.5) = jB = jR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane. Images in this format must be defined with a width and height that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR} specifies a unsigned normalized multi-planar format that has a 16-bit G component in each 16-bit word of plane 0, and a two-component, 32-bit BR plane 1 consisting of a 16-bit B component in the word in bytes 0..1, and a 16-bit R component in the word in bytes 2..3. The horizontal and vertical dimensions of the BR plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR and floor(jG × 0.5) = jB = jR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the BR plane. Images in this format must be defined with a width and height that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR} specifies a unsigned normalized multi-planar format that has a 16-bit G component in each 16-bit word of plane 0, a 16-bit B component in each 16-bit word of plane 1, and a 16-bit R component in each 16-bit word of plane 2. The horizontal dimension of the R and B plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane. Images in this format must be defined with a width that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR} specifies a unsigned normalized multi-planar format that has a 16-bit G component in each 16-bit word of plane 0, and a two-component, 32-bit BR plane 1 consisting of a 16-bit B component in the word in bytes 0..1, and a 16-bit R component in the word in bytes 2..3. The horizontal dimensions of the BR plane is halved relative to the image dimensions, and each R and B value is shared with the G components for which floor(iG × 0.5) = iB = iR. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the BR plane. Images in this format must be defined with a width that is a multiple of two.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR} specifies a unsigned normalized multi-planar format that has a 16-bit G component in each 16-bit word of plane 0, a 16-bit B component in each 16-bit word of plane 1, and a 16-bit R component in each 16-bit word of plane 2. Each plane has the same dimensions and each R, G and B component contributes to a single texel. The location of each plane when this image is in linear layout can be determined via {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout}, using {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} for the G plane, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} for the B plane, and {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} for the R plane.
  • *
* *
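For the multi-planar formats above, each entry notes that per-plane locations of a linearly tiled image are retrieved with {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout} and the plane aspect bits. The following is an illustrative sketch only, not part of this generated file; {@code device} and {@code image} are assumed to be a valid {@code VkDevice} and a linearly tiled image created with {@link KHRSamplerYcbcrConversion#VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR}.

import org.lwjgl.system.MemoryStack;
import org.lwjgl.vulkan.*;

import static org.lwjgl.vulkan.KHRSamplerYcbcrConversion.*;
import static org.lwjgl.vulkan.VK10.*;

public final class PlaneLayouts {

    // Prints the offset and row pitch of each plane of a 2-plane 4:2:0 image.
    // "device" and "image" are assumptions of this sketch: a valid VkDevice and a
    // linearly tiled VkImage handle using FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR.
    public static void printPlaneLayouts(VkDevice device, long image) {
        try (MemoryStack stack = MemoryStack.stackPush()) {
            VkImageSubresource subresource = VkImageSubresource.callocStack(stack)
                .mipLevel(0)
                .arrayLayer(0);
            VkSubresourceLayout layout = VkSubresourceLayout.mallocStack(stack);

            // Plane 0 holds the G (luma) samples.
            subresource.aspectMask(VK_IMAGE_ASPECT_PLANE_0_BIT_KHR);
            vkGetImageSubresourceLayout(device, image, subresource, layout);
            System.out.println("G plane:  offset=" + layout.offset() + " rowPitch=" + layout.rowPitch());

            // Plane 1 holds the interleaved BR (chroma) samples at half resolution.
            subresource.aspectMask(VK_IMAGE_ASPECT_PLANE_1_BIT_KHR);
            vkGetImageSubresourceLayout(device, image, subresource, layout);
            System.out.println("BR plane: offset=" + layout.offset() + " rowPitch=" + layout.rowPitch());
        }
    }
}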
See Also
* *

{@link VkAttachmentDescription}, {@link VkBufferViewCreateInfo}, {@link VkImageCreateInfo}, {@link VkImageFormatListCreateInfoKHR}, {@link VkImageViewCreateInfo}, {@link VkPhysicalDeviceImageFormatInfo2KHR}, {@link VkPhysicalDeviceSparseImageFormatInfo2KHR}, {@link VkSamplerYcbcrConversionCreateInfoKHR}, {@link VkSurfaceFormatKHR}, {@link VkSwapchainCreateInfoKHR}, {@link VkVertexInputAttributeDescription}, {@link NVExternalMemoryCapabilities#vkGetPhysicalDeviceExternalImageFormatPropertiesNV GetPhysicalDeviceExternalImageFormatPropertiesNV}, {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}, {@link KHRGetPhysicalDeviceProperties2#vkGetPhysicalDeviceFormatProperties2KHR GetPhysicalDeviceFormatProperties2KHR}, {@link #vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties}, {@link #vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties}
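As referenced from the depth/stencil format entries above, support for a specific format must usually be queried at runtime rather than assumed. The following is a minimal, illustrative sketch only (not part of this generated file): it returns the first candidate whose optimal-tiling features include {@link #VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT}, assuming {@code physicalDevice} is a valid {@code VkPhysicalDevice} obtained elsewhere.

import org.lwjgl.system.MemoryStack;
import org.lwjgl.vulkan.*;

import static org.lwjgl.vulkan.VK10.*;

public final class DepthFormatChooser {

    // Returns the first depth/stencil format usable as an optimally tiled attachment.
    // "physicalDevice" is an assumption of this sketch: a valid, already-retrieved VkPhysicalDevice.
    public static int chooseDepthFormat(VkPhysicalDevice physicalDevice) {
        int[] candidates = {
            VK_FORMAT_D32_SFLOAT,
            VK_FORMAT_D32_SFLOAT_S8_UINT,
            VK_FORMAT_D24_UNORM_S8_UINT
        };
        try (MemoryStack stack = MemoryStack.stackPush()) {
            VkFormatProperties props = VkFormatProperties.mallocStack(stack);
            for (int format : candidates) {
                vkGetPhysicalDeviceFormatProperties(physicalDevice, format, props);
                if ((props.optimalTilingFeatures() & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0) {
                    return format;
                }
            }
        }
        throw new IllegalStateException("None of the candidate depth formats is supported for optimal tiling");
    }
}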

*/ public static final int VK_FORMAT_UNDEFINED = 0, VK_FORMAT_R4G4_UNORM_PACK8 = 1, VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2, VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3, VK_FORMAT_R5G6B5_UNORM_PACK16 = 4, VK_FORMAT_B5G6R5_UNORM_PACK16 = 5, VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6, VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7, VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8, VK_FORMAT_R8_UNORM = 9, VK_FORMAT_R8_SNORM = 10, VK_FORMAT_R8_USCALED = 11, VK_FORMAT_R8_SSCALED = 12, VK_FORMAT_R8_UINT = 13, VK_FORMAT_R8_SINT = 14, VK_FORMAT_R8_SRGB = 15, VK_FORMAT_R8G8_UNORM = 16, VK_FORMAT_R8G8_SNORM = 17, VK_FORMAT_R8G8_USCALED = 18, VK_FORMAT_R8G8_SSCALED = 19, VK_FORMAT_R8G8_UINT = 20, VK_FORMAT_R8G8_SINT = 21, VK_FORMAT_R8G8_SRGB = 22, VK_FORMAT_R8G8B8_UNORM = 23, VK_FORMAT_R8G8B8_SNORM = 24, VK_FORMAT_R8G8B8_USCALED = 25, VK_FORMAT_R8G8B8_SSCALED = 26, VK_FORMAT_R8G8B8_UINT = 27, VK_FORMAT_R8G8B8_SINT = 28, VK_FORMAT_R8G8B8_SRGB = 29, VK_FORMAT_B8G8R8_UNORM = 30, VK_FORMAT_B8G8R8_SNORM = 31, VK_FORMAT_B8G8R8_USCALED = 32, VK_FORMAT_B8G8R8_SSCALED = 33, VK_FORMAT_B8G8R8_UINT = 34, VK_FORMAT_B8G8R8_SINT = 35, VK_FORMAT_B8G8R8_SRGB = 36, VK_FORMAT_R8G8B8A8_UNORM = 37, VK_FORMAT_R8G8B8A8_SNORM = 38, VK_FORMAT_R8G8B8A8_USCALED = 39, VK_FORMAT_R8G8B8A8_SSCALED = 40, VK_FORMAT_R8G8B8A8_UINT = 41, VK_FORMAT_R8G8B8A8_SINT = 42, VK_FORMAT_R8G8B8A8_SRGB = 43, VK_FORMAT_B8G8R8A8_UNORM = 44, VK_FORMAT_B8G8R8A8_SNORM = 45, VK_FORMAT_B8G8R8A8_USCALED = 46, VK_FORMAT_B8G8R8A8_SSCALED = 47, VK_FORMAT_B8G8R8A8_UINT = 48, VK_FORMAT_B8G8R8A8_SINT = 49, VK_FORMAT_B8G8R8A8_SRGB = 50, VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51, VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52, VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53, VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54, VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55, VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56, VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57, VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58, VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59, VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60, VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61, VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62, VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63, VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64, VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65, VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66, VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67, VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68, VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69, VK_FORMAT_R16_UNORM = 70, VK_FORMAT_R16_SNORM = 71, VK_FORMAT_R16_USCALED = 72, VK_FORMAT_R16_SSCALED = 73, VK_FORMAT_R16_UINT = 74, VK_FORMAT_R16_SINT = 75, VK_FORMAT_R16_SFLOAT = 76, VK_FORMAT_R16G16_UNORM = 77, VK_FORMAT_R16G16_SNORM = 78, VK_FORMAT_R16G16_USCALED = 79, VK_FORMAT_R16G16_SSCALED = 80, VK_FORMAT_R16G16_UINT = 81, VK_FORMAT_R16G16_SINT = 82, VK_FORMAT_R16G16_SFLOAT = 83, VK_FORMAT_R16G16B16_UNORM = 84, VK_FORMAT_R16G16B16_SNORM = 85, VK_FORMAT_R16G16B16_USCALED = 86, VK_FORMAT_R16G16B16_SSCALED = 87, VK_FORMAT_R16G16B16_UINT = 88, VK_FORMAT_R16G16B16_SINT = 89, VK_FORMAT_R16G16B16_SFLOAT = 90, VK_FORMAT_R16G16B16A16_UNORM = 91, VK_FORMAT_R16G16B16A16_SNORM = 92, VK_FORMAT_R16G16B16A16_USCALED = 93, VK_FORMAT_R16G16B16A16_SSCALED = 94, VK_FORMAT_R16G16B16A16_UINT = 95, VK_FORMAT_R16G16B16A16_SINT = 96, VK_FORMAT_R16G16B16A16_SFLOAT = 97, VK_FORMAT_R32_UINT = 98, VK_FORMAT_R32_SINT = 99, VK_FORMAT_R32_SFLOAT = 100, VK_FORMAT_R32G32_UINT = 101, VK_FORMAT_R32G32_SINT = 102, VK_FORMAT_R32G32_SFLOAT = 103, VK_FORMAT_R32G32B32_UINT = 104, VK_FORMAT_R32G32B32_SINT = 105, VK_FORMAT_R32G32B32_SFLOAT = 106, VK_FORMAT_R32G32B32A32_UINT = 107, VK_FORMAT_R32G32B32A32_SINT = 108, VK_FORMAT_R32G32B32A32_SFLOAT = 109, 
VK_FORMAT_R64_UINT = 110, VK_FORMAT_R64_SINT = 111, VK_FORMAT_R64_SFLOAT = 112, VK_FORMAT_R64G64_UINT = 113, VK_FORMAT_R64G64_SINT = 114, VK_FORMAT_R64G64_SFLOAT = 115, VK_FORMAT_R64G64B64_UINT = 116, VK_FORMAT_R64G64B64_SINT = 117, VK_FORMAT_R64G64B64_SFLOAT = 118, VK_FORMAT_R64G64B64A64_UINT = 119, VK_FORMAT_R64G64B64A64_SINT = 120, VK_FORMAT_R64G64B64A64_SFLOAT = 121, VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122, VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123, VK_FORMAT_D16_UNORM = 124, VK_FORMAT_X8_D24_UNORM_PACK32 = 125, VK_FORMAT_D32_SFLOAT = 126, VK_FORMAT_S8_UINT = 127, VK_FORMAT_D16_UNORM_S8_UINT = 128, VK_FORMAT_D24_UNORM_S8_UINT = 129, VK_FORMAT_D32_SFLOAT_S8_UINT = 130, VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131, VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132, VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133, VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134, VK_FORMAT_BC2_UNORM_BLOCK = 135, VK_FORMAT_BC2_SRGB_BLOCK = 136, VK_FORMAT_BC3_UNORM_BLOCK = 137, VK_FORMAT_BC3_SRGB_BLOCK = 138, VK_FORMAT_BC4_UNORM_BLOCK = 139, VK_FORMAT_BC4_SNORM_BLOCK = 140, VK_FORMAT_BC5_UNORM_BLOCK = 141, VK_FORMAT_BC5_SNORM_BLOCK = 142, VK_FORMAT_BC6H_UFLOAT_BLOCK = 143, VK_FORMAT_BC6H_SFLOAT_BLOCK = 144, VK_FORMAT_BC7_UNORM_BLOCK = 145, VK_FORMAT_BC7_SRGB_BLOCK = 146, VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147, VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148, VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149, VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150, VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151, VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152, VK_FORMAT_EAC_R11_UNORM_BLOCK = 153, VK_FORMAT_EAC_R11_SNORM_BLOCK = 154, VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155, VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156, VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157, VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158, VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159, VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160, VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161, VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162, VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163, VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164, VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165, VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166, VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167, VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168, VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169, VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170, VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171, VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172, VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173, VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174, VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175, VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176, VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177, VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178, VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179, VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180, VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181, VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182, VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183, VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184; /** * VkFormatFeatureFlagBits - Bitmask specifying features supported by a buffer * *
Description
* *

The following bits may be set in {@code linearTilingFeatures} and {@code optimalTilingFeatures}, specifying that the features are supported by images or image views created with the queried {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}{@code ::format}:

* *
    *
  • {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT FORMAT_FEATURE_SAMPLED_IMAGE_BIT} specifies that an image view can be sampled from.
  • *
  • {@link #VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT FORMAT_FEATURE_STORAGE_IMAGE_BIT} specifies that an image view can be used as a storage image.
  • *
  • {@link #VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT} specifies that an image view can be used as a storage image that supports atomic operations.
  • *
  • {@link #VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT FORMAT_FEATURE_COLOR_ATTACHMENT_BIT} specifies that an image view can be used as a framebuffer color attachment and as an input attachment.
  • *
  • {@link #VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT} specifies that an image view can be used as a framebuffer color attachment that supports blending and as an input attachment.
  • *
  • {@link #VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT} specifies that an image view can be used as a framebuffer depth/stencil attachment and as an input attachment.
  • *
  • {@link #VK_FORMAT_FEATURE_BLIT_SRC_BIT FORMAT_FEATURE_BLIT_SRC_BIT} specifies that an image can be used as {@code srcImage} for the {@link #vkCmdBlitImage CmdBlitImage} command.
  • *
  • {@link #VK_FORMAT_FEATURE_BLIT_DST_BIT FORMAT_FEATURE_BLIT_DST_BIT} specifies that an image can be used as {@code dstImage} for the {@link #vkCmdBlitImage CmdBlitImage} command.
  • *
  • {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT} specifies that if {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT FORMAT_FEATURE_SAMPLED_IMAGE_BIT} is also set, an image view can be used with a sampler that has either of {@code magFilter} or {@code minFilter} set to {@link #VK_FILTER_LINEAR FILTER_LINEAR}, or {@code mipmapMode} set to {@link #VK_SAMPLER_MIPMAP_MODE_LINEAR SAMPLER_MIPMAP_MODE_LINEAR}. If {@link #VK_FORMAT_FEATURE_BLIT_SRC_BIT FORMAT_FEATURE_BLIT_SRC_BIT} is also set, an image can be used as the {@code srcImage} to {@link #vkCmdBlitImage CmdBlitImage} with a {@code filter} of {@link #VK_FILTER_LINEAR FILTER_LINEAR}. This bit must only be exposed for formats that also support the {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT FORMAT_FEATURE_SAMPLED_IMAGE_BIT} or {@link #VK_FORMAT_FEATURE_BLIT_SRC_BIT FORMAT_FEATURE_BLIT_SRC_BIT}. * If the format being queried is a depth/stencil format, this bit only indicates that the depth aspect (not the stencil aspect) of an image of this format supports linear filtering, and that linear filtering of the depth aspect is supported whether depth compare is enabled in the sampler or not. If this bit is not present, linear filtering with depth compare disabled is unsupported and linear filtering with depth compare enabled is supported, but may compute the filtered value in an implementation-dependent manner which differs from the normal rules of linear filtering. The resulting value must be in the range [0,1] and should be proportional to, or a weighted average of, the number of comparison passes or failures. *
  • *
  • {@link KHRMaintenance1#VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR} specifies that an image can be used as a source image for copy commands.
  • *
  • {@link KHRMaintenance1#VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR FORMAT_FEATURE_TRANSFER_DST_BIT_KHR} specifies that an image can be used as a destination image for copy commands and clear commands.
  • *
  • {@link EXTSamplerFilterMinmax#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT} specifies {@code VkImage} can be used as a sampled image with a min or max {@code VkSamplerReductionModeEXT}. This bit must only be exposed for formats that also support the {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT FORMAT_FEATURE_SAMPLED_IMAGE_BIT}.
  • *
  • {@link IMGFilterCubic#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG} specifies that {@code VkImage} can be used with a sampler that has either of {@code magFilter} or {@code minFilter} set to {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG}, or be the source image for a blit with {@code filter} set to {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG}. This bit must only be exposed for formats that also support the {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT FORMAT_FEATURE_SAMPLED_IMAGE_BIT}. If the format being queried is a depth/stencil format, this only indicates that the depth aspect is cubic filterable.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR} specifies that an application can define a sampler Y’CBCR conversion using this format as a source, and that an image of this format can be used with a {@link VkSamplerYcbcrConversionCreateInfoKHR} {@code xChromaOffset} and/or {@code yChromaOffset} of {@link KHRSamplerYcbcrConversion#VK_CHROMA_LOCATION_MIDPOINT_KHR CHROMA_LOCATION_MIDPOINT_KHR}. Otherwise both {@code xChromaOffset} and {@code yChromaOffset} must be {@link KHRSamplerYcbcrConversion#VK_CHROMA_LOCATION_COSITED_EVEN_KHR CHROMA_LOCATION_COSITED_EVEN_KHR}. If a format does not incorporate chroma downsampling (it is not a “422” or “420” format) but the implementation supports sampler Y’CBCR conversion for this format, the implementation must set {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR}.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR} specifies that an application can define a sampler Y’CBCR conversion using this format as a source, and that an image of this format can be used with a {@link VkSamplerYcbcrConversionCreateInfoKHR} {@code xChromaOffset} and/or {@code yChromaOffset} of {@link KHRSamplerYcbcrConversion#VK_CHROMA_LOCATION_COSITED_EVEN_KHR CHROMA_LOCATION_COSITED_EVEN_KHR}. Otherwise both {@code xChromaOffset} and {@code yChromaOffset} must be {@link KHRSamplerYcbcrConversion#VK_CHROMA_LOCATION_MIDPOINT_KHR CHROMA_LOCATION_MIDPOINT_KHR}. If neither {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR} nor {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR} is set, the application must not define a sampler Y’CBCR conversion using this format as a source.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR} specifies that the format can do linear sampler filtering (min/magFilter) whilst sampler Y’CBCR conversion is enabled.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR} specifies that the format can have different chroma, min, and mag filters.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR} specifies that reconstruction is explicit, as described in Chroma Reconstruction. If this bit is not present, reconstruction is implicit by default.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR} specifies that reconstruction can be forcibly made explicit by setting {@link VkSamplerYcbcrConversionCreateInfoKHR}{@code ::forceExplicitReconstruction} to {@link #VK_TRUE TRUE}.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_DISJOINT_BIT_KHR FORMAT_FEATURE_DISJOINT_BIT_KHR} specifies that a multi-planar image can have the {@link KHRSamplerYcbcrConversion#VK_IMAGE_CREATE_DISJOINT_BIT_KHR IMAGE_CREATE_DISJOINT_BIT_KHR} set during image creation. An implementation must not set {@link KHRSamplerYcbcrConversion#VK_FORMAT_FEATURE_DISJOINT_BIT_KHR FORMAT_FEATURE_DISJOINT_BIT_KHR} for single-plane formats.
  • *
* *

The following bits may be set in {@code bufferFeatures}, specifying that the features are supported by buffers or buffer views created with the queried {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}{@code ::format}:

* *
    *
  • {@link #VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT} specifies that the format can be used to create a buffer view that can be bound to a {@link #VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER} descriptor.
  • *
  • {@link #VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT} specifies that the format can be used to create a buffer view that can be bound to a {@link #VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER} descriptor.
  • *
  • {@link #VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT} specifies that atomic operations are supported on {@link #VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER} with this format.
  • *
  • {@link #VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT FORMAT_FEATURE_VERTEX_BUFFER_BIT} specifies that the format can be used as a vertex attribute format ({@link VkVertexInputAttributeDescription}{@code ::format}).
  • *
* *
See Also
* *

{@code VkFormatFeatureFlags}
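
As a usage illustration (not part of the generated bindings), the following minimal LWJGL sketch queries the feature flags of one format and tests two of the bits described above; {@code physicalDevice} is assumed to be a valid {@code VkPhysicalDevice} obtained elsewhere, and the chosen format is only an example.

// Sketch: query VkFormatProperties and test feature bits (assumes a valid physicalDevice).
try (MemoryStack stack = stackPush()) {
    VkFormatProperties props = VkFormatProperties.callocStack(stack);
    vkGetPhysicalDeviceFormatProperties(physicalDevice, VK_FORMAT_R8G8B8A8_UNORM, props);

    boolean sampledLinear =
        (props.optimalTilingFeatures() & VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT) != 0;
    boolean usableAsVertexFormat =
        (props.bufferFeatures() & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != 0;
    // Fall back to another format (or to linear tiling) if the required features are missing.
}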

*/ public static final int VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x1, VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x2, VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x4, VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x8, VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x10, VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x20, VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x40, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x80, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x100, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x200, VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x400, VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x800, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x1000; /** * VkImageType - Specifies the type of an image object * *
Description
* *
    *
  • {@link #VK_IMAGE_TYPE_1D IMAGE_TYPE_1D} specifies a one-dimensional image.
  • *
  • {@link #VK_IMAGE_TYPE_2D IMAGE_TYPE_2D} specifies a two-dimensional image.
  • *
  • {@link #VK_IMAGE_TYPE_3D IMAGE_TYPE_3D} specifies a three-dimensional image.
  • *
* *
See Also
* *

{@link VkImageCreateInfo}, {@link VkPhysicalDeviceImageFormatInfo2KHR}, {@link VkPhysicalDeviceSparseImageFormatInfo2KHR}, {@link NVExternalMemoryCapabilities#vkGetPhysicalDeviceExternalImageFormatPropertiesNV GetPhysicalDeviceExternalImageFormatPropertiesNV}, {@link #vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties}, {@link #vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties}

*/ public static final int VK_IMAGE_TYPE_1D = 0, VK_IMAGE_TYPE_2D = 1, VK_IMAGE_TYPE_3D = 2; /** * VkImageTiling - Specifies the tiling arrangement of data in an image * *
Description
* *
    *
  • {@link #VK_IMAGE_TILING_OPTIMAL IMAGE_TILING_OPTIMAL} specifies optimal tiling (texels are laid out in an implementation-dependent arrangement, for more optimal memory access).
  • *
  • {@link #VK_IMAGE_TILING_LINEAR IMAGE_TILING_LINEAR} specifies linear tiling (texels are laid out in memory in row-major order, possibly with some padding on each row).
  • *
* *
See Also
* *

{@link VkImageCreateInfo}, {@link VkPhysicalDeviceImageFormatInfo2KHR}, {@link VkPhysicalDeviceSparseImageFormatInfo2KHR}, {@link NVExternalMemoryCapabilities#vkGetPhysicalDeviceExternalImageFormatPropertiesNV GetPhysicalDeviceExternalImageFormatPropertiesNV}, {@link #vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties}, {@link #vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties}
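
To show how tiling enters the capability query, here is a hedged sketch using {@link #vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties}; {@code physicalDevice} is assumed valid and the format/usage combination is a placeholder, not a recommendation.

// Sketch: check whether a format supports optimal tiling for sampled-image usage.
try (MemoryStack stack = stackPush()) {
    VkImageFormatProperties props = VkImageFormatProperties.callocStack(stack);
    int err = vkGetPhysicalDeviceImageFormatProperties(
        physicalDevice,
        VK_FORMAT_R8G8B8A8_UNORM,
        VK_IMAGE_TYPE_2D,
        VK_IMAGE_TILING_OPTIMAL,
        VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
        0, // no VkImageCreateFlagBits
        props);
    if (err == VK_ERROR_FORMAT_NOT_SUPPORTED) {
        // Combination unsupported: try VK_IMAGE_TILING_LINEAR or another format.
    } else {
        int maxWidth = props.maxExtent().width(); // also see maxMipLevels(), sampleCounts()
    }
}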

*/ public static final int VK_IMAGE_TILING_OPTIMAL = 0, VK_IMAGE_TILING_LINEAR = 1; /** * VkImageUsageFlagBits - Bitmask specifying intended usage of an image * *
Description
* *
    *
  • {@link #VK_IMAGE_USAGE_TRANSFER_SRC_BIT IMAGE_USAGE_TRANSFER_SRC_BIT} specifies that the image can be used as the source of a transfer command.
  • *
  • {@link #VK_IMAGE_USAGE_TRANSFER_DST_BIT IMAGE_USAGE_TRANSFER_DST_BIT} specifies that the image can be used as the destination of a transfer command.
  • *
  • {@link #VK_IMAGE_USAGE_SAMPLED_BIT IMAGE_USAGE_SAMPLED_BIT} specifies that the image can be used to create a {@code VkImageView} suitable for occupying a {@code VkDescriptorSet} slot either of type {@link #VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE DESCRIPTOR_TYPE_SAMPLED_IMAGE} or {@link #VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER}, and be sampled by a shader.
  • *
  • {@link #VK_IMAGE_USAGE_STORAGE_BIT IMAGE_USAGE_STORAGE_BIT} specifies that the image can be used to create a {@code VkImageView} suitable for occupying a {@code VkDescriptorSet} slot of type {@link #VK_DESCRIPTOR_TYPE_STORAGE_IMAGE DESCRIPTOR_TYPE_STORAGE_IMAGE}.
  • *
  • {@link #VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT IMAGE_USAGE_COLOR_ATTACHMENT_BIT} specifies that the image can be used to create a {@code VkImageView} suitable for use as a color or resolve attachment in a {@code VkFramebuffer}.
  • *
  • {@link #VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT} specifies that the image can be used to create a {@code VkImageView} suitable for use as a depth/stencil attachment in a {@code VkFramebuffer}.
  • *
  • {@link #VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT} specifies that the memory bound to this image will have been allocated with the {@link #VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT} (see the “Memory Allocation” chapter for more detail). This bit can be set for any image that can be used to create a {@code VkImageView} suitable for use as a color, resolve, depth/stencil, or input attachment.
  • *
  • {@link #VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT IMAGE_USAGE_INPUT_ATTACHMENT_BIT} specifies that the image can be used to create a {@code VkImageView} suitable for occupying {@code VkDescriptorSet} slot of type {@link #VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT DESCRIPTOR_TYPE_INPUT_ATTACHMENT}; be read from a shader as an input attachment; and be used as an input attachment in a framebuffer.
  • *
* *
See Also
* *

{@code VkImageUsageFlags}
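
For context, a minimal (non-normative) sketch that combines several of these usage bits when creating a 2D image; {@code device} is assumed to be a valid {@code VkDevice}, and the extent and format are placeholders.

// Sketch: create a 2D image usable as a transfer destination and as a sampled image.
try (MemoryStack stack = stackPush()) {
    VkImageCreateInfo imageInfo = VkImageCreateInfo.callocStack(stack)
        .sType(VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO)
        .imageType(VK_IMAGE_TYPE_2D)
        .format(VK_FORMAT_R8G8B8A8_UNORM)
        .mipLevels(1)
        .arrayLayers(1)
        .samples(VK_SAMPLE_COUNT_1_BIT)
        .tiling(VK_IMAGE_TILING_OPTIMAL)
        .usage(VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT)
        .sharingMode(VK_SHARING_MODE_EXCLUSIVE)
        .initialLayout(VK_IMAGE_LAYOUT_UNDEFINED);
    imageInfo.extent().width(512).height(512).depth(1);

    LongBuffer pImage = stack.mallocLong(1);
    int err = vkCreateImage(device, imageInfo, null, pImage);
    if (err != VK_SUCCESS) {
        throw new IllegalStateException("vkCreateImage failed: " + err);
    }
    long image = pImage.get(0); // device memory must still be allocated and bound before use
}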

*/ public static final int VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x1, VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x2, VK_IMAGE_USAGE_SAMPLED_BIT = 0x4, VK_IMAGE_USAGE_STORAGE_BIT = 0x8, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x10, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x20, VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x40, VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x80; /** * VkImageCreateFlagBits - Bitmask specifying additional parameters of an image * *
Description
* *
    *
  • {@link #VK_IMAGE_CREATE_SPARSE_BINDING_BIT IMAGE_CREATE_SPARSE_BINDING_BIT} specifies that the image will be backed using sparse memory binding.
  • *
  • {@link #VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT IMAGE_CREATE_SPARSE_RESIDENCY_BIT} specifies that the image can be partially backed using sparse memory binding. Images created with this flag must also be created with the {@link #VK_IMAGE_CREATE_SPARSE_BINDING_BIT IMAGE_CREATE_SPARSE_BINDING_BIT} flag.
  • *
  • {@link #VK_IMAGE_CREATE_SPARSE_ALIASED_BIT IMAGE_CREATE_SPARSE_ALIASED_BIT} specifies that the image will be backed using sparse memory binding with memory ranges that might also simultaneously be backing another image (or another portion of the same image). Images created with this flag must also be created with the {@link #VK_IMAGE_CREATE_SPARSE_BINDING_BIT IMAGE_CREATE_SPARSE_BINDING_BIT} flag
  • *
  • {@link #VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT IMAGE_CREATE_MUTABLE_FORMAT_BIT} specifies that the image can be used to create a {@code VkImageView} with a different format from the image. For multi-planar formats, {@link #VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT IMAGE_CREATE_MUTABLE_FORMAT_BIT} indicates that a {@code VkImageView} can be created of a plane of the image.
  • *
  • {@link #VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT IMAGE_CREATE_CUBE_COMPATIBLE_BIT} specifies that the image can be used to create a {@code VkImageView} of type {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE} or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}.
  • *
  • {@link KHRMaintenance1#VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR} specifies that the image can be used to create a {@code VkImageView} of type {@link #VK_IMAGE_VIEW_TYPE_2D IMAGE_VIEW_TYPE_2D} or {@link #VK_IMAGE_VIEW_TYPE_2D_ARRAY IMAGE_VIEW_TYPE_2D_ARRAY}.
  • *
  • {@link KHXDeviceGroup#VK_IMAGE_CREATE_BIND_SFR_BIT_KHX IMAGE_CREATE_BIND_SFR_BIT_KHX} specifies that the image can be used with a non-zero value of the {@code SFRRectCount} member of a {@link VkBindImageMemoryDeviceGroupInfoKHX} structure passed into {@link KHRBindMemory2#vkBindImageMemory2KHR BindImageMemory2KHR}. This flag also has the effect of making the image use the standard sparse image block dimensions.
  • *
  • {@link KHRMaintenance2#VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR} indicates that the image having a compressed format can be used to create a {@code VkImageView} with an uncompressed format where each texel in the image view corresponds to a compressed texel block of the image.
  • *
  • {@link KHRMaintenance2#VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR} indicates that the image can be created with usage flags that are not supported for the format the image is created with but are supported for at least one format a {@code VkImageView} created from the image can have.
  • *
  • {@link KHRSamplerYcbcrConversion#VK_IMAGE_CREATE_DISJOINT_BIT_KHR IMAGE_CREATE_DISJOINT_BIT_KHR} indicates that an image with a multi-planar format must have each plane separately bound to memory, rather than having a single memory binding for the whole image; the presence of this bit distinguishes a disjoint image from an image without this bit set.
  • *
  • {@link KHRBindMemory2#VK_IMAGE_CREATE_ALIAS_BIT_KHR IMAGE_CREATE_ALIAS_BIT_KHR} indicates that two images created with the same creation parameters and aliased to the same memory can interpret the contents of the memory consistently with each other, subject to the rules described in the Memory Aliasing section. This flag further indicates that each plane of a disjoint image can share an in-memory non-linear representation with single-plane images, and that a single-plane image can share an in-memory non-linear representation with a plane of a multi-planar disjoint image, according to the rules in the “Compatible formats of planes of multi-planar formats” section. If the {@code pNext} chain includes a {@link VkExternalMemoryImageCreateInfoKHR} or {@link VkExternalMemoryImageCreateInfoNV} structure whose {@code handleTypes} member is not 0, it is as if {@link KHRBindMemory2#VK_IMAGE_CREATE_ALIAS_BIT_KHR IMAGE_CREATE_ALIAS_BIT_KHR} is set.
  • *
  • {@link EXTSampleLocations#VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT} specifies that an image with a depth or depth/stencil format can be used with custom sample locations when used as a depth/stencil attachment.
  • *
* *

If any of the bits {@link #VK_IMAGE_CREATE_SPARSE_BINDING_BIT IMAGE_CREATE_SPARSE_BINDING_BIT}, {@link #VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT IMAGE_CREATE_SPARSE_RESIDENCY_BIT}, or {@link #VK_IMAGE_CREATE_SPARSE_ALIASED_BIT IMAGE_CREATE_SPARSE_ALIASED_BIT} are set, {@link #VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT} must not also be set.

* *

See Sparse Resource Features and Sparse Physical Device Features for more details.

* *
See Also
* *

{@code VkImageCreateFlags}
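
As a small illustration of one of these flags, the sketch below fills a {@link VkImageCreateInfo} so that cube-map views can later be created from the image; it assumes an open {@code MemoryStack} frame {@code stack} as in the previous sketch, and all values are placeholders.

// Sketch: flags/arrayLayers needed so VK_IMAGE_VIEW_TYPE_CUBE views can be created later.
VkImageCreateInfo cubeInfo = VkImageCreateInfo.callocStack(stack)
    .sType(VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO)
    .flags(VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) // allows cube and cube-array views
    .imageType(VK_IMAGE_TYPE_2D)
    .format(VK_FORMAT_R8G8B8A8_UNORM)
    .mipLevels(1)
    .arrayLayers(6)                             // one layer per cube face
    .samples(VK_SAMPLE_COUNT_1_BIT)
    .tiling(VK_IMAGE_TILING_OPTIMAL)
    .usage(VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT)
    .sharingMode(VK_SHARING_MODE_EXCLUSIVE)
    .initialLayout(VK_IMAGE_LAYOUT_UNDEFINED);
cubeInfo.extent().width(256).height(256).depth(1);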

*/ public static final int VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x1, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x2, VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x4, VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x8, VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x10; /** * VkSampleCountFlagBits - Bitmask specifying sample counts supported for an image used for storage operations * *
Description
* *
    *
  • {@link #VK_SAMPLE_COUNT_1_BIT SAMPLE_COUNT_1_BIT} specifies an image with one sample per pixel.
  • *
  • {@link #VK_SAMPLE_COUNT_2_BIT SAMPLE_COUNT_2_BIT} specifies an image with 2 samples per pixel.
  • *
  • {@link #VK_SAMPLE_COUNT_4_BIT SAMPLE_COUNT_4_BIT} specifies an image with 4 samples per pixel.
  • *
  • {@link #VK_SAMPLE_COUNT_8_BIT SAMPLE_COUNT_8_BIT} specifies an image with 8 samples per pixel.
  • *
  • {@link #VK_SAMPLE_COUNT_16_BIT SAMPLE_COUNT_16_BIT} specifies an image with 16 samples per pixel.
  • *
  • {@link #VK_SAMPLE_COUNT_32_BIT SAMPLE_COUNT_32_BIT} specifies an image with 32 samples per pixel.
  • *
  • {@link #VK_SAMPLE_COUNT_64_BIT SAMPLE_COUNT_64_BIT} specifies an image with 64 samples per pixel.
  • *
* *
See Also
* *

{@link VkAttachmentDescription}, {@link VkImageCreateInfo}, {@link VkPhysicalDeviceSparseImageFormatInfo2KHR}, {@link VkPipelineMultisampleStateCreateInfo}, {@code VkSampleCountFlags}, {@link VkSampleLocationsInfoEXT}, {@link EXTSampleLocations#vkGetPhysicalDeviceMultisamplePropertiesEXT GetPhysicalDeviceMultisamplePropertiesEXT}, {@link #vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties}
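
A common way to consume these bits is to pick the largest sample count supported for both color and depth attachments; the following is a hedged sketch, with {@code physicalDevice} assumed to be valid.

// Sketch: select the highest sample count usable for both color and depth framebuffer attachments.
try (MemoryStack stack = stackPush()) {
    VkPhysicalDeviceProperties props = VkPhysicalDeviceProperties.callocStack(stack);
    vkGetPhysicalDeviceProperties(physicalDevice, props);

    int counts = props.limits().framebufferColorSampleCounts()
               & props.limits().framebufferDepthSampleCounts();

    int samples = VK_SAMPLE_COUNT_1_BIT;
    for (int bit = VK_SAMPLE_COUNT_64_BIT; bit >= VK_SAMPLE_COUNT_1_BIT; bit >>= 1) {
        if ((counts & bit) != 0) { samples = bit; break; }
    }
    // `samples` can now be used for VkImageCreateInfo::samples and the multisample pipeline state.
}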

*/ public static final int VK_SAMPLE_COUNT_1_BIT = 0x1, VK_SAMPLE_COUNT_2_BIT = 0x2, VK_SAMPLE_COUNT_4_BIT = 0x4, VK_SAMPLE_COUNT_8_BIT = 0x8, VK_SAMPLE_COUNT_16_BIT = 0x10, VK_SAMPLE_COUNT_32_BIT = 0x20, VK_SAMPLE_COUNT_64_BIT = 0x40; /** * VkPhysicalDeviceType - Supported physical device types * *
Description
* *
    *
  • {@link #VK_PHYSICAL_DEVICE_TYPE_OTHER PHYSICAL_DEVICE_TYPE_OTHER} - the device does not match any other available types.
  • *
  • {@link #VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU} - the device is typically one embedded in or tightly coupled with the host.
  • *
  • {@link #VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU PHYSICAL_DEVICE_TYPE_DISCRETE_GPU} - the device is typically a separate processor connected to the host via an interlink.
  • *
  • {@link #VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU} - the device is typically a virtual node in a virtualization environment.
  • *
  • {@link #VK_PHYSICAL_DEVICE_TYPE_CPU PHYSICAL_DEVICE_TYPE_CPU} - the device is typically running on the same processors as the host.
  • *
* *

The physical device type is advertised for informational purposes only, and does not directly affect the operation of the system. However, the device type may correlate with other advertised properties or capabilities of the system, such as how many memory heaps there are.

* *
See Also
* *

{@link VkPhysicalDeviceProperties}
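
A typical (purely illustrative) use of the device type is to prefer a discrete GPU when several physical devices are available; {@code instance} is assumed to be a valid {@code VkInstance}.

// Sketch: enumerate physical devices and prefer a discrete GPU, falling back to the first one.
try (MemoryStack stack = stackPush()) {
    IntBuffer pCount = stack.mallocInt(1);
    vkEnumeratePhysicalDevices(instance, pCount, null);
    PointerBuffer pDevices = stack.mallocPointer(pCount.get(0));
    vkEnumeratePhysicalDevices(instance, pCount, pDevices);

    VkPhysicalDevice chosen = null;
    for (int i = 0; i < pCount.get(0); i++) {
        VkPhysicalDevice candidate = new VkPhysicalDevice(pDevices.get(i), instance);
        VkPhysicalDeviceProperties props = VkPhysicalDeviceProperties.callocStack(stack);
        vkGetPhysicalDeviceProperties(candidate, props);
        if (chosen == null || props.deviceType() == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU) {
            chosen = candidate;
        }
    }
}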

*/ public static final int VK_PHYSICAL_DEVICE_TYPE_OTHER = 0, VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1, VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2, VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3, VK_PHYSICAL_DEVICE_TYPE_CPU = 4; /** * VkQueueFlagBits - Bitmask specifying capabilities of queues in a queue family * *
Description
* *
    *
  • {@link #VK_QUEUE_GRAPHICS_BIT QUEUE_GRAPHICS_BIT} indicates that queues in this queue family support graphics operations.
  • *
  • {@link #VK_QUEUE_COMPUTE_BIT QUEUE_COMPUTE_BIT} indicates that queues in this queue family support compute operations.
  • *
  • {@link #VK_QUEUE_TRANSFER_BIT QUEUE_TRANSFER_BIT} indicates that queues in this queue family support transfer operations.
  • *
  • {@link #VK_QUEUE_SPARSE_BINDING_BIT QUEUE_SPARSE_BINDING_BIT} indicates that queues in this queue family support sparse memory management operations (see Sparse Resources). If any of the sparse resource features are enabled, then at least one queue family must support this bit.
  • *
* *

If an implementation exposes any queue family that supports graphics operations, at least one queue family of at least one physical device exposed by the implementation must support both graphics and compute operations.

* *
Note
* *

All commands that are allowed on a queue that supports transfer operations are also allowed on a queue that supports either graphics or compute operations. Thus, if the capabilities of a queue family include {@link #VK_QUEUE_GRAPHICS_BIT QUEUE_GRAPHICS_BIT} or {@link #VK_QUEUE_COMPUTE_BIT QUEUE_COMPUTE_BIT}, then reporting the {@link #VK_QUEUE_TRANSFER_BIT QUEUE_TRANSFER_BIT} capability separately for that queue family is optional.

*
* *

For further details see Queues.

* *
See Also
* *

{@code VkQueueFlags}
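
As an illustration of these flags, a minimal sketch that locates a graphics-capable queue family; {@code physicalDevice} is assumed to be valid.

// Sketch: find the index of a queue family that supports graphics operations.
try (MemoryStack stack = stackPush()) {
    IntBuffer pCount = stack.mallocInt(1);
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, null);
    VkQueueFamilyProperties.Buffer families = VkQueueFamilyProperties.callocStack(pCount.get(0), stack);
    vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, families);

    int graphicsFamily = -1;
    for (int i = 0; i < families.capacity(); i++) {
        if ((families.get(i).queueFlags() & VK_QUEUE_GRAPHICS_BIT) != 0) {
            graphicsFamily = i;
            break;
        }
    }
    // Per the note above, a graphics-capable family also supports transfer operations implicitly.
}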

*/ public static final int VK_QUEUE_GRAPHICS_BIT = 0x1, VK_QUEUE_COMPUTE_BIT = 0x2, VK_QUEUE_TRANSFER_BIT = 0x4, VK_QUEUE_SPARSE_BINDING_BIT = 0x8; /** * VkMemoryPropertyFlagBits - Bitmask specifying properties for a memory type * *
Description
* *
    *
  • {@link #VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT MEMORY_PROPERTY_DEVICE_LOCAL_BIT} bit indicates that memory allocated with this type is the most efficient for device access. This property will be set if and only if the memory type belongs to a heap with the {@link #VK_MEMORY_HEAP_DEVICE_LOCAL_BIT MEMORY_HEAP_DEVICE_LOCAL_BIT} set.
  • *
  • {@link #VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT MEMORY_PROPERTY_HOST_VISIBLE_BIT} bit indicates that memory allocated with this type can be mapped for host access using {@link #vkMapMemory MapMemory}.
  • *
  • {@link #VK_MEMORY_PROPERTY_HOST_COHERENT_BIT MEMORY_PROPERTY_HOST_COHERENT_BIT} bit indicates that the host cache management commands {@link #vkFlushMappedMemoryRanges FlushMappedMemoryRanges} and {@link #vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges} are not needed to flush host writes to the device or make device writes visible to the host, respectively.
  • *
  • {@link #VK_MEMORY_PROPERTY_HOST_CACHED_BIT MEMORY_PROPERTY_HOST_CACHED_BIT} bit indicates that memory allocated with this type is cached on the host. Host memory accesses to uncached memory are slower than to cached memory, however uncached memory is always host coherent.
  • *
  • {@link #VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT} bit indicates that the memory type only allows device access to the memory. Memory types must not have both {@link #VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT} and {@link #VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT MEMORY_PROPERTY_HOST_VISIBLE_BIT} set. Additionally, the object’s backing memory may be provided by the implementation lazily as specified in Lazily Allocated Memory.
  • *
* *
See Also
* *

{@code VkMemoryPropertyFlags}
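
These property bits are normally consumed when choosing a memory type for a resource. The helper below, {@code findMemoryType}, is a hypothetical name used only for illustration; it combines a resource's {@code memoryTypeBits} with the required property flags.

// Sketch: select a memory type that satisfies both the resource's memoryTypeBits and the
// requested property flags (e.g. HOST_VISIBLE | HOST_COHERENT for a staging buffer).
static int findMemoryType(VkPhysicalDevice physicalDevice, int memoryTypeBits, int requiredProperties) {
    try (MemoryStack stack = stackPush()) {
        VkPhysicalDeviceMemoryProperties memProps = VkPhysicalDeviceMemoryProperties.callocStack(stack);
        vkGetPhysicalDeviceMemoryProperties(physicalDevice, memProps);
        for (int i = 0; i < memProps.memoryTypeCount(); i++) {
            boolean allowed  = (memoryTypeBits & (1 << i)) != 0;
            boolean hasProps = (memProps.memoryTypes(i).propertyFlags() & requiredProperties) == requiredProperties;
            if (allowed && hasProps) {
                return i;
            }
        }
        throw new IllegalStateException("No suitable memory type found");
    }
}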

*/ public static final int VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x1, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x2, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x4, VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x8, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x10; /** * VkMemoryHeapFlagBits - Bitmask specifying attribute flags for a heap * *
Description
* *
    *
  • {@link #VK_MEMORY_HEAP_DEVICE_LOCAL_BIT MEMORY_HEAP_DEVICE_LOCAL_BIT} indicates that the heap corresponds to device local memory. Device local memory may have different performance characteristics than host local memory, and may support different memory property flags.
  • *
  • {@link KHXDeviceGroupCreation#VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHX MEMORY_HEAP_MULTI_INSTANCE_BIT_KHX} indicates that in a logical device representing more than one physical device, there is a per-physical device instance of the heap memory. By default, an allocation from such a heap will be replicated to each physical device’s instance of the heap.
  • *
* *
See Also
* *

{@code VkMemoryHeapFlags}
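
For a rough picture of how heap flags are used, the sketch below sums the sizes of all device-local heaps; {@code physicalDevice} is assumed to be valid.

// Sketch: estimate usable GPU memory by summing the size of all device-local heaps.
try (MemoryStack stack = stackPush()) {
    VkPhysicalDeviceMemoryProperties memProps = VkPhysicalDeviceMemoryProperties.callocStack(stack);
    vkGetPhysicalDeviceMemoryProperties(physicalDevice, memProps);

    long deviceLocalBytes = 0L;
    for (int i = 0; i < memProps.memoryHeapCount(); i++) {
        if ((memProps.memoryHeaps(i).flags() & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0) {
            deviceLocalBytes += memProps.memoryHeaps(i).size();
        }
    }
}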

*/ public static final int VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x1; /** * VkPipelineStageFlagBits - Bitmask specifying pipeline stages * *
Description
* *
    *
  • {@link #VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT PIPELINE_STAGE_TOP_OF_PIPE_BIT} specifies the stage of the pipeline where any commands are initially received by the queue.
  • *
  • {@link NVXDeviceGeneratedCommands#VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX} specifies the stage of the pipeline where device-side generation of commands via {@link NVXDeviceGeneratedCommands#vkCmdProcessCommandsNVX CmdProcessCommandsNVX} is handled.
  • *
  • {@link #VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT PIPELINE_STAGE_DRAW_INDIRECT_BIT} specifies the stage of the pipeline where Draw/DispatchIndirect data structures are consumed. This stage also includes reading commands written by {@link NVXDeviceGeneratedCommands#vkCmdProcessCommandsNVX CmdProcessCommandsNVX}.
  • *
  • {@link #VK_PIPELINE_STAGE_VERTEX_INPUT_BIT PIPELINE_STAGE_VERTEX_INPUT_BIT} specifies the stage of the pipeline where vertex and index buffers are consumed.
  • *
  • {@link #VK_PIPELINE_STAGE_VERTEX_SHADER_BIT PIPELINE_STAGE_VERTEX_SHADER_BIT} specifies the vertex shader stage.
  • *
  • {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT} specifies the tessellation control shader stage.
  • *
  • {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT} specifies the tessellation evaluation shader stage.
  • *
  • {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT} specifies the geometry shader stage.
  • *
  • {@link #VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT PIPELINE_STAGE_FRAGMENT_SHADER_BIT} specifies the fragment shader stage.
  • *
  • {@link #VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT} specifies the stage of the pipeline where early fragment tests (depth and stencil tests before fragment shading) are performed. This stage also includes subpass load operations for framebuffer attachments with a depth/stencil format.
  • *
  • {@link #VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT} specifies the stage of the pipeline where late fragment tests (depth and stencil tests after fragment shading) are performed. This stage also includes subpass store operations for framebuffer attachments with a depth/stencil format.
  • *
  • {@link #VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT} specifies the stage of the pipeline after blending where the final color values are output from the pipeline. This stage also includes subpass load and store operations and multisample resolve operations for framebuffer attachments with a color format.
  • *
  • {@link #VK_PIPELINE_STAGE_TRANSFER_BIT PIPELINE_STAGE_TRANSFER_BIT} specifies the execution of copy commands. This includes the operations resulting from all copy commands, clear commands (with the exception of {@link #vkCmdClearAttachments CmdClearAttachments}), and {@link #vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults}.
  • *
  • {@link #VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT PIPELINE_STAGE_COMPUTE_SHADER_BIT} specifies the execution of a compute shader.
  • *
  • {@link #VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT} specifies the final stage in the pipeline where operations generated by all commands complete execution.
  • *
  • {@link #VK_PIPELINE_STAGE_HOST_BIT PIPELINE_STAGE_HOST_BIT} specifies a pseudo-stage indicating execution on the host of reads/writes of device memory. This stage is not invoked by any commands recorded in a command buffer.
  • *
  • {@link #VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT PIPELINE_STAGE_ALL_GRAPHICS_BIT} specifies the execution of all graphics pipeline stages, and is equivalent to the logical OR of: * *
      *
    • {@link #VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT PIPELINE_STAGE_TOP_OF_PIPE_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT PIPELINE_STAGE_DRAW_INDIRECT_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_VERTEX_INPUT_BIT PIPELINE_STAGE_VERTEX_INPUT_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_VERTEX_SHADER_BIT PIPELINE_STAGE_VERTEX_SHADER_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT PIPELINE_STAGE_FRAGMENT_SHADER_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT}
    • *
    • {@link #VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT}
    • *
    *
  • *
  • {@link #VK_PIPELINE_STAGE_ALL_COMMANDS_BIT PIPELINE_STAGE_ALL_COMMANDS_BIT} is equivalent to the logical OR of every other pipeline stage flag that is supported on the queue it is used with.
  • *
* *
Note
* *

An execution dependency with only {@link #VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT} in the destination stage mask will only prevent that stage from executing in subsequently submitted commands. As this stage does not perform any actual execution, this is not observable - in effect, it does not delay processing of subsequent commands. Similarly an execution dependency with only {@link #VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT PIPELINE_STAGE_TOP_OF_PIPE_BIT} in the source stage mask will effectively not wait for any prior commands to complete.

* *

When defining a memory dependency, using only {@link #VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT} or {@link #VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT PIPELINE_STAGE_TOP_OF_PIPE_BIT} would never make any accesses available and/or visible because these stages do not access memory.

* *

{@link #VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT} and {@link #VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT PIPELINE_STAGE_TOP_OF_PIPE_BIT} are useful for accomplishing layout transitions and queue ownership operations when the required execution dependency is satisfied by other means - for example, semaphore operations between queues.

*
* *
See Also
* *

{@code VkPipelineStageFlags}, {@link #vkCmdWriteTimestamp CmdWriteTimestamp}
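
To make the stage masks concrete, here is a hedged sketch of a {@link #vkCmdPipelineBarrier CmdPipelineBarrier} call that performs a layout transition before a transfer write; {@code cmd} is assumed to be a {@code VkCommandBuffer} in the recording state and {@code image} a previously created image handle.

// Sketch: transition a freshly created image to TRANSFER_DST_OPTIMAL before filling it with a copy.
try (MemoryStack stack = stackPush()) {
    VkImageMemoryBarrier.Buffer barrier = VkImageMemoryBarrier.callocStack(1, stack)
        .sType(VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER)
        .srcAccessMask(0)                             // no prior writes to wait on
        .dstAccessMask(VK_ACCESS_TRANSFER_WRITE_BIT)
        .oldLayout(VK_IMAGE_LAYOUT_UNDEFINED)
        .newLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
        .srcQueueFamilyIndex(VK_QUEUE_FAMILY_IGNORED)  // no queue family ownership transfer
        .dstQueueFamilyIndex(VK_QUEUE_FAMILY_IGNORED)
        .image(image);
    barrier.subresourceRange()
        .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
        .baseMipLevel(0).levelCount(1)
        .baseArrayLayer(0).layerCount(1);

    vkCmdPipelineBarrier(
        cmd,
        VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, // nothing earlier needs to complete first
        VK_PIPELINE_STAGE_TRANSFER_BIT,    // block the upcoming transfer until the transition is done
        0, null, null, barrier);
}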

*/ public static final int VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x1, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x2, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x4, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x8, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x10, VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x20, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x40, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x80, VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x100, VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x200, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x400, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x800, VK_PIPELINE_STAGE_TRANSFER_BIT = 0x1000, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x2000, VK_PIPELINE_STAGE_HOST_BIT = 0x4000, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x8000, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x10000; /** * VkImageAspectFlagBits - Bitmask specifying which aspects of an image are included in a view * *
Description
* *
    *
  • {@link #VK_IMAGE_ASPECT_COLOR_BIT IMAGE_ASPECT_COLOR_BIT} specifies the color aspect.
  • *
  • {@link #VK_IMAGE_ASPECT_DEPTH_BIT IMAGE_ASPECT_DEPTH_BIT} specifies the depth aspect.
  • *
  • {@link #VK_IMAGE_ASPECT_STENCIL_BIT IMAGE_ASPECT_STENCIL_BIT} specifies the stencil aspect.
  • *
  • {@link #VK_IMAGE_ASPECT_METADATA_BIT IMAGE_ASPECT_METADATA_BIT} specifies the metadata aspect, used for sparse resource operations.
  • *
* *
See Also
* *

{@link VkBindImagePlaneMemoryInfoKHR}, {@code VkImageAspectFlags}, {@link VkImagePlaneMemoryRequirementsInfoKHR}
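
The aspect mask most often appears in a {@code VkImageSubresourceRange}; the sketch below creates a color image view over a whole image, assuming valid {@code device} and {@code image} handles from earlier sketches.

// Sketch: create a 2D color image view covering mip level 0 and array layer 0.
try (MemoryStack stack = stackPush()) {
    VkImageViewCreateInfo viewInfo = VkImageViewCreateInfo.callocStack(stack)
        .sType(VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO)
        .image(image)
        .viewType(VK_IMAGE_VIEW_TYPE_2D)
        .format(VK_FORMAT_R8G8B8A8_UNORM);
    viewInfo.subresourceRange()
        .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT) // use the DEPTH/STENCIL bits for depth/stencil formats
        .baseMipLevel(0).levelCount(1)
        .baseArrayLayer(0).layerCount(1);

    LongBuffer pView = stack.mallocLong(1);
    if (vkCreateImageView(device, viewInfo, null, pView) != VK_SUCCESS) {
        throw new IllegalStateException("vkCreateImageView failed");
    }
    long imageView = pView.get(0);
}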

*/ public static final int VK_IMAGE_ASPECT_COLOR_BIT = 0x1, VK_IMAGE_ASPECT_DEPTH_BIT = 0x2, VK_IMAGE_ASPECT_STENCIL_BIT = 0x4, VK_IMAGE_ASPECT_METADATA_BIT = 0x8; /** * VkSparseImageFormatFlagBits - Bitmask specifying additional information about a sparse image resource * *
Description
* *
    *
  • {@link #VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT} specifies that the image uses a single mip tail region for all array layers.
  • *
  • {@link #VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT} specifies that the first mip level whose dimensions are not integer multiples of the corresponding dimensions of the sparse image block begins the mip tail region.
  • *
  • {@link #VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT} specifies that the image uses non-standard sparse image block dimensions, and the {@code imageGranularity} values do not match the standard sparse image block dimensions for the given format.
  • *
* *
See Also
* *

{@code VkSparseImageFormatFlags}

*/ public static final int VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x1, VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x2, VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x4; /** * VkSparseMemoryBindFlagBits - Bitmask specifying usage of a sparse memory binding operation * *
Description
* *
    *
  • {@link #VK_SPARSE_MEMORY_BIND_METADATA_BIT SPARSE_MEMORY_BIND_METADATA_BIT} specifies that the memory being bound is only for the metadata aspect.
  • *
* *
See Also
* *

{@code VkSparseMemoryBindFlags}

*/ public static final int VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x1; /** * VkFenceCreateFlagBits - Bitmask specifying initial state and behavior of a fence * *
Description
* *
    *
  • {@link #VK_FENCE_CREATE_SIGNALED_BIT FENCE_CREATE_SIGNALED_BIT} specifies that the fence object is created in the signaled state. Otherwise, it is created in the unsignaled state.
  • *
* *
See Also
* *

{@code VkFenceCreateFlags}
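
A minimal sketch of the signaled-fence pattern follows; creating per-frame fences in the signaled state means the first wait on them returns immediately. {@code device} is assumed to be a valid {@code VkDevice}.

// Sketch: create a fence that starts signaled, so the first per-frame wait does not block.
try (MemoryStack stack = stackPush()) {
    VkFenceCreateInfo fenceInfo = VkFenceCreateInfo.callocStack(stack)
        .sType(VK_STRUCTURE_TYPE_FENCE_CREATE_INFO)
        .flags(VK_FENCE_CREATE_SIGNALED_BIT);

    LongBuffer pFence = stack.mallocLong(1);
    if (vkCreateFence(device, fenceInfo, null, pFence) != VK_SUCCESS) {
        throw new IllegalStateException("vkCreateFence failed");
    }
    long fence = pFence.get(0); // vkGetFenceStatus(device, fence) would already report VK_SUCCESS
}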

*/ public static final int VK_FENCE_CREATE_SIGNALED_BIT = 0x1; /** * VkQueryType - Specify the type of queries managed by a query pool * *
Description
* *
    *
  • {@link #VK_QUERY_TYPE_OCCLUSION QUERY_TYPE_OCCLUSION} specifies an occlusion query.
  • *
  • {@link #VK_QUERY_TYPE_PIPELINE_STATISTICS QUERY_TYPE_PIPELINE_STATISTICS} specifies a pipeline statistics query.
  • *
  • {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP} specifies a timestamp query.
  • *
* *
See Also
* *

{@link VkQueryPoolCreateInfo}

*/ public static final int VK_QUERY_TYPE_OCCLUSION = 0, VK_QUERY_TYPE_PIPELINE_STATISTICS = 1, VK_QUERY_TYPE_TIMESTAMP = 2; /** * VkQueryPipelineStatisticFlagBits - Bitmask specifying queried pipeline statistics * *
Description
* *
    *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT} specifies that queries managed by the pool will count the number of vertices processed by the input assembly stage. Vertices corresponding to incomplete primitives may contribute to the count.
  • *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT} specifies that queries managed by the pool will count the number of primitives processed by the input assembly stage. If primitive restart is enabled, restarting the primitive topology has no effect on the count. Incomplete primitives may be counted.
  • *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT} specifies that queries managed by the pool will count the number of vertex shader invocations. This counter’s value is incremented each time a vertex shader is invoked.
  • *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT} specifies that queries managed by the pool will count the number of geometry shader invocations. This counter’s value is incremented each time a geometry shader is invoked. In the case of instanced geometry shaders, the geometry shader invocations count is incremented for each separate instanced invocation.
  • *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT} specifies that queries managed by the pool will count the number of primitives generated by geometry shader invocations. The counter’s value is incremented each time the geometry shader emits a primitive. Restarting primitive topology using the SPIR-V instructions {@code OpEndPrimitive} or {@code OpEndStreamPrimitive} has no effect on the geometry shader output primitives count.
  • *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT} specifies that queries managed by the pool will count the number of primitives processed by the Primitive Clipping stage of the pipeline. The counter’s value is incremented each time a primitive reaches the primitive clipping stage.
  • *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT} specifies that queries managed by the pool will count the number of primitives output by the Primitive Clipping stage of the pipeline. The counter’s value is incremented each time a primitive passes the primitive clipping stage. The actual number of primitives output by the primitive clipping stage for a particular input primitive is implementation-dependent but must satisfy the following conditions: * *
      *
    • If at least one vertex of the input primitive lies inside the clipping volume, the counter is incremented by one or more.
    • *
    • Otherwise, the counter is incremented by zero or more.
    • *
    *
  • *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT} specifies that queries managed by the pool will count the number of fragment shader invocations. The counter’s value is incremented each time the fragment shader is invoked.
  • *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT} specifies that queries managed by the pool will count the number of patches processed by the tessellation control shader. The counter’s value is incremented once for each patch for which a tessellation control shader is invoked.
  • *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT} specifies that queries managed by the pool will count the number of invocations of the tessellation evaluation shader. The counter’s value is incremented each time the tessellation evaluation shader is invoked.
  • *
  • {@link #VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT} specifies that queries managed by the pool will count the number of compute shader invocations. The counter’s value is incremented every time the compute shader is invoked. Implementations may skip the execution of certain compute shader invocations or execute additional compute shader invocations for implementation-dependent reasons as long as the results of rendering otherwise remain unchanged.
  • *
* *

These values are intended to measure relative statistics on one implementation. Various device architectures will count these values differently. Any or all counters may be affected by the issues described in Query Operation.

* *
Note
* *

For example, tile-based rendering devices may need to replay the scene multiple times, affecting some of the counts.

*
* *

If a pipeline has {@code rasterizerDiscardEnable} enabled, implementations may discard primitives after the final vertex processing stage. As a result, if {@code rasterizerDiscardEnable} is enabled, the clipping input and output primitives counters may not be incremented.

* *

When a pipeline statistics query finishes, the result for that query is marked as available. The application can copy the result to a buffer (via {@link #vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults}), or request it be put into host memory (via {@link #vkGetQueryPoolResults GetQueryPoolResults}).

* *
See Also
* *

{@code VkQueryPipelineStatisticFlags}

*/ public static final int VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x1, VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x2, VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x4, VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x8, VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x10, VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x20, VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x40, VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x80, VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x100, VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x200, VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x400; /** * VkQueryResultFlagBits - Bitmask specifying how and when query results are returned * *
Description
* *
    *
  • {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} specifies the results will be written as an array of 64-bit unsigned integer values. If this bit is not set, the results will be written as an array of 32-bit unsigned integer values.
  • *
  • {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} specifies that Vulkan will wait for each query’s status to become available before retrieving its results.
  • *
  • {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} specifies that the availability status accompanies the results.
  • *
  • {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} specifies that returning partial results is acceptable.
  • *
* *
See Also
* *

{@code VkQueryResultFlags}
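
To show how these result flags combine, here is a hedged sketch that creates a small timestamp query pool and reads back 64-bit results, waiting until they are available; {@code device} is assumed valid, and the timestamps are assumed to have been written and submitted elsewhere.

// Sketch: create a two-slot timestamp query pool and read 64-bit results with WAIT semantics.
try (MemoryStack stack = stackPush()) {
    VkQueryPoolCreateInfo poolInfo = VkQueryPoolCreateInfo.callocStack(stack)
        .sType(VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO)
        .queryType(VK_QUERY_TYPE_TIMESTAMP)
        .queryCount(2);

    LongBuffer pPool = stack.mallocLong(1);
    vkCreateQueryPool(device, poolInfo, null, pPool);
    long queryPool = pPool.get(0);

    // ... record vkCmdWriteTimestamp twice, submit, then:
    ByteBuffer results = stack.malloc(2 * 8); // two 64-bit values, stride of 8 bytes each
    vkGetQueryPoolResults(device, queryPool, 0, 2, results, 8,
        VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
    long elapsedTicks = results.getLong(8) - results.getLong(0);
    // Multiply by VkPhysicalDeviceLimits::timestampPeriod to convert ticks to nanoseconds.
}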

*/ public static final int VK_QUERY_RESULT_64_BIT = 0x1, VK_QUERY_RESULT_WAIT_BIT = 0x2, VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x4, VK_QUERY_RESULT_PARTIAL_BIT = 0x8; /** * VkBufferCreateFlagBits - Bitmask specifying additional parameters of a buffer * *
Description
* *
    *
  • {@link #VK_BUFFER_CREATE_SPARSE_BINDING_BIT BUFFER_CREATE_SPARSE_BINDING_BIT} specifies that the buffer will be backed using sparse memory binding.
  • *
  • {@link #VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT BUFFER_CREATE_SPARSE_RESIDENCY_BIT} specifies that the buffer can be partially backed using sparse memory binding. Buffers created with this flag must also be created with the {@link #VK_BUFFER_CREATE_SPARSE_BINDING_BIT BUFFER_CREATE_SPARSE_BINDING_BIT} flag.
  • *
  • {@link #VK_BUFFER_CREATE_SPARSE_ALIASED_BIT BUFFER_CREATE_SPARSE_ALIASED_BIT} specifies that the buffer will be backed using sparse memory binding with memory ranges that might also simultaneously be backing another buffer (or another portion of the same buffer). Buffers created with this flag must also be created with the {@link #VK_BUFFER_CREATE_SPARSE_BINDING_BIT BUFFER_CREATE_SPARSE_BINDING_BIT} flag.
  • *
* *

See Sparse Resource Features and Physical Device Features for details of the sparse memory features supported on a device.

* *
See Also
* *

{@code VkBufferCreateFlags}

*/ public static final int VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x1, VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x2, VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x4; /** * VkBufferUsageFlagBits - Bitmask specifying allowed usage of a buffer * *
Description
* *
    *
  • {@link #VK_BUFFER_USAGE_TRANSFER_SRC_BIT BUFFER_USAGE_TRANSFER_SRC_BIT} specifies that the buffer can be used as the source of a transfer command (see the definition of {@link #VK_PIPELINE_STAGE_TRANSFER_BIT PIPELINE_STAGE_TRANSFER_BIT}).
  • *
  • {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} specifies that the buffer can be used as the destination of a transfer command.
  • *
  • {@link #VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT} specifies that the buffer can be used to create a {@code VkBufferView} suitable for occupying a {@code VkDescriptorSet} slot of type {@link #VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER}.
  • *
  • {@link #VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT} specifies that the buffer can be used to create a {@code VkBufferView} suitable for occupying a {@code VkDescriptorSet} slot of type {@link #VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER}.
  • *
  • {@link #VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT BUFFER_USAGE_UNIFORM_BUFFER_BIT} specifies that the buffer can be used in a {@link VkDescriptorBufferInfo} suitable for occupying a {@code VkDescriptorSet} slot either of type {@link #VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER DESCRIPTOR_TYPE_UNIFORM_BUFFER} or {@link #VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC}.
  • *
  • {@link #VK_BUFFER_USAGE_STORAGE_BUFFER_BIT BUFFER_USAGE_STORAGE_BUFFER_BIT} specifies that the buffer can be used in a {@link VkDescriptorBufferInfo} suitable for occupying a {@code VkDescriptorSet} slot either of type {@link #VK_DESCRIPTOR_TYPE_STORAGE_BUFFER DESCRIPTOR_TYPE_STORAGE_BUFFER} or {@link #VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC}.
  • *
  • {@link #VK_BUFFER_USAGE_INDEX_BUFFER_BIT BUFFER_USAGE_INDEX_BUFFER_BIT} specifies that the buffer is suitable for passing as the {@code buffer} parameter to {@link #vkCmdBindIndexBuffer CmdBindIndexBuffer}.
  • *
  • {@link #VK_BUFFER_USAGE_VERTEX_BUFFER_BIT BUFFER_USAGE_VERTEX_BUFFER_BIT} specifies that the buffer is suitable for passing as an element of the {@code pBuffers} array to {@link #vkCmdBindVertexBuffers CmdBindVertexBuffers}.
  • *
  • {@link #VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT BUFFER_USAGE_INDIRECT_BUFFER_BIT} specifies that the buffer is suitable for passing as the {@code buffer} parameter to {@link #vkCmdDrawIndirect CmdDrawIndirect}, {@link #vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect}, or {@link #vkCmdDispatchIndirect CmdDispatchIndirect}. It is also suitable for passing as the {@code buffer} member of {@link VkIndirectCommandsTokenNVX}, or {@code sequencesCountBuffer} or {@code sequencesIndexBuffer} member of {@link VkCmdProcessCommandsInfoNVX}
  • *
* *
See Also
* *

{@code VkBufferUsageFlags}
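
As a final illustration, a non-normative sketch that creates a vertex buffer and binds device-local memory to it; {@code device}, {@code physicalDevice} and {@code vertexDataSize} are assumed to exist, and it reuses the hypothetical {@code findMemoryType(...)} helper sketched under {@code VkMemoryPropertyFlagBits} above.

// Sketch: create a device-local vertex buffer (filled later via a staging copy) and bind memory.
try (MemoryStack stack = stackPush()) {
    VkBufferCreateInfo bufferInfo = VkBufferCreateInfo.callocStack(stack)
        .sType(VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO)
        .size(vertexDataSize)
        .usage(VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT)
        .sharingMode(VK_SHARING_MODE_EXCLUSIVE);

    LongBuffer pBuffer = stack.mallocLong(1);
    vkCreateBuffer(device, bufferInfo, null, pBuffer);
    long buffer = pBuffer.get(0);

    VkMemoryRequirements memReqs = VkMemoryRequirements.callocStack(stack);
    vkGetBufferMemoryRequirements(device, buffer, memReqs);

    VkMemoryAllocateInfo allocInfo = VkMemoryAllocateInfo.callocStack(stack)
        .sType(VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO)
        .allocationSize(memReqs.size())
        .memoryTypeIndex(findMemoryType(physicalDevice, memReqs.memoryTypeBits(),
                                        VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT));

    LongBuffer pMemory = stack.mallocLong(1);
    vkAllocateMemory(device, allocInfo, null, pMemory);
    vkBindBufferMemory(device, buffer, pMemory.get(0), 0);
}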

*/ public static final int VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x1, VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x2, VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x4, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x8, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x10, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x20, VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x40, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x80, VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x100; /** * VkSharingMode - Buffer and image sharing modes * *
Description
* *
    *
  • {@link #VK_SHARING_MODE_EXCLUSIVE SHARING_MODE_EXCLUSIVE} specifies that access to any range or image subresource of the object will be exclusive to a single queue family at a time.
  • *
  • {@link #VK_SHARING_MODE_CONCURRENT SHARING_MODE_CONCURRENT} specifies that concurrent access to any range or image subresource of the object from multiple queue families is supported.
  • *
* *
Note
* *

{@link #VK_SHARING_MODE_CONCURRENT SHARING_MODE_CONCURRENT} may result in lower performance access to the buffer or image than {@link #VK_SHARING_MODE_EXCLUSIVE SHARING_MODE_EXCLUSIVE}.

*
* *

Ranges of buffers and image subresources of image objects created using {@link #VK_SHARING_MODE_EXCLUSIVE SHARING_MODE_EXCLUSIVE} must only be accessed by queues in the same queue family at any given time. In order for a different queue family to be able to interpret the memory contents of a range or image subresource, the application must perform a queue family ownership transfer.

* *

Upon creation, resources using {@link #VK_SHARING_MODE_EXCLUSIVE SHARING_MODE_EXCLUSIVE} are not owned by any queue family. A buffer or image memory barrier is not required to acquire ownership when no queue family owns the resource - it is implicitly acquired upon first use within a queue.

* *
Note
* *

Images still require a layout transition from {@link #VK_IMAGE_LAYOUT_UNDEFINED IMAGE_LAYOUT_UNDEFINED} or {@link #VK_IMAGE_LAYOUT_PREINITIALIZED IMAGE_LAYOUT_PREINITIALIZED} before being used on the first queue.

*
* *

A queue family can take ownership of an image subresource or buffer range of a resource created with {@link #VK_SHARING_MODE_EXCLUSIVE SHARING_MODE_EXCLUSIVE}, without an ownership transfer, in the same way as for a resource that was just created; however, taking ownership in this way has the effect that the contents of the image subresource or buffer range are undefined.

* *

Ranges of buffers and image subresources of image objects created using {@link #VK_SHARING_MODE_CONCURRENT SHARING_MODE_CONCURRENT} must only be accessed by queues from the queue families specified through the {@code queueFamilyIndexCount} and {@code pQueueFamilyIndices} members of the corresponding create info structures.

* *
See Also
* *

{@link VkBufferCreateInfo}, {@link VkImageCreateInfo}, {@link VkSwapchainCreateInfoKHR}

*/ public static final int VK_SHARING_MODE_EXCLUSIVE = 0, VK_SHARING_MODE_CONCURRENT = 1; /** * VkImageLayout - Layout of image and image subresources * *
Description
* *

The type(s) of device access supported by each layout are:

* *
    *
  • {@link #VK_IMAGE_LAYOUT_UNDEFINED IMAGE_LAYOUT_UNDEFINED} does not support device access. This layout must only be used as the {@code initialLayout} member of {@link VkImageCreateInfo} or {@link VkAttachmentDescription}, or as the {@code oldLayout} in an image transition. When transitioning out of this layout, the contents of the memory are not guaranteed to be preserved.
  • *
  • {@link #VK_IMAGE_LAYOUT_PREINITIALIZED IMAGE_LAYOUT_PREINITIALIZED} does not support device access. This layout must only be used as the {@code initialLayout} member of {@link VkImageCreateInfo} or {@link VkAttachmentDescription}, or as the {@code oldLayout} in an image transition. When transitioning out of this layout, the contents of the memory are preserved. This layout is intended to be used as the initial layout for an image whose contents are written by the host, and hence the data can be written to memory immediately, without first executing a layout transition. Currently, {@link #VK_IMAGE_LAYOUT_PREINITIALIZED IMAGE_LAYOUT_PREINITIALIZED} is only useful with {@link #VK_IMAGE_TILING_LINEAR IMAGE_TILING_LINEAR} images because there is not a standard layout defined for {@link #VK_IMAGE_TILING_OPTIMAL IMAGE_TILING_OPTIMAL} images.
  • *
  • {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL} supports all types of device access.
  • *
  • {@link #VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL} must only be used as a color or resolve attachment in a {@code VkFramebuffer}. This layout is valid only for image subresources of images created with the {@link #VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT IMAGE_USAGE_COLOR_ATTACHMENT_BIT} usage bit enabled.
  • *
  • {@link #VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL} must only be used as a depth/stencil attachment in a {@code VkFramebuffer}. This layout is valid only for image subresources of images created with the {@link #VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT} usage bit enabled.
  • *
  • {@link #VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL} must only be used as a read-only depth/stencil attachment in a {@code VkFramebuffer} and/or as a read-only image in a shader (which can be read as a sampled image, combined image/sampler and/or input attachment). This layout is valid only for image subresources of images created with the {@link #VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT} usage bit enabled. Only image subresources of images created with {@link #VK_IMAGE_USAGE_SAMPLED_BIT IMAGE_USAGE_SAMPLED_BIT} can be used as a sampled image or combined image/sampler in a shader. Similarly, only image subresources of images created with {@link #VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT IMAGE_USAGE_INPUT_ATTACHMENT_BIT} can be used as input attachments.
  • *
  • {@link KHRMaintenance2#VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR}: must only be used as a depth/stencil attachment in a {@code VkFramebuffer}, where the depth aspect is read-only, and/or as a read-only image in a shader (which can be read as a sampled image, combined image/sampler and/or input attachment) where only the depth aspect is accessed. This layout is valid only for image subresources of images created with the {@link #VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT} usage bit enabled. Only image subresources of images created with {@link #VK_IMAGE_USAGE_SAMPLED_BIT IMAGE_USAGE_SAMPLED_BIT} can be used as a sampled image or combined image/sampler in a shader. Similarly, only image subresources of images created with {@link #VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT IMAGE_USAGE_INPUT_ATTACHMENT_BIT} can be used as input attachments.
  • *
  • {@link KHRMaintenance2#VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR}: must only be used as a depth/stencil attachment in a {@code VkFramebuffer}, where the stencil aspect is read-only, and/or as a read-only image in a shader (which can be read as a sampled image, combined image/sampler and/or input attachment) where only the stencil aspect is accessed. This layout is valid only for image subresources of images created with the {@link #VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT} usage bit enabled. Only image subresources of images created with {@link #VK_IMAGE_USAGE_SAMPLED_BIT IMAGE_USAGE_SAMPLED_BIT} can be used as a sampled image or combined image/sampler in a shader. Similarly, only image subresources of images created with {@link #VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT IMAGE_USAGE_INPUT_ATTACHMENT_BIT} can be used as input attachments.
  • *
  • {@link #VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL} must only be used as a read-only image in a shader (which can be read as a sampled image, combined image/sampler and/or input attachment). This layout is valid only for image subresources of images created with the {@link #VK_IMAGE_USAGE_SAMPLED_BIT IMAGE_USAGE_SAMPLED_BIT} or {@link #VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT IMAGE_USAGE_INPUT_ATTACHMENT_BIT} usage bit enabled.
  • *
  • {@link #VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL} must only be used as a source image of a transfer command (see the definition of {@link #VK_PIPELINE_STAGE_TRANSFER_BIT PIPELINE_STAGE_TRANSFER_BIT}). This layout is valid only for image subresources of images created with the {@link #VK_IMAGE_USAGE_TRANSFER_SRC_BIT IMAGE_USAGE_TRANSFER_SRC_BIT} usage bit enabled.
  • *
  • {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL} must only be used as a destination image of a transfer command. This layout is valid only for image subresources of images created with the {@link #VK_IMAGE_USAGE_TRANSFER_DST_BIT IMAGE_USAGE_TRANSFER_DST_BIT} usage bit enabled.
  • *
  • {@link KHRSwapchain#VK_IMAGE_LAYOUT_PRESENT_SRC_KHR IMAGE_LAYOUT_PRESENT_SRC_KHR} must only be used for presenting a presentable image for display. A swapchain’s image must be transitioned to this layout before calling {@link KHRSwapchain#vkQueuePresentKHR QueuePresentKHR}, and must be transitioned away from this layout after calling {@link KHRSwapchain#vkAcquireNextImageKHR AcquireNextImageKHR}.
  • *
  • {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR} is valid only for shared presentable images, and must be used for any usage the image supports.
  • *
* *

For each mechanism of accessing an image in the API, there is a parameter or structure member that controls the image layout used to access the image. For transfer commands, this is a parameter to the command (see the “Clear Commands” section and the “Copy Commands” section). For use as a framebuffer attachment, this is a member in the substructures of the {@link VkRenderPassCreateInfo} (see Render Pass). For use in a descriptor set, this is a member in the {@link VkDescriptorImageInfo} structure (see the “Descriptor Set Updates” section). At the time that any command buffer command accessing an image executes on any queue, the layouts of the image subresources that are accessed must all match the layout specified via the API controlling those accesses.

* *

The image layout of each image subresource must be well-defined at each point in the image subresource's lifetime. This means that when performing a layout transition on the image subresource, the old layout value must either equal the current layout of the image subresource (at the time the transition executes), or else be {@link #VK_IMAGE_LAYOUT_UNDEFINED IMAGE_LAYOUT_UNDEFINED} (implying that the contents of the image subresource need not be preserved). The new layout used in a transition must not be {@link #VK_IMAGE_LAYOUT_UNDEFINED IMAGE_LAYOUT_UNDEFINED} or {@link #VK_IMAGE_LAYOUT_PREINITIALIZED IMAGE_LAYOUT_PREINITIALIZED}.

* *

The image layout of each image subresource of a depth/stencil image created with {@link EXTSampleLocations#VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT} is dependent on the last sample locations used to render to the image subresource as a depth/stencil attachment, thus applications must provide the same sample locations that were last used to render to the given image subresource whenever a layout transition of the image subresource happens, otherwise the contents of the depth aspect of the image subresource become undefined.

* *

In addition, depth reads from a depth/stencil attachment referring to an image subresource range of a depth/stencil image created with {@link EXTSampleLocations#VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT} using different sample locations than what have been last used to perform depth writes to the image subresources of the same image subresource range produce undefined results.

* *

Similarly, depth writes to a depth/stencil attachment referring to an image subresource range of a depth/stencil image created with {@link EXTSampleLocations#VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT} using different sample locations than what have been last used to perform depth writes to the image subresources of the same image subresource range make the contents of the depth aspect of those image subresources undefined.

* *
See Also
* *

{@link VkAttachmentDescription}, {@link VkAttachmentReference}, {@link VkDescriptorImageInfo}, {@link VkImageCreateInfo}, {@link VkImageMemoryBarrier}, {@link #vkCmdBlitImage CmdBlitImage}, {@link #vkCmdClearColorImage CmdClearColorImage}, {@link #vkCmdClearDepthStencilImage CmdClearDepthStencilImage}, {@link #vkCmdCopyBufferToImage CmdCopyBufferToImage}, {@link #vkCmdCopyImage CmdCopyImage}, {@link #vkCmdCopyImageToBuffer CmdCopyImageToBuffer}, {@link #vkCmdResolveImage CmdResolveImage}
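
As a concrete illustration of a layout transition, here is a minimal sketch (recent LWJGL 3 and static VK10 imports assumed; {@code commandBuffer} and {@code image} are hypothetical handles, the image created with transfer-destination usage) that moves a color image from {@link #VK_IMAGE_LAYOUT_UNDEFINED IMAGE_LAYOUT_UNDEFINED} to {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkImageMemoryBarrier.Buffer barrier = VkImageMemoryBarrier.calloc(1, stack)
            .sType(VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER)
            .srcAccessMask(0)                              // nothing to wait for: contents are undefined
            .dstAccessMask(VK_ACCESS_TRANSFER_WRITE_BIT)
            .oldLayout(VK_IMAGE_LAYOUT_UNDEFINED)          // contents need not be preserved
            .newLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
            .srcQueueFamilyIndex(VK_QUEUE_FAMILY_IGNORED)
            .dstQueueFamilyIndex(VK_QUEUE_FAMILY_IGNORED)
            .image(image);
        barrier.subresourceRange()
            .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
            .baseMipLevel(0)
            .levelCount(1)
            .baseArrayLayer(0)
            .layerCount(1);

        vkCmdPipelineBarrier(commandBuffer,
            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,             // source stage mask
            VK_PIPELINE_STAGE_TRANSFER_BIT,                // destination stage mask
            0, null, null, barrier);
    }

After the barrier executes, transfer commands may access the image in its new layout.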

*/ public static final int VK_IMAGE_LAYOUT_UNDEFINED = 0, VK_IMAGE_LAYOUT_GENERAL = 1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7, VK_IMAGE_LAYOUT_PREINITIALIZED = 8; /** * VkImageViewType - Image view types * *
Description
* *

The exact image view type is partially implicit, based on the image's type and sample count, as well as the view creation parameters as described in the image view compatibility table for {@link #vkCreateImageView CreateImageView}. This table also shows which SPIR-V {@code OpTypeImage} {@code Dim} and {@code Arrayed} parameters correspond to each image view type.

* *
See Also
* *

{@link VkImageViewCreateInfo}

* *
Enum values:
* *
    *
  • {@link #VK_IMAGE_VIEW_TYPE_1D IMAGE_VIEW_TYPE_1D}
  • *
  • {@link #VK_IMAGE_VIEW_TYPE_2D IMAGE_VIEW_TYPE_2D}
  • *
  • {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}
  • *
  • {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}
  • *
  • {@link #VK_IMAGE_VIEW_TYPE_1D_ARRAY IMAGE_VIEW_TYPE_1D_ARRAY}
  • *
  • {@link #VK_IMAGE_VIEW_TYPE_2D_ARRAY IMAGE_VIEW_TYPE_2D_ARRAY}
  • *
  • {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}
  • *
*/ public static final int VK_IMAGE_VIEW_TYPE_1D = 0, VK_IMAGE_VIEW_TYPE_2D = 1, VK_IMAGE_VIEW_TYPE_3D = 2, VK_IMAGE_VIEW_TYPE_CUBE = 3, VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4, VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5, VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6; /** * VkComponentSwizzle - Specify how a component is swizzled * *
Description
* *
    *
  • {@link #VK_COMPONENT_SWIZZLE_IDENTITY COMPONENT_SWIZZLE_IDENTITY} specifies that the component is set to the identity swizzle.
  • *
  • {@link #VK_COMPONENT_SWIZZLE_ZERO COMPONENT_SWIZZLE_ZERO} specifies that the component is set to zero.
  • *
  • {@link #VK_COMPONENT_SWIZZLE_ONE COMPONENT_SWIZZLE_ONE} specifies that the component is set to either 1 or 1.0, depending on whether the type of the image view format is integer or floating-point respectively, as determined by the Format Definition section for each {@code VkFormat}.
  • *
  • {@link #VK_COMPONENT_SWIZZLE_R COMPONENT_SWIZZLE_R} specifies that the component is set to the value of the R component of the image.
  • *
  • {@link #VK_COMPONENT_SWIZZLE_G COMPONENT_SWIZZLE_G} specifies that the component is set to the value of the G component of the image.
  • *
  • {@link #VK_COMPONENT_SWIZZLE_B COMPONENT_SWIZZLE_B} specifies that the component is set to the value of the B component of the image.
  • *
  • {@link #VK_COMPONENT_SWIZZLE_A COMPONENT_SWIZZLE_A} specifies that the component is set to the value of the A component of the image.
  • *
* *

Setting the identity swizzle on a component is equivalent to setting the identity mapping on that component. That is:

* *
Component Mappings Equivalent To {@link #VK_COMPONENT_SWIZZLE_IDENTITY COMPONENT_SWIZZLE_IDENTITY}

Component | Identity Mapping
{@code components.r} | {@link #VK_COMPONENT_SWIZZLE_R COMPONENT_SWIZZLE_R}
{@code components.g} | {@link #VK_COMPONENT_SWIZZLE_G COMPONENT_SWIZZLE_G}
{@code components.b} | {@link #VK_COMPONENT_SWIZZLE_B COMPONENT_SWIZZLE_B}
{@code components.a} | {@link #VK_COMPONENT_SWIZZLE_A COMPONENT_SWIZZLE_A}
* *
See Also
* *

{@link VkComponentMapping}
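
For instance, a minimal sketch (recent LWJGL 3 and static VK10 imports assumed; {@code device}, {@code image} and {@code format} are hypothetical) of creating a 2D image view with the identity component mapping:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkImageViewCreateInfo viewInfo = VkImageViewCreateInfo.calloc(stack)
            .sType(VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO)
            .image(image)
            .viewType(VK_IMAGE_VIEW_TYPE_2D)
            .format(format);
        viewInfo.components()                              // identity swizzle on all four channels
            .r(VK_COMPONENT_SWIZZLE_IDENTITY)
            .g(VK_COMPONENT_SWIZZLE_IDENTITY)
            .b(VK_COMPONENT_SWIZZLE_IDENTITY)
            .a(VK_COMPONENT_SWIZZLE_IDENTITY);
        viewInfo.subresourceRange()
            .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
            .baseMipLevel(0).levelCount(1)
            .baseArrayLayer(0).layerCount(1);

        LongBuffer pView = stack.mallocLong(1);
        int err = vkCreateImageView(device, viewInfo, null, pView);
        // check err == VK_SUCCESS, then pView.get(0) is the VkImageView handle
    }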

*/ public static final int VK_COMPONENT_SWIZZLE_IDENTITY = 0, VK_COMPONENT_SWIZZLE_ZERO = 1, VK_COMPONENT_SWIZZLE_ONE = 2, VK_COMPONENT_SWIZZLE_R = 3, VK_COMPONENT_SWIZZLE_G = 4, VK_COMPONENT_SWIZZLE_B = 5, VK_COMPONENT_SWIZZLE_A = 6; /** * VkPipelineCreateFlagBits - Bitmask controlling how a pipeline is created * *
Description
* *
    *
  • {@link #VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT} specifies that the created pipeline will not be optimized. Using this flag may reduce the time taken to create the pipeline.
  • *
  • {@link #VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT} specifies that the pipeline to be created is allowed to be the parent of a pipeline that will be created in a subsequent call to {@link #vkCreateGraphicsPipelines CreateGraphicsPipelines} or {@link #vkCreateComputePipelines CreateComputePipelines}.
  • *
  • {@link #VK_PIPELINE_CREATE_DERIVATIVE_BIT PIPELINE_CREATE_DERIVATIVE_BIT} specifies that the pipeline to be created will be a child of a previously created parent pipeline.
  • *
  • {@link KHXDeviceGroup#VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHX PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHX} specifies that any shader input variables decorated as {@code DeviceIndex} will be assigned values as if they were decorated as {@code ViewIndex}.
  • *
  • {@link KHXDeviceGroup#VK_PIPELINE_CREATE_DISPATCH_BASE_KHX PIPELINE_CREATE_DISPATCH_BASE_KHX} specifies that a compute pipeline can be used with {@link KHXDeviceGroup#vkCmdDispatchBaseKHX CmdDispatchBaseKHX} with a non-zero base workgroup.
  • *
* *

It is valid to set both {@link #VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT} and {@link #VK_PIPELINE_CREATE_DERIVATIVE_BIT PIPELINE_CREATE_DERIVATIVE_BIT}. This allows a pipeline to be both a parent and possibly a child in a pipeline hierarchy. See Pipeline Derivatives for more information.

* *
See Also
* *

{@code VkPipelineCreateFlags}
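
The parent/child relationship is typically expressed as in the fragment below, a sketch only (recent LWJGL 3 assumed; {@code parentInfo}, {@code childInfo} and {@code parentPipeline} are hypothetical and otherwise filled out elsewhere):

    // Parent pipeline: may be used as the base of later pipelines.
    parentInfo.flags(VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT);

    // Child pipeline: derives from the parent created above.
    childInfo.flags(VK_PIPELINE_CREATE_DERIVATIVE_BIT)
             .basePipelineHandle(parentPipeline)   // handle returned by vkCreateGraphicsPipelines
             .basePipelineIndex(-1);               // -1 because a handle, not an index, is used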

*/ public static final int VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x1, VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x2, VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x4; /** * VkShaderStageFlagBits - Bitmask specifying a pipeline stage * *
Description
* *
    *
  • {@link #VK_SHADER_STAGE_VERTEX_BIT SHADER_STAGE_VERTEX_BIT} specifies the vertex stage.
  • *
  • {@link #VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT SHADER_STAGE_TESSELLATION_CONTROL_BIT} specifies the tessellation control stage.
  • *
  • {@link #VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT SHADER_STAGE_TESSELLATION_EVALUATION_BIT} specifies the tessellation evaluation stage.
  • *
  • {@link #VK_SHADER_STAGE_GEOMETRY_BIT SHADER_STAGE_GEOMETRY_BIT} specifies the geometry stage.
  • *
  • {@link #VK_SHADER_STAGE_FRAGMENT_BIT SHADER_STAGE_FRAGMENT_BIT} specifies the fragment stage.
  • *
  • {@link #VK_SHADER_STAGE_COMPUTE_BIT SHADER_STAGE_COMPUTE_BIT} specifies the compute stage.
  • *
  • {@link #VK_SHADER_STAGE_ALL_GRAPHICS SHADER_STAGE_ALL_GRAPHICS} is a combination of bits used as shorthand to specify all graphics stages defined above (excluding the compute stage).
  • *
  • {@link #VK_SHADER_STAGE_ALL SHADER_STAGE_ALL} is a combination of bits used as shorthand to specify all shader stages supported by the device, including all additional stages which are introduced by extensions.
  • *
* *
See Also
* *

{@link VkPipelineShaderStageCreateInfo}, {@code VkShaderStageFlags}, {@link AMDShaderInfo#vkGetShaderInfoAMD GetShaderInfoAMD}
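
For example, a minimal sketch (recent LWJGL 3 and static VK10 imports assumed; {@code vertexShaderModule} and {@code fragmentShaderModule} are hypothetical handles) of filling the per-stage create infos for a vertex/fragment pipeline:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        ByteBuffer entryPoint = stack.UTF8("main");

        VkPipelineShaderStageCreateInfo.Buffer stages = VkPipelineShaderStageCreateInfo.calloc(2, stack);
        stages.get(0)
            .sType(VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO)
            .stage(VK_SHADER_STAGE_VERTEX_BIT)
            .module(vertexShaderModule)
            .pName(entryPoint);
        stages.get(1)
            .sType(VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO)
            .stage(VK_SHADER_STAGE_FRAGMENT_BIT)
            .module(fragmentShaderModule)
            .pName(entryPoint);
        // stages is later assigned to VkGraphicsPipelineCreateInfo::pStages.
    }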

*/ public static final int VK_SHADER_STAGE_VERTEX_BIT = 0x1, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x2, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x4, VK_SHADER_STAGE_GEOMETRY_BIT = 0x8, VK_SHADER_STAGE_FRAGMENT_BIT = 0x10, VK_SHADER_STAGE_COMPUTE_BIT = 0x20, VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, VK_SHADER_STAGE_ALL = 0x7FFFFFFF; /** * VkVertexInputRate - Specify rate at which vertex attributes are pulled from buffers * *
Description
* *
    *
  • {@link #VK_VERTEX_INPUT_RATE_VERTEX VERTEX_INPUT_RATE_VERTEX} specifies that vertex attribute addressing is a function of the vertex index.
  • *
  • {@link #VK_VERTEX_INPUT_RATE_INSTANCE VERTEX_INPUT_RATE_INSTANCE} specifies that vertex attribute addressing is a function of the instance index.
  • *
* *
See Also
* *

{@link VkVertexInputBindingDescription}
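
A minimal sketch (recent LWJGL 3 assumed; the strides and two-binding layout are hypothetical) showing one per-vertex and one per-instance binding:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkVertexInputBindingDescription.Buffer bindings = VkVertexInputBindingDescription.calloc(2, stack);
        bindings.get(0)
            .binding(0)
            .stride(20)                                 // e.g. vec3 position + vec2 uv
            .inputRate(VK_VERTEX_INPUT_RATE_VERTEX);    // advanced once per vertex index
        bindings.get(1)
            .binding(1)
            .stride(16)                                 // e.g. vec4 per-instance color
            .inputRate(VK_VERTEX_INPUT_RATE_INSTANCE);  // advanced once per instance index
        // bindings is later assigned to VkPipelineVertexInputStateCreateInfo::pVertexBindingDescriptions.
    }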

*/ public static final int VK_VERTEX_INPUT_RATE_VERTEX = 0, VK_VERTEX_INPUT_RATE_INSTANCE = 1; /** * VkPrimitiveTopology - Supported primitive topologies * *
See Also
* *

{@link VkPipelineInputAssemblyStateCreateInfo}

* *
Enum values:
* *
    *
  • {@link #VK_PRIMITIVE_TOPOLOGY_POINT_LIST PRIMITIVE_TOPOLOGY_POINT_LIST}
  • *
  • {@link #VK_PRIMITIVE_TOPOLOGY_LINE_LIST PRIMITIVE_TOPOLOGY_LINE_LIST}
  • *
  • {@link #VK_PRIMITIVE_TOPOLOGY_LINE_STRIP PRIMITIVE_TOPOLOGY_LINE_STRIP}
  • *
  • {@link #VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST PRIMITIVE_TOPOLOGY_TRIANGLE_LIST}
  • *
  • {@link #VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP}
  • *
  • {@link #VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN PRIMITIVE_TOPOLOGY_TRIANGLE_FAN}
  • *
  • {@link #VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY}
  • *
  • {@link #VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY}
  • *
  • {@link #VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY}
  • *
  • {@link #VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY}
  • *
  • {@link #VK_PRIMITIVE_TOPOLOGY_PATCH_LIST PRIMITIVE_TOPOLOGY_PATCH_LIST}
  • *
*/ public static final int VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0, VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1, VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5, VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6, VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10; /** * VkPolygonMode - Control polygon rasterization mode * *
Description
* *
    *
  • {@link #VK_POLYGON_MODE_POINT POLYGON_MODE_POINT} specifies that polygon vertices are drawn as points.
  • *
  • {@link #VK_POLYGON_MODE_LINE POLYGON_MODE_LINE} specifies that polygon edges are drawn as line segments.
  • *
  • {@link #VK_POLYGON_MODE_FILL POLYGON_MODE_FILL} specifies that polygons are rendered using the polygon rasterization rules in this section.
  • *
  • {@link NVFillRectangle#VK_POLYGON_MODE_FILL_RECTANGLE_NV POLYGON_MODE_FILL_RECTANGLE_NV} specifies that polygons are rendered using polygon rasterization rules, modified to consider a sample within the primitive if the sample location is inside the axis-aligned bounding box of the triangle after projection. Note that the barycentric weights used in attribute interpolation can extend outside the range [0,1] when these primitives are shaded. Special treatment is given to a sample position on the boundary edge of the bounding box. In such a case, if two rectangles lie on either side of a common edge (with identical endpoints) on which a sample position lies, then exactly one of the triangles must produce a fragment that covers that sample during rasterization.

    Polygons rendered in {@link NVFillRectangle#VK_POLYGON_MODE_FILL_RECTANGLE_NV POLYGON_MODE_FILL_RECTANGLE_NV} mode may be clipped by the frustum or by user clip planes. If clipping is applied, the triangle is culled rather than clipped.

    Area calculation and facingness are determined for {@link NVFillRectangle#VK_POLYGON_MODE_FILL_RECTANGLE_NV POLYGON_MODE_FILL_RECTANGLE_NV} mode using the triangle's vertices.

  • *
* *

These modes affect only the final rasterization of polygons: in particular, a polygon's vertices are shaded and the polygon is clipped and possibly culled before these modes are applied.

* *
See Also
* *

{@link VkPipelineRasterizationStateCreateInfo}

*/ public static final int VK_POLYGON_MODE_FILL = 0, VK_POLYGON_MODE_LINE = 1, VK_POLYGON_MODE_POINT = 2; /** * VkCullModeFlagBits - Bitmask controlling triangle culling * *
Description
* *
    *
  • {@link #VK_CULL_MODE_NONE CULL_MODE_NONE} specifies that no triangles are discarded
  • *
  • {@link #VK_CULL_MODE_FRONT_BIT CULL_MODE_FRONT_BIT} specifies that front-facing triangles are discarded
  • *
  • {@link #VK_CULL_MODE_BACK_BIT CULL_MODE_BACK_BIT} specifies that back-facing triangles are discarded
  • *
  • {@link #VK_CULL_MODE_FRONT_AND_BACK CULL_MODE_FRONT_AND_BACK} specifies that all triangles are discarded.
  • *
* *

Following culling, fragments are produced for any triangles which have not been discarded.

* *
See Also
* *

{@code VkCullModeFlags}

*/ public static final int VK_CULL_MODE_NONE = 0, VK_CULL_MODE_FRONT_BIT = 0x1, VK_CULL_MODE_BACK_BIT = 0x2, VK_CULL_MODE_FRONT_AND_BACK = 0x00000003; /** * VkFrontFace - Interpret polygon front-facing orientation * *
Description
* *
    *
  • {@link #VK_FRONT_FACE_COUNTER_CLOCKWISE FRONT_FACE_COUNTER_CLOCKWISE} specifies that a triangle with positive area is considered front-facing.
  • *
  • {@link #VK_FRONT_FACE_CLOCKWISE FRONT_FACE_CLOCKWISE} specifies that a triangle with negative area is considered front-facing.
  • *
* *

Any triangle which is not front-facing is back-facing, including zero-area triangles.

* *
See Also
* *

{@link VkPipelineRasterizationStateCreateInfo}
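
Putting {@code VkPolygonMode}, {@code VkCullModeFlagBits} and {@code VkFrontFace} together, a minimal sketch (recent LWJGL 3 and static VK10 imports assumed) of a typical rasterization state:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkPipelineRasterizationStateCreateInfo rasterization = VkPipelineRasterizationStateCreateInfo.calloc(stack)
            .sType(VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO)
            .polygonMode(VK_POLYGON_MODE_FILL)           // rasterize filled polygons
            .cullMode(VK_CULL_MODE_BACK_BIT)             // discard back-facing triangles
            .frontFace(VK_FRONT_FACE_COUNTER_CLOCKWISE)  // positive-area (CCW) triangles are front-facing
            .lineWidth(1.0f);
        // rasterization is later assigned to VkGraphicsPipelineCreateInfo::pRasterizationState.
    }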

*/ public static final int VK_FRONT_FACE_COUNTER_CLOCKWISE = 0, VK_FRONT_FACE_CLOCKWISE = 1; /** * VkCompareOp - Stencil comparison function * *
Description
* *
    *
  • {@link #VK_COMPARE_OP_NEVER COMPARE_OP_NEVER} specifies that the test never passes.
  • *
  • {@link #VK_COMPARE_OP_LESS COMPARE_OP_LESS} specifies that the test passes when R < S.
  • *
  • {@link #VK_COMPARE_OP_EQUAL COMPARE_OP_EQUAL} specifies that the test passes when R = S.
  • *
  • {@link #VK_COMPARE_OP_LESS_OR_EQUAL COMPARE_OP_LESS_OR_EQUAL} specifies that the test passes when R ≤ S.
  • *
  • {@link #VK_COMPARE_OP_GREATER COMPARE_OP_GREATER} specifies that the test passes when R > S.
  • *
  • {@link #VK_COMPARE_OP_NOT_EQUAL COMPARE_OP_NOT_EQUAL} specifies that the test passes when R ≠ S.
  • *
  • {@link #VK_COMPARE_OP_GREATER_OR_EQUAL COMPARE_OP_GREATER_OR_EQUAL} specifies that the test passes when R ≥ S.
  • *
  • {@link #VK_COMPARE_OP_ALWAYS COMPARE_OP_ALWAYS} specifies that the test always passes.
  • *
* *
See Also
* *

{@link VkPipelineDepthStencilStateCreateInfo}, {@link VkSamplerCreateInfo}, {@link VkStencilOpState}
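
For example, a minimal sketch (recent LWJGL 3 assumed) of a depth-test configuration that passes when the incoming depth value is less than or equal to the stored value:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkPipelineDepthStencilStateCreateInfo depthStencil = VkPipelineDepthStencilStateCreateInfo.calloc(stack)
            .sType(VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO)
            .depthTestEnable(true)
            .depthWriteEnable(true)
            .depthCompareOp(VK_COMPARE_OP_LESS_OR_EQUAL);  // passes when R <= S
        // depthStencil is later assigned to VkGraphicsPipelineCreateInfo::pDepthStencilState.
    }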

*/ public static final int VK_COMPARE_OP_NEVER = 0, VK_COMPARE_OP_LESS = 1, VK_COMPARE_OP_EQUAL = 2, VK_COMPARE_OP_LESS_OR_EQUAL = 3, VK_COMPARE_OP_GREATER = 4, VK_COMPARE_OP_NOT_EQUAL = 5, VK_COMPARE_OP_GREATER_OR_EQUAL = 6, VK_COMPARE_OP_ALWAYS = 7; /** * VkStencilOp - Stencil action function * *
Description
* *
    *
  • {@link #VK_STENCIL_OP_KEEP STENCIL_OP_KEEP} keeps the current value.
  • *
  • {@link #VK_STENCIL_OP_ZERO STENCIL_OP_ZERO} sets the value to 0.
  • *
  • {@link #VK_STENCIL_OP_REPLACE STENCIL_OP_REPLACE} sets the value to {@code reference}.
  • *
  • {@link #VK_STENCIL_OP_INCREMENT_AND_CLAMP STENCIL_OP_INCREMENT_AND_CLAMP} increments the current value and clamps to the maximum representable unsigned value.
  • *
  • {@link #VK_STENCIL_OP_DECREMENT_AND_CLAMP STENCIL_OP_DECREMENT_AND_CLAMP} decrements the current value and clamps to 0.
  • *
  • {@link #VK_STENCIL_OP_INVERT STENCIL_OP_INVERT} bitwise-inverts the current value.
  • *
  • {@link #VK_STENCIL_OP_INCREMENT_AND_WRAP STENCIL_OP_INCREMENT_AND_WRAP} increments the current value and wraps to 0 when the maximum value would have been exceeded.
  • *
  • {@link #VK_STENCIL_OP_DECREMENT_AND_WRAP STENCIL_OP_DECREMENT_AND_WRAP} decrements the current value and wraps to the maximum possible value when the value would go below 0.
  • *
* *

For purposes of increment and decrement, the stencil bits are considered as an unsigned integer.

* *

If the stencil test fails, the sample's coverage bit is cleared in the fragment. If there is no stencil framebuffer attachment, stencil modification cannot occur, and it is as if the stencil tests always pass.

* *

If the stencil test passes, the {@code writeMask} member of the {@link VkStencilOpState} structures controls how the updated stencil value is written to the stencil framebuffer attachment.

* *

The least significant s bits of {@code writeMask}, where s is the number of bits in the stencil framebuffer attachment, specify an integer mask. Where a 1 appears in this mask, the corresponding bit in the stencil value in the depth/stencil attachment is written; where a 0 appears, the bit is not written. The {@code writeMask} value uses either the front-facing or back-facing state based on the facingness of the fragment. Fragments generated by front-facing primitives use the front mask and fragments generated by back-facing primitives use the back mask.

* *
See Also
* *

{@link VkStencilOpState}
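
A minimal sketch (recent LWJGL 3 assumed; the {@code depthStencil} object from the previous example is reused) of a stencil configuration that writes the reference value wherever both the stencil and depth tests pass:

    depthStencil.stencilTestEnable(true);
    depthStencil.front()                                  // state used by front-facing fragments
        .failOp(VK_STENCIL_OP_KEEP)                       // stencil test failed: keep the stored value
        .depthFailOp(VK_STENCIL_OP_KEEP)                  // stencil passed, depth failed: keep
        .passOp(VK_STENCIL_OP_REPLACE)                    // both passed: write the reference value
        .compareOp(VK_COMPARE_OP_ALWAYS)
        .compareMask(0xFF)
        .writeMask(0xFF)                                  // all stencil bits may be written
        .reference(1);
    depthStencil.back(depthStencil.front());              // mirror the same state for back-facing fragments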

*/ public static final int VK_STENCIL_OP_KEEP = 0, VK_STENCIL_OP_ZERO = 1, VK_STENCIL_OP_REPLACE = 2, VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3, VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4, VK_STENCIL_OP_INVERT = 5, VK_STENCIL_OP_INCREMENT_AND_WRAP = 6, VK_STENCIL_OP_DECREMENT_AND_WRAP = 7; /** * VkLogicOp - Framebuffer logical operations * *
Description
* *

The logical operations supported by Vulkan are summarized in the following table in which

* *
    *
  • ¬ is bitwise invert,
  • *
  • ∧ is bitwise and,
  • *
  • ∨ is bitwise or,
  • *
  • ⊕ is bitwise exclusive or,
  • *
  • s is the fragment’s Rs0, Gs0, Bs0 or As0 component value for the fragment output corresponding to the color attachment being updated, and
  • *
  • d is the color attachment’s R, G, B or A component value:
  • *
* *
Logical Operations

Mode | Operation
{@link #VK_LOGIC_OP_CLEAR LOGIC_OP_CLEAR} | 0
{@link #VK_LOGIC_OP_AND LOGIC_OP_AND} | s ∧ d
{@link #VK_LOGIC_OP_AND_REVERSE LOGIC_OP_AND_REVERSE} | s ∧ ¬ d
{@link #VK_LOGIC_OP_COPY LOGIC_OP_COPY} | s
{@link #VK_LOGIC_OP_AND_INVERTED LOGIC_OP_AND_INVERTED} | ¬ s ∧ d
{@link #VK_LOGIC_OP_NO_OP LOGIC_OP_NO_OP} | d
{@link #VK_LOGIC_OP_XOR LOGIC_OP_XOR} | s ⊕ d
{@link #VK_LOGIC_OP_OR LOGIC_OP_OR} | s ∨ d
{@link #VK_LOGIC_OP_NOR LOGIC_OP_NOR} | ¬ (s ∨ d)
{@link #VK_LOGIC_OP_EQUIVALENT LOGIC_OP_EQUIVALENT} | ¬ (s ⊕ d)
{@link #VK_LOGIC_OP_INVERT LOGIC_OP_INVERT} | ¬ d
{@link #VK_LOGIC_OP_OR_REVERSE LOGIC_OP_OR_REVERSE} | s ∨ ¬ d
{@link #VK_LOGIC_OP_COPY_INVERTED LOGIC_OP_COPY_INVERTED} | ¬ s
{@link #VK_LOGIC_OP_OR_INVERTED LOGIC_OP_OR_INVERTED} | ¬ s ∨ d
{@link #VK_LOGIC_OP_NAND LOGIC_OP_NAND} | ¬ (s ∧ d)
{@link #VK_LOGIC_OP_SET LOGIC_OP_SET} | all 1s
* *

The result of the logical operation is then written to the color attachment as controlled by the component write mask, described in Blend Operations.

* *
See Also
* *

{@link VkPipelineColorBlendStateCreateInfo}

*/ public static final int VK_LOGIC_OP_CLEAR = 0, VK_LOGIC_OP_AND = 1, VK_LOGIC_OP_AND_REVERSE = 2, VK_LOGIC_OP_COPY = 3, VK_LOGIC_OP_AND_INVERTED = 4, VK_LOGIC_OP_NO_OP = 5, VK_LOGIC_OP_XOR = 6, VK_LOGIC_OP_OR = 7, VK_LOGIC_OP_NOR = 8, VK_LOGIC_OP_EQUIVALENT = 9, VK_LOGIC_OP_INVERT = 10, VK_LOGIC_OP_OR_REVERSE = 11, VK_LOGIC_OP_COPY_INVERTED = 12, VK_LOGIC_OP_OR_INVERTED = 13, VK_LOGIC_OP_NAND = 14, VK_LOGIC_OP_SET = 15; /** * VkBlendFactor - Framebuffer blending factors * *
Description
* *

The semantics of each enum value is described in the table below:

* *
Blend Factors

VkBlendFactor | RGB Blend Factors (Sr,Sg,Sb) or (Dr,Dg,Db) | Alpha Blend Factor (Sa or Da)
{@link #VK_BLEND_FACTOR_ZERO BLEND_FACTOR_ZERO} | (0,0,0) | 0
{@link #VK_BLEND_FACTOR_ONE BLEND_FACTOR_ONE} | (1,1,1) | 1
{@link #VK_BLEND_FACTOR_SRC_COLOR BLEND_FACTOR_SRC_COLOR} | (Rs0,Gs0,Bs0) | As0
{@link #VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR BLEND_FACTOR_ONE_MINUS_SRC_COLOR} | (1-Rs0,1-Gs0,1-Bs0) | 1-As0
{@link #VK_BLEND_FACTOR_DST_COLOR BLEND_FACTOR_DST_COLOR} | (Rd,Gd,Bd) | Ad
{@link #VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR BLEND_FACTOR_ONE_MINUS_DST_COLOR} | (1-Rd,1-Gd,1-Bd) | 1-Ad
{@link #VK_BLEND_FACTOR_SRC_ALPHA BLEND_FACTOR_SRC_ALPHA} | (As0,As0,As0) | As0
{@link #VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA BLEND_FACTOR_ONE_MINUS_SRC_ALPHA} | (1-As0,1-As0,1-As0) | 1-As0
{@link #VK_BLEND_FACTOR_DST_ALPHA BLEND_FACTOR_DST_ALPHA} | (Ad,Ad,Ad) | Ad
{@link #VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA BLEND_FACTOR_ONE_MINUS_DST_ALPHA} | (1-Ad,1-Ad,1-Ad) | 1-Ad
{@link #VK_BLEND_FACTOR_CONSTANT_COLOR BLEND_FACTOR_CONSTANT_COLOR} | (Rc,Gc,Bc) | Ac
{@link #VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR} | (1-Rc,1-Gc,1-Bc) | 1-Ac
{@link #VK_BLEND_FACTOR_CONSTANT_ALPHA BLEND_FACTOR_CONSTANT_ALPHA} | (Ac,Ac,Ac) | Ac
{@link #VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA} | (1-Ac,1-Ac,1-Ac) | 1-Ac
{@link #VK_BLEND_FACTOR_SRC_ALPHA_SATURATE BLEND_FACTOR_SRC_ALPHA_SATURATE} | (f,f,f); f = min(As0,1-Ad) | 1
{@link #VK_BLEND_FACTOR_SRC1_COLOR BLEND_FACTOR_SRC1_COLOR} | (Rs1,Gs1,Bs1) | As1
{@link #VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR BLEND_FACTOR_ONE_MINUS_SRC1_COLOR} | (1-Rs1,1-Gs1,1-Bs1) | 1-As1
{@link #VK_BLEND_FACTOR_SRC1_ALPHA BLEND_FACTOR_SRC1_ALPHA} | (As1,As1,As1) | As1
{@link #VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA} | (1-As1,1-As1,1-As1) | 1-As1
* *

In this table, the following conventions are used:

* *
    *
  • Rs0,Gs0,Bs0 and As0 represent the first source color R, G, B, and A components, respectively, for the fragment output location corresponding to the color attachment being blended.
  • *
  • Rs1,Gs1,Bs1 and As1 represent the second source color R, G, B, and A components, respectively, used in dual source blending modes, for the fragment output location corresponding to the color attachment being blended.
  • *
  • Rd,Gd,Bd and Ad represent the R, G, B, and A components of the destination color. That is, the color currently in the corresponding color attachment for this fragment/sample.
  • *
  • Rc,Gc,Bc and Ac represent the blend constant R, G, B, and A components, respectively.
  • *
* *
See Also
* *

{@link VkPipelineColorBlendAttachmentState}

*/ public static final int VK_BLEND_FACTOR_ZERO = 0, VK_BLEND_FACTOR_ONE = 1, VK_BLEND_FACTOR_SRC_COLOR = 2, VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, VK_BLEND_FACTOR_DST_COLOR = 4, VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5, VK_BLEND_FACTOR_SRC_ALPHA = 6, VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7, VK_BLEND_FACTOR_DST_ALPHA = 8, VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9, VK_BLEND_FACTOR_CONSTANT_COLOR = 10, VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11, VK_BLEND_FACTOR_CONSTANT_ALPHA = 12, VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13, VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14, VK_BLEND_FACTOR_SRC1_COLOR = 15, VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16, VK_BLEND_FACTOR_SRC1_ALPHA = 17, VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18; /** * VkBlendOp - Framebuffer blending operations * *
Description
* *

The semantics of each basic blend operation are described in the table below:

* *
Basic Blend Operations

VkBlendOp | RGB Components | Alpha Component
{@link #VK_BLEND_OP_ADD BLEND_OP_ADD} | R = Rs0 × Sr + Rd × Dr; G = Gs0 × Sg + Gd × Dg; B = Bs0 × Sb + Bd × Db | A = As0 × Sa + Ad × Da
{@link #VK_BLEND_OP_SUBTRACT BLEND_OP_SUBTRACT} | R = Rs0 × Sr - Rd × Dr; G = Gs0 × Sg - Gd × Dg; B = Bs0 × Sb - Bd × Db | A = As0 × Sa - Ad × Da
{@link #VK_BLEND_OP_REVERSE_SUBTRACT BLEND_OP_REVERSE_SUBTRACT} | R = Rd × Dr - Rs0 × Sr; G = Gd × Dg - Gs0 × Sg; B = Bd × Db - Bs0 × Sb | A = Ad × Da - As0 × Sa
{@link #VK_BLEND_OP_MIN BLEND_OP_MIN} | R = min(Rs0,Rd); G = min(Gs0,Gd); B = min(Bs0,Bd) | A = min(As0,Ad)
{@link #VK_BLEND_OP_MAX BLEND_OP_MAX} | R = max(Rs0,Rd); G = max(Gs0,Gd); B = max(Bs0,Bd) | A = max(As0,Ad)
* *

In this table, the following conventions are used:

* *
    *
  • Rs0, Gs0, Bs0 and As0 represent the first source color R, G, B, and A components, respectively.
  • *
  • Rd, Gd, Bd and Ad represent the R, G, B, and A components of the destination color. That is, the color currently in the corresponding color attachment for this fragment/sample.
  • *
  • Sr, Sg, Sb and Sa represent the source blend factor R, G, B, and A components, respectively.
  • *
  • Dr, Dg, Db and Da represent the destination blend factor R, G, B, and A components, respectively.
  • *
* *

The blending operation produces a new set of values R, G, B and A, which are written to the framebuffer attachment. If blending is not enabled for this attachment, then R, G, B and A are assigned Rs0, Gs0, Bs0 and As0, respectively.

* *

If the color attachment is fixed-point, the components of the source and destination values and blend factors are each clamped to [0,1] or [-1,1] respectively for an unsigned normalized or signed normalized color attachment prior to evaluating the blend operations. If the color attachment is floating-point, no clamping occurs.

* *
See Also
* *

{@link VkPipelineColorBlendAttachmentState}
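
Combining {@code VkBlendFactor} and {@code VkBlendOp}, a minimal sketch (recent LWJGL 3 and static VK10 imports assumed) of conventional alpha blending on a single color attachment, with all four channels writable:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkPipelineColorBlendAttachmentState.Buffer attachment = VkPipelineColorBlendAttachmentState.calloc(1, stack)
            .blendEnable(true)
            .srcColorBlendFactor(VK_BLEND_FACTOR_SRC_ALPHA)            // Sr,Sg,Sb = As0
            .dstColorBlendFactor(VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)  // Dr,Dg,Db = 1-As0
            .colorBlendOp(VK_BLEND_OP_ADD)
            .srcAlphaBlendFactor(VK_BLEND_FACTOR_ONE)
            .dstAlphaBlendFactor(VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA)
            .alphaBlendOp(VK_BLEND_OP_ADD)
            .colorWriteMask(VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT
                          | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT);

        VkPipelineColorBlendStateCreateInfo colorBlend = VkPipelineColorBlendStateCreateInfo.calloc(stack)
            .sType(VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO)
            .pAttachments(attachment);                    // one entry per color attachment in the subpass
    }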

*/ public static final int VK_BLEND_OP_ADD = 0, VK_BLEND_OP_SUBTRACT = 1, VK_BLEND_OP_REVERSE_SUBTRACT = 2, VK_BLEND_OP_MIN = 3, VK_BLEND_OP_MAX = 4; /** * VkColorComponentFlagBits - Bitmask controlling which components are written to the framebuffer * *
Description
* *
    *
  • {@link #VK_COLOR_COMPONENT_R_BIT COLOR_COMPONENT_R_BIT} specifies that the R value is written to the color attachment for the appropriate sample. Otherwise, the value in memory is unmodified.
  • *
  • {@link #VK_COLOR_COMPONENT_G_BIT COLOR_COMPONENT_G_BIT} specifies that the G value is written to the color attachment for the appropriate sample. Otherwise, the value in memory is unmodified.
  • *
  • {@link #VK_COLOR_COMPONENT_B_BIT COLOR_COMPONENT_B_BIT} specifies that the B value is written to the color attachment for the appropriate sample. Otherwise, the value in memory is unmodified.
  • *
  • {@link #VK_COLOR_COMPONENT_A_BIT COLOR_COMPONENT_A_BIT} specifies that the A value is written to the color attachment for the appropriate sample. Otherwise, the value in memory is unmodified.
  • *
* *

The color write mask operation is applied regardless of whether blending is enabled.

* *
See Also
* *

{@code VkColorComponentFlags}

*/ public static final int VK_COLOR_COMPONENT_R_BIT = 0x1, VK_COLOR_COMPONENT_G_BIT = 0x2, VK_COLOR_COMPONENT_B_BIT = 0x4, VK_COLOR_COMPONENT_A_BIT = 0x8; /** * VkDynamicState - Indicate which dynamic state is taken from dynamic state commands * *
Description
* *
    *
  • {@link #VK_DYNAMIC_STATE_VIEWPORT DYNAMIC_STATE_VIEWPORT} specifies that the {@code pViewports} state in {@link VkPipelineViewportStateCreateInfo} will be ignored and must be set dynamically with {@link #vkCmdSetViewport CmdSetViewport} before any draw commands. The number of viewports used by a pipeline is still specified by the {@code viewportCount} member of {@link VkPipelineViewportStateCreateInfo}.
  • *
  • {@link #VK_DYNAMIC_STATE_SCISSOR DYNAMIC_STATE_SCISSOR} specifies that the {@code pScissors} state in {@link VkPipelineViewportStateCreateInfo} will be ignored and must be set dynamically with {@link #vkCmdSetScissor CmdSetScissor} before any draw commands. The number of scissor rectangles used by a pipeline is still specified by the {@code scissorCount} member of {@link VkPipelineViewportStateCreateInfo}.
  • *
  • {@link #VK_DYNAMIC_STATE_LINE_WIDTH DYNAMIC_STATE_LINE_WIDTH} specifies that the {@code lineWidth} state in {@link VkPipelineRasterizationStateCreateInfo} will be ignored and must be set dynamically with {@link #vkCmdSetLineWidth CmdSetLineWidth} before any draw commands that generate line primitives for the rasterizer.
  • *
  • {@link #VK_DYNAMIC_STATE_DEPTH_BIAS DYNAMIC_STATE_DEPTH_BIAS} specifies that the {@code depthBiasConstantFactor}, {@code depthBiasClamp} and {@code depthBiasSlopeFactor} states in {@link VkPipelineRasterizationStateCreateInfo} will be ignored and must be set dynamically with {@link #vkCmdSetDepthBias CmdSetDepthBias} before any draws are performed with {@code depthBiasEnable} in {@link VkPipelineRasterizationStateCreateInfo} set to {@link #VK_TRUE TRUE}.
  • *
  • {@link #VK_DYNAMIC_STATE_BLEND_CONSTANTS DYNAMIC_STATE_BLEND_CONSTANTS} specifies that the {@code blendConstants} state in {@link VkPipelineColorBlendStateCreateInfo} will be ignored and must be set dynamically with {@link #vkCmdSetBlendConstants CmdSetBlendConstants} before any draws are performed with a pipeline state with {@link VkPipelineColorBlendAttachmentState} member {@code blendEnable} set to {@link #VK_TRUE TRUE} and any of the blend functions using a constant blend color.
  • *
  • {@link #VK_DYNAMIC_STATE_DEPTH_BOUNDS DYNAMIC_STATE_DEPTH_BOUNDS} specifies that the {@code minDepthBounds} and {@code maxDepthBounds} states of {@link VkPipelineDepthStencilStateCreateInfo} will be ignored and must be set dynamically with {@link #vkCmdSetDepthBounds CmdSetDepthBounds} before any draws are performed with a pipeline state with {@link VkPipelineDepthStencilStateCreateInfo} member {@code depthBoundsTestEnable} set to {@link #VK_TRUE TRUE}.
  • *
  • {@link #VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK DYNAMIC_STATE_STENCIL_COMPARE_MASK} specifies that the {@code compareMask} state in {@link VkPipelineDepthStencilStateCreateInfo} for both {@code front} and {@code back} will be ignored and must be set dynamically with {@link #vkCmdSetStencilCompareMask CmdSetStencilCompareMask} before any draws are performed with a pipeline state with {@link VkPipelineDepthStencilStateCreateInfo} member {@code stencilTestEnable} set to {@link #VK_TRUE TRUE}
  • *
  • {@link #VK_DYNAMIC_STATE_STENCIL_WRITE_MASK DYNAMIC_STATE_STENCIL_WRITE_MASK} specifies that the {@code writeMask} state in {@link VkPipelineDepthStencilStateCreateInfo} for both {@code front} and {@code back} will be ignored and must be set dynamically with {@link #vkCmdSetStencilWriteMask CmdSetStencilWriteMask} before any draws are performed with a pipeline state with {@link VkPipelineDepthStencilStateCreateInfo} member {@code stencilTestEnable} set to {@link #VK_TRUE TRUE}
  • *
  • {@link #VK_DYNAMIC_STATE_STENCIL_REFERENCE DYNAMIC_STATE_STENCIL_REFERENCE} specifies that the {@code reference} state in {@link VkPipelineDepthStencilStateCreateInfo} for both {@code front} and {@code back} will be ignored and must be set dynamically with {@link #vkCmdSetStencilReference CmdSetStencilReference} before any draws are performed with a pipeline state with {@link VkPipelineDepthStencilStateCreateInfo} member {@code stencilTestEnable} set to {@link #VK_TRUE TRUE}
  • *
  • {@link NVClipSpaceWScaling#VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV DYNAMIC_STATE_VIEWPORT_W_SCALING_NV} specifies that the {@code pViewportScalings} state in {@link VkPipelineViewportWScalingStateCreateInfoNV} will be ignored and must be set dynamically with {@link NVClipSpaceWScaling#vkCmdSetViewportWScalingNV CmdSetViewportWScalingNV} before any draws are performed with a pipeline state with {@code VkPipelineViewportWScalingStateCreateInfo} member {@code viewportScalingEnable} set to {@link #VK_TRUE TRUE}
  • *
  • {@link EXTDiscardRectangles#VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT DYNAMIC_STATE_DISCARD_RECTANGLE_EXT} specifies that the {@code pDiscardRectangles} state in {@link VkPipelineDiscardRectangleStateCreateInfoEXT} will be ignored and must be set dynamically with {@link EXTDiscardRectangles#vkCmdSetDiscardRectangleEXT CmdSetDiscardRectangleEXT} before any draw or clear commands. The {@code VkDiscardRectangleModeEXT} and the number of active discard rectangles is still specified by the {@code discardRectangleMode} and {@code discardRectangleCount} members of {@link VkPipelineDiscardRectangleStateCreateInfoEXT}.
  • *
  • {@link EXTSampleLocations#VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT} specifies that the {@code sampleLocationsInfo} state in {@link VkPipelineSampleLocationsStateCreateInfoEXT} will be ignored and must be set dynamically with {@link EXTSampleLocations#vkCmdSetSampleLocationsEXT CmdSetSampleLocationsEXT} before any draw or clear commands. Enabling custom sample locations is still indicated by the {@code sampleLocationsEnable} member of {@link VkPipelineSampleLocationsStateCreateInfoEXT}.
  • *
* *
See Also
* *

{@link VkPipelineDynamicStateCreateInfo}
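
For example, a minimal sketch (recent LWJGL 3 and static VK10 imports assumed; {@code commandBuffer} is hypothetical and the 1280x720 extent is illustrative) of declaring the viewport and scissor as dynamic at pipeline-creation time and supplying them at record time:

    // At pipeline creation:
    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkPipelineDynamicStateCreateInfo dynamicState = VkPipelineDynamicStateCreateInfo.calloc(stack)
            .sType(VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO)
            .pDynamicStates(stack.ints(VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR));
        // dynamicState is assigned to VkGraphicsPipelineCreateInfo::pDynamicState.
    }

    // At command-buffer recording time, before the first draw:
    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkViewport.Buffer viewport = VkViewport.calloc(1, stack)
            .width(1280.0f).height(720.0f)
            .minDepth(0.0f).maxDepth(1.0f);
        vkCmdSetViewport(commandBuffer, 0, viewport);

        VkRect2D.Buffer scissor = VkRect2D.calloc(1, stack);
        scissor.extent().set(1280, 720);
        vkCmdSetScissor(commandBuffer, 0, scissor);
    }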

*/ public static final int VK_DYNAMIC_STATE_VIEWPORT = 0, VK_DYNAMIC_STATE_SCISSOR = 1, VK_DYNAMIC_STATE_LINE_WIDTH = 2, VK_DYNAMIC_STATE_DEPTH_BIAS = 3, VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4, VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7, VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8; /** * VkFilter - Specify filters used for texture lookups * *
Description
* *
    *
  • {@link #VK_FILTER_NEAREST FILTER_NEAREST} specifies nearest filtering.
  • *
  • {@link #VK_FILTER_LINEAR FILTER_LINEAR} specifies linear filtering.
  • *
* *

These filters are described in detail in Texel Filtering.

* *
See Also
* *

{@link VkSamplerCreateInfo}, {@link VkSamplerYcbcrConversionCreateInfoKHR}, {@link #vkCmdBlitImage CmdBlitImage}

*/ public static final int VK_FILTER_NEAREST = 0, VK_FILTER_LINEAR = 1; /** * VkSamplerMipmapMode - Specify mipmap mode used for texture lookups * *
Description
* *
    *
  • {@link #VK_SAMPLER_MIPMAP_MODE_NEAREST SAMPLER_MIPMAP_MODE_NEAREST} specifies nearest filtering.
  • *
  • {@link #VK_SAMPLER_MIPMAP_MODE_LINEAR SAMPLER_MIPMAP_MODE_LINEAR} specifies linear filtering.
  • *
* *

These modes are described in detail in Texel Filtering.

* *
See Also
* *

{@link VkSamplerCreateInfo}

*/ public static final int VK_SAMPLER_MIPMAP_MODE_NEAREST = 0, VK_SAMPLER_MIPMAP_MODE_LINEAR = 1; /** * VkSamplerAddressMode - Specify behavior of sampling with texture coordinates outside an image * *
Description
* *
    *
  • {@link #VK_SAMPLER_ADDRESS_MODE_REPEAT SAMPLER_ADDRESS_MODE_REPEAT} specifies that the repeat wrap mode will be used.
  • *
  • {@link #VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT} specifies that the mirrored repeat wrap mode will be used.
  • *
  • {@link #VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE} specifies that the clamp to edge wrap mode will be used.
  • *
  • {@link #VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER} specifies that the clamp to border wrap mode will be used.
  • *
  • {@link KHRSamplerMirrorClampToEdge#VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE} specifies that the mirror clamp to edge wrap mode will be used. This is only valid if the {@link KHRSamplerMirrorClampToEdge VK_KHR_sampler_mirror_clamp_to_edge} extension is enabled.
  • *
* *
See Also
* *

{@link VkSamplerCreateInfo}

*/ public static final int VK_SAMPLER_ADDRESS_MODE_REPEAT = 0, VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3; /** * VkBorderColor - Specify border color used for texture lookups * *
Description
* *
    *
  • {@link #VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK BORDER_COLOR_FLOAT_TRANSPARENT_BLACK} specifies a transparent, floating-point format, black color.
  • *
  • {@link #VK_BORDER_COLOR_INT_TRANSPARENT_BLACK BORDER_COLOR_INT_TRANSPARENT_BLACK} specifies a transparent, integer format, black color.
  • *
  • {@link #VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK BORDER_COLOR_FLOAT_OPAQUE_BLACK} specifies an opaque, floating-point format, black color.
  • *
  • {@link #VK_BORDER_COLOR_INT_OPAQUE_BLACK BORDER_COLOR_INT_OPAQUE_BLACK} specifies an opaque, integer format, black color.
  • *
  • {@link #VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE BORDER_COLOR_FLOAT_OPAQUE_WHITE} specifies an opaque, floating-point format, white color.
  • *
  • {@link #VK_BORDER_COLOR_INT_OPAQUE_WHITE BORDER_COLOR_INT_OPAQUE_WHITE} specifies an opaque, integer format, white color.
  • *
* *

These colors are described in detail in Texel Replacement.

* *
See Also
* *

{@link VkSamplerCreateInfo}
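
Pulling the {@code VkFilter}, {@code VkSamplerMipmapMode}, {@code VkSamplerAddressMode} and {@code VkBorderColor} selections together, a minimal sketch (recent LWJGL 3 and static VK10 imports assumed; {@code device} is hypothetical) of creating a trilinear, repeating sampler:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkSamplerCreateInfo samplerInfo = VkSamplerCreateInfo.calloc(stack)
            .sType(VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO)
            .magFilter(VK_FILTER_LINEAR)
            .minFilter(VK_FILTER_LINEAR)
            .mipmapMode(VK_SAMPLER_MIPMAP_MODE_LINEAR)
            .addressModeU(VK_SAMPLER_ADDRESS_MODE_REPEAT)
            .addressModeV(VK_SAMPLER_ADDRESS_MODE_REPEAT)
            .addressModeW(VK_SAMPLER_ADDRESS_MODE_REPEAT)
            .borderColor(VK_BORDER_COLOR_INT_OPAQUE_BLACK)  // only consulted by the CLAMP_TO_BORDER modes
            .maxLod(VK_LOD_CLAMP_NONE);                     // do not clamp the computed level of detail

        LongBuffer pSampler = stack.mallocLong(1);
        int err = vkCreateSampler(device, samplerInfo, null, pSampler);
        // check err == VK_SUCCESS, then pSampler.get(0) is the VkSampler handle
    }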

*/ public static final int VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0, VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1, VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2, VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3, VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4, VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5; /** * VkDescriptorType - Specifies the type of a descriptor in a descriptor set * *
Description
* *
    *
  • {@link #VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER DESCRIPTOR_TYPE_UNIFORM_BUFFER}, {@link #VK_DESCRIPTOR_TYPE_STORAGE_BUFFER DESCRIPTOR_TYPE_STORAGE_BUFFER}, {@link #VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC}, or {@link #VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC} specify that the elements of the {@link VkWriteDescriptorSet}{@code ::pBufferInfo} array of {@link VkDescriptorBufferInfo} structures will be used to update the descriptors, and other arrays will be ignored.
  • *
  • {@link #VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER} or {@link #VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER} specify that the {@link VkWriteDescriptorSet}{@code ::pTexelBufferView} array will be used to update the descriptors, and other arrays will be ignored.
  • *
  • {@link #VK_DESCRIPTOR_TYPE_SAMPLER DESCRIPTOR_TYPE_SAMPLER}, {@link #VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER}, {@link #VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE DESCRIPTOR_TYPE_SAMPLED_IMAGE}, {@link #VK_DESCRIPTOR_TYPE_STORAGE_IMAGE DESCRIPTOR_TYPE_STORAGE_IMAGE}, or {@link #VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT DESCRIPTOR_TYPE_INPUT_ATTACHMENT} specify that the elements of the {@link VkWriteDescriptorSet}{@code ::pImageInfo} array of {@link VkDescriptorImageInfo} structures will be used to update the descriptors, and other arrays will be ignored.
  • *
* *
See Also
* *

{@link VkDescriptorPoolSize}, {@link VkDescriptorSetLayoutBinding}, {@link VkDescriptorUpdateTemplateEntryKHR}, {@link VkWriteDescriptorSet}
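
As an illustration of the array-selection rule above, a minimal sketch (recent LWJGL 3 assumed; {@code device}, {@code descriptorSet} and {@code uniformBuffer} are hypothetical) of writing a single {@link #VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER DESCRIPTOR_TYPE_UNIFORM_BUFFER} descriptor, for which only {@code pBufferInfo} is consulted:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkDescriptorBufferInfo.Buffer bufferInfo = VkDescriptorBufferInfo.calloc(1, stack)
            .buffer(uniformBuffer)
            .offset(0)
            .range(VK_WHOLE_SIZE);

        VkWriteDescriptorSet.Buffer write = VkWriteDescriptorSet.calloc(1, stack)
            .sType(VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)
            .dstSet(descriptorSet)
            .dstBinding(0)
            .dstArrayElement(0)
            .descriptorCount(1)                    // set explicitly; not all LWJGL builds auto-size this field
            .descriptorType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
            .pBufferInfo(bufferInfo);              // pImageInfo and pTexelBufferView are ignored for this type

        vkUpdateDescriptorSets(device, write, null);
    }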

*/ public static final int VK_DESCRIPTOR_TYPE_SAMPLER = 0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10; /** * VkDescriptorPoolCreateFlagBits - Bitmask specifying certain supported operations on a descriptor pool * *
Description
* *
    *
  • {@link #VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT} specifies that descriptor sets can return their individual allocations to the pool, i.e. all of {@link #vkAllocateDescriptorSets AllocateDescriptorSets}, {@link #vkFreeDescriptorSets FreeDescriptorSets}, and {@link #vkResetDescriptorPool ResetDescriptorPool} are allowed. Otherwise, descriptor sets allocated from the pool must not be individually freed back to the pool, i.e. only {@link #vkAllocateDescriptorSets AllocateDescriptorSets} and {@link #vkResetDescriptorPool ResetDescriptorPool} are allowed.
  • *
* *
See Also
* *

{@code VkDescriptorPoolCreateFlags}

*/ public static final int VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x1; /** * VkAttachmentDescriptionFlagBits - Bitmask specifying additional properties of an attachment * *
Description
* *
    *
  • {@link #VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT} specifies that the attachment aliases the same device memory as other attachments.
  • *
* *
See Also
* *

{@code VkAttachmentDescriptionFlags}

*/ public static final int VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x1; /** * VkAttachmentLoadOp - Specify how contents of an attachment are treated at the beginning of a subpass * *
Description
* *
    *
  • {@link #VK_ATTACHMENT_LOAD_OP_LOAD ATTACHMENT_LOAD_OP_LOAD} specifies that the previous contents of the image within the render area will be preserved. For attachments with a depth/stencil format, this uses the access type {@link #VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT}. For attachments with a color format, this uses the access type {@link #VK_ACCESS_COLOR_ATTACHMENT_READ_BIT ACCESS_COLOR_ATTACHMENT_READ_BIT}.
  • *
  • {@link #VK_ATTACHMENT_LOAD_OP_CLEAR ATTACHMENT_LOAD_OP_CLEAR} specifies that the contents within the render area will be cleared to a uniform value, which is specified when a render pass instance is begun. For attachments with a depth/stencil format, this uses the access type {@link #VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT}. For attachments with a color format, this uses the access type {@link #VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT ACCESS_COLOR_ATTACHMENT_WRITE_BIT}.
  • *
  • {@link #VK_ATTACHMENT_LOAD_OP_DONT_CARE ATTACHMENT_LOAD_OP_DONT_CARE} specifies that the previous contents within the area need not be preserved; the contents of the attachment will be undefined inside the render area. For attachments with a depth/stencil format, this uses the access type {@link #VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT}. For attachments with a color format, this uses the access type {@link #VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT ACCESS_COLOR_ATTACHMENT_WRITE_BIT}.
  • *
* *
See Also
* *

{@link VkAttachmentDescription}

*/ public static final int VK_ATTACHMENT_LOAD_OP_LOAD = 0, VK_ATTACHMENT_LOAD_OP_CLEAR = 1, VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2; /** * VkAttachmentStoreOp - Specify how contents of an attachment are treated at the end of a subpass * *
Description
* *
    *
  • {@link #VK_ATTACHMENT_STORE_OP_STORE ATTACHMENT_STORE_OP_STORE} specifies the contents generated during the render pass and within the render area are written to memory. For attachments with a depth/stencil format, this uses the access type {@link #VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT}. For attachments with a color format, this uses the access type {@link #VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT ACCESS_COLOR_ATTACHMENT_WRITE_BIT}.
  • *
  • {@link #VK_ATTACHMENT_STORE_OP_DONT_CARE ATTACHMENT_STORE_OP_DONT_CARE} specifies the contents within the render area are not needed after rendering, and may be discarded; the contents of the attachment will be undefined inside the render area. For attachments with a depth/stencil format, this uses the access type {@link #VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT}. For attachments with a color format, this uses the access type {@link #VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT ACCESS_COLOR_ATTACHMENT_WRITE_BIT}.
  • *
* *
See Also
* *

{@link VkAttachmentDescription}
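
Tying the load and store operations back to the image layouts above, a minimal sketch (recent LWJGL 3, static VK10 imports and the KHRSwapchain class assumed; {@code swapchainFormat} is hypothetical) of a color attachment that is cleared at the start of the render pass, kept at the end, and handed to the presentation engine:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkAttachmentDescription.Buffer colorAttachment = VkAttachmentDescription.calloc(1, stack)
            .format(swapchainFormat)
            .samples(VK_SAMPLE_COUNT_1_BIT)
            .loadOp(VK_ATTACHMENT_LOAD_OP_CLEAR)            // cleared to the value given at render pass begin
            .storeOp(VK_ATTACHMENT_STORE_OP_STORE)          // keep the rendered contents
            .stencilLoadOp(VK_ATTACHMENT_LOAD_OP_DONT_CARE)
            .stencilStoreOp(VK_ATTACHMENT_STORE_OP_DONT_CARE)
            .initialLayout(VK_IMAGE_LAYOUT_UNDEFINED)
            .finalLayout(KHRSwapchain.VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
        // colorAttachment is later assigned to VkRenderPassCreateInfo::pAttachments.
    }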

*/ public static final int VK_ATTACHMENT_STORE_OP_STORE = 0, VK_ATTACHMENT_STORE_OP_DONT_CARE = 1; /** * VkPipelineBindPoint - Specify the bind point of a pipeline object to a command buffer * *
Description
* *
    *
  • {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} specifies binding as a compute pipeline.
  • *
  • {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} specifies binding as a graphics pipeline.
  • *
* *
See Also
* *

{@link VkDescriptorUpdateTemplateCreateInfoKHR}, {@link VkIndirectCommandsLayoutCreateInfoNVX}, {@link VkSubpassDescription}, {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets}, {@link #vkCmdBindPipeline CmdBindPipeline}, {@link KHRPushDescriptor#vkCmdPushDescriptorSetKHR CmdPushDescriptorSetKHR}

*/ public static final int VK_PIPELINE_BIND_POINT_GRAPHICS = 0, VK_PIPELINE_BIND_POINT_COMPUTE = 1; /** * VkAccessFlagBits - Bitmask specifying memory access types that will participate in a memory dependency * *
Description
* *
    *
  • {@link #VK_ACCESS_INDIRECT_COMMAND_READ_BIT ACCESS_INDIRECT_COMMAND_READ_BIT} specifies read access to an indirect command structure read as part of an indirect drawing or dispatch command.
  • *
  • {@link #VK_ACCESS_INDEX_READ_BIT ACCESS_INDEX_READ_BIT} specifies read access to an index buffer as part of an indexed drawing command, bound by {@link #vkCmdBindIndexBuffer CmdBindIndexBuffer}.
  • *
  • {@link #VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT ACCESS_VERTEX_ATTRIBUTE_READ_BIT} specifies read access to a vertex buffer as part of a drawing command, bound by {@link #vkCmdBindVertexBuffers CmdBindVertexBuffers}.
  • *
  • {@link #VK_ACCESS_UNIFORM_READ_BIT ACCESS_UNIFORM_READ_BIT} specifies read access to a uniform buffer.
  • *
  • {@link #VK_ACCESS_INPUT_ATTACHMENT_READ_BIT ACCESS_INPUT_ATTACHMENT_READ_BIT} specifies read access to an input attachment within a render pass during fragment shading.
  • *
  • {@link #VK_ACCESS_SHADER_READ_BIT ACCESS_SHADER_READ_BIT} specifies read access to a storage buffer, uniform texel buffer, storage texel buffer, sampled image, or storage image.
  • *
  • {@link #VK_ACCESS_SHADER_WRITE_BIT ACCESS_SHADER_WRITE_BIT} specifies write access to a storage buffer, storage texel buffer, or storage image.
  • *
  • {@link #VK_ACCESS_COLOR_ATTACHMENT_READ_BIT ACCESS_COLOR_ATTACHMENT_READ_BIT} specifies read access to a color attachment, such as via blending, logic operations, or via certain subpass load operations. It does not include advanced blend operations.
  • *
  • {@link EXTBlendOperationAdvanced#VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT} is similar to {@link #VK_ACCESS_COLOR_ATTACHMENT_READ_BIT ACCESS_COLOR_ATTACHMENT_READ_BIT}, but also includes advanced blend operations.
  • *
  • {@link #VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT ACCESS_COLOR_ATTACHMENT_WRITE_BIT} specifies write access to a color or resolve attachment during a render pass or via certain subpass load and store operations.
  • *
  • {@link #VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT} specifies read access to a depth/stencil attachment, via depth or stencil operations or via certain subpass load operations.
  • *
  • {@link #VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT} specifies write access to a depth/stencil attachment, via depth or stencil operations or via certain subpass load and store operations.
  • *
  • {@link #VK_ACCESS_TRANSFER_READ_BIT ACCESS_TRANSFER_READ_BIT} specifies read access to an image or buffer in a copy operation.
  • *
  • {@link #VK_ACCESS_TRANSFER_WRITE_BIT ACCESS_TRANSFER_WRITE_BIT} specifies write access to an image or buffer in a clear or copy operation.
  • *
  • {@link #VK_ACCESS_HOST_READ_BIT ACCESS_HOST_READ_BIT} specifies read access by a host operation. Accesses of this type are not performed through a resource, but directly on memory.
  • *
  • {@link #VK_ACCESS_HOST_WRITE_BIT ACCESS_HOST_WRITE_BIT} specifies write access by a host operation. Accesses of this type are not performed through a resource, but directly on memory.
  • *
  • {@link #VK_ACCESS_MEMORY_READ_BIT ACCESS_MEMORY_READ_BIT} specifies read access via non-specific entities. These entities include the Vulkan device and host, but may also include entities external to the Vulkan device or otherwise not part of the core Vulkan pipeline. When included in a destination access mask, makes all available writes visible to all future read accesses on entities known to the Vulkan device.
  • *
  • {@link #VK_ACCESS_MEMORY_WRITE_BIT ACCESS_MEMORY_WRITE_BIT} specifies write access via non-specific entities. These entities include the Vulkan device and host, but may also include entities external to the Vulkan device or otherwise not part of the core Vulkan pipeline. When included in a source access mask, all writes that are performed by entities known to the Vulkan device are made available. When included in a destination access mask, makes all available writes visible to all future write accesses on entities known to the Vulkan device.
  • *
  • {@link NVXDeviceGeneratedCommands#VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX ACCESS_COMMAND_PROCESS_READ_BIT_NVX} specifies reads from {@code VkBuffer} inputs to {@link NVXDeviceGeneratedCommands#vkCmdProcessCommandsNVX CmdProcessCommandsNVX}.
  • *
  • {@link NVXDeviceGeneratedCommands#VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX} specifies writes to the target command buffer in {@link NVXDeviceGeneratedCommands#vkCmdProcessCommandsNVX CmdProcessCommandsNVX}.
  • *
* *

Certain access types are only performed by a subset of pipeline stages. Any synchronization command that takes both stage masks and access masks uses both to define the access scopes - only the specified access types performed by the specified stages are included in the access scope. An application must not specify an access flag in a synchronization command if it does not include a pipeline stage in the corresponding stage mask that is able to perform accesses of that type. The following table lists, for each access flag, which pipeline stages can perform that type of access.

* *
Supported access types
Access flag | Supported pipeline stages
{@link #VK_ACCESS_INDIRECT_COMMAND_READ_BIT ACCESS_INDIRECT_COMMAND_READ_BIT} | {@link #VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT PIPELINE_STAGE_DRAW_INDIRECT_BIT}
{@link #VK_ACCESS_INDEX_READ_BIT ACCESS_INDEX_READ_BIT} | {@link #VK_PIPELINE_STAGE_VERTEX_INPUT_BIT PIPELINE_STAGE_VERTEX_INPUT_BIT}
{@link #VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT ACCESS_VERTEX_ATTRIBUTE_READ_BIT} | {@link #VK_PIPELINE_STAGE_VERTEX_INPUT_BIT PIPELINE_STAGE_VERTEX_INPUT_BIT}
{@link #VK_ACCESS_UNIFORM_READ_BIT ACCESS_UNIFORM_READ_BIT} | {@link #VK_PIPELINE_STAGE_VERTEX_SHADER_BIT PIPELINE_STAGE_VERTEX_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT PIPELINE_STAGE_FRAGMENT_SHADER_BIT}, or {@link #VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT PIPELINE_STAGE_COMPUTE_SHADER_BIT}
{@link #VK_ACCESS_INPUT_ATTACHMENT_READ_BIT ACCESS_INPUT_ATTACHMENT_READ_BIT} | {@link #VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT PIPELINE_STAGE_FRAGMENT_SHADER_BIT}
{@link #VK_ACCESS_SHADER_READ_BIT ACCESS_SHADER_READ_BIT} | {@link #VK_PIPELINE_STAGE_VERTEX_SHADER_BIT PIPELINE_STAGE_VERTEX_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT PIPELINE_STAGE_FRAGMENT_SHADER_BIT}, or {@link #VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT PIPELINE_STAGE_COMPUTE_SHADER_BIT}
{@link #VK_ACCESS_SHADER_WRITE_BIT ACCESS_SHADER_WRITE_BIT} | {@link #VK_PIPELINE_STAGE_VERTEX_SHADER_BIT PIPELINE_STAGE_VERTEX_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT}, {@link #VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT PIPELINE_STAGE_FRAGMENT_SHADER_BIT}, or {@link #VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT PIPELINE_STAGE_COMPUTE_SHADER_BIT}
{@link #VK_ACCESS_COLOR_ATTACHMENT_READ_BIT ACCESS_COLOR_ATTACHMENT_READ_BIT} | {@link #VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT}
{@link EXTBlendOperationAdvanced#VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT} | {@link #VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT}
{@link #VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT ACCESS_COLOR_ATTACHMENT_WRITE_BIT} | {@link #VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT}
{@link #VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT} | {@link #VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT}, or {@link #VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT}
{@link #VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT} | {@link #VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT}, or {@link #VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT}
{@link #VK_ACCESS_TRANSFER_READ_BIT ACCESS_TRANSFER_READ_BIT} | {@link #VK_PIPELINE_STAGE_TRANSFER_BIT PIPELINE_STAGE_TRANSFER_BIT}
{@link #VK_ACCESS_TRANSFER_WRITE_BIT ACCESS_TRANSFER_WRITE_BIT} | {@link #VK_PIPELINE_STAGE_TRANSFER_BIT PIPELINE_STAGE_TRANSFER_BIT}
{@link #VK_ACCESS_HOST_READ_BIT ACCESS_HOST_READ_BIT} | {@link #VK_PIPELINE_STAGE_HOST_BIT PIPELINE_STAGE_HOST_BIT}
{@link #VK_ACCESS_HOST_WRITE_BIT ACCESS_HOST_WRITE_BIT} | {@link #VK_PIPELINE_STAGE_HOST_BIT PIPELINE_STAGE_HOST_BIT}
{@link #VK_ACCESS_MEMORY_READ_BIT ACCESS_MEMORY_READ_BIT} | N/A
{@link #VK_ACCESS_MEMORY_WRITE_BIT ACCESS_MEMORY_WRITE_BIT} | N/A
{@link NVXDeviceGeneratedCommands#VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX ACCESS_COMMAND_PROCESS_READ_BIT_NVX} | {@link NVXDeviceGeneratedCommands#VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX}
{@link NVXDeviceGeneratedCommands#VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX} | {@link NVXDeviceGeneratedCommands#VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX}
* *

If a memory object does not have the {@link #VK_MEMORY_PROPERTY_HOST_COHERENT_BIT MEMORY_PROPERTY_HOST_COHERENT_BIT} property, then {@link #vkFlushMappedMemoryRanges FlushMappedMemoryRanges} must be called to guarantee that writes to the memory object from the host are made visible to the {@link #VK_ACCESS_HOST_WRITE_BIT ACCESS_HOST_WRITE_BIT} access type, where they can be further made available to the device by synchronization commands. Similarly, {@link #vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges} must be called to guarantee that writes which are visible to the {@link #VK_ACCESS_HOST_READ_BIT ACCESS_HOST_READ_BIT} access type are made visible to host operations.

* *

If the memory object does have the {@link #VK_MEMORY_PROPERTY_HOST_COHERENT_BIT MEMORY_PROPERTY_HOST_COHERENT_BIT} property flag, writes to the memory object from the host are automatically made visible to the {@link #VK_ACCESS_HOST_WRITE_BIT ACCESS_HOST_WRITE_BIT} access type. Similarly, writes made visible to the {@link #VK_ACCESS_HOST_READ_BIT ACCESS_HOST_READ_BIT} access type are automatically made visible to the host.

* *
Note
* *

The {@link #vkQueueSubmit QueueSubmit} command automatically guarantees that host writes flushed to {@link #VK_ACCESS_HOST_WRITE_BIT ACCESS_HOST_WRITE_BIT} are made available if they were flushed before the command executed, so in most cases an explicit memory barrier is not needed for this case. In the few circumstances where a submit does not occur between the host write and the device read access, writes can be made available by using an explicit memory barrier.
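
As a concrete illustration of this note, the following sketch flushes a non-coherent mapped range and then makes the host writes available to a transfer read with a barrier whose stage and access masks match the table above. It is a minimal sketch only: {@code device}, {@code memory} (a mapped, non-coherent {@code VkDeviceMemory} handle) and {@code cmd} (a recording {@code VkCommandBuffer}) are assumed to exist, as are static imports of {@code org.lwjgl.vulkan.VK10.*} and {@code org.lwjgl.system.MemoryUtil.*}.

* *
     * // Flush the host writes (required because the memory is not HOST_COHERENT).
     * VkMappedMemoryRange.Buffer range = VkMappedMemoryRange.calloc(1)
     *     .sType(VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE)
     *     .memory(memory)              // assumed VkDeviceMemory handle
     *     .offset(0)
     *     .size(VK_WHOLE_SIZE);
     * vkFlushMappedMemoryRanges(device, range);
     *
     * // If no vkQueueSubmit occurs between the host write and the device read,
     * // make the writes available explicitly: ACCESS_HOST_WRITE_BIT is performed by
     * // PIPELINE_STAGE_HOST_BIT, ACCESS_TRANSFER_READ_BIT by PIPELINE_STAGE_TRANSFER_BIT.
     * VkMemoryBarrier.Buffer barrier = VkMemoryBarrier.calloc(1)
     *     .sType(VK_STRUCTURE_TYPE_MEMORY_BARRIER)
     *     .srcAccessMask(VK_ACCESS_HOST_WRITE_BIT)
     *     .dstAccessMask(VK_ACCESS_TRANSFER_READ_BIT);
     * vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, barrier, null, null);
     *
     * range.free();
     * barrier.free();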

*
* *
See Also
* *

{@code VkAccessFlags}

*/ public static final int VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x1, VK_ACCESS_INDEX_READ_BIT = 0x2, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x4, VK_ACCESS_UNIFORM_READ_BIT = 0x8, VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x10, VK_ACCESS_SHADER_READ_BIT = 0x20, VK_ACCESS_SHADER_WRITE_BIT = 0x40, VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x80, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x100, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x200, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x400, VK_ACCESS_TRANSFER_READ_BIT = 0x800, VK_ACCESS_TRANSFER_WRITE_BIT = 0x1000, VK_ACCESS_HOST_READ_BIT = 0x2000, VK_ACCESS_HOST_WRITE_BIT = 0x4000, VK_ACCESS_MEMORY_READ_BIT = 0x8000, VK_ACCESS_MEMORY_WRITE_BIT = 0x10000; /** * VkDependencyFlagBits - Bitmask specifying how execution and memory dependencies are formed * *
Description
* *
    *
  • {@link #VK_DEPENDENCY_BY_REGION_BIT DEPENDENCY_BY_REGION_BIT} specifies that dependencies will be framebuffer-local.
  • *
  • {@link KHXMultiview#VK_DEPENDENCY_VIEW_LOCAL_BIT_KHX DEPENDENCY_VIEW_LOCAL_BIT_KHX} specifies that the dependency is view-local; it is used in subpasses that have more than one view.
  • *
  • {@link KHXDeviceGroup#VK_DEPENDENCY_DEVICE_GROUP_BIT_KHX DEPENDENCY_DEVICE_GROUP_BIT_KHX} specifies that dependencies are non-device-local.
  • *
* *
See Also
* *

{@code VkDependencyFlags}

*/ public static final int VK_DEPENDENCY_BY_REGION_BIT = 0x1; /** * VkCommandPoolCreateFlagBits - Bitmask specifying usage behavior for a command pool * *
Description
* *
    *
  • {@link #VK_COMMAND_POOL_CREATE_TRANSIENT_BIT COMMAND_POOL_CREATE_TRANSIENT_BIT} indicates that command buffers allocated from the pool will be short-lived, meaning that they will be reset or freed in a relatively short timeframe. This flag may be used by the implementation to control memory allocation behavior within the pool.
  • *
  • {@link #VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT} allows any command buffer allocated from a pool to be individually reset to the initial state; either by calling {@link #vkResetCommandBuffer ResetCommandBuffer}, or via the implicit reset when calling {@link #vkBeginCommandBuffer BeginCommandBuffer}. If this flag is not set on a pool, then {@link #vkResetCommandBuffer ResetCommandBuffer} must not be called for any command buffer allocated from that pool.
  • *
* *
See Also
* *

{@code VkCommandPoolCreateFlags}
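
For instance, a pool whose command buffers can be individually reset could be created as in the following hedged sketch. It assumes an existing {@code VkDevice} {@code device}, an illustrative queue family index {@code graphicsFamily}, and static imports of {@code VK10.*} and {@code MemoryUtil.*}.

* *
     * VkCommandPoolCreateInfo poolInfo = VkCommandPoolCreateInfo.calloc()
     *     .sType(VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO)
     *     .flags(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)
     *     .queueFamilyIndex(graphicsFamily);   // assumed queue family index
     *
     * LongBuffer pCommandPool = memAllocLong(1);
     * int err = vkCreateCommandPool(device, poolInfo, null, pCommandPool);
     * if (err != VK_SUCCESS) { throw new IllegalStateException("vkCreateCommandPool failed: " + err); }
     * long commandPool = pCommandPool.get(0);
     *
     * memFree(pCommandPool);
     * poolInfo.free();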

*/ public static final int VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x1, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x2; /** * VkCommandPoolResetFlagBits - Bitmask controlling behavior of a command pool reset * *
Description
* *
    *
  • {@link #VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT} specifies that resetting a command pool recycles all of the resources from the command pool back to the system.
  • *
* *
See Also
* *

{@code VkCommandPoolResetFlags}

*/ public static final int VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x1; /** * VkCommandBufferLevel - Enumerant specifying a command buffer level * *
Description
* *
    *
  • {@link #VK_COMMAND_BUFFER_LEVEL_PRIMARY COMMAND_BUFFER_LEVEL_PRIMARY} specifies a primary command buffer.
  • *
  • {@link #VK_COMMAND_BUFFER_LEVEL_SECONDARY COMMAND_BUFFER_LEVEL_SECONDARY} specifies a secondary command buffer.
  • *
* *
See Also
* *

{@link VkCommandBufferAllocateInfo}
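
As a sketch (assuming {@code device} and a previously created {@code commandPool} handle; names are illustrative), a single primary command buffer could be allocated like this:

* *
     * VkCommandBufferAllocateInfo allocInfo = VkCommandBufferAllocateInfo.calloc()
     *     .sType(VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO)
     *     .commandPool(commandPool)
     *     .level(VK_COMMAND_BUFFER_LEVEL_PRIMARY)
     *     .commandBufferCount(1);
     *
     * PointerBuffer pCommandBuffer = memAllocPointer(1);
     * vkAllocateCommandBuffers(device, allocInfo, pCommandBuffer);
     * VkCommandBuffer cmd = new VkCommandBuffer(pCommandBuffer.get(0), device);
     *
     * memFree(pCommandBuffer);
     * allocInfo.free();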

*/ public static final int VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0, VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1; /** * VkCommandBufferUsageFlagBits - Bitmask specifying usage behavior for command buffer * *
Description
* *
    *
  • {@link #VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT} specifies that each recording of the command buffer will only be submitted once, and the command buffer will be reset and recorded again between each submission.
  • *
  • {@link #VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT} specifies that a secondary command buffer is considered to be entirely inside a render pass. If this is a primary command buffer, then this bit is ignored.
  • *
  • {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} specifies that a command buffer can be resubmitted to a queue while it is in the pending state, and recorded into multiple primary command buffers.
  • *
* *
See Also
* *

{@code VkCommandBufferUsageFlags}
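
For example (a minimal sketch, reusing the assumed {@code cmd} command buffer from the allocation sketch above), a recording intended for a single submission could be bracketed like this:

* *
     * VkCommandBufferBeginInfo beginInfo = VkCommandBufferBeginInfo.calloc()
     *     .sType(VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO)
     *     .flags(VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
     *
     * vkBeginCommandBuffer(cmd, beginInfo);
     * // ... record commands ...
     * vkEndCommandBuffer(cmd);
     *
     * beginInfo.free();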

*/ public static final int VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x1, VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x2, VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x4; /** * VkQueryControlFlagBits - Bitmask specifying constraints on a query * *
Description
* *
    *
  • {@link #VK_QUERY_CONTROL_PRECISE_BIT QUERY_CONTROL_PRECISE_BIT} specifies the precision of occlusion queries.
  • *
* *
See Also
* *

{@code VkQueryControlFlags}

*/ public static final int VK_QUERY_CONTROL_PRECISE_BIT = 0x1; /** * VkCommandBufferResetFlagBits - Bitmask controlling behavior of a command buffer reset * *
Description
* *
    *
  • {@link #VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT} specifies that most or all memory resources currently owned by the command buffer should be returned to the parent command pool. If this flag is not set, then the command buffer may hold onto memory resources and reuse them when recording commands. {@code commandBuffer} is moved to the initial state.
  • *
* *
See Also
* *

{@code VkCommandBufferResetFlags}

*/ public static final int VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x1; /** * VkStencilFaceFlagBits - Bitmask specifying sets of stencil state for which to update the compare mask * *
Description
* *
    *
  • {@link #VK_STENCIL_FACE_FRONT_BIT STENCIL_FACE_FRONT_BIT} specifies that only the front set of stencil state is updated.
  • *
  • {@link #VK_STENCIL_FACE_BACK_BIT STENCIL_FACE_BACK_BIT} specifies that only the back set of stencil state is updated.
  • *
  • {@link #VK_STENCIL_FRONT_AND_BACK STENCIL_FRONT_AND_BACK} is the combination of {@link #VK_STENCIL_FACE_FRONT_BIT STENCIL_FACE_FRONT_BIT} and {@link #VK_STENCIL_FACE_BACK_BIT STENCIL_FACE_BACK_BIT}, and specifies that both sets of stencil state are updated.
  • *
* *
See Also
* *

{@code VkStencilFaceFlags}

*/ public static final int VK_STENCIL_FACE_FRONT_BIT = 0x1, VK_STENCIL_FACE_BACK_BIT = 0x2, VK_STENCIL_FRONT_AND_BACK = 0x00000003; /** * VkIndexType - Type of index buffer indices * *
Description
* *
    *
  • {@link #VK_INDEX_TYPE_UINT16 INDEX_TYPE_UINT16} specifies that indices are 16-bit unsigned integer values.
  • *
  • {@link #VK_INDEX_TYPE_UINT32 INDEX_TYPE_UINT32} specifies that indices are 32-bit unsigned integer values.
  • *
* *
See Also
* *

{@link VkObjectTableIndexBufferEntryNVX}, {@link #vkCmdBindIndexBuffer CmdBindIndexBuffer}
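
For example (a sketch only; {@code cmd} is a recording command buffer, {@code indexBuffer} a hypothetical {@code VkBuffer} filled with 16-bit indices, and {@code indexCount} an assumed variable):

* *
     * vkCmdBindIndexBuffer(cmd, indexBuffer, 0L, VK_INDEX_TYPE_UINT16);
     * vkCmdDrawIndexed(cmd, indexCount, 1, 0, 0, 0);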

*/ public static final int VK_INDEX_TYPE_UINT16 = 0, VK_INDEX_TYPE_UINT32 = 1; /** * VkSubpassContents - Specify how commands in the first subpass of a render pass are provided * *
Description
* *
    *
  • {@link #VK_SUBPASS_CONTENTS_INLINE SUBPASS_CONTENTS_INLINE} specifies that the contents of the subpass will be recorded inline in the primary command buffer, and secondary command buffers must not be executed within the subpass.
  • *
  • {@link #VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS} specifies that the contents are recorded in secondary command buffers that will be called from the primary command buffer, and {@link #vkCmdExecuteCommands CmdExecuteCommands} is the only valid command on the command buffer until {@link #vkCmdNextSubpass CmdNextSubpass} or {@link #vkCmdEndRenderPass CmdEndRenderPass}.
  • *
* *
See Also
* *

{@link #vkCmdBeginRenderPass CmdBeginRenderPass}, {@link #vkCmdNextSubpass CmdNextSubpass}
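
A hedged sketch of inline recording (assuming existing {@code renderPass} and {@code framebuffer} handles and a recording command buffer {@code cmd}; the 800x600 extent is illustrative and clear values are omitted):

* *
     * VkRenderPassBeginInfo rpBegin = VkRenderPassBeginInfo.calloc()
     *     .sType(VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO)
     *     .renderPass(renderPass)
     *     .framebuffer(framebuffer);
     * rpBegin.renderArea().extent().width(800).height(600);
     *
     * vkCmdBeginRenderPass(cmd, rpBegin, VK_SUBPASS_CONTENTS_INLINE);
     * // ... record draw commands directly in this primary command buffer ...
     * vkCmdEndRenderPass(cmd);
     *
     * rpBegin.free();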

*/ public static final int VK_SUBPASS_CONTENTS_INLINE = 0, VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1; /** * VkObjectType - Specify an enumeration to track object handle types * *
Description
* *
VkObjectType and Vulkan Handle Relationship
{@code VkObjectType} | Vulkan Handle Type
{@link #VK_OBJECT_TYPE_UNKNOWN OBJECT_TYPE_UNKNOWN} | Unknown/Undefined Handle
{@link #VK_OBJECT_TYPE_INSTANCE OBJECT_TYPE_INSTANCE} | {@code VkInstance}
{@link #VK_OBJECT_TYPE_PHYSICAL_DEVICE OBJECT_TYPE_PHYSICAL_DEVICE} | {@code VkPhysicalDevice}
{@link #VK_OBJECT_TYPE_DEVICE OBJECT_TYPE_DEVICE} | {@code VkDevice}
{@link #VK_OBJECT_TYPE_QUEUE OBJECT_TYPE_QUEUE} | {@code VkQueue}
{@link #VK_OBJECT_TYPE_SEMAPHORE OBJECT_TYPE_SEMAPHORE} | {@code VkSemaphore}
{@link #VK_OBJECT_TYPE_COMMAND_BUFFER OBJECT_TYPE_COMMAND_BUFFER} | {@code VkCommandBuffer}
{@link #VK_OBJECT_TYPE_FENCE OBJECT_TYPE_FENCE} | {@code VkFence}
{@link #VK_OBJECT_TYPE_DEVICE_MEMORY OBJECT_TYPE_DEVICE_MEMORY} | {@code VkDeviceMemory}
{@link #VK_OBJECT_TYPE_BUFFER OBJECT_TYPE_BUFFER} | {@code VkBuffer}
{@link #VK_OBJECT_TYPE_IMAGE OBJECT_TYPE_IMAGE} | {@code VkImage}
{@link #VK_OBJECT_TYPE_EVENT OBJECT_TYPE_EVENT} | {@code VkEvent}
{@link #VK_OBJECT_TYPE_QUERY_POOL OBJECT_TYPE_QUERY_POOL} | {@code VkQueryPool}
{@link #VK_OBJECT_TYPE_BUFFER_VIEW OBJECT_TYPE_BUFFER_VIEW} | {@code VkBufferView}
{@link #VK_OBJECT_TYPE_IMAGE_VIEW OBJECT_TYPE_IMAGE_VIEW} | {@code VkImageView}
{@link #VK_OBJECT_TYPE_SHADER_MODULE OBJECT_TYPE_SHADER_MODULE} | {@code VkShaderModule}
{@link #VK_OBJECT_TYPE_PIPELINE_CACHE OBJECT_TYPE_PIPELINE_CACHE} | {@code VkPipelineCache}
{@link #VK_OBJECT_TYPE_PIPELINE_LAYOUT OBJECT_TYPE_PIPELINE_LAYOUT} | {@code VkPipelineLayout}
{@link #VK_OBJECT_TYPE_RENDER_PASS OBJECT_TYPE_RENDER_PASS} | {@code VkRenderPass}
{@link #VK_OBJECT_TYPE_PIPELINE OBJECT_TYPE_PIPELINE} | {@code VkPipeline}
{@link #VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT} | {@code VkDescriptorSetLayout}
{@link #VK_OBJECT_TYPE_SAMPLER OBJECT_TYPE_SAMPLER} | {@code VkSampler}
{@link #VK_OBJECT_TYPE_DESCRIPTOR_POOL OBJECT_TYPE_DESCRIPTOR_POOL} | {@code VkDescriptorPool}
{@link #VK_OBJECT_TYPE_DESCRIPTOR_SET OBJECT_TYPE_DESCRIPTOR_SET} | {@code VkDescriptorSet}
{@link #VK_OBJECT_TYPE_FRAMEBUFFER OBJECT_TYPE_FRAMEBUFFER} | {@code VkFramebuffer}
{@link #VK_OBJECT_TYPE_COMMAND_POOL OBJECT_TYPE_COMMAND_POOL} | {@code VkCommandPool}
{@link KHRSurface#VK_OBJECT_TYPE_SURFACE_KHR OBJECT_TYPE_SURFACE_KHR} | {@code VkSurfaceKHR}
{@link KHRSwapchain#VK_OBJECT_TYPE_SWAPCHAIN_KHR OBJECT_TYPE_SWAPCHAIN_KHR} | {@code VkSwapchainKHR}
{@link KHRDisplay#VK_OBJECT_TYPE_DISPLAY_KHR OBJECT_TYPE_DISPLAY_KHR} | {@code VkDisplayKHR}
{@link KHRDisplay#VK_OBJECT_TYPE_DISPLAY_MODE_KHR OBJECT_TYPE_DISPLAY_MODE_KHR} | {@code VkDisplayModeKHR}
{@link EXTDebugReport#VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT} | {@code VkDebugReportCallbackEXT}
{@link KHRDescriptorUpdateTemplate#VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR} | {@code VkDescriptorUpdateTemplateKHR}
{@link NVXDeviceGeneratedCommands#VK_OBJECT_TYPE_OBJECT_TABLE_NVX OBJECT_TYPE_OBJECT_TABLE_NVX} | {@code VkObjectTableNVX}
{@link NVXDeviceGeneratedCommands#VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX} | {@code VkIndirectCommandsLayoutNVX}
{@link EXTValidationCache#VK_OBJECT_TYPE_VALIDATION_CACHE_EXT OBJECT_TYPE_VALIDATION_CACHE_EXT} | {@code VkValidationCacheEXT}
* *
See Also
* *

No cross-references are available

*/ public static final int VK_OBJECT_TYPE_UNKNOWN = 0, VK_OBJECT_TYPE_INSTANCE = 1, VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2, VK_OBJECT_TYPE_DEVICE = 3, VK_OBJECT_TYPE_QUEUE = 4, VK_OBJECT_TYPE_SEMAPHORE = 5, VK_OBJECT_TYPE_COMMAND_BUFFER = 6, VK_OBJECT_TYPE_FENCE = 7, VK_OBJECT_TYPE_DEVICE_MEMORY = 8, VK_OBJECT_TYPE_BUFFER = 9, VK_OBJECT_TYPE_IMAGE = 10, VK_OBJECT_TYPE_EVENT = 11, VK_OBJECT_TYPE_QUERY_POOL = 12, VK_OBJECT_TYPE_BUFFER_VIEW = 13, VK_OBJECT_TYPE_IMAGE_VIEW = 14, VK_OBJECT_TYPE_SHADER_MODULE = 15, VK_OBJECT_TYPE_PIPELINE_CACHE = 16, VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17, VK_OBJECT_TYPE_RENDER_PASS = 18, VK_OBJECT_TYPE_PIPELINE = 19, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20, VK_OBJECT_TYPE_SAMPLER = 21, VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22, VK_OBJECT_TYPE_DESCRIPTOR_SET = 23, VK_OBJECT_TYPE_FRAMEBUFFER = 24, VK_OBJECT_TYPE_COMMAND_POOL = 25; /** * The API version number for Vulkan 1.0. * *

The patch version number in this macro will always be zero. The supported patch version for a physical device can be queried with {@link #vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties}.
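
For example (a small sketch; a static import of {@code VK10.*} is assumed), the packed version value can be built and decomposed with the version helper methods:

* *
     * int version = VK_MAKE_VERSION(1, 0, 0);   // same packing as VK_API_VERSION_1_0
     * int major   = VK_VERSION_MAJOR(version);  // 1
     * int minor   = VK_VERSION_MINOR(version);  // 0
     * int patch   = VK_VERSION_PATCH(version);  // 0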

*/ public static final int VK_API_VERSION_1_0 = VK_MAKE_VERSION(1, 0, 0); /** * The reserved handle {@code VK_NULL_HANDLE} can be passed in place of valid object handles when explicitly called out in the specification. Any * command that creates an object successfully must not return {@code VK_NULL_HANDLE}. It is valid to pass {@code VK_NULL_HANDLE} to any * {@code vkDestroy*} or {@code vkFree*} command, which will silently ignore these values. */ public static final long VK_NULL_HANDLE = 0x0L; protected VK10() { throw new UnsupportedOperationException(); } static boolean isAvailable(VKCapabilitiesInstance caps) { return checkFunctions( caps.vkDestroyInstance, caps.vkEnumeratePhysicalDevices, caps.vkGetPhysicalDeviceFeatures, caps.vkGetPhysicalDeviceFormatProperties, caps.vkGetPhysicalDeviceImageFormatProperties, caps.vkGetPhysicalDeviceProperties, caps.vkGetPhysicalDeviceQueueFamilyProperties, caps.vkGetPhysicalDeviceMemoryProperties, caps.vkCreateDevice, caps.vkEnumerateDeviceExtensionProperties, caps.vkEnumerateDeviceLayerProperties, caps.vkGetPhysicalDeviceSparseImageFormatProperties ); } static boolean isAvailable(VKCapabilitiesInstance capsInstance, VKCapabilitiesDevice caps) { return isAvailable(capsInstance) && checkFunctions( caps.vkGetDeviceProcAddr, caps.vkDestroyDevice, caps.vkGetDeviceQueue, caps.vkQueueSubmit, caps.vkQueueWaitIdle, caps.vkDeviceWaitIdle, caps.vkAllocateMemory, caps.vkFreeMemory, caps.vkMapMemory, caps.vkUnmapMemory, caps.vkFlushMappedMemoryRanges, caps.vkInvalidateMappedMemoryRanges, caps.vkGetDeviceMemoryCommitment, caps.vkBindBufferMemory, caps.vkBindImageMemory, caps.vkGetBufferMemoryRequirements, caps.vkGetImageMemoryRequirements, caps.vkGetImageSparseMemoryRequirements, caps.vkQueueBindSparse, caps.vkCreateFence, caps.vkDestroyFence, caps.vkResetFences, caps.vkGetFenceStatus, caps.vkWaitForFences, caps.vkCreateSemaphore, caps.vkDestroySemaphore, caps.vkCreateEvent, caps.vkDestroyEvent, caps.vkGetEventStatus, caps.vkSetEvent, caps.vkResetEvent, caps.vkCreateQueryPool, caps.vkDestroyQueryPool, caps.vkGetQueryPoolResults, caps.vkCreateBuffer, caps.vkDestroyBuffer, caps.vkCreateBufferView, caps.vkDestroyBufferView, caps.vkCreateImage, caps.vkDestroyImage, caps.vkGetImageSubresourceLayout, caps.vkCreateImageView, caps.vkDestroyImageView, caps.vkCreateShaderModule, caps.vkDestroyShaderModule, caps.vkCreatePipelineCache, caps.vkDestroyPipelineCache, caps.vkGetPipelineCacheData, caps.vkMergePipelineCaches, caps.vkCreateGraphicsPipelines, caps.vkCreateComputePipelines, caps.vkDestroyPipeline, caps.vkCreatePipelineLayout, caps.vkDestroyPipelineLayout, caps.vkCreateSampler, caps.vkDestroySampler, caps.vkCreateDescriptorSetLayout, caps.vkDestroyDescriptorSetLayout, caps.vkCreateDescriptorPool, caps.vkDestroyDescriptorPool, caps.vkResetDescriptorPool, caps.vkAllocateDescriptorSets, caps.vkFreeDescriptorSets, caps.vkUpdateDescriptorSets, caps.vkCreateFramebuffer, caps.vkDestroyFramebuffer, caps.vkCreateRenderPass, caps.vkDestroyRenderPass, caps.vkGetRenderAreaGranularity, caps.vkCreateCommandPool, caps.vkDestroyCommandPool, caps.vkResetCommandPool, caps.vkAllocateCommandBuffers, caps.vkFreeCommandBuffers, caps.vkBeginCommandBuffer, caps.vkEndCommandBuffer, caps.vkResetCommandBuffer, caps.vkCmdBindPipeline, caps.vkCmdSetViewport, caps.vkCmdSetScissor, caps.vkCmdSetLineWidth, caps.vkCmdSetDepthBias, caps.vkCmdSetBlendConstants, caps.vkCmdSetDepthBounds, caps.vkCmdSetStencilCompareMask, caps.vkCmdSetStencilWriteMask, caps.vkCmdSetStencilReference, 
caps.vkCmdBindDescriptorSets, caps.vkCmdBindIndexBuffer, caps.vkCmdBindVertexBuffers, caps.vkCmdDraw, caps.vkCmdDrawIndexed, caps.vkCmdDrawIndirect, caps.vkCmdDrawIndexedIndirect, caps.vkCmdDispatch, caps.vkCmdDispatchIndirect, caps.vkCmdCopyBuffer, caps.vkCmdCopyImage, caps.vkCmdBlitImage, caps.vkCmdCopyBufferToImage, caps.vkCmdCopyImageToBuffer, caps.vkCmdUpdateBuffer, caps.vkCmdFillBuffer, caps.vkCmdClearColorImage, caps.vkCmdClearDepthStencilImage, caps.vkCmdClearAttachments, caps.vkCmdResolveImage, caps.vkCmdSetEvent, caps.vkCmdResetEvent, caps.vkCmdWaitEvents, caps.vkCmdPipelineBarrier, caps.vkCmdBeginQuery, caps.vkCmdEndQuery, caps.vkCmdResetQueryPool, caps.vkCmdWriteTimestamp, caps.vkCmdCopyQueryPoolResults, caps.vkCmdPushConstants, caps.vkCmdBeginRenderPass, caps.vkCmdNextSubpass, caps.vkCmdEndRenderPass, caps.vkCmdExecuteCommands ); } // --- [ vkCreateInstance ] --- /** Unsafe version of: {@link #vkCreateInstance CreateInstance} */ public static int nvkCreateInstance(long pCreateInfo, long pAllocator, long pInstance) { long __functionAddress = VK.getGlobalCommands().vkCreateInstance; if (CHECKS) { VkInstanceCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPI(__functionAddress, pCreateInfo, pAllocator, pInstance); } /** * Create a new Vulkan instance. * *
C Specification
* *

To create an instance object, call:

* *
     * VkResult vkCreateInstance(
     *     const VkInstanceCreateInfo*                 pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkInstance*                                 pInstance);
* *
Description
* *

{@link #vkCreateInstance CreateInstance} verifies that the requested layers exist. If not, {@link #vkCreateInstance CreateInstance} will return {@link #VK_ERROR_LAYER_NOT_PRESENT ERROR_LAYER_NOT_PRESENT}. Next {@link #vkCreateInstance CreateInstance} verifies that the requested extensions are supported (e.g. in the implementation or in any enabled instance layer) and if any requested extension is not supported, {@link #vkCreateInstance CreateInstance} must return {@link #VK_ERROR_EXTENSION_NOT_PRESENT ERROR_EXTENSION_NOT_PRESENT}. After verifying and enabling the instance layers and extensions the {@code VkInstance} object is created and returned to the application. If a requested extension is only supported by a layer, both the layer and the extension need to be specified at {@link #vkCreateInstance CreateInstance} time for the creation to succeed.

* *
Valid Usage
* *
    *
  • All required extensions for each extension in the {@link VkInstanceCreateInfo}{@code ::ppEnabledExtensionNames} list must also be present in that list.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkInstanceCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pInstance} must be a valid pointer to a {@code VkInstance} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_INITIALIZATION_FAILED ERROR_INITIALIZATION_FAILED}
  • *
  • {@link #VK_ERROR_LAYER_NOT_PRESENT ERROR_LAYER_NOT_PRESENT}
  • *
  • {@link #VK_ERROR_EXTENSION_NOT_PRESENT ERROR_EXTENSION_NOT_PRESENT}
  • *
  • {@link #VK_ERROR_INCOMPATIBLE_DRIVER ERROR_INCOMPATIBLE_DRIVER}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkInstanceCreateInfo}
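
A minimal end-to-end sketch, not a complete implementation: the application name is illustrative, no layers or extensions are enabled, most error handling is omitted, and static imports of {@code org.lwjgl.vulkan.VK10.*} and {@code org.lwjgl.system.MemoryUtil.*} are assumed.

* *
     * VkApplicationInfo appInfo = VkApplicationInfo.calloc()
     *     .sType(VK_STRUCTURE_TYPE_APPLICATION_INFO)
     *     .pApplicationName(memUTF8("demo"))
     *     .apiVersion(VK_API_VERSION_1_0);
     *
     * VkInstanceCreateInfo createInfo = VkInstanceCreateInfo.calloc()
     *     .sType(VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO)
     *     .pApplicationInfo(appInfo);
     *
     * PointerBuffer pInstance = memAllocPointer(1);
     * int err = vkCreateInstance(createInfo, null, pInstance);
     * if (err != VK_SUCCESS) { throw new IllegalStateException("vkCreateInstance failed: " + err); }
     * VkInstance instance = new VkInstance(pInstance.get(0), createInfo);
     *
     * memFree(pInstance);
     * memFree(appInfo.pApplicationName());
     * createInfo.free();
     * appInfo.free();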

* * @param pCreateInfo points to an instance of {@link VkInstanceCreateInfo} controlling creation of the instance. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pInstance points a {@code VkInstance} handle in which the resulting instance is returned. */ @NativeType("VkResult") public static int vkCreateInstance(@NativeType("const VkInstanceCreateInfo *") VkInstanceCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkInstance *") PointerBuffer pInstance) { if (CHECKS) { check(pInstance, 1); } return nvkCreateInstance(pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pInstance)); } // --- [ vkDestroyInstance ] --- /** Unsafe version of: {@link #vkDestroyInstance DestroyInstance} */ public static void nvkDestroyInstance(VkInstance instance, long pAllocator) { long __functionAddress = instance.getCapabilities().vkDestroyInstance; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPPV(__functionAddress, instance.address(), pAllocator); } /** * Destroy an instance of Vulkan. * *
C Specification
* *

To destroy an instance, call:

* *
     * void vkDestroyInstance(
     *     VkInstance                                  instance,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All child objects created using {@code instance} must have been destroyed prior to destroying {@code instance}
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code instance} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code instance} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • If {@code instance} is not {@code NULL}, {@code instance} must be a valid {@code VkInstance} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code instance} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param instance the handle of the instance to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyInstance(VkInstance instance, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyInstance(instance, memAddressSafe(pAllocator)); } // --- [ vkEnumeratePhysicalDevices ] --- /** * Unsafe version of: {@link #vkEnumeratePhysicalDevices EnumeratePhysicalDevices} * * @param pPhysicalDeviceCount a pointer to an integer related to the number of physical devices available or queried, as described below. */ public static int nvkEnumeratePhysicalDevices(VkInstance instance, long pPhysicalDeviceCount, long pPhysicalDevices) { long __functionAddress = instance.getCapabilities().vkEnumeratePhysicalDevices; return callPPPI(__functionAddress, instance.address(), pPhysicalDeviceCount, pPhysicalDevices); } /** * Enumerates the physical devices accessible to a Vulkan instance. * *
C Specification
* *

To retrieve a list of physical device objects representing the physical devices installed in the system, call:

* *
     * VkResult vkEnumeratePhysicalDevices(
     *     VkInstance                                  instance,
     *     uint32_t*                                   pPhysicalDeviceCount,
     *     VkPhysicalDevice*                           pPhysicalDevices);
* *
Description
* *

If {@code pPhysicalDevices} is {@code NULL}, then the number of physical devices available is returned in {@code pPhysicalDeviceCount}. Otherwise, {@code pPhysicalDeviceCount} must point to a variable set by the user to the number of elements in the {@code pPhysicalDevices} array, and on return the variable is overwritten with the number of handles actually written to {@code pPhysicalDevices}. If {@code pPhysicalDeviceCount} is less than the number of physical devices available, at most {@code pPhysicalDeviceCount} structures will be written. If {@code pPhysicalDeviceCount} is smaller than the number of physical devices available, {@link #VK_INCOMPLETE INCOMPLETE} will be returned instead of {@link #VK_SUCCESS SUCCESS}, to indicate that not all the available physical devices were returned.
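
In LWJGL terms the usual count-then-fetch idiom looks roughly like the following sketch ({@code instance} is assumed to exist, and buffer cleanup is shown but error checking is omitted):

* *
     * IntBuffer pCount = memAllocInt(1);
     * vkEnumeratePhysicalDevices(instance, pCount, null);        // first call: query the count
     *
     * PointerBuffer pDevices = memAllocPointer(pCount.get(0));
     * vkEnumeratePhysicalDevices(instance, pCount, pDevices);    // second call: fetch the handles
     * VkPhysicalDevice physicalDevice = new VkPhysicalDevice(pDevices.get(0), instance);
     *
     * memFree(pDevices);
     * memFree(pCount);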

* *
Valid Usage (Implicit)
* *
    *
  • {@code instance} must be a valid {@code VkInstance} handle
  • *
  • {@code pPhysicalDeviceCount} must be a valid pointer to a {@code uint32_t} value
  • *
  • If the value referenced by {@code pPhysicalDeviceCount} is not 0, and {@code pPhysicalDevices} is not {@code NULL}, {@code pPhysicalDevices} must be a valid pointer to an array of {@code pPhysicalDeviceCount} {@code VkPhysicalDevice} handles
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_INCOMPLETE INCOMPLETE}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_INITIALIZATION_FAILED ERROR_INITIALIZATION_FAILED}
  • *
*
* * @param instance a handle to a Vulkan instance previously created with {@link #vkCreateInstance CreateInstance}. * @param pPhysicalDeviceCount a pointer to an integer related to the number of physical devices available or queried, as described below. * @param pPhysicalDevices either {@code NULL} or a pointer to an array of {@code VkPhysicalDevice} handles. */ @NativeType("VkResult") public static int vkEnumeratePhysicalDevices(VkInstance instance, @NativeType("uint32_t *") IntBuffer pPhysicalDeviceCount, @Nullable @NativeType("VkPhysicalDevice *") PointerBuffer pPhysicalDevices) { if (CHECKS) { check(pPhysicalDeviceCount, 1); checkSafe(pPhysicalDevices, pPhysicalDeviceCount.get(pPhysicalDeviceCount.position())); } return nvkEnumeratePhysicalDevices(instance, memAddress(pPhysicalDeviceCount), memAddressSafe(pPhysicalDevices)); } // --- [ vkGetPhysicalDeviceFeatures ] --- /** Unsafe version of: {@link #vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures} */ public static void nvkGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, long pFeatures) { long __functionAddress = physicalDevice.getCapabilities().vkGetPhysicalDeviceFeatures; callPPV(__functionAddress, physicalDevice.address(), pFeatures); } /** * Reports capabilities of a physical device. * *
C Specification
* *

To query supported features, call:

* *
     * void vkGetPhysicalDeviceFeatures(
     *     VkPhysicalDevice                            physicalDevice,
     *     VkPhysicalDeviceFeatures*                   pFeatures);
* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • {@code pFeatures} must be a valid pointer to a {@link VkPhysicalDeviceFeatures} structure
  • *
* *
See Also
* *

{@link VkPhysicalDeviceFeatures}
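
For example (a sketch; {@code physicalDevice} is assumed, e.g. obtained as in the enumeration sketch above):

* *
     * VkPhysicalDeviceFeatures features = VkPhysicalDeviceFeatures.calloc();
     * vkGetPhysicalDeviceFeatures(physicalDevice, features);
     * boolean hasGeometryShader    = features.geometryShader();
     * boolean hasSamplerAnisotropy = features.samplerAnisotropy();
     * features.free();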

* * @param physicalDevice the physical device from which to query the supported features. * @param pFeatures a pointer to a {@link VkPhysicalDeviceFeatures} structure in which the physical device features are returned. For each feature, a value of {@link #VK_TRUE TRUE} indicates that the feature is supported on this physical device, and {@link #VK_FALSE FALSE} indicates that the feature is not supported. */ public static void vkGetPhysicalDeviceFeatures(VkPhysicalDevice physicalDevice, @NativeType("VkPhysicalDeviceFeatures *") VkPhysicalDeviceFeatures pFeatures) { nvkGetPhysicalDeviceFeatures(physicalDevice, pFeatures.address()); } // --- [ vkGetPhysicalDeviceFormatProperties ] --- /** Unsafe version of: {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties} */ public static void nvkGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, int format, long pFormatProperties) { long __functionAddress = physicalDevice.getCapabilities().vkGetPhysicalDeviceFormatProperties; callPPV(__functionAddress, physicalDevice.address(), format, pFormatProperties); } /** * Lists physical device's format capabilities. * *
C Specification
* *

To query supported format features which are properties of the physical device, call:

* *
     * void vkGetPhysicalDeviceFormatProperties(
     *     VkPhysicalDevice                            physicalDevice,
     *     VkFormat                                    format,
     *     VkFormatProperties*                         pFormatProperties);
* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • {@code format} must be a valid {@code VkFormat} value
  • *
  • {@code pFormatProperties} must be a valid pointer to a {@link VkFormatProperties} structure
  • *
* *
See Also
* *

{@link VkFormatProperties}
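
For instance (a sketch; {@code physicalDevice} assumed), checking whether {@code VK_FORMAT_R8G8B8A8_UNORM} can be sampled from an optimally tiled image:

* *
     * VkFormatProperties props = VkFormatProperties.calloc();
     * vkGetPhysicalDeviceFormatProperties(physicalDevice, VK_FORMAT_R8G8B8A8_UNORM, props);
     * boolean sampledOptimal = (props.optimalTilingFeatures() & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != 0;
     * props.free();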

* * @param physicalDevice the physical device from which to query the format properties. * @param format the format whose properties are queried. * @param pFormatProperties a pointer to a {@link VkFormatProperties} structure in which physical device properties for {@code format} are returned. */ public static void vkGetPhysicalDeviceFormatProperties(VkPhysicalDevice physicalDevice, @NativeType("VkFormat") int format, @NativeType("VkFormatProperties *") VkFormatProperties pFormatProperties) { nvkGetPhysicalDeviceFormatProperties(physicalDevice, format, pFormatProperties.address()); } // --- [ vkGetPhysicalDeviceImageFormatProperties ] --- /** Unsafe version of: {@link #vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties} */ public static int nvkGetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, int format, int type, int tiling, int usage, int flags, long pImageFormatProperties) { long __functionAddress = physicalDevice.getCapabilities().vkGetPhysicalDeviceImageFormatProperties; return callPPI(__functionAddress, physicalDevice.address(), format, type, tiling, usage, flags, pImageFormatProperties); } /** * Lists physical device's image format capabilities. * *
C Specification
* *

To query additional capabilities specific to image types, call:

* *
     * VkResult vkGetPhysicalDeviceImageFormatProperties(
     *     VkPhysicalDevice                            physicalDevice,
     *     VkFormat                                    format,
     *     VkImageType                                 type,
     *     VkImageTiling                               tiling,
     *     VkImageUsageFlags                           usage,
     *     VkImageCreateFlags                          flags,
     *     VkImageFormatProperties*                    pImageFormatProperties);
* *
Description
* *

The {@code format}, {@code type}, {@code tiling}, {@code usage}, and {@code flags} parameters correspond to parameters that would be consumed by {@link #vkCreateImage CreateImage} (as members of {@link VkImageCreateInfo}).

* *

If {@code format} is not a supported image format, or if the combination of {@code format}, {@code type}, {@code tiling}, {@code usage}, and {@code flags} is not supported for images, then {@link #vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties} returns {@link #VK_ERROR_FORMAT_NOT_SUPPORTED ERROR_FORMAT_NOT_SUPPORTED}.

* *

The limitations on an image format that are reported by {@link #vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties} have the following property: if {@code usage1} and {@code usage2} of type {@code VkImageUsageFlags} are such that the bits set in {@code usage1} are a subset of the bits set in {@code usage2}, and {@code flags1} and {@code flags2} of type {@code VkImageCreateFlags} are such that the bits set in {@code flags1} are a subset of the bits set in {@code flags2}, then the limitations for {@code usage1} and {@code flags1} must be no more strict than the limitations for {@code usage2} and {@code flags2}, for all values of {@code format}, {@code type}, and {@code tiling}.

* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • {@code format} must be a valid {@code VkFormat} value
  • *
  • {@code type} must be a valid {@code VkImageType} value
  • *
  • {@code tiling} must be a valid {@code VkImageTiling} value
  • *
  • {@code usage} must be a valid combination of {@code VkImageUsageFlagBits} values
  • *
  • {@code usage} must not be 0
  • *
  • {@code flags} must be a valid combination of {@code VkImageCreateFlagBits} values
  • *
  • {@code pImageFormatProperties} must be a valid pointer to a {@link VkImageFormatProperties} structure
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_FORMAT_NOT_SUPPORTED ERROR_FORMAT_NOT_SUPPORTED}
  • *
*
* *
See Also
* *

{@link VkImageFormatProperties}

* * @param physicalDevice the physical device from which to query the image capabilities. * @param format a {@code VkFormat} value specifying the image format, corresponding to {@link VkImageCreateInfo}{@code ::format}. * @param type a {@code VkImageType} value specifying the image type, corresponding to {@link VkImageCreateInfo}{@code ::imageType}. * @param tiling a {@code VkImageTiling} value specifying the image tiling, corresponding to {@link VkImageCreateInfo}{@code ::tiling}. * @param usage a bitmask of {@code VkImageUsageFlagBits} specifying the intended usage of the image, corresponding to {@link VkImageCreateInfo}{@code ::usage}. * @param flags a bitmask of {@code VkImageCreateFlagBits} specifying additional parameters of the image, corresponding to {@link VkImageCreateInfo}{@code ::flags}. * @param pImageFormatProperties points to an instance of the {@link VkImageFormatProperties} structure in which capabilities are returned. */ @NativeType("VkResult") public static int vkGetPhysicalDeviceImageFormatProperties(VkPhysicalDevice physicalDevice, @NativeType("VkFormat") int format, @NativeType("VkImageType") int type, @NativeType("VkImageTiling") int tiling, @NativeType("VkImageUsageFlags") int usage, @NativeType("VkImageCreateFlags") int flags, @NativeType("VkImageFormatProperties *") VkImageFormatProperties pImageFormatProperties) { return nvkGetPhysicalDeviceImageFormatProperties(physicalDevice, format, type, tiling, usage, flags, pImageFormatProperties.address()); } // --- [ vkGetPhysicalDeviceProperties ] --- /** Unsafe version of: {@link #vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties} */ public static void nvkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, long pProperties) { long __functionAddress = physicalDevice.getCapabilities().vkGetPhysicalDeviceProperties; callPPV(__functionAddress, physicalDevice.address(), pProperties); } /** * Returns properties of a physical device. * *
C Specification
* *

To query general properties of physical devices once enumerated, call:

* *
     * void vkGetPhysicalDeviceProperties(
     *     VkPhysicalDevice                            physicalDevice,
     *     VkPhysicalDeviceProperties*                 pProperties);
* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • {@code pProperties} must be a valid pointer to a {@link VkPhysicalDeviceProperties} structure
  • *
* *
See Also
* *

{@link VkPhysicalDeviceProperties}

* * @param physicalDevice the handle to the physical device whose properties will be queried. * @param pProperties points to an instance of the {@link VkPhysicalDeviceProperties} structure, that will be filled with returned information. */ public static void vkGetPhysicalDeviceProperties(VkPhysicalDevice physicalDevice, @NativeType("VkPhysicalDeviceProperties *") VkPhysicalDeviceProperties pProperties) { nvkGetPhysicalDeviceProperties(physicalDevice, pProperties.address()); } // --- [ vkGetPhysicalDeviceQueueFamilyProperties ] --- /** * Unsafe version of: {@link #vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties} * * @param pQueueFamilyPropertyCount a pointer to an integer related to the number of queue families available or queried, as described below. */ public static void nvkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, long pQueueFamilyPropertyCount, long pQueueFamilyProperties) { long __functionAddress = physicalDevice.getCapabilities().vkGetPhysicalDeviceQueueFamilyProperties; callPPPV(__functionAddress, physicalDevice.address(), pQueueFamilyPropertyCount, pQueueFamilyProperties); } /** * Reports properties of the queues of the specified physical device. * *
C Specification
* *

To query properties of queues available on a physical device, call:

* *
     * void vkGetPhysicalDeviceQueueFamilyProperties(
     *     VkPhysicalDevice                            physicalDevice,
     *     uint32_t*                                   pQueueFamilyPropertyCount,
     *     VkQueueFamilyProperties*                    pQueueFamilyProperties);
* *
Description
* *

If {@code pQueueFamilyProperties} is {@code NULL}, then the number of queue families available is returned in {@code pQueueFamilyPropertyCount}. Otherwise, {@code pQueueFamilyPropertyCount} must point to a variable set by the user to the number of elements in the {@code pQueueFamilyProperties} array, and on return the variable is overwritten with the number of structures actually written to {@code pQueueFamilyProperties}. If {@code pQueueFamilyPropertyCount} is less than the number of queue families available, at most {@code pQueueFamilyPropertyCount} structures will be written.
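
The same count-then-fetch idiom as {@link #vkEnumeratePhysicalDevices EnumeratePhysicalDevices} applies here. A sketch that looks for a graphics-capable family ({@code physicalDevice} assumed; the resulting index is only illustrative):

* *
     * IntBuffer pCount = memAllocInt(1);
     * vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, null);
     *
     * VkQueueFamilyProperties.Buffer families = VkQueueFamilyProperties.calloc(pCount.get(0));
     * vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, pCount, families);
     *
     * int graphicsFamily = -1;
     * for (int i = 0; i < families.capacity(); i++) {
     *     if ((families.get(i).queueFlags() & VK_QUEUE_GRAPHICS_BIT) != 0) { graphicsFamily = i; break; }
     * }
     *
     * families.free();
     * memFree(pCount);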

* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • {@code pQueueFamilyPropertyCount} must be a valid pointer to a {@code uint32_t} value
  • *
  • If the value referenced by {@code pQueueFamilyPropertyCount} is not 0, and {@code pQueueFamilyProperties} is not {@code NULL}, {@code pQueueFamilyProperties} must be a valid pointer to an array of {@code pQueueFamilyPropertyCount} {@link VkQueueFamilyProperties} structures
  • *
* *
See Also
* *

{@link VkQueueFamilyProperties}

* * @param physicalDevice the handle to the physical device whose properties will be queried. * @param pQueueFamilyPropertyCount a pointer to an integer related to the number of queue families available or queried, as described below. * @param pQueueFamilyProperties either {@code NULL} or a pointer to an array of {@link VkQueueFamilyProperties} structures. */ public static void vkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, @NativeType("uint32_t *") IntBuffer pQueueFamilyPropertyCount, @Nullable @NativeType("VkQueueFamilyProperties *") VkQueueFamilyProperties.Buffer pQueueFamilyProperties) { if (CHECKS) { check(pQueueFamilyPropertyCount, 1); checkSafe(pQueueFamilyProperties, pQueueFamilyPropertyCount.get(pQueueFamilyPropertyCount.position())); } nvkGetPhysicalDeviceQueueFamilyProperties(physicalDevice, memAddress(pQueueFamilyPropertyCount), memAddressSafe(pQueueFamilyProperties)); } // --- [ vkGetPhysicalDeviceMemoryProperties ] --- /** Unsafe version of: {@link #vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties} */ public static void nvkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, long pMemoryProperties) { long __functionAddress = physicalDevice.getCapabilities().vkGetPhysicalDeviceMemoryProperties; callPPV(__functionAddress, physicalDevice.address(), pMemoryProperties); } /** * Reports memory information for the specified physical device. * *
C Specification
* *

To query memory properties, call:

* *
     * void vkGetPhysicalDeviceMemoryProperties(
     *     VkPhysicalDevice                            physicalDevice,
     *     VkPhysicalDeviceMemoryProperties*           pMemoryProperties);
* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • {@code pMemoryProperties} must be a valid pointer to a {@link VkPhysicalDeviceMemoryProperties} structure
  • *
* *
See Also
* *

{@link VkPhysicalDeviceMemoryProperties}
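
A common use is picking a memory type index that satisfies both an allocation's {@code memoryTypeBits} and a set of required property flags. A sketch (all names assumed; {@code memoryTypeBits} would come from a {@link VkMemoryRequirements} query):

* *
     * VkPhysicalDeviceMemoryProperties memProps = VkPhysicalDeviceMemoryProperties.calloc();
     * vkGetPhysicalDeviceMemoryProperties(physicalDevice, memProps);
     *
     * int required = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
     * int memoryTypeIndex = -1;
     * for (int i = 0; i < memProps.memoryTypeCount(); i++) {
     *     boolean allowed  = (memoryTypeBits & (1 << i)) != 0;   // memoryTypeBits is an assumed variable
     *     boolean suitable = (memProps.memoryTypes(i).propertyFlags() & required) == required;
     *     if (allowed && suitable) { memoryTypeIndex = i; break; }
     * }
     *
     * memProps.free();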

* * @param physicalDevice the handle to the device to query. * @param pMemoryProperties points to an instance of {@link VkPhysicalDeviceMemoryProperties} structure in which the properties are returned. */ public static void vkGetPhysicalDeviceMemoryProperties(VkPhysicalDevice physicalDevice, @NativeType("VkPhysicalDeviceMemoryProperties *") VkPhysicalDeviceMemoryProperties pMemoryProperties) { nvkGetPhysicalDeviceMemoryProperties(physicalDevice, pMemoryProperties.address()); } // --- [ vkGetInstanceProcAddr ] --- /** Unsafe version of: {@link #vkGetInstanceProcAddr GetInstanceProcAddr} */ public static long nvkGetInstanceProcAddr(VkInstance instance, long pName) { long __functionAddress = VK.getGlobalCommands().vkGetInstanceProcAddr; return callPPP(__functionAddress, instance.address(), pName); } /** * Return a function pointer for a command. * *
C Specification
* *

Vulkan commands are not necessarily exposed statically on a platform. Function pointers for all Vulkan commands can be obtained with the command:

* *
     * PFN_vkVoidFunction vkGetInstanceProcAddr(
     *     VkInstance                                  instance,
     *     const char*                                 pName);
* *
Description
* *

{@link #vkGetInstanceProcAddr GetInstanceProcAddr} itself is obtained in a platform- and loader- specific manner. Typically, the loader library will export this command as a function symbol, so applications can link against the loader library, or load it dynamically and look up the symbol using platform-specific APIs. Loaders are encouraged to export function symbols for all other core Vulkan commands as well; if this is done, then applications that use only the core Vulkan commands have no need to use {@link #vkGetInstanceProcAddr GetInstanceProcAddr}.

* *

The table below defines the various use cases for {@link #vkGetInstanceProcAddr GetInstanceProcAddr} and expected return value ("{@code fp}" is "{@code function pointer}") for each case.

* *

The returned function pointer is of type {@code PFN_vkVoidFunction}, and must be cast to the type of the command being queried.

* *
vkGetInstanceProcAddr behavior
{@code instance} | {@code pName} | return value
* | {@code NULL} | undefined
invalid instance | * | undefined
{@code NULL} | {@link #vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties} | fp
{@code NULL} | {@link #vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties} | fp
{@code NULL} | {@link #vkCreateInstance CreateInstance} | fp
{@code NULL} | * (any {@code pName} not covered above) | {@code NULL}
instance | core Vulkan command | fp¹
instance | enabled instance extension commands for {@code instance} | fp¹
instance | available device extension² commands for {@code instance} | fp¹
instance | * (any {@code pName} not covered above) | {@code NULL}
* *
*
1
*
The returned function pointer must only be called with a dispatchable object (the first parameter) that is {@code instance} or a child of {@code instance}, e.g. {@code VkInstance}, {@code VkPhysicalDevice}, {@code VkDevice}, {@code VkQueue}, or {@code VkCommandBuffer}.
*
2
*
An “available extension” is an extension function supported by any of the loader, driver or layer.
*
* *
Valid Usage (Implicit)
* *
    *
  • If {@code instance} is not {@code NULL}, {@code instance} must be a valid {@code VkInstance} handle
  • *
  • {@code pName} must be a null-terminated UTF-8 string
  • *
* *
See Also
* *

{@code PFN_vkVoidFunction}
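
LWJGL resolves function pointers internally (see {@code VKCapabilitiesInstance}), but the command can also be called directly. A tiny sketch using the {@code CharSequence} overload documented below ({@code instance} assumed; {@code NULL} is {@code MemoryUtil.NULL}):

* *
     * long fpCreateDevice = vkGetInstanceProcAddr(instance, "vkCreateDevice");
     * if (fpCreateDevice == NULL) {
     *     // the command is not available through this instance
     * }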

* * @param instance the instance that the function pointer will be compatible with, or {@code NULL} for commands not dependent on any instance. * @param pName the name of the command to obtain. */ @NativeType("PFN_vkVoidFunction") public static long vkGetInstanceProcAddr(VkInstance instance, @NativeType("const char *") ByteBuffer pName) { if (CHECKS) { checkNT1(pName); } return nvkGetInstanceProcAddr(instance, memAddress(pName)); } /** * Return a function pointer for a command. * *
C Specification
* *

Vulkan commands are not necessarily exposed statically on a platform. Function pointers for all Vulkan commands can be obtained with the command:

* *
     * PFN_vkVoidFunction vkGetInstanceProcAddr(
     *     VkInstance                                  instance,
     *     const char*                                 pName);
* *
Description
* *

{@link #vkGetInstanceProcAddr GetInstanceProcAddr} itself is obtained in a platform- and loader- specific manner. Typically, the loader library will export this command as a function symbol, so applications can link against the loader library, or load it dynamically and look up the symbol using platform-specific APIs. Loaders are encouraged to export function symbols for all other core Vulkan commands as well; if this is done, then applications that use only the core Vulkan commands have no need to use {@link #vkGetInstanceProcAddr GetInstanceProcAddr}.

* *

The table below defines the various use cases for {@link #vkGetInstanceProcAddr GetInstanceProcAddr} and expected return value ("{@code fp}" is "{@code function pointer}") for each case.

* *

The returned function pointer is of type {@code PFN_vkVoidFunction}, and must be cast to the type of the command being queried.

* *
vkGetInstanceProcAddr behavior
{@code instance} | {@code pName} | return value
* | {@code NULL} | undefined
invalid instance | * | undefined
{@code NULL} | {@link #vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties} | fp
{@code NULL} | {@link #vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties} | fp
{@code NULL} | {@link #vkCreateInstance CreateInstance} | fp
{@code NULL} | * (any {@code pName} not covered above) | {@code NULL}
instance | core Vulkan command | fp¹
instance | enabled instance extension commands for {@code instance} | fp¹
instance | available device extension² commands for {@code instance} | fp¹
instance | * (any {@code pName} not covered above) | {@code NULL}
* *
*
1
*
The returned function pointer must only be called with a dispatchable object (the first parameter) that is {@code instance} or a child of {@code instance}, e.g. {@code VkInstance}, {@code VkPhysicalDevice}, {@code VkDevice}, {@code VkQueue}, or {@code VkCommandBuffer}.
*
2
*
An “available extension” is an extension function supported by any of the loader, driver or layer.
*
* *
Valid Usage (Implicit)
* *
    *
  • If {@code instance} is not {@code NULL}, {@code instance} must be a valid {@code VkInstance} handle
  • *
  • {@code pName} must be a null-terminated UTF-8 string
  • *
* *
See Also
* *

{@code PFN_vkVoidFunction}

* * @param instance the instance that the function pointer will be compatible with, or {@code NULL} for commands not dependent on any instance. * @param pName the name of the command to obtain. */ @NativeType("PFN_vkVoidFunction") public static long vkGetInstanceProcAddr(VkInstance instance, @NativeType("const char *") CharSequence pName) { MemoryStack stack = stackGet(); int stackPointer = stack.getPointer(); try { ByteBuffer pNameEncoded = stack.ASCII(pName); return nvkGetInstanceProcAddr(instance, memAddress(pNameEncoded)); } finally { stack.setPointer(stackPointer); } } // --- [ vkGetDeviceProcAddr ] --- /** Unsafe version of: {@link #vkGetDeviceProcAddr GetDeviceProcAddr} */ public static long nvkGetDeviceProcAddr(VkDevice device, long pName) { long __functionAddress = device.getCapabilities().vkGetDeviceProcAddr; return callPPP(__functionAddress, device.address(), pName); } /** * Return a function pointer for a command. * *
C Specification
* *

In order to support systems with multiple Vulkan implementations comprising heterogeneous collections of hardware and software, the function pointers returned by {@link #vkGetInstanceProcAddr GetInstanceProcAddr} may point to dispatch code, which calls a different real implementation for different {@code VkDevice} objects (and objects created from them). The overhead of this internal dispatch can be avoided by obtaining device-specific function pointers for any commands that use a device or device-child object as their dispatchable object. Such function pointers can be obtained with the command:

* *
     * PFN_vkVoidFunction vkGetDeviceProcAddr(
     *     VkDevice                                    device,
     *     const char*                                 pName);
* *
Description
* *

The returned function pointer is of type {@code PFN_vkVoidFunction}, and must be cast to the type of the command being queried.

* *
vkGetDeviceProcAddr behavior
{@code device} | {@code pName} | return value
{@code NULL} | * | undefined
invalid device | * | undefined
device | {@code NULL} | undefined
device | core Vulkan command | fp¹
device | enabled extension commands | fp¹
device | * (any {@code pName} not covered above) | {@code NULL}
* *
*
1
*
The returned function pointer must only be called with a dispatchable object (the first parameter) that is {@code device} or a child of {@code device}, e.g. {@code VkDevice}, {@code VkQueue}, or {@code VkCommandBuffer}.
*
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pName} must be a null-terminated UTF-8 string
  • *
* *
See Also
* *

{@code PFN_vkVoidFunction}

* * @param device * @param pName */ @NativeType("PFN_vkVoidFunction") public static long vkGetDeviceProcAddr(VkDevice device, @NativeType("const char *") ByteBuffer pName) { if (CHECKS) { checkNT1(pName); } return nvkGetDeviceProcAddr(device, memAddress(pName)); } /** * Return a function pointer for a command. * *
C Specification
* *

In order to support systems with multiple Vulkan implementations comprising heterogeneous collections of hardware and software, the function pointers returned by {@link #vkGetInstanceProcAddr GetInstanceProcAddr} may point to dispatch code, which calls a different real implementation for different {@code VkDevice} objects (and objects created from them). The overhead of this internal dispatch can be avoided by obtaining device-specific function pointers for any commands that use a device or device-child object as their dispatchable object. Such function pointers can be obtained with the command:

* *
     * PFN_vkVoidFunction vkGetDeviceProcAddr(
     *     VkDevice                                    device,
     *     const char*                                 pName);
* *
Description
* *

The returned function pointer is of type {@code PFN_vkVoidFunction}, and must be cast to the type of the command being queried.

* *
vkGetDeviceProcAddr behavior
{@code device} | {@code pName} | return value
{@code NULL} | * | undefined
invalid device | * | undefined
device | {@code NULL} | undefined
device | core Vulkan command | fp¹
device | enabled extension commands | fp¹
device | * (any {@code pName} not covered above) | {@code NULL}
* *
*
1
*
The returned function pointer must only be called with a dispatchable object (the first parameter) that is {@code device} or a child of {@code device}, e.g. {@code VkDevice}, {@code VkQueue}, or {@code VkCommandBuffer}.
*
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pName} must be a null-terminated UTF-8 string
  • *
* *
See Also
* *

{@code PFN_vkVoidFunction}

* * @param device * @param pName */ @NativeType("PFN_vkVoidFunction") public static long vkGetDeviceProcAddr(VkDevice device, @NativeType("const char *") CharSequence pName) { MemoryStack stack = stackGet(); int stackPointer = stack.getPointer(); try { ByteBuffer pNameEncoded = stack.ASCII(pName); return nvkGetDeviceProcAddr(device, memAddress(pNameEncoded)); } finally { stack.setPointer(stackPointer); } } // --- [ vkCreateDevice ] --- /** Unsafe version of: {@link #vkCreateDevice CreateDevice} */ public static int nvkCreateDevice(VkPhysicalDevice physicalDevice, long pCreateInfo, long pAllocator, long pDevice) { long __functionAddress = physicalDevice.getCapabilities().vkCreateDevice; if (CHECKS) { VkDeviceCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, physicalDevice.address(), pCreateInfo, pAllocator, pDevice); } /** * Create a new device instance. * *
C Specification
* *

A logical device is created as a connection to a physical device. To create a logical device, call:

* *
     * VkResult vkCreateDevice(
     *     VkPhysicalDevice                            physicalDevice,
     *     const VkDeviceCreateInfo*                   pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkDevice*                                   pDevice);
* *
Description
* *

{@link #vkCreateDevice CreateDevice} verifies that extensions and features requested in the {@code ppEnabledExtensionNames} and {@code pEnabledFeatures} members of {@code pCreateInfo}, respectively, are supported by the implementation. If any requested extension is not supported, {@link #vkCreateDevice CreateDevice} must return {@link #VK_ERROR_EXTENSION_NOT_PRESENT ERROR_EXTENSION_NOT_PRESENT}. If any requested feature is not supported, {@link #vkCreateDevice CreateDevice} must return {@link #VK_ERROR_FEATURE_NOT_PRESENT ERROR_FEATURE_NOT_PRESENT}. Support for extensions can be checked before creating a device by querying {@link #vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties}. Support for features can similarly be checked by querying {@link #vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures}.

* *

After verifying and enabling the extensions the {@code VkDevice} object is created and returned to the application. If a requested extension is only supported by a layer, both the layer and the extension need to be specified at {@link #vkCreateInstance CreateInstance} time for the creation to succeed.

* *

Multiple logical devices can be created from the same physical device. Logical device creation may fail due to lack of device-specific resources (in addition to the other errors). If that occurs, {@link #vkCreateDevice CreateDevice} will return {@link #VK_ERROR_TOO_MANY_OBJECTS ERROR_TOO_MANY_OBJECTS}.

* *
Valid Usage
* *
    *
  • All required extensions for each extension in the {@link VkDeviceCreateInfo}{@code ::ppEnabledExtensionNames} list must also be present in that list.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkDeviceCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pDevice} must be a valid pointer to a {@code VkDevice} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_INITIALIZATION_FAILED ERROR_INITIALIZATION_FAILED}
  • *
  • {@link #VK_ERROR_EXTENSION_NOT_PRESENT ERROR_EXTENSION_NOT_PRESENT}
  • *
  • {@link #VK_ERROR_FEATURE_NOT_PRESENT ERROR_FEATURE_NOT_PRESENT}
  • *
  • {@link #VK_ERROR_TOO_MANY_OBJECTS ERROR_TOO_MANY_OBJECTS}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkDeviceCreateInfo}
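A minimal sketch of the call sequence described above (added for illustration, not part of the generated documentation), assuming {@code physicalDevice} is valid and that queue family 0 exists with at least one queue; extension and feature enablement are omitted for brevity.

    import static org.lwjgl.vulkan.VK10.*;

    import org.lwjgl.PointerBuffer;
    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.VkDevice;
    import org.lwjgl.vulkan.VkDeviceCreateInfo;
    import org.lwjgl.vulkan.VkDeviceQueueCreateInfo;
    import org.lwjgl.vulkan.VkPhysicalDevice;

    // Illustrative helper (editor's sketch), not part of LWJGL.
    class CreateDeviceSketch {
        static VkDevice createLogicalDevice(VkPhysicalDevice physicalDevice) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                // One queue from queue family 0 (assumed to exist), default priority.
                VkDeviceQueueCreateInfo.Buffer queueInfo = VkDeviceQueueCreateInfo.callocStack(1, stack)
                    .sType(VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO)
                    .queueFamilyIndex(0)
                    .pQueuePriorities(stack.floats(1.0f));

                VkDeviceCreateInfo createInfo = VkDeviceCreateInfo.callocStack(stack)
                    .sType(VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO)
                    .pQueueCreateInfos(queueInfo);

                PointerBuffer pDevice = stack.mallocPointer(1);
                int err = vkCreateDevice(physicalDevice, createInfo, null, pDevice);
                if (err != VK_SUCCESS) {
                    throw new IllegalStateException("vkCreateDevice failed: " + err);
                }
                // Wrapping the handle lets LWJGL load the device-level commands.
                return new VkDevice(pDevice.get(0), physicalDevice, createInfo);
            }
        }
    }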

* * @param physicalDevice must be one of the device handles returned from a call to {@link #vkEnumeratePhysicalDevices EnumeratePhysicalDevices} (see Physical Device Enumeration). * @param pCreateInfo a pointer to a {@link VkDeviceCreateInfo} structure containing information about how to create the device. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pDevice points to a handle in which the created {@code VkDevice} is returned. */ @NativeType("VkResult") public static int vkCreateDevice(VkPhysicalDevice physicalDevice, @NativeType("const VkDeviceCreateInfo *") VkDeviceCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkDevice *") PointerBuffer pDevice) { if (CHECKS) { check(pDevice, 1); } return nvkCreateDevice(physicalDevice, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pDevice)); } // --- [ vkDestroyDevice ] --- /** Unsafe version of: {@link #vkDestroyDevice DestroyDevice} */ public static void nvkDestroyDevice(VkDevice device, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyDevice; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPPV(__functionAddress, device.address(), pAllocator); } /** * Destroy a logical device. * *
C Specification
* *

To destroy a device, call:

* *
     * void vkDestroyDevice(
     *     VkDevice                                    device,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Description
* *

To ensure that no work is active on the device, {@link #vkDeviceWaitIdle DeviceWaitIdle} can be used to gate the destruction of the device. Prior to destroying a device, an application is responsible for destroying/freeing any Vulkan objects that were created using that device as the first parameter of the corresponding {@code vkCreate*} or {@code vkAllocate*} command.

* *
Note
* *

The lifetime of each of these objects is bound by the lifetime of the {@code VkDevice} object. Therefore, to avoid resource leaks, it is critical that an application explicitly free all of these resources prior to calling {@link #vkDestroyDevice DestroyDevice}.

*
* *
Valid Usage
* *
    *
  • All child objects created on {@code device} must have been destroyed prior to destroying {@code device}
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code device} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code device} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • If {@code device} is not {@code NULL}, {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code device} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}
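A brief illustrative teardown sketch (editor's addition), assuming all child objects of {@code device} have already been destroyed and that no allocation callbacks were supplied at creation time:

    import static org.lwjgl.vulkan.VK10.*;

    import org.lwjgl.vulkan.VkDevice;

    // Illustrative helper (editor's sketch), not part of LWJGL.
    class DestroyDeviceSketch {
        static void shutDown(VkDevice device) {
            vkDeviceWaitIdle(device);      // gate destruction on the device being idle
            vkDestroyDevice(device, null); // NULL allocator, matching creation
        }
    }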

* * @param device the logical device to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyDevice(VkDevice device, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyDevice(device, memAddressSafe(pAllocator)); } // --- [ vkEnumerateInstanceExtensionProperties ] --- /** * Unsafe version of: {@link #vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties} * * @param pPropertyCount a pointer to an integer related to the number of extension properties available or queried, as described below. */ public static int nvkEnumerateInstanceExtensionProperties(long pLayerName, long pPropertyCount, long pProperties) { long __functionAddress = VK.getGlobalCommands().vkEnumerateInstanceExtensionProperties; return callPPPI(__functionAddress, pLayerName, pPropertyCount, pProperties); } /** * Returns up to requested number of global extension properties. * *
C Specification
* *

To query the available instance extensions, call:

* *
     * VkResult vkEnumerateInstanceExtensionProperties(
     *     const char*                                 pLayerName,
     *     uint32_t*                                   pPropertyCount,
     *     VkExtensionProperties*                      pProperties);
* *
Description
* *

When the {@code pLayerName} parameter is {@code NULL}, only extensions provided by the Vulkan implementation or by implicitly enabled layers are returned. When {@code pLayerName} is the name of a layer, the instance extensions provided by that layer are returned.

* *

If {@code pProperties} is {@code NULL}, then the number of extension properties available is returned in {@code pPropertyCount}. Otherwise, {@code pPropertyCount} must point to a variable set by the user to the number of elements in the {@code pProperties} array, and on return the variable is overwritten with the number of structures actually written to {@code pProperties}. If {@code pPropertyCount} is less than the number of extension properties available, at most {@code pPropertyCount} structures will be written. If {@code pPropertyCount} is smaller than the number of extensions available, {@link #VK_INCOMPLETE INCOMPLETE} will be returned instead of {@link #VK_SUCCESS SUCCESS}, to indicate that not all the available properties were returned.

* *

Because the list of available layers may change externally between calls to {@code vkEnumerateInstanceExtensionProperties}, two calls may retrieve different results if a {@code pLayerName} is available in one call but not in another. The extensions supported by a layer may also change between two calls, e.g. if the layer implementation is replaced by a different version between those calls.

* *
Valid Usage (Implicit)
* *
    *
  • If {@code pLayerName} is not {@code NULL}, {@code pLayerName} must be a null-terminated UTF-8 string
  • *
  • {@code pPropertyCount} must be a valid pointer to a {@code uint32_t} value
  • *
  • If the value referenced by {@code pPropertyCount} is not 0, and {@code pProperties} is not {@code NULL}, {@code pProperties} must be a valid pointer to an array of {@code pPropertyCount} {@link VkExtensionProperties} structures
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_INCOMPLETE INCOMPLETE}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_LAYER_NOT_PRESENT ERROR_LAYER_NOT_PRESENT}
  • *
*
* *
See Also
* *

{@link VkExtensionProperties}
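An editor-added sketch of the two-call enumeration pattern described above (query the count first, then fill the array); the class and method names are placeholders, and the returned {@code VkResult} checks are omitted for brevity.

    import static org.lwjgl.vulkan.VK10.vkEnumerateInstanceExtensionProperties;

    import java.nio.ByteBuffer;
    import java.nio.IntBuffer;
    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.VkExtensionProperties;

    // Illustrative helper (editor's sketch), not part of LWJGL.
    class InstanceExtensionsSketch {
        static void printInstanceExtensions() {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                IntBuffer count = stack.mallocInt(1);
                // First call: pProperties == NULL, only the count is written.
                vkEnumerateInstanceExtensionProperties((ByteBuffer)null, count, null);

                VkExtensionProperties.Buffer props = VkExtensionProperties.mallocStack(count.get(0), stack);
                // Second call: count is both input (array capacity) and output (elements written).
                vkEnumerateInstanceExtensionProperties((ByteBuffer)null, count, props);

                for (int i = 0; i < count.get(0); i++) {
                    System.out.println(props.get(i).extensionNameString());
                }
            }
        }
    }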

* * @param pLayerName either {@code NULL} or a pointer to a null-terminated UTF-8 string naming the layer to retrieve extensions from. * @param pPropertyCount a pointer to an integer related to the number of extension properties available or queried, as described below. * @param pProperties either {@code NULL} or a pointer to an array of {@link VkExtensionProperties} structures. */ @NativeType("VkResult") public static int vkEnumerateInstanceExtensionProperties(@Nullable @NativeType("const char *") ByteBuffer pLayerName, @NativeType("uint32_t *") IntBuffer pPropertyCount, @Nullable @NativeType("VkExtensionProperties *") VkExtensionProperties.Buffer pProperties) { if (CHECKS) { checkNT1Safe(pLayerName); check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount.get(pPropertyCount.position())); } return nvkEnumerateInstanceExtensionProperties(memAddressSafe(pLayerName), memAddress(pPropertyCount), memAddressSafe(pProperties)); } /** * Returns up to requested number of global extension properties. * *
C Specification
* *

To query the available instance extensions, call:

* *
     * VkResult vkEnumerateInstanceExtensionProperties(
     *     const char*                                 pLayerName,
     *     uint32_t*                                   pPropertyCount,
     *     VkExtensionProperties*                      pProperties);
* *
Description
* *

When the {@code pLayerName} parameter is {@code NULL}, only extensions provided by the Vulkan implementation or by implicitly enabled layers are returned. When {@code pLayerName} is the name of a layer, the instance extensions provided by that layer are returned.

* *

If {@code pProperties} is {@code NULL}, then the number of extension properties available is returned in {@code pPropertyCount}. Otherwise, {@code pPropertyCount} must point to a variable set by the user to the number of elements in the {@code pProperties} array, and on return the variable is overwritten with the number of structures actually written to {@code pProperties}. If {@code pPropertyCount} is less than the number of extension properties available, at most {@code pPropertyCount} structures will be written. If {@code pPropertyCount} is smaller than the number of extensions available, {@link #VK_INCOMPLETE INCOMPLETE} will be returned instead of {@link #VK_SUCCESS SUCCESS}, to indicate that not all the available properties were returned.

* *

Because the list of available layers may change externally between calls to {@code vkEnumerateInstanceExtensionProperties}, two calls may retrieve different results if a {@code pLayerName} is available in one call but not in another. The extensions supported by a layer may also change between two calls, e.g. if the layer implementation is replaced by a different version between those calls.

* *
Valid Usage (Implicit)
* *
    *
  • If {@code pLayerName} is not {@code NULL}, {@code pLayerName} must be a null-terminated UTF-8 string
  • *
  • {@code pPropertyCount} must be a valid pointer to a {@code uint32_t} value
  • *
  • If the value referenced by {@code pPropertyCount} is not 0, and {@code pProperties} is not {@code NULL}, {@code pProperties} must be a valid pointer to an array of {@code pPropertyCount} {@link VkExtensionProperties} structures
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_INCOMPLETE INCOMPLETE}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_LAYER_NOT_PRESENT ERROR_LAYER_NOT_PRESENT}
  • *
*
* *
See Also
* *

{@link VkExtensionProperties}

* * @param pLayerName either {@code NULL} or a pointer to a null-terminated UTF-8 string naming the layer to retrieve extensions from. * @param pPropertyCount a pointer to an integer related to the number of extension properties available or queried, as described below. * @param pProperties either {@code NULL} or a pointer to an array of {@link VkExtensionProperties} structures. */ @NativeType("VkResult") public static int vkEnumerateInstanceExtensionProperties(@Nullable @NativeType("const char *") CharSequence pLayerName, @NativeType("uint32_t *") IntBuffer pPropertyCount, @Nullable @NativeType("VkExtensionProperties *") VkExtensionProperties.Buffer pProperties) { if (CHECKS) { check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount.get(pPropertyCount.position())); } MemoryStack stack = stackGet(); int stackPointer = stack.getPointer(); try { ByteBuffer pLayerNameEncoded = stack.UTF8Safe(pLayerName); return nvkEnumerateInstanceExtensionProperties(memAddressSafe(pLayerNameEncoded), memAddress(pPropertyCount), memAddressSafe(pProperties)); } finally { stack.setPointer(stackPointer); } } // --- [ vkEnumerateDeviceExtensionProperties ] --- /** * Unsafe version of: {@link #vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties} * * @param pPropertyCount a pointer to an integer related to the number of extension properties available or queried, and is treated in the same fashion as the {@link #vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties}{@code ::pPropertyCount} parameter. */ public static int nvkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, long pLayerName, long pPropertyCount, long pProperties) { long __functionAddress = physicalDevice.getCapabilities().vkEnumerateDeviceExtensionProperties; return callPPPPI(__functionAddress, physicalDevice.address(), pLayerName, pPropertyCount, pProperties); } /** * Returns properties of available physical device extensions. * *
C Specification
* *

To query the extensions available to a given physical device, call:

* *
     * VkResult vkEnumerateDeviceExtensionProperties(
     *     VkPhysicalDevice                            physicalDevice,
     *     const char*                                 pLayerName,
     *     uint32_t*                                   pPropertyCount,
     *     VkExtensionProperties*                      pProperties);
* *
Description
* *

When the {@code pLayerName} parameter is {@code NULL}, only extensions provided by the Vulkan implementation or by implicitly enabled layers are returned. When {@code pLayerName} is the name of a layer, the device extensions provided by that layer are returned.

* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • If {@code pLayerName} is not {@code NULL}, {@code pLayerName} must be a null-terminated UTF-8 string
  • *
  • {@code pPropertyCount} must be a valid pointer to a {@code uint32_t} value
  • *
  • If the value referenced by {@code pPropertyCount} is not 0, and {@code pProperties} is not {@code NULL}, {@code pProperties} must be a valid pointer to an array of {@code pPropertyCount} {@link VkExtensionProperties} structures
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_INCOMPLETE INCOMPLETE}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_LAYER_NOT_PRESENT ERROR_LAYER_NOT_PRESENT}
  • *
*
* *
See Also
* *

{@link VkExtensionProperties}
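An editor's sketch of checking a physical device for a specific extension before device creation, as suggested in the {@link #vkCreateDevice CreateDevice} description; the extension name in the usage note below is only an example.

    import static org.lwjgl.vulkan.VK10.vkEnumerateDeviceExtensionProperties;

    import java.nio.ByteBuffer;
    import java.nio.IntBuffer;
    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.VkExtensionProperties;
    import org.lwjgl.vulkan.VkPhysicalDevice;

    // Illustrative helper (editor's sketch), not part of LWJGL.
    class DeviceExtensionsSketch {
        static boolean supportsDeviceExtension(VkPhysicalDevice physicalDevice, String name) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                IntBuffer count = stack.mallocInt(1);
                vkEnumerateDeviceExtensionProperties(physicalDevice, (ByteBuffer)null, count, null);

                VkExtensionProperties.Buffer props = VkExtensionProperties.mallocStack(count.get(0), stack);
                vkEnumerateDeviceExtensionProperties(physicalDevice, (ByteBuffer)null, count, props);

                for (int i = 0; i < count.get(0); i++) {
                    if (name.equals(props.get(i).extensionNameString())) {
                        return true;
                    }
                }
                return false;
            }
        }
    }

For example, {@code supportsDeviceExtension(physicalDevice, "VK_KHR_swapchain")} before listing that extension in {@link VkDeviceCreateInfo}{@code ::ppEnabledExtensionNames}.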

* * @param physicalDevice the physical device that will be queried. * @param pLayerName either {@code NULL} or a pointer to a null-terminated UTF-8 string naming the layer to retrieve extensions from. * @param pPropertyCount a pointer to an integer related to the number of extension properties available or queried, and is treated in the same fashion as the {@link #vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties}{@code ::pPropertyCount} parameter. * @param pProperties either {@code NULL} or a pointer to an array of {@link VkExtensionProperties} structures. */ @NativeType("VkResult") public static int vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, @Nullable @NativeType("const char *") ByteBuffer pLayerName, @NativeType("uint32_t *") IntBuffer pPropertyCount, @Nullable @NativeType("VkExtensionProperties *") VkExtensionProperties.Buffer pProperties) { if (CHECKS) { checkNT1Safe(pLayerName); check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount.get(pPropertyCount.position())); } return nvkEnumerateDeviceExtensionProperties(physicalDevice, memAddressSafe(pLayerName), memAddress(pPropertyCount), memAddressSafe(pProperties)); } /** * Returns properties of available physical device extensions. * *
C Specification
* *

To query the extensions available to a given physical device, call:

* *
     * VkResult vkEnumerateDeviceExtensionProperties(
     *     VkPhysicalDevice                            physicalDevice,
     *     const char*                                 pLayerName,
     *     uint32_t*                                   pPropertyCount,
     *     VkExtensionProperties*                      pProperties);
* *
Description
* *

When the {@code pLayerName} parameter is {@code NULL}, only extensions provided by the Vulkan implementation or by implicitly enabled layers are returned. When {@code pLayerName} is the name of a layer, the device extensions provided by that layer are returned.

* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • If {@code pLayerName} is not {@code NULL}, {@code pLayerName} must be a null-terminated UTF-8 string
  • *
  • {@code pPropertyCount} must be a valid pointer to a {@code uint32_t} value
  • *
  • If the value referenced by {@code pPropertyCount} is not 0, and {@code pProperties} is not {@code NULL}, {@code pProperties} must be a valid pointer to an array of {@code pPropertyCount} {@link VkExtensionProperties} structures
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_INCOMPLETE INCOMPLETE}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_LAYER_NOT_PRESENT ERROR_LAYER_NOT_PRESENT}
  • *
*
* *
See Also
* *

{@link VkExtensionProperties}

* * @param physicalDevice the physical device that will be queried. * @param pLayerName either {@code NULL} or a pointer to a null-terminated UTF-8 string naming the layer to retrieve extensions from. * @param pPropertyCount a pointer to an integer related to the number of extension properties available or queried, and is treated in the same fashion as the {@link #vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties}{@code ::pPropertyCount} parameter. * @param pProperties either {@code NULL} or a pointer to an array of {@link VkExtensionProperties} structures. */ @NativeType("VkResult") public static int vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, @Nullable @NativeType("const char *") CharSequence pLayerName, @NativeType("uint32_t *") IntBuffer pPropertyCount, @Nullable @NativeType("VkExtensionProperties *") VkExtensionProperties.Buffer pProperties) { if (CHECKS) { check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount.get(pPropertyCount.position())); } MemoryStack stack = stackGet(); int stackPointer = stack.getPointer(); try { ByteBuffer pLayerNameEncoded = stack.UTF8Safe(pLayerName); return nvkEnumerateDeviceExtensionProperties(physicalDevice, memAddressSafe(pLayerNameEncoded), memAddress(pPropertyCount), memAddressSafe(pProperties)); } finally { stack.setPointer(stackPointer); } } // --- [ vkEnumerateInstanceLayerProperties ] --- /** * Unsafe version of: {@link #vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties} * * @param pPropertyCount a pointer to an integer related to the number of layer properties available or queried, as described below. */ public static int nvkEnumerateInstanceLayerProperties(long pPropertyCount, long pProperties) { long __functionAddress = VK.getGlobalCommands().vkEnumerateInstanceLayerProperties; return callPPI(__functionAddress, pPropertyCount, pProperties); } /** * Returns up to requested number of global layer properties. * *
C Specification
* *

To query the available layers, call:

* *
     * VkResult vkEnumerateInstanceLayerProperties(
     *     uint32_t*                                   pPropertyCount,
     *     VkLayerProperties*                          pProperties);
* *
Description
* *

If {@code pProperties} is {@code NULL}, then the number of layer properties available is returned in {@code pPropertyCount}. Otherwise, {@code pPropertyCount} must point to a variable set by the user to the number of elements in the {@code pProperties} array, and on return the variable is overwritten with the number of structures actually written to {@code pProperties}. If {@code pPropertyCount} is less than the number of layer properties available, at most {@code pPropertyCount} structures will be written. If {@code pPropertyCount} is smaller than the number of layers available, {@link #VK_INCOMPLETE INCOMPLETE} will be returned instead of {@link #VK_SUCCESS SUCCESS}, to indicate that not all the available layer properties were returned.

* *

The list of available layers may change at any time due to actions outside of the Vulkan implementation, so two calls to {@link #vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties} with the same parameters may return different results, or retrieve different {@code pPropertyCount} values or {@code pProperties} contents. Once an instance has been created, the layers enabled for that instance will continue to be enabled and valid for the lifetime of that instance, even if some of them become unavailable for future instances.

* *
Valid Usage (Implicit)
* *
    *
  • {@code pPropertyCount} must be a valid pointer to a {@code uint32_t} value
  • *
  • If the value referenced by {@code pPropertyCount} is not 0, and {@code pProperties} is not {@code NULL}, {@code pProperties} must be a valid pointer to an array of {@code pPropertyCount} {@link VkLayerProperties} structures
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_INCOMPLETE INCOMPLETE}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkLayerProperties}
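An editor's sketch of scanning the instance layers for a layer by name, using the same two-call pattern as extension enumeration; the layer name in the usage note is only an example and may not be installed on a given system.

    import static org.lwjgl.vulkan.VK10.vkEnumerateInstanceLayerProperties;

    import java.nio.IntBuffer;
    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.VkLayerProperties;

    // Illustrative helper (editor's sketch), not part of LWJGL.
    class InstanceLayersSketch {
        static boolean hasLayer(String layerName) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                IntBuffer count = stack.mallocInt(1);
                vkEnumerateInstanceLayerProperties(count, null);

                VkLayerProperties.Buffer props = VkLayerProperties.mallocStack(count.get(0), stack);
                vkEnumerateInstanceLayerProperties(count, props);

                for (int i = 0; i < count.get(0); i++) {
                    if (layerName.equals(props.get(i).layerNameString())) {
                        return true;
                    }
                }
                return false;
            }
        }
    }

e.g. {@code hasLayer("VK_LAYER_LUNARG_standard_validation")} before enabling that layer at instance creation.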

* * @param pPropertyCount a pointer to an integer related to the number of layer properties available or queried, as described below. * @param pProperties either {@code NULL} or a pointer to an array of {@link VkLayerProperties} structures. */ @NativeType("VkResult") public static int vkEnumerateInstanceLayerProperties(@NativeType("uint32_t *") IntBuffer pPropertyCount, @Nullable @NativeType("VkLayerProperties *") VkLayerProperties.Buffer pProperties) { if (CHECKS) { check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount.get(pPropertyCount.position())); } return nvkEnumerateInstanceLayerProperties(memAddress(pPropertyCount), memAddressSafe(pProperties)); } // --- [ vkEnumerateDeviceLayerProperties ] --- /** * Unsafe version of: {@link #vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties} * * @param pPropertyCount a pointer to an integer related to the number of layer properties available or queried. */ public static int nvkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, long pPropertyCount, long pProperties) { long __functionAddress = physicalDevice.getCapabilities().vkEnumerateDeviceLayerProperties; return callPPPI(__functionAddress, physicalDevice.address(), pPropertyCount, pProperties); } /** * Returns properties of available physical device layers. * *
C Specification
* *

To enumerate device layers, call:

* *
     * VkResult vkEnumerateDeviceLayerProperties(
     *     VkPhysicalDevice                            physicalDevice,
     *     uint32_t*                                   pPropertyCount,
     *     VkLayerProperties*                          pProperties);
* *
Description
* *

If {@code pProperties} is {@code NULL}, then the number of layer properties available is returned in {@code pPropertyCount}. Otherwise, {@code pPropertyCount} must point to a variable set by the user to the number of elements in the {@code pProperties} array, and on return the variable is overwritten with the number of structures actually written to {@code pProperties}. If {@code pPropertyCount} is less than the number of layer properties available, at most {@code pPropertyCount} structures will be written. If {@code pPropertyCount} is smaller than the number of layers available, {@link #VK_INCOMPLETE INCOMPLETE} will be returned instead of {@link #VK_SUCCESS SUCCESS}, to indicate that not all the available layer properties were returned.

* *

The list of layers enumerated by {@link #vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties} must be exactly the sequence of layers enabled for the instance. The members of {@link VkLayerProperties} for each enumerated layer must be the same as the properties when the layer was enumerated by {@link #vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties}.

* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • {@code pPropertyCount} must be a valid pointer to a {@code uint32_t} value
  • *
  • If the value referenced by {@code pPropertyCount} is not 0, and {@code pProperties} is not {@code NULL}, {@code pProperties} must be a valid pointer to an array of {@code pPropertyCount} {@link VkLayerProperties} structures
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_INCOMPLETE INCOMPLETE}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkLayerProperties}

* * @param physicalDevice * @param pPropertyCount a pointer to an integer related to the number of layer properties available or queried. * @param pProperties either {@code NULL} or a pointer to an array of {@link VkLayerProperties} structures. */ @NativeType("VkResult") public static int vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, @NativeType("uint32_t *") IntBuffer pPropertyCount, @Nullable @NativeType("VkLayerProperties *") VkLayerProperties.Buffer pProperties) { if (CHECKS) { check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount.get(pPropertyCount.position())); } return nvkEnumerateDeviceLayerProperties(physicalDevice, memAddress(pPropertyCount), memAddressSafe(pProperties)); } // --- [ vkGetDeviceQueue ] --- /** Unsafe version of: {@link #vkGetDeviceQueue GetDeviceQueue} */ public static void nvkGetDeviceQueue(VkDevice device, int queueFamilyIndex, int queueIndex, long pQueue) { long __functionAddress = device.getCapabilities().vkGetDeviceQueue; callPPV(__functionAddress, device.address(), queueFamilyIndex, queueIndex, pQueue); } /** * Get a queue handle from a device. * *
C Specification
* *

To retrieve a handle to a {@code VkQueue} object, call:

* *
     * void vkGetDeviceQueue(
     *     VkDevice                                    device,
     *     uint32_t                                    queueFamilyIndex,
     *     uint32_t                                    queueIndex,
     *     VkQueue*                                    pQueue);
* *
Valid Usage
* *
    *
  • {@code queueFamilyIndex} must be one of the queue family indices specified when {@code device} was created, via the {@link VkDeviceQueueCreateInfo} structure
  • *
  • {@code queueIndex} must be less than the number of queues created for the specified queue family index when {@code device} was created, via the {@code queueCount} member of the {@link VkDeviceQueueCreateInfo} structure
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pQueue} must be a valid pointer to a {@code VkQueue} handle
  • *
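An editor's sketch of retrieving a queue and wrapping the handle in a {@code VkQueue} object, assuming {@code queueFamilyIndex} was specified at device creation with at least one queue; the helper name is a placeholder.

    import static org.lwjgl.vulkan.VK10.vkGetDeviceQueue;

    import org.lwjgl.PointerBuffer;
    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.VkDevice;
    import org.lwjgl.vulkan.VkQueue;

    // Illustrative helper (editor's sketch), not part of LWJGL.
    class GetQueueSketch {
        static VkQueue firstQueue(VkDevice device, int queueFamilyIndex) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                PointerBuffer pQueue = stack.mallocPointer(1);
                // queueIndex 0 is valid as long as at least one queue was requested for this family.
                vkGetDeviceQueue(device, queueFamilyIndex, 0, pQueue);
                return new VkQueue(pQueue.get(0), device);
            }
        }
    }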
* * @param device the logical device that owns the queue. * @param queueFamilyIndex the index of the queue family to which the queue belongs. * @param queueIndex the index within this queue family of the queue to retrieve. * @param pQueue a pointer to a {@code VkQueue} object that will be filled with the handle for the requested queue. */ public static void vkGetDeviceQueue(VkDevice device, @NativeType("uint32_t") int queueFamilyIndex, @NativeType("uint32_t") int queueIndex, @NativeType("VkQueue *") PointerBuffer pQueue) { if (CHECKS) { check(pQueue, 1); } nvkGetDeviceQueue(device, queueFamilyIndex, queueIndex, memAddress(pQueue)); } // --- [ vkQueueSubmit ] --- /** * Unsafe version of: {@link #vkQueueSubmit QueueSubmit} * * @param submitCount the number of elements in the {@code pSubmits} array. */ public static int nvkQueueSubmit(VkQueue queue, int submitCount, long pSubmits, long fence) { long __functionAddress = queue.getCapabilities().vkQueueSubmit; if (CHECKS) { if (pSubmits != NULL) { VkSubmitInfo.validate(pSubmits, submitCount); } } return callPPJI(__functionAddress, queue.address(), submitCount, pSubmits, fence); } /** * Submits a sequence of semaphores or command buffers to a queue. * *
C Specification
* *

To submit command buffers to a queue, call:

* *
     * VkResult vkQueueSubmit(
     *     VkQueue                                     queue,
     *     uint32_t                                    submitCount,
     *     const VkSubmitInfo*                         pSubmits,
     *     VkFence                                     fence);
* *
Description
* *
Note
* *

Submission can be a high overhead operation, and applications should attempt to batch work together into as few calls to {@link #vkQueueSubmit QueueSubmit} as possible.

*
* *

{@link #vkQueueSubmit QueueSubmit} is a queue submission command, with each batch defined by an element of {@code pSubmits} as an instance of the {@link VkSubmitInfo} structure. Batches begin execution in the order they appear in {@code pSubmits}, but may complete out of order.

* *

Fence and semaphore operations submitted with {@link #vkQueueSubmit QueueSubmit} have additional ordering constraints compared to other submission commands, with dependencies involving previous and subsequent queue operations. Information about these additional constraints can be found in the semaphore and fence sections of the synchronization chapter.

* *

Details on the interaction of {@code pWaitDstStageMask} with synchronization are described in the semaphore wait operation section of the synchronization chapter.

* *

The order that batches appear in {@code pSubmits} is used to determine submission order, and thus all the implicit ordering guarantees that respect it. Other than these implicit ordering guarantees and any explicit synchronization primitives, these batches may overlap or otherwise execute out of order.

* *

If any command buffer submitted to this queue is in the executable state, it is moved to the pending state. Once execution of all submissions of a command buffer complete, it moves from the pending state, back to the executable state. If a command buffer was recorded with the {@link #VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT} flag, it instead moves back to the invalid state.

* *

If {@link #vkQueueSubmit QueueSubmit} fails, it may return {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY} or {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}. If it does, the implementation must ensure that the state and contents of any resources or synchronization primitives referenced by the submitted command buffers and any semaphores referenced by {@code pSubmits} is unaffected by the call or its failure. If {@link #vkQueueSubmit QueueSubmit} fails in such a way that the implementation can not make that guarantee, the implementation must return {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}. See Lost Device.

* *
Valid Usage
* *
    *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must be unsignaled
  • *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must not be associated with any other queue command that has not yet completed execution on that queue
  • *
  • Any calls to {@link #vkCmdSetEvent CmdSetEvent}, {@link #vkCmdResetEvent CmdResetEvent} or {@link #vkCmdWaitEvents CmdWaitEvents} that have been recorded into any of the command buffer elements of the {@code pCommandBuffers} member of any element of {@code pSubmits}, must not reference any {@code VkEvent} that is referenced by any of those commands in a command buffer that has been submitted to another queue and is still in the pending state.
  • *
  • Any stage flag included in any element of the {@code pWaitDstStageMask} member of any element of {@code pSubmits} must be a pipeline stage supported by one of the capabilities of {@code queue}, as specified in the table of supported pipeline stages.
  • *
  • Each element of the {@code pSignalSemaphores} member of any element of {@code pSubmits} must be unsignaled when the semaphore signal operation it defines is executed on the device
  • *
  • When a semaphore unsignal operation defined by any element of the {@code pWaitSemaphores} member of any element of {@code pSubmits} executes on {@code queue}, no other queue must be waiting on the same semaphore.
  • *
  • All elements of the {@code pWaitSemaphores} member of all elements of {@code pSubmits} must be semaphores that are signaled, or have semaphore signal operations previously submitted for execution.
  • *
  • Each element of the {@code pCommandBuffers} member of each element of {@code pSubmits} must be in the pending or executable state.
  • *
  • If any element of the {@code pCommandBuffers} member of any element of {@code pSubmits} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT}, it must not be in the pending state.
  • *
  • Any secondary command buffers recorded into any element of the {@code pCommandBuffers} member of any element of {@code pSubmits} must be in the pending or executable state.
  • *
  • If any secondary command buffers recorded into any element of the {@code pCommandBuffers} member of any element of {@code pSubmits} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT}, it must not be in the pending state.
  • *
  • Each element of the {@code pCommandBuffers} member of each element of {@code pSubmits} must have been allocated from a {@code VkCommandPool} that was created for the same queue family {@code queue} belongs to.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code queue} must be a valid {@code VkQueue} handle
  • *
  • If {@code submitCount} is not 0, {@code pSubmits} must be a valid pointer to an array of {@code submitCount} valid {@link VkSubmitInfo} structures
  • *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must be a valid {@code VkFence} handle
  • *
  • Both of {@code fence}, and {@code queue} that are valid handles must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code queue} must be externally synchronized
  • *
  • Host access to {@code pSubmits}[].pWaitSemaphores[] must be externally synchronized
  • *
  • Host access to {@code pSubmits}[].pSignalSemaphores[] must be externally synchronized
  • *
  • Host access to {@code fence} must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
-                     | -                 | Any                   | -
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
* *
See Also
* *

{@link VkSubmitInfo}
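An editor's sketch of a single-batch submission with one command buffer and no semaphores, assuming the command buffer is in the executable state and was allocated from a pool created for the queue's family; {@code fence} may be {@link #VK_NULL_HANDLE NULL_HANDLE}.

    import static org.lwjgl.vulkan.VK10.*;

    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.VkCommandBuffer;
    import org.lwjgl.vulkan.VkQueue;
    import org.lwjgl.vulkan.VkSubmitInfo;

    // Illustrative helper (editor's sketch), not part of LWJGL.
    class QueueSubmitSketch {
        static void submitOne(VkQueue queue, VkCommandBuffer commandBuffer, long fence) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                // One batch, one command buffer, no wait/signal semaphores.
                VkSubmitInfo submitInfo = VkSubmitInfo.callocStack(stack)
                    .sType(VK_STRUCTURE_TYPE_SUBMIT_INFO)
                    .pCommandBuffers(stack.pointers(commandBuffer));

                int err = vkQueueSubmit(queue, submitInfo, fence);
                if (err != VK_SUCCESS) {
                    throw new IllegalStateException("vkQueueSubmit failed: " + err);
                }
            }
        }
    }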

* * @param queue the queue that the command buffers will be submitted to. * @param pSubmits a pointer to an array of {@link VkSubmitInfo} structures, each specifying a command buffer submission batch. * @param fence an optional: handle to a fence to be signaled once all submitted command buffers have completed execution. If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, it defines a fence signal operation. */ @NativeType("VkResult") public static int vkQueueSubmit(VkQueue queue, @Nullable @NativeType("const VkSubmitInfo *") VkSubmitInfo.Buffer pSubmits, @NativeType("VkFence") long fence) { return nvkQueueSubmit(queue, remainingSafe(pSubmits), memAddressSafe(pSubmits), fence); } /** * Submits a sequence of semaphores or command buffers to a queue. * *
C Specification
* *

To submit command buffers to a queue, call:

* *
     * VkResult vkQueueSubmit(
     *     VkQueue                                     queue,
     *     uint32_t                                    submitCount,
     *     const VkSubmitInfo*                         pSubmits,
     *     VkFence                                     fence);
* *
Description
* *
Note
* *

Submission can be a high overhead operation, and applications should attempt to batch work together into as few calls to {@link #vkQueueSubmit QueueSubmit} as possible.

*
* *

{@link #vkQueueSubmit QueueSubmit} is a queue submission command, with each batch defined by an element of {@code pSubmits} as an instance of the {@link VkSubmitInfo} structure. Batches begin execution in the order they appear in {@code pSubmits}, but may complete out of order.

* *

Fence and semaphore operations submitted with {@link #vkQueueSubmit QueueSubmit} have additional ordering constraints compared to other submission commands, with dependencies involving previous and subsequent queue operations. Information about these additional constraints can be found in the semaphore and fence sections of the synchronization chapter.

* *

Details on the interaction of {@code pWaitDstStageMask} with synchronization are described in the semaphore wait operation section of the synchronization chapter.

* *

The order that batches appear in {@code pSubmits} is used to determine submission order, and thus all the implicit ordering guarantees that respect it. Other than these implicit ordering guarantees and any explicit synchronization primitives, these batches may overlap or otherwise execute out of order.

* *

If any command buffer submitted to this queue is in the executable state, it is moved to the pending state. Once execution of all submissions of a command buffer complete, it moves from the pending state, back to the executable state. If a command buffer was recorded with the {@link #VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT} flag, it instead moves back to the invalid state.

* *

If {@link #vkQueueSubmit QueueSubmit} fails, it may return {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY} or {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}. If it does, the implementation must ensure that the state and contents of any resources or synchronization primitives referenced by the submitted command buffers and any semaphores referenced by {@code pSubmits} is unaffected by the call or its failure. If {@link #vkQueueSubmit QueueSubmit} fails in such a way that the implementation can not make that guarantee, the implementation must return {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}. See Lost Device.

* *
Valid Usage
* *
    *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must be unsignaled
  • *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must not be associated with any other queue command that has not yet completed execution on that queue
  • *
  • Any calls to {@link #vkCmdSetEvent CmdSetEvent}, {@link #vkCmdResetEvent CmdResetEvent} or {@link #vkCmdWaitEvents CmdWaitEvents} that have been recorded into any of the command buffer elements of the {@code pCommandBuffers} member of any element of {@code pSubmits}, must not reference any {@code VkEvent} that is referenced by any of those commands in a command buffer that has been submitted to another queue and is still in the pending state.
  • *
  • Any stage flag included in any element of the {@code pWaitDstStageMask} member of any element of {@code pSubmits} must be a pipeline stage supported by one of the capabilities of {@code queue}, as specified in the table of supported pipeline stages.
  • *
  • Each element of the {@code pSignalSemaphores} member of any element of {@code pSubmits} must be unsignaled when the semaphore signal operation it defines is executed on the device
  • *
  • When a semaphore unsignal operation defined by any element of the {@code pWaitSemaphores} member of any element of {@code pSubmits} executes on {@code queue}, no other queue must be waiting on the same semaphore.
  • *
  • All elements of the {@code pWaitSemaphores} member of all elements of {@code pSubmits} must be semaphores that are signaled, or have semaphore signal operations previously submitted for execution.
  • *
  • Each element of the {@code pCommandBuffers} member of each element of {@code pSubmits} must be in the pending or executable state.
  • *
  • If any element of the {@code pCommandBuffers} member of any element of {@code pSubmits} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT}, it must not be in the pending state.
  • *
  • Any secondary command buffers recorded into any element of the {@code pCommandBuffers} member of any element of {@code pSubmits} must be in the pending or executable state.
  • *
  • If any secondary command buffers recorded into any element of the {@code pCommandBuffers} member of any element of {@code pSubmits} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT}, it must not be in the pending state.
  • *
  • Each element of the {@code pCommandBuffers} member of each element of {@code pSubmits} must have been allocated from a {@code VkCommandPool} that was created for the same queue family {@code queue} belongs to.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code queue} must be a valid {@code VkQueue} handle
  • *
  • If {@code submitCount} is not 0, {@code pSubmits} must be a valid pointer to an array of {@code submitCount} valid {@link VkSubmitInfo} structures
  • *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must be a valid {@code VkFence} handle
  • *
  • Both of {@code fence}, and {@code queue} that are valid handles must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code queue} must be externally synchronized
  • *
  • Host access to {@code pSubmits}[].pWaitSemaphores[] must be externally synchronized
  • *
  • Host access to {@code pSubmits}[].pSignalSemaphores[] must be externally synchronized
  • *
  • Host access to {@code fence} must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
-                     | -                 | Any                   | -
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
* *
See Also
* *

{@link VkSubmitInfo}

* * @param queue the queue that the command buffers will be submitted to. * @param fence an optional: handle to a fence to be signaled once all submitted command buffers have completed execution. If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, it defines a fence signal operation. */ @NativeType("VkResult") public static int vkQueueSubmit(VkQueue queue, @Nullable @NativeType("const VkSubmitInfo *") VkSubmitInfo pSubmit, @NativeType("VkFence") long fence) { return nvkQueueSubmit(queue, 1, pSubmit.address(), fence); } // --- [ vkQueueWaitIdle ] --- /** * Wait for a queue to become idle. * *
C Specification
* *

To wait on the host for the completion of outstanding queue operations for a given queue, call:

* *
     * VkResult vkQueueWaitIdle(
     *     VkQueue                                     queue);
* *
Description
* *

{@link #vkQueueWaitIdle QueueWaitIdle} is equivalent to submitting a fence to a queue and waiting with an infinite timeout for that fence to signal.

* *
Valid Usage (Implicit)
* *
    *
  • {@code queue} must be a valid {@code VkQueue} handle
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
-                     | -                 | Any                   | -
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
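The equivalence stated above can be pictured with a fence-only submission (an editor's sketch, assuming {@code device} owns {@code queue}); {@code -1L} is the unsigned 64-bit "infinite" timeout.

    import static org.lwjgl.vulkan.VK10.*;

    import java.nio.LongBuffer;
    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.VkDevice;
    import org.lwjgl.vulkan.VkFenceCreateInfo;
    import org.lwjgl.vulkan.VkQueue;
    import org.lwjgl.vulkan.VkSubmitInfo;

    // Illustrative helper (editor's sketch), not part of LWJGL.
    class QueueWaitIdleSketch {
        static void waitIdleViaFence(VkDevice device, VkQueue queue) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                VkFenceCreateInfo fenceInfo = VkFenceCreateInfo.callocStack(stack)
                    .sType(VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
                LongBuffer pFence = stack.mallocLong(1);
                vkCreateFence(device, fenceInfo, null, pFence);

                // A fence-only submission (submitCount == 0): only the fence signal operation.
                vkQueueSubmit(queue, (VkSubmitInfo.Buffer)null, pFence.get(0));
                vkWaitForFences(device, pFence, true, -1L); // -1L == UINT64_MAX, i.e. "infinite"

                vkDestroyFence(device, pFence.get(0), null);
            }
        }
    }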
* * @param queue the queue on which to wait. */ @NativeType("VkResult") public static int vkQueueWaitIdle(VkQueue queue) { long __functionAddress = queue.getCapabilities().vkQueueWaitIdle; return callPI(__functionAddress, queue.address()); } // --- [ vkDeviceWaitIdle ] --- /** * Wait for a device to become idle. * *
C Specification
* *

To wait on the host for the completion of outstanding queue operations for all queues on a given logical device, call:

* *
     * VkResult vkDeviceWaitIdle(
     *     VkDevice                                    device);
* *
Description
* *

{@link #vkDeviceWaitIdle DeviceWaitIdle} is equivalent to calling {@link #vkQueueWaitIdle QueueWaitIdle} for all queues owned by {@code device}.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
* *
Host Synchronization
* *
    *
  • Host access to all {@code VkQueue} objects created from {@code device} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
* * @param device the logical device to idle. */ @NativeType("VkResult") public static int vkDeviceWaitIdle(VkDevice device) { long __functionAddress = device.getCapabilities().vkDeviceWaitIdle; return callPI(__functionAddress, device.address()); } // --- [ vkAllocateMemory ] --- /** Unsafe version of: {@link #vkAllocateMemory AllocateMemory} */ public static int nvkAllocateMemory(VkDevice device, long pAllocateInfo, long pAllocator, long pMemory) { long __functionAddress = device.getCapabilities().vkAllocateMemory; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pAllocateInfo, pAllocator, pMemory); } /** * Allocate GPU memory. * *
C Specification
* *

To allocate memory objects, call:

* *
     * VkResult vkAllocateMemory(
     *     VkDevice                                    device,
     *     const VkMemoryAllocateInfo*                 pAllocateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkDeviceMemory*                             pMemory);
* *
Description
* *

Allocations returned by {@link #vkAllocateMemory AllocateMemory} are guaranteed to meet any alignment requirement of the implementation. For example, if an implementation requires 128 byte alignment for images and 64 byte alignment for buffers, the device memory returned through this mechanism would be 128-byte aligned. This ensures that applications can correctly suballocate objects of different types (with potentially different alignment requirements) in the same memory object.

* *

When memory is allocated, its contents are undefined.

* *

The maximum number of valid memory allocations that can exist simultaneously within a {@code VkDevice} may be restricted by implementation- or platform-dependent limits. If a call to {@link #vkAllocateMemory AllocateMemory} would cause the total number of allocations to exceed these limits, such a call will fail and must return {@link #VK_ERROR_TOO_MANY_OBJECTS ERROR_TOO_MANY_OBJECTS}. The {@code maxMemoryAllocationCount} feature describes the number of allocations that can exist simultaneously before encountering these internal limits.

* *

Some platforms may have a limit on the maximum size of a single allocation. For example, certain systems may fail to create allocations with a size greater than or equal to 4GB. Such a limit is implementation-dependent, and if such a failure occurs then the error {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY} must be returned.

* *
Valid Usage
* *
    *
  • {@code pAllocateInfo}->{@code allocationSize} must be less than or equal to {@link VkPhysicalDeviceMemoryProperties}{@code ::memoryHeaps}[{@code pAllocateInfo}->{@code memoryTypeIndex}].{@code size} as returned by {@link #vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties} for the {@code VkPhysicalDevice} that {@code device} was created from.
  • *
  • {@code pAllocateInfo}->{@code memoryTypeIndex} must be less than {@link VkPhysicalDeviceMemoryProperties}{@code ::memoryTypeCount} as returned by {@link #vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties} for the {@code VkPhysicalDevice} that {@code device} was created from.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pAllocateInfo} must be a valid pointer to a valid {@link VkMemoryAllocateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pMemory} must be a valid pointer to a {@code VkDeviceMemory} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_TOO_MANY_OBJECTS ERROR_TOO_MANY_OBJECTS}
  • *
  • {@link KHRExternalMemory#VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR ERROR_INVALID_EXTERNAL_HANDLE_KHR}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkMemoryAllocateInfo}
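An editor's sketch of a plain allocation, assuming {@code memoryTypeIndex} has already been chosen from the data returned by {@link #vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties}, as the Valid Usage above requires:

    import static org.lwjgl.vulkan.VK10.*;

    import java.nio.LongBuffer;
    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.VkDevice;
    import org.lwjgl.vulkan.VkMemoryAllocateInfo;

    // Illustrative helper (editor's sketch), not part of LWJGL.
    class AllocateMemorySketch {
        static long allocate(VkDevice device, long sizeInBytes, int memoryTypeIndex) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                VkMemoryAllocateInfo allocInfo = VkMemoryAllocateInfo.callocStack(stack)
                    .sType(VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO)
                    .allocationSize(sizeInBytes)
                    .memoryTypeIndex(memoryTypeIndex);

                LongBuffer pMemory = stack.mallocLong(1);
                int err = vkAllocateMemory(device, allocInfo, null, pMemory);
                if (err != VK_SUCCESS) {
                    throw new IllegalStateException("vkAllocateMemory failed: " + err);
                }
                return pMemory.get(0); // the VkDeviceMemory handle
            }
        }
    }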

* * @param device the logical device that owns the memory. * @param pAllocateInfo a pointer to an instance of the {@link VkMemoryAllocateInfo} structure describing parameters of the allocation. A successful returned allocation must use the requested parameters — no substitution is permitted by the implementation. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pMemory a pointer to a {@code VkDeviceMemory} handle in which information about the allocated memory is returned. */ @NativeType("VkResult") public static int vkAllocateMemory(VkDevice device, @NativeType("const VkMemoryAllocateInfo *") VkMemoryAllocateInfo pAllocateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkDeviceMemory *") LongBuffer pMemory) { if (CHECKS) { check(pMemory, 1); } return nvkAllocateMemory(device, pAllocateInfo.address(), memAddressSafe(pAllocator), memAddress(pMemory)); } // --- [ vkFreeMemory ] --- /** Unsafe version of: {@link #vkFreeMemory FreeMemory} */ public static void nvkFreeMemory(VkDevice device, long memory, long pAllocator) { long __functionAddress = device.getCapabilities().vkFreeMemory; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), memory, pAllocator); } /** * Free GPU memory. * *
C Specification
* *

To free a memory object, call:

* *
     * void vkFreeMemory(
     *     VkDevice                                    device,
     *     VkDeviceMemory                              memory,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Description
* *

Before freeing a memory object, an application must ensure the memory object is no longer in use by the device, for example by command buffers in the pending state. The memory can remain bound to images or buffers at the time the memory object is freed, but any further use of them (on host or device) for anything other than destroying those objects will result in undefined behavior. If there are still any bound images or buffers, the memory may not be immediately released by the implementation, but must be released by the time all bound images and buffers have been destroyed. Once memory is released, it is returned to the heap from which it was allocated.

* *

How memory objects are bound to Images and Buffers is described in detail in the Resource Memory Association section.

* *

If a memory object is mapped at the time it is freed, it is implicitly unmapped.

* *
Note
* *

As described below, host writes are not implicitly flushed when the memory object is unmapped, but the implementation must guarantee that writes that have not been flushed do not affect any other memory.

*
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code memory} (via images or buffers) must have completed execution
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code memory} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code memory} must be a valid {@code VkDeviceMemory} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code memory} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code memory} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that owns the memory. * @param memory the {@code VkDeviceMemory} object to be freed. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkFreeMemory(VkDevice device, @NativeType("VkDeviceMemory") long memory, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkFreeMemory(device, memory, memAddressSafe(pAllocator)); } // --- [ vkMapMemory ] --- /** Unsafe version of: {@link #vkMapMemory MapMemory} */ public static int nvkMapMemory(VkDevice device, long memory, long offset, long size, int flags, long ppData) { long __functionAddress = device.getCapabilities().vkMapMemory; return callPJJJPI(__functionAddress, device.address(), memory, offset, size, flags, ppData); } /** * Map a memory object into application address space. * *
C Specification
* *

To retrieve a host virtual address pointer to a region of a mappable memory object, call:

* *
     * VkResult vkMapMemory(
     *     VkDevice                                    device,
     *     VkDeviceMemory                              memory,
     *     VkDeviceSize                                offset,
     *     VkDeviceSize                                size,
     *     VkMemoryMapFlags                            flags,
     *     void**                                      ppData);
* *
Description
* *

It is an application error to call {@link #vkMapMemory MapMemory} on a memory object that is already mapped.

* *
Note
* *

{@link #vkMapMemory MapMemory} will fail if the implementation is unable to allocate an appropriately sized contiguous virtual address range, e.g. due to virtual address space fragmentation or platform limits. In such cases, {@link #vkMapMemory MapMemory} must return {@link #VK_ERROR_MEMORY_MAP_FAILED ERROR_MEMORY_MAP_FAILED}. The application can improve the likelihood of success by reducing the size of the mapped range and/or removing unneeded mappings using {@link #vkUnmapMemory UnmapMemory}.

*
* *

{@link #vkMapMemory MapMemory} does not check whether the device memory is currently in use before returning the host-accessible pointer. The application must guarantee that any previously submitted command that writes to this range has completed before the host reads from or writes to that range, and that any previously submitted command that reads from that range has completed before the host writes to that region (see the Synchronization and Cache Control chapter for details on fulfilling such a guarantee). If the device memory was allocated without the {@link #VK_MEMORY_PROPERTY_HOST_COHERENT_BIT MEMORY_PROPERTY_HOST_COHERENT_BIT} set, these guarantees must be made for an extended range: the application must round down the start of the range to the nearest multiple of {@link VkPhysicalDeviceLimits}{@code ::nonCoherentAtomSize}, and round the end of the range up to the nearest multiple of {@link VkPhysicalDeviceLimits}{@code ::nonCoherentAtomSize}.

* *

While a range of device memory is mapped for host access, the application is responsible for synchronizing both device and host access to that memory range.

* *
Note
* *

It is important for the application developer to become meticulously familiar with all of the mechanisms described in the chapter on Synchronization and Cache Control as they are crucial to maintaining memory access ordering.

*
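* *

For host-visible, host-coherent memory the basic map/write/unmap flow might look like the sketch below; the handles {@code device} and {@code memory}, the mapping {@code size} and the source {@code ByteBuffer} {@code data} are assumed to exist. For non-coherent memory, a {@link #vkFlushMappedMemoryRanges FlushMappedMemoryRanges} call would additionally be required before the device can observe the writes.

     * // Map the allocation, copy host data into it, then unmap (host-coherent memory assumed).
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     PointerBuffer ppData = stack.mallocPointer(1);
     *     int err = vkMapMemory(device, memory, 0L /*offset*/, size, 0 /*flags*/, ppData);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkMapMemory failed: " + err);
     *     }
     *     // Copy 'size' bytes from the host-side buffer into the mapped range.
     *     memCopy(memAddress(data), ppData.get(0), size);
     *     vkUnmapMemory(device, memory);
     * }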
* *
Valid Usage
* *
    *
  • {@code memory} must not be currently mapped
  • *
  • {@code offset} must be less than the size of {@code memory}
  • *
  • If {@code size} is not equal to {@link #VK_WHOLE_SIZE WHOLE_SIZE}, {@code size} must be greater than 0
  • *
  • If {@code size} is not equal to {@link #VK_WHOLE_SIZE WHOLE_SIZE}, {@code size} must be less than or equal to the size of the {@code memory} minus {@code offset}
  • *
  • {@code memory} must have been created with a memory type that reports {@link #VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT MEMORY_PROPERTY_HOST_VISIBLE_BIT}
  • *
  • {@code memory} must not have been allocated with multiple instances.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code memory} must be a valid {@code VkDeviceMemory} handle
  • *
  • {@code flags} must be 0
  • *
  • {@code ppData} must be a valid pointer to a pointer value
  • *
  • {@code memory} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code memory} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_MEMORY_MAP_FAILED ERROR_MEMORY_MAP_FAILED}
  • *
*
* * @param device the logical device that owns the memory. * @param memory the {@code VkDeviceMemory} object to be mapped. * @param offset a zero-based byte offset from the beginning of the memory object. * @param size the size of the memory range to map, or {@link #VK_WHOLE_SIZE WHOLE_SIZE} to map from {@code offset} to the end of the allocation. * @param flags reserved for future use. * @param ppData points to a pointer in which is returned a host-accessible pointer to the beginning of the mapped range. This pointer minus {@code offset} must be aligned to at least {@link VkPhysicalDeviceLimits}{@code ::minMemoryMapAlignment}. */ @NativeType("VkResult") public static int vkMapMemory(VkDevice device, @NativeType("VkDeviceMemory") long memory, @NativeType("VkDeviceSize") long offset, @NativeType("VkDeviceSize") long size, @NativeType("VkMemoryMapFlags") int flags, @NativeType("void **") PointerBuffer ppData) { if (CHECKS) { check(ppData, 1); } return nvkMapMemory(device, memory, offset, size, flags, memAddress(ppData)); } // --- [ vkUnmapMemory ] --- /** * Unmap a previously mapped memory object. * *
C Specification
* *

To unmap a memory object once host access to it is no longer needed by the application, call:

* *
     * void vkUnmapMemory(
     *     VkDevice                                    device,
     *     VkDeviceMemory                              memory);
* *
Valid Usage
* *
    *
  • {@code memory} must be currently mapped
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code memory} must be a valid {@code VkDeviceMemory} handle
  • *
  • {@code memory} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code memory} must be externally synchronized
  • *
* * @param device the logical device that owns the memory. * @param memory the memory object to be unmapped. */ public static void vkUnmapMemory(VkDevice device, @NativeType("VkDeviceMemory") long memory) { long __functionAddress = device.getCapabilities().vkUnmapMemory; callPJV(__functionAddress, device.address(), memory); } // --- [ vkFlushMappedMemoryRanges ] --- /** * Unsafe version of: {@link #vkFlushMappedMemoryRanges FlushMappedMemoryRanges} * * @param memoryRangeCount the length of the {@code pMemoryRanges} array. */ public static int nvkFlushMappedMemoryRanges(VkDevice device, int memoryRangeCount, long pMemoryRanges) { long __functionAddress = device.getCapabilities().vkFlushMappedMemoryRanges; return callPPI(__functionAddress, device.address(), memoryRangeCount, pMemoryRanges); } /** * Flush mapped memory ranges. * *
C Specification
* *

To flush ranges of non-coherent memory from the host caches, call:

* *
     * VkResult vkFlushMappedMemoryRanges(
     *     VkDevice                                    device,
     *     uint32_t                                    memoryRangeCount,
     *     const VkMappedMemoryRange*                  pMemoryRanges);
* *
Description
* *

{@link #vkFlushMappedMemoryRanges FlushMappedMemoryRanges} guarantees that host writes to the memory ranges described by {@code pMemoryRanges} can be made available to device access, via availability operations from the {@link #VK_ACCESS_HOST_WRITE_BIT ACCESS_HOST_WRITE_BIT} access type.

* *

Unmapping non-coherent memory does not implicitly flush the mapped memory, and host writes that have not been flushed may not ever be visible to the device. However, implementations must ensure that writes that have not been flushed do not become visible to any other memory.

* *
Note
* *

The above guarantee avoids a potential memory corruption in scenarios where host writes to a mapped memory object have not been flushed before the memory is unmapped (or freed), and the virtual address range is subsequently reused for a different mapping (or memory allocation).

*
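* *

A typical flush of a single non-coherent range could look like the sketch below, which flushes an entire (hypothetical) {@code memory} allocation via {@link #VK_WHOLE_SIZE WHOLE_SIZE}; for partial ranges, {@code offset} and {@code size} are subject to the {@code nonCoherentAtomSize} rounding rules described for {@link #vkMapMemory MapMemory}.

     * // Flush host writes to a mapped, non-coherent allocation so the device can see them.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkMappedMemoryRange range = VkMappedMemoryRange.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE)
     *         .memory(memory)
     *         .offset(0L)
     *         .size(VK_WHOLE_SIZE); // whole allocation
     *
     *     int err = vkFlushMappedMemoryRanges(device, range); // single-range overload
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkFlushMappedMemoryRanges failed: " + err);
     *     }
     * }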
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pMemoryRanges} must be a valid pointer to an array of {@code memoryRangeCount} valid {@link VkMappedMemoryRange} structures
  • *
  • {@code memoryRangeCount} must be greater than 0
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkMappedMemoryRange}

* * @param device the logical device that owns the memory ranges. * @param pMemoryRanges a pointer to an array of {@link VkMappedMemoryRange} structures describing the memory ranges to flush. */ @NativeType("VkResult") public static int vkFlushMappedMemoryRanges(VkDevice device, @NativeType("const VkMappedMemoryRange *") VkMappedMemoryRange.Buffer pMemoryRanges) { return nvkFlushMappedMemoryRanges(device, pMemoryRanges.remaining(), pMemoryRanges.address()); } /** * Flush mapped memory ranges. * *
C Specification
* *

To flush ranges of non-coherent memory from the host caches, call:

* *
     * VkResult vkFlushMappedMemoryRanges(
     *     VkDevice                                    device,
     *     uint32_t                                    memoryRangeCount,
     *     const VkMappedMemoryRange*                  pMemoryRanges);
* *
Description
* *

{@link #vkFlushMappedMemoryRanges FlushMappedMemoryRanges} guarantees that host writes to the memory ranges described by {@code pMemoryRanges} can be made available to device access, via availability operations from the {@link #VK_ACCESS_HOST_WRITE_BIT ACCESS_HOST_WRITE_BIT} access type.

* *

Unmapping non-coherent memory does not implicitly flush the mapped memory, and host writes that have not been flushed may not ever be visible to the device. However, implementations must ensure that writes that have not been flushed do not become visible to any other memory.

* *
Note
* *

The above guarantee avoids a potential memory corruption in scenarios where host writes to a mapped memory object have not been flushed before the memory is unmapped (or freed), and the virtual address range is subsequently reused for a different mapping (or memory allocation).

*
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pMemoryRanges} must be a valid pointer to an array of {@code memoryRangeCount} valid {@link VkMappedMemoryRange} structures
  • *
  • {@code memoryRangeCount} must be greater than 0
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkMappedMemoryRange}

* * @param device the logical device that owns the memory ranges. */ @NativeType("VkResult") public static int vkFlushMappedMemoryRanges(VkDevice device, @NativeType("const VkMappedMemoryRange *") VkMappedMemoryRange pMemoryRange) { return nvkFlushMappedMemoryRanges(device, 1, pMemoryRange.address()); } // --- [ vkInvalidateMappedMemoryRanges ] --- /** * Unsafe version of: {@link #vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges} * * @param memoryRangeCount the length of the {@code pMemoryRanges} array. */ public static int nvkInvalidateMappedMemoryRanges(VkDevice device, int memoryRangeCount, long pMemoryRanges) { long __functionAddress = device.getCapabilities().vkInvalidateMappedMemoryRanges; return callPPI(__functionAddress, device.address(), memoryRangeCount, pMemoryRanges); } /** * Invalidate ranges of mapped memory objects. * *
C Specification
* *

To invalidate ranges of non-coherent memory from the host caches, call:

* *
     * VkResult vkInvalidateMappedMemoryRanges(
     *     VkDevice                                    device,
     *     uint32_t                                    memoryRangeCount,
     *     const VkMappedMemoryRange*                  pMemoryRanges);
* *
Description
* *

{@link #vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges} guarantees that device writes to the memory ranges described by {@code pMemoryRanges}, which have been made visible to the {@link #VK_ACCESS_HOST_WRITE_BIT ACCESS_HOST_WRITE_BIT} and {@link #VK_ACCESS_HOST_READ_BIT ACCESS_HOST_READ_BIT} access types, are made visible to the host. If a range of non-coherent memory is written by the host and then invalidated without first being flushed, its contents are undefined.

* *
Note
* *

Mapping non-coherent memory does not implicitly invalidate the mapped memory, and device writes that have not been invalidated must be made visible before the host reads or overwrites them.

*
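* *

The host-read direction is symmetric: before reading device writes from a mapped non-coherent range, invalidate it, as in this sketch (same hypothetical {@code device}/{@code memory} handles as in the {@link #vkFlushMappedMemoryRanges FlushMappedMemoryRanges} example).

     * // Make device writes to a mapped, non-coherent range visible to the host before reading.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkMappedMemoryRange range = VkMappedMemoryRange.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE)
     *         .memory(memory)
     *         .offset(0L)
     *         .size(VK_WHOLE_SIZE);
     *
     *     int err = vkInvalidateMappedMemoryRanges(device, range); // single-range overload
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkInvalidateMappedMemoryRanges failed: " + err);
     *     }
     *     // The pointer previously returned by vkMapMemory can now be read safely.
     * }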
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pMemoryRanges} must be a valid pointer to an array of {@code memoryRangeCount} valid {@link VkMappedMemoryRange} structures
  • *
  • {@code memoryRangeCount} must be greater than 0
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkMappedMemoryRange}

* * @param device the logical device that owns the memory ranges. * @param pMemoryRanges a pointer to an array of {@link VkMappedMemoryRange} structures describing the memory ranges to invalidate. */ @NativeType("VkResult") public static int vkInvalidateMappedMemoryRanges(VkDevice device, @NativeType("const VkMappedMemoryRange *") VkMappedMemoryRange.Buffer pMemoryRanges) { return nvkInvalidateMappedMemoryRanges(device, pMemoryRanges.remaining(), pMemoryRanges.address()); } /** * Invalidate ranges of mapped memory objects. * *
C Specification
* *

To invalidate ranges of non-coherent memory from the host caches, call:

* *
     * VkResult vkInvalidateMappedMemoryRanges(
     *     VkDevice                                    device,
     *     uint32_t                                    memoryRangeCount,
     *     const VkMappedMemoryRange*                  pMemoryRanges);
* *
Description
* *

{@link #vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges} guarantees that device writes to the memory ranges described by {@code pMemoryRanges}, which have been made visible to the {@link #VK_ACCESS_HOST_WRITE_BIT ACCESS_HOST_WRITE_BIT} and {@link #VK_ACCESS_HOST_READ_BIT ACCESS_HOST_READ_BIT} access types, are made visible to the host. If a range of non-coherent memory is written by the host and then invalidated without first being flushed, its contents are undefined.

* *
Note
* *

Mapping non-coherent memory does not implicitly invalidate the mapped memory, and device writes that have not been invalidated must be made visible before the host reads or overwrites them.

*
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pMemoryRanges} must be a valid pointer to an array of {@code memoryRangeCount} valid {@link VkMappedMemoryRange} structures
  • *
  • {@code memoryRangeCount} must be greater than 0
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkMappedMemoryRange}

* * @param device the logical device that owns the memory ranges. */ @NativeType("VkResult") public static int vkInvalidateMappedMemoryRanges(VkDevice device, @NativeType("const VkMappedMemoryRange *") VkMappedMemoryRange pMemoryRange) { return nvkInvalidateMappedMemoryRanges(device, 1, pMemoryRange.address()); } // --- [ vkGetDeviceMemoryCommitment ] --- /** Unsafe version of: {@link #vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment} */ public static void nvkGetDeviceMemoryCommitment(VkDevice device, long memory, long pCommittedMemoryInBytes) { long __functionAddress = device.getCapabilities().vkGetDeviceMemoryCommitment; callPJPV(__functionAddress, device.address(), memory, pCommittedMemoryInBytes); } /** * Query the current commitment for a VkDeviceMemory. * *
C Specification
* *

To determine the amount of lazily-allocated memory that is currently committed for a memory object, call:

* *
     * void vkGetDeviceMemoryCommitment(
     *     VkDevice                                    device,
     *     VkDeviceMemory                              memory,
     *     VkDeviceSize*                               pCommittedMemoryInBytes);
* *
Description
* *

The implementation may update the commitment at any time, and the value returned by this query may be out of date.

* *

The implementation guarantees to allocate any committed memory from the {@code heapIndex} indicated by the memory type that the memory object was created with.
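
* *

The query itself is a single call that returns one {@code VkDeviceSize}; a short sketch, assuming a lazily-allocated {@code memory} object and a valid {@code device}:

     * // Query how many bytes are currently committed for a lazily-allocated memory object.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     LongBuffer pCommitted = stack.mallocLong(1);
     *     vkGetDeviceMemoryCommitment(device, memory, pCommitted);
     *     System.out.println("Committed bytes: " + pCommitted.get(0));
     * }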

* *
Valid Usage
* *
    *
  • {@code memory} must have been created with a memory type that reports {@link #VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code memory} must be a valid {@code VkDeviceMemory} handle
  • *
  • {@code pCommittedMemoryInBytes} must be a valid pointer to a {@code VkDeviceSize} value
  • *
  • {@code memory} must have been created, allocated, or retrieved from {@code device}
  • *
* * @param device the logical device that owns the memory. * @param memory the memory object being queried. * @param pCommittedMemoryInBytes a pointer to a {@code VkDeviceSize} value in which the number of bytes currently committed is returned, on success. */ public static void vkGetDeviceMemoryCommitment(VkDevice device, @NativeType("VkDeviceMemory") long memory, @NativeType("VkDeviceSize *") LongBuffer pCommittedMemoryInBytes) { if (CHECKS) { check(pCommittedMemoryInBytes, 1); } nvkGetDeviceMemoryCommitment(device, memory, memAddress(pCommittedMemoryInBytes)); } // --- [ vkBindBufferMemory ] --- /** * Bind device memory to a buffer object. * *
C Specification
* *

To attach memory to a buffer object, call:

* *
     * VkResult vkBindBufferMemory(
     *     VkDevice                                    device,
     *     VkBuffer                                    buffer,
     *     VkDeviceMemory                              memory,
     *     VkDeviceSize                                memoryOffset);
* *
Description
* *

{@link #vkBindBufferMemory BindBufferMemory} is equivalent to passing the same parameters through {@link VkBindBufferMemoryInfoKHR} to {@link KHRBindMemory2#vkBindBufferMemory2KHR BindBufferMemory2KHR}.
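
* *

The valid-usage requirements below are typically satisfied by querying the buffer's memory requirements first and choosing a compatible memory type, as in the sketch that follows (hypothetical {@code physicalDevice}, {@code device} and {@code buffer} handles are assumed, and error handling is reduced to {@code VkResult} checks). The same pattern applies to images via {@link #vkGetImageMemoryRequirements GetImageMemoryRequirements} and {@link #vkBindImageMemory BindImageMemory}.

     * // Query requirements, pick a HOST_VISIBLE memory type, allocate, then bind the buffer.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkMemoryRequirements memReq = VkMemoryRequirements.callocStack(stack);
     *     vkGetBufferMemoryRequirements(device, buffer, memReq);
     *
     *     VkPhysicalDeviceMemoryProperties memProps = VkPhysicalDeviceMemoryProperties.callocStack(stack);
     *     vkGetPhysicalDeviceMemoryProperties(physicalDevice, memProps);
     *
     *     int memoryTypeIndex = -1;
     *     for (int i = 0; i < memProps.memoryTypeCount(); i++) {
     *         boolean allowed     = (memReq.memoryTypeBits() & (1 << i)) != 0;
     *         boolean hostVisible = (memProps.memoryTypes(i).propertyFlags() & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
     *         if (allowed && hostVisible) {
     *             memoryTypeIndex = i;
     *             break;
     *         }
     *     }
     *     if (memoryTypeIndex < 0) {
     *         throw new IllegalStateException("No suitable memory type found");
     *     }
     *
     *     VkMemoryAllocateInfo allocInfo = VkMemoryAllocateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO)
     *         .allocationSize(memReq.size())
     *         .memoryTypeIndex(memoryTypeIndex);
     *
     *     LongBuffer pMemory = stack.mallocLong(1);
     *     int err = vkAllocateMemory(device, allocInfo, null, pMemory);
     *     if (err == VK_SUCCESS) {
     *         err = vkBindBufferMemory(device, buffer, pMemory.get(0), 0L /*memoryOffset*/);
     *     }
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("Buffer memory setup failed: " + err);
     *     }
     * }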

* *
Valid Usage
* *
    *
  • {@code buffer} must not already be backed by a memory object
  • *
  • {@code buffer} must not have been created with any sparse memory binding flags
  • *
  • {@code memoryOffset} must be less than the size of {@code memory}
  • *
  • If {@code buffer} was created with {@link #VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT} or {@link #VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT} set, {@code memoryOffset} must be a multiple of {@link VkPhysicalDeviceLimits}{@code ::minTexelBufferOffsetAlignment}
  • *
  • If {@code buffer} was created with {@link #VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT BUFFER_USAGE_UNIFORM_BUFFER_BIT} set, {@code memoryOffset} must be a multiple of {@link VkPhysicalDeviceLimits}{@code ::minUniformBufferOffsetAlignment}
  • *
  • If {@code buffer} was created with {@link #VK_BUFFER_USAGE_STORAGE_BUFFER_BIT BUFFER_USAGE_STORAGE_BUFFER_BIT} set, {@code memoryOffset} must be a multiple of {@link VkPhysicalDeviceLimits}{@code ::minStorageBufferOffsetAlignment}
  • *
  • {@code memory} must have been allocated using one of the memory types allowed in the {@code memoryTypeBits} member of the {@link VkMemoryRequirements} structure returned from a call to {@link #vkGetBufferMemoryRequirements GetBufferMemoryRequirements} with {@code buffer}
  • *
  • {@code memoryOffset} must be an integer multiple of the {@code alignment} member of the {@link VkMemoryRequirements} structure returned from a call to {@link #vkGetBufferMemoryRequirements GetBufferMemoryRequirements} with {@code buffer}
  • *
  • The {@code size} member of the {@link VkMemoryRequirements} structure returned from a call to {@link #vkGetBufferMemoryRequirements GetBufferMemoryRequirements} with {@code buffer} must be less than or equal to the size of {@code memory} minus {@code memoryOffset}
  • *
  • If {@code buffer} requires a dedicated allocation (as reported by {@link KHRGetMemoryRequirements2#vkGetBufferMemoryRequirements2KHR GetBufferMemoryRequirements2KHR} in {@link VkMemoryDedicatedRequirementsKHR}{@code ::requiresDedicatedAllocation} for {@code buffer}), {@code memory} must have been created with {@link VkMemoryDedicatedAllocateInfoKHR}{@code ::buffer} equal to {@code buffer}
  • *
  • If the {@link VkMemoryAllocateInfo} provided when {@code memory} was allocated included an instance of {@link VkMemoryDedicatedAllocateInfoKHR} in its {@code pNext} chain, and {@link VkMemoryDedicatedAllocateInfoKHR}{@code ::buffer} was not {@link #VK_NULL_HANDLE NULL_HANDLE}, then {@code buffer} must equal {@link VkMemoryDedicatedAllocateInfoKHR}{@code ::buffer} and {@code memoryOffset} must be zero.
  • *
  • If {@code buffer} was created with {@link VkDedicatedAllocationBufferCreateInfoNV}{@code ::dedicatedAllocation} equal to {@link #VK_TRUE TRUE}, {@code memory} must have been created with {@link VkDedicatedAllocationMemoryAllocateInfoNV}{@code ::buffer} equal to a buffer handle created with identical creation parameters to {@code buffer} and {@code memoryOffset} must be zero
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code buffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code memory} must be a valid {@code VkDeviceMemory} handle
  • *
  • {@code buffer} must have been created, allocated, or retrieved from {@code device}
  • *
  • {@code memory} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code buffer} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* * @param device the logical device that owns the buffer and memory. * @param buffer the buffer to be attached to memory. * @param memory a {@code VkDeviceMemory} object describing the device memory to attach. * @param memoryOffset the start offset of the region of {@code memory} which is to be bound to the buffer. The number of bytes returned in the {@link VkMemoryRequirements}{@code ::size} member in {@code memory}, starting from {@code memoryOffset} bytes, will be bound to the specified buffer. */ @NativeType("VkResult") public static int vkBindBufferMemory(VkDevice device, @NativeType("VkBuffer") long buffer, @NativeType("VkDeviceMemory") long memory, @NativeType("VkDeviceSize") long memoryOffset) { long __functionAddress = device.getCapabilities().vkBindBufferMemory; return callPJJJI(__functionAddress, device.address(), buffer, memory, memoryOffset); } // --- [ vkBindImageMemory ] --- /** * Bind device memory to an image object. * *
C Specification
* *

To attach memory to a {@code VkImage} object created without the {@link KHRSamplerYcbcrConversion#VK_IMAGE_CREATE_DISJOINT_BIT_KHR IMAGE_CREATE_DISJOINT_BIT_KHR} set, call:

* *
     * VkResult vkBindImageMemory(
     *     VkDevice                                    device,
     *     VkImage                                     image,
     *     VkDeviceMemory                              memory,
     *     VkDeviceSize                                memoryOffset);
* *
Description
* *

{@link #vkBindImageMemory BindImageMemory} is equivalent to passing the same parameters through {@link VkBindImageMemoryInfoKHR} to {@link KHRBindMemory2#vkBindImageMemory2KHR BindImageMemory2KHR}.

* *
Valid Usage
* *
    *
  • {@code image} must not have been created with the {@link KHRSamplerYcbcrConversion#VK_IMAGE_CREATE_DISJOINT_BIT_KHR IMAGE_CREATE_DISJOINT_BIT_KHR} set.
  • *
  • {@code image} must not already be backed by a memory object
  • *
  • {@code image} must not have been created with any sparse memory binding flags
  • *
  • {@code memoryOffset} must be less than the size of {@code memory}
  • *
  • {@code memory} must have been allocated using one of the memory types allowed in the {@code memoryTypeBits} member of the {@link VkMemoryRequirements} structure returned from a call to {@link #vkGetImageMemoryRequirements GetImageMemoryRequirements} with {@code image}
  • *
  • {@code memoryOffset} must be an integer multiple of the {@code alignment} member of the {@link VkMemoryRequirements} structure returned from a call to {@link #vkGetImageMemoryRequirements GetImageMemoryRequirements} with {@code image}
  • *
  • The {@code size} member of the {@link VkMemoryRequirements} structure returned from a call to {@link #vkGetImageMemoryRequirements GetImageMemoryRequirements} with {@code image} must be less than or equal to the size of {@code memory} minus {@code memoryOffset}
  • *
  • If {@code image} requires a dedicated allocation (as reported by {@link KHRGetMemoryRequirements2#vkGetImageMemoryRequirements2KHR GetImageMemoryRequirements2KHR} in {@link VkMemoryDedicatedRequirementsKHR}{@code ::requiresDedicatedAllocation} for {@code image}), {@code memory} must have been created with {@link VkMemoryDedicatedAllocateInfoKHR}{@code ::image} equal to {@code image}
  • *
  • If the {@link VkMemoryAllocateInfo} provided when {@code memory} was allocated included an instance of {@link VkMemoryDedicatedAllocateInfoKHR} in its {@code pNext} chain, and {@link VkMemoryDedicatedAllocateInfoKHR}{@code ::image} was not {@link #VK_NULL_HANDLE NULL_HANDLE}, then {@code image} must equal {@link VkMemoryDedicatedAllocateInfoKHR}{@code ::image} and {@code memoryOffset} must be zero.
  • *
  • If {@code image} was created with {@link VkDedicatedAllocationImageCreateInfoNV}{@code ::dedicatedAllocation} equal to {@link #VK_TRUE TRUE}, {@code memory} must have been created with {@link VkDedicatedAllocationMemoryAllocateInfoNV}{@code ::image} equal to an image handle created with identical creation parameters to {@code image} and {@code memoryOffset} must be zero
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code image} must be a valid {@code VkImage} handle
  • *
  • {@code memory} must be a valid {@code VkDeviceMemory} handle
  • *
  • {@code image} must have been created, allocated, or retrieved from {@code device}
  • *
  • {@code memory} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code image} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* * @param device the logical device that owns the image and memory. * @param image the image. * @param memory the {@code VkDeviceMemory} object describing the device memory to attach. * @param memoryOffset the start offset of the region of {@code memory} which is to be bound to the image. The number of bytes returned in the {@link VkMemoryRequirements}{@code ::size} member in {@code memory}, starting from {@code memoryOffset} bytes, will be bound to the specified image. */ @NativeType("VkResult") public static int vkBindImageMemory(VkDevice device, @NativeType("VkImage") long image, @NativeType("VkDeviceMemory") long memory, @NativeType("VkDeviceSize") long memoryOffset) { long __functionAddress = device.getCapabilities().vkBindImageMemory; return callPJJJI(__functionAddress, device.address(), image, memory, memoryOffset); } // --- [ vkGetBufferMemoryRequirements ] --- /** Unsafe version of: {@link #vkGetBufferMemoryRequirements GetBufferMemoryRequirements} */ public static void nvkGetBufferMemoryRequirements(VkDevice device, long buffer, long pMemoryRequirements) { long __functionAddress = device.getCapabilities().vkGetBufferMemoryRequirements; callPJPV(__functionAddress, device.address(), buffer, pMemoryRequirements); } /** * Returns the memory requirements for specified Vulkan object. * *
C Specification
* *

To determine the memory requirements for a buffer resource, call:

* *
     * void vkGetBufferMemoryRequirements(
     *     VkDevice                                    device,
     *     VkBuffer                                    buffer,
     *     VkMemoryRequirements*                       pMemoryRequirements);
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code buffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code pMemoryRequirements} must be a valid pointer to a {@link VkMemoryRequirements} structure
  • *
  • {@code buffer} must have been created, allocated, or retrieved from {@code device}
  • *
* *
See Also
* *

{@link VkMemoryRequirements}

* * @param device the logical device that owns the buffer. * @param buffer the buffer to query. * @param pMemoryRequirements points to an instance of the {@link VkMemoryRequirements} structure in which the memory requirements of the buffer object are returned. */ public static void vkGetBufferMemoryRequirements(VkDevice device, @NativeType("VkBuffer") long buffer, @NativeType("VkMemoryRequirements *") VkMemoryRequirements pMemoryRequirements) { nvkGetBufferMemoryRequirements(device, buffer, pMemoryRequirements.address()); } // --- [ vkGetImageMemoryRequirements ] --- /** Unsafe version of: {@link #vkGetImageMemoryRequirements GetImageMemoryRequirements} */ public static void nvkGetImageMemoryRequirements(VkDevice device, long image, long pMemoryRequirements) { long __functionAddress = device.getCapabilities().vkGetImageMemoryRequirements; callPJPV(__functionAddress, device.address(), image, pMemoryRequirements); } /** * Returns the memory requirements for specified Vulkan object. * *
C Specification
* *

To determine the memory requirements for an image resource which is not created with the {@link KHRSamplerYcbcrConversion#VK_IMAGE_CREATE_DISJOINT_BIT_KHR IMAGE_CREATE_DISJOINT_BIT_KHR} flag set, call:

* *
     * void vkGetImageMemoryRequirements(
     *     VkDevice                                    device,
     *     VkImage                                     image,
     *     VkMemoryRequirements*                       pMemoryRequirements);
* *
Valid Usage
* *
    *
  • {@code image} must not have been created with the {@link KHRSamplerYcbcrConversion#VK_IMAGE_CREATE_DISJOINT_BIT_KHR IMAGE_CREATE_DISJOINT_BIT_KHR} flag set
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code image} must be a valid {@code VkImage} handle
  • *
  • {@code pMemoryRequirements} must be a valid pointer to a {@link VkMemoryRequirements} structure
  • *
  • {@code image} must have been created, allocated, or retrieved from {@code device}
  • *
* *
See Also
* *

{@link VkMemoryRequirements}

* * @param device the logical device that owns the image. * @param image the image to query. * @param pMemoryRequirements points to an instance of the {@link VkMemoryRequirements} structure in which the memory requirements of the image object are returned. */ public static void vkGetImageMemoryRequirements(VkDevice device, @NativeType("VkImage") long image, @NativeType("VkMemoryRequirements *") VkMemoryRequirements pMemoryRequirements) { nvkGetImageMemoryRequirements(device, image, pMemoryRequirements.address()); } // --- [ vkGetImageSparseMemoryRequirements ] --- /** * Unsafe version of: {@link #vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements} * * @param pSparseMemoryRequirementCount a pointer to an integer related to the number of sparse memory requirements available or queried, as described below. */ public static void nvkGetImageSparseMemoryRequirements(VkDevice device, long image, long pSparseMemoryRequirementCount, long pSparseMemoryRequirements) { long __functionAddress = device.getCapabilities().vkGetImageSparseMemoryRequirements; callPJPPV(__functionAddress, device.address(), image, pSparseMemoryRequirementCount, pSparseMemoryRequirements); } /** * Query the memory requirements for a sparse image. * *
C Specification
* *

To query sparse memory requirements for an image, call:

* *
     * void vkGetImageSparseMemoryRequirements(
     *     VkDevice                                    device,
     *     VkImage                                     image,
     *     uint32_t*                                   pSparseMemoryRequirementCount,
     *     VkSparseImageMemoryRequirements*            pSparseMemoryRequirements);
* *
Description
* *

If {@code pSparseMemoryRequirements} is {@code NULL}, then the number of sparse memory requirements available is returned in {@code pSparseMemoryRequirementCount}. Otherwise, {@code pSparseMemoryRequirementCount} must point to a variable set by the user to the number of elements in the {@code pSparseMemoryRequirements} array, and on return the variable is overwritten with the number of structures actually written to {@code pSparseMemoryRequirements}. If {@code pSparseMemoryRequirementCount} is less than the number of sparse memory requirements available, at most {@code pSparseMemoryRequirementCount} structures will be written.

* *

If the image was not created with {@link #VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT IMAGE_CREATE_SPARSE_RESIDENCY_BIT} then {@code pSparseMemoryRequirementCount} will be set to zero and {@code pSparseMemoryRequirements} will not be written to.

* *
Note
* *

It is legal for an implementation to report a larger value in {@link VkMemoryRequirements}{@code ::size} than would be obtained by adding together memory sizes for all {@link VkSparseImageMemoryRequirements} returned by {@link #vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements}. This may occur when the hardware requires unused padding in the address range describing the resource.

*
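* *

The usual two-call enumeration pattern applies here: first query the count, then fill an appropriately sized array, as in this sketch (a hypothetical sparse {@code image} and a valid {@code device} are assumed).

     * // First call obtains the count, second call fills the array.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     IntBuffer pCount = stack.callocInt(1);
     *     vkGetImageSparseMemoryRequirements(device, image, pCount, null);
     *
     *     VkSparseImageMemoryRequirements.Buffer reqs =
     *         VkSparseImageMemoryRequirements.callocStack(pCount.get(0), stack);
     *     vkGetImageSparseMemoryRequirements(device, image, pCount, reqs);
     *
     *     for (int i = 0; i < pCount.get(0); i++) {
     *         System.out.println("aspectMask: " + reqs.get(i).formatProperties().aspectMask());
     *     }
     * }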
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code image} must be a valid {@code VkImage} handle
  • *
  • {@code pSparseMemoryRequirementCount} must be a valid pointer to a {@code uint32_t} value
  • *
  • If the value referenced by {@code pSparseMemoryRequirementCount} is not 0, and {@code pSparseMemoryRequirements} is not {@code NULL}, {@code pSparseMemoryRequirements} must be a valid pointer to an array of {@code pSparseMemoryRequirementCount} {@link VkSparseImageMemoryRequirements} structures
  • *
  • {@code image} must have been created, allocated, or retrieved from {@code device}
  • *
* *
See Also
* *

{@link VkSparseImageMemoryRequirements}

* * @param device the logical device that owns the image. * @param image the {@code VkImage} object to get the memory requirements for. * @param pSparseMemoryRequirementCount a pointer to an integer related to the number of sparse memory requirements available or queried, as described below. * @param pSparseMemoryRequirements either {@code NULL} or a pointer to an array of {@link VkSparseImageMemoryRequirements} structures. */ public static void vkGetImageSparseMemoryRequirements(VkDevice device, @NativeType("VkImage") long image, @NativeType("uint32_t *") IntBuffer pSparseMemoryRequirementCount, @Nullable @NativeType("VkSparseImageMemoryRequirements *") VkSparseImageMemoryRequirements.Buffer pSparseMemoryRequirements) { if (CHECKS) { check(pSparseMemoryRequirementCount, 1); checkSafe(pSparseMemoryRequirements, pSparseMemoryRequirementCount.get(pSparseMemoryRequirementCount.position())); } nvkGetImageSparseMemoryRequirements(device, image, memAddress(pSparseMemoryRequirementCount), memAddressSafe(pSparseMemoryRequirements)); } // --- [ vkGetPhysicalDeviceSparseImageFormatProperties ] --- /** * Unsafe version of: {@link #vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties} * * @param pPropertyCount a pointer to an integer related to the number of sparse format properties available or queried, as described below. */ public static void nvkGetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, int format, int type, int samples, int usage, int tiling, long pPropertyCount, long pProperties) { long __functionAddress = physicalDevice.getCapabilities().vkGetPhysicalDeviceSparseImageFormatProperties; callPPPV(__functionAddress, physicalDevice.address(), format, type, samples, usage, tiling, pPropertyCount, pProperties); } /** * Retrieve properties of an image format applied to sparse images. * *
C Specification
* *

{@link #vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties} returns an array of {@link VkSparseImageFormatProperties}. Each element will describe properties for one set of image aspects that are bound simultaneously in the image. This is usually one element for each aspect in the image, but for interleaved depth/stencil images there is only one element describing the combined aspects.

* *
     * void vkGetPhysicalDeviceSparseImageFormatProperties(
     *     VkPhysicalDevice                            physicalDevice,
     *     VkFormat                                    format,
     *     VkImageType                                 type,
     *     VkSampleCountFlagBits                       samples,
     *     VkImageUsageFlags                           usage,
     *     VkImageTiling                               tiling,
     *     uint32_t*                                   pPropertyCount,
     *     VkSparseImageFormatProperties*              pProperties);
* *
Description
* *

If {@code pProperties} is {@code NULL}, then the number of sparse format properties available is returned in {@code pPropertyCount}. Otherwise, {@code pPropertyCount} must point to a variable set by the user to the number of elements in the {@code pProperties} array, and on return the variable is overwritten with the number of structures actually written to {@code pProperties}. If {@code pPropertyCount} is less than the number of sparse format properties available, at most {@code pPropertyCount} structures will be written.

* *

If {@link #VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT IMAGE_CREATE_SPARSE_RESIDENCY_BIT} is not supported for the given arguments, {@code pPropertyCount} will be set to zero upon return, and no data will be written to {@code pProperties}.

* *

Multiple aspects are returned for depth/stencil images that are implemented as separate planes by the implementation. The depth and stencil data planes each have unique {@link VkSparseImageFormatProperties} data.

* *

Depth/stencil images with depth and stencil data interleaved into a single plane will return a single {@link VkSparseImageFormatProperties} structure with the {@code aspectMask} set to {@link #VK_IMAGE_ASPECT_DEPTH_BIT IMAGE_ASPECT_DEPTH_BIT} | {@link #VK_IMAGE_ASPECT_STENCIL_BIT IMAGE_ASPECT_STENCIL_BIT}.

* *
Valid Usage
* *
    *
  • {@code samples} must be a bit value that is set in {@link VkImageFormatProperties}{@code ::sampleCounts} returned by {@link #vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties} with {@code format}, {@code type}, {@code tiling}, and {@code usage} equal to those in this command and {@code flags} equal to the value that is set in {@link VkImageCreateInfo}{@code ::flags} when the image is created
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code physicalDevice} must be a valid {@code VkPhysicalDevice} handle
  • *
  • {@code format} must be a valid {@code VkFormat} value
  • *
  • {@code type} must be a valid {@code VkImageType} value
  • *
  • {@code samples} must be a valid {@code VkSampleCountFlagBits} value
  • *
  • {@code usage} must be a valid combination of {@code VkImageUsageFlagBits} values
  • *
  • {@code usage} must not be 0
  • *
  • {@code tiling} must be a valid {@code VkImageTiling} value
  • *
  • {@code pPropertyCount} must be a valid pointer to a {@code uint32_t} value
  • *
  • If the value referenced by {@code pPropertyCount} is not 0, and {@code pProperties} is not {@code NULL}, {@code pProperties} must be a valid pointer to an array of {@code pPropertyCount} {@link VkSparseImageFormatProperties} structures
  • *
* *
See Also
* *

{@link VkSparseImageFormatProperties}

* * @param physicalDevice the physical device from which to query the sparse image capabilities. * @param format the image format. * @param type the dimensionality of image. * @param samples the number of samples per texel as defined in {@code VkSampleCountFlagBits}. * @param usage a bitmask describing the intended usage of the image. * @param tiling the tiling arrangement of the data elements in memory. * @param pPropertyCount a pointer to an integer related to the number of sparse format properties available or queried, as described below. * @param pProperties either {@code NULL} or a pointer to an array of {@link VkSparseImageFormatProperties} structures. */ public static void vkGetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, @NativeType("VkFormat") int format, @NativeType("VkImageType") int type, @NativeType("VkSampleCountFlagBits") int samples, @NativeType("VkImageUsageFlags") int usage, @NativeType("VkImageTiling") int tiling, @NativeType("uint32_t *") IntBuffer pPropertyCount, @Nullable @NativeType("VkSparseImageFormatProperties *") VkSparseImageFormatProperties.Buffer pProperties) { if (CHECKS) { check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount.get(pPropertyCount.position())); } nvkGetPhysicalDeviceSparseImageFormatProperties(physicalDevice, format, type, samples, usage, tiling, memAddress(pPropertyCount), memAddressSafe(pProperties)); } // --- [ vkQueueBindSparse ] --- /** * Unsafe version of: {@link #vkQueueBindSparse QueueBindSparse} * * @param bindInfoCount the number of elements in the {@code pBindInfo} array. */ public static int nvkQueueBindSparse(VkQueue queue, int bindInfoCount, long pBindInfo, long fence) { long __functionAddress = queue.getCapabilities().vkQueueBindSparse; if (CHECKS) { if (pBindInfo != NULL) { VkBindSparseInfo.validate(pBindInfo, bindInfoCount); } } return callPPJI(__functionAddress, queue.address(), bindInfoCount, pBindInfo, fence); } /** * Bind device memory to a sparse resource object. * *
C Specification
* *

To submit sparse binding operations to a queue, call:

* *
     * VkResult vkQueueBindSparse(
     *     VkQueue                                     queue,
     *     uint32_t                                    bindInfoCount,
     *     const VkBindSparseInfo*                     pBindInfo,
     *     VkFence                                     fence);
* *
Description
* *

{@link #vkQueueBindSparse QueueBindSparse} is a queue submission command, with each batch defined by an element of {@code pBindInfo} as an instance of the {@link VkBindSparseInfo} structure. Batches begin execution in the order they appear in {@code pBindInfo}, but may complete out of order.

* *

Within a batch, a given range of a resource must not be bound more than once. Across batches, if a range is to be bound to one allocation and offset and then to another allocation and offset, then the application must guarantee (usually using semaphores) that the binding operations are executed in the correct order, as well as to order binding operations against the execution of command buffer submissions.

* *

As no operation to {@link #vkQueueBindSparse QueueBindSparse} causes any pipeline stage to access memory, synchronization primitives used in this command effectively only define execution dependencies.

* *

Additional information about fence and semaphore operation is described in the synchronization chapter.
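
* *

One common shape of a sparse binding submission binds a single opaque range of a sparse buffer and signals a fence on completion. The sketch below is only an outline: {@code queue}, {@code sparseBuffer}, {@code memory}, {@code bindSize} and {@code fence} are assumed to exist, the semaphore arrays are left empty, and the bind must still respect the buffer's sparse block size and alignment.

     * // Bind one memory range into a sparse buffer and signal 'fence' when the bind completes.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkSparseMemoryBind.Buffer binds = VkSparseMemoryBind.callocStack(1, stack)
     *         .resourceOffset(0L)
     *         .size(bindSize)
     *         .memory(memory)
     *         .memoryOffset(0L);
     *
     *     VkSparseBufferMemoryBindInfo.Buffer bufferBinds = VkSparseBufferMemoryBindInfo.callocStack(1, stack)
     *         .buffer(sparseBuffer)
     *         .pBinds(binds);
     *
     *     VkBindSparseInfo bindInfo = VkBindSparseInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_BIND_SPARSE_INFO)
     *         .pBufferBinds(bufferBinds);
     *
     *     int err = vkQueueBindSparse(queue, bindInfo, fence); // single-batch overload
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkQueueBindSparse failed: " + err);
     *     }
     * }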

* *
Valid Usage
* *
    *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must be unsignaled
  • *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must not be associated with any other queue command that has not yet completed execution on that queue
  • *
  • Each element of the {@code pSignalSemaphores} member of each element of {@code pBindInfo} must be unsignaled when the semaphore signal operation it defines is executed on the device
  • *
  • When a semaphore unsignal operation defined by any element of the {@code pWaitSemaphores} member of any element of {@code pBindInfo} executes on {@code queue}, no other queue must be waiting on the same semaphore.
  • *
  • All elements of the {@code pWaitSemaphores} member of all elements of {@code pBindInfo} must be semaphores that are signaled, or have semaphore signal operations previously submitted for execution.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code queue} must be a valid {@code VkQueue} handle
  • *
  • If {@code bindInfoCount} is not 0, {@code pBindInfo} must be a valid pointer to an array of {@code bindInfoCount} valid {@link VkBindSparseInfo} structures
  • *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must be a valid {@code VkFence} handle
  • *
  • The {@code queue} must support sparse binding operations
  • *
  • Both of {@code fence} and {@code queue}, if they are valid handles, must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code queue} must be externally synchronized
  • *
  • Host access to {@code pBindInfo}[].pWaitSemaphores[] must be externally synchronized
  • *
  • Host access to {@code pBindInfo}[].pSignalSemaphores[] must be externally synchronized
  • *
  • Host access to {@code pBindInfo}[].pBufferBinds[].buffer must be externally synchronized
  • *
  • Host access to {@code pBindInfo}[].pImageOpaqueBinds[].image must be externally synchronized
  • *
  • Host access to {@code pBindInfo}[].pImageBinds[].image must be externally synchronized
  • *
  • Host access to {@code fence} must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer LevelsRender Pass ScopeSupported Queue TypesPipeline Type
--SPARSE_BINDING-
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
* *
See Also
* *

{@link VkBindSparseInfo}

* * @param queue the queue that the sparse binding operations will be submitted to. * @param pBindInfo an array of {@link VkBindSparseInfo} structures, each specifying a sparse binding submission batch. * @param fence an optional: handle to a fence to be signaled. If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, it defines a fence signal operation. */ @NativeType("VkResult") public static int vkQueueBindSparse(VkQueue queue, @Nullable @NativeType("const VkBindSparseInfo *") VkBindSparseInfo.Buffer pBindInfo, @NativeType("VkFence") long fence) { return nvkQueueBindSparse(queue, remainingSafe(pBindInfo), memAddressSafe(pBindInfo), fence); } /** * Bind device memory to a sparse resource object. * *
C Specification
* *

To submit sparse binding operations to a queue, call:

* *
     * VkResult vkQueueBindSparse(
     *     VkQueue                                     queue,
     *     uint32_t                                    bindInfoCount,
     *     const VkBindSparseInfo*                     pBindInfo,
     *     VkFence                                     fence);
* *
Description
* *

{@link #vkQueueBindSparse QueueBindSparse} is a queue submission command, with each batch defined by an element of {@code pBindInfo} as an instance of the {@link VkBindSparseInfo} structure. Batches begin execution in the order they appear in {@code pBindInfo}, but may complete out of order.

* *

Within a batch, a given range of a resource must not be bound more than once. Across batches, if a range is to be bound to one allocation and offset and then to another allocation and offset, then the application must guarantee (usually using semaphores) that the binding operations are executed in the correct order, as well as to order binding operations against the execution of command buffer submissions.

* *

As no operation to {@link #vkQueueBindSparse QueueBindSparse} causes any pipeline stage to access memory, synchronization primitives used in this command effectively only define execution dependencies.

* *

Additional information about fence and semaphore operation is described in the synchronization chapter.

* *
Valid Usage
* *
    *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must be unsignaled
  • *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must not be associated with any other queue command that has not yet completed execution on that queue
  • *
  • Each element of the {@code pSignalSemaphores} member of each element of {@code pBindInfo} must be unsignaled when the semaphore signal operation it defines is executed on the device
  • *
  • When a semaphore unsignal operation defined by any element of the {@code pWaitSemaphores} member of any element of {@code pBindInfo} executes on {@code queue}, no other queue must be waiting on the same semaphore.
  • *
  • All elements of the {@code pWaitSemaphores} member of all elements of {@code pBindInfo} must be semaphores that are signaled, or have semaphore signal operations previously submitted for execution.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code queue} must be a valid {@code VkQueue} handle
  • *
  • If {@code bindInfoCount} is not 0, {@code pBindInfo} must be a valid pointer to an array of {@code bindInfoCount} valid {@link VkBindSparseInfo} structures
  • *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must be a valid {@code VkFence} handle
  • *
  • The {@code queue} must support sparse binding operations
  • *
  • Both of {@code fence} and {@code queue}, if they are valid handles, must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code queue} must be externally synchronized
  • *
  • Host access to {@code pBindInfo}[].pWaitSemaphores[] must be externally synchronized
  • *
  • Host access to {@code pBindInfo}[].pSignalSemaphores[] must be externally synchronized
  • *
  • Host access to {@code pBindInfo}[].pBufferBinds[].buffer must be externally synchronized
  • *
  • Host access to {@code pBindInfo}[].pImageOpaqueBinds[].image must be externally synchronized
  • *
  • Host access to {@code pBindInfo}[].pImageBinds[].image must be externally synchronized
  • *
  • Host access to {@code fence} must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer LevelsRender Pass ScopeSupported Queue TypesPipeline Type
--SPARSE_BINDING-
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
* *
See Also
* *

{@link VkBindSparseInfo}

* * @param queue the queue that the sparse binding operations will be submitted to. * @param pBindInfo an array of {@link VkBindSparseInfo} structures, each specifying a sparse binding submission batch. * @param fence an optional: handle to a fence to be signaled. If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, it defines a fence signal operation. */ @NativeType("VkResult") public static int vkQueueBindSparse(VkQueue queue, @Nullable @NativeType("const VkBindSparseInfo *") VkBindSparseInfo pBindInfo, @NativeType("VkFence") long fence) { return nvkQueueBindSparse(queue, 1, pBindInfo.address(), fence); } // --- [ vkCreateFence ] --- /** Unsafe version of: {@link #vkCreateFence CreateFence} */ public static int nvkCreateFence(VkDevice device, long pCreateInfo, long pAllocator, long pFence) { long __functionAddress = device.getCapabilities().vkCreateFence; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pFence); } /** * Create a new fence object. * *
C Specification
* *

To create a fence, call:

* *
     * VkResult vkCreateFence(
     *     VkDevice                                    device,
     *     const VkFenceCreateInfo*                    pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkFence*                                    pFence);
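* *

A fence's typical lifecycle is create, submit work that signals it, wait, reset for reuse, and finally destroy. The sketch below assumes a valid {@code device} and that some queue submission elsewhere (e.g. {@link #vkQueueBindSparse QueueBindSparse} with this fence) signals it; the wait uses the single-fence convenience overload of {@code vkWaitForFences}.

     * // Create a fence, wait for it to be signaled by a submission, then reset and destroy it.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkFenceCreateInfo fenceInfo = VkFenceCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_FENCE_CREATE_INFO); // flags == 0: created unsignaled
     *
     *     LongBuffer pFence = stack.mallocLong(1);
     *     int err = vkCreateFence(device, fenceInfo, null, pFence);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateFence failed: " + err);
     *     }
     *     long fence = pFence.get(0);
     *
     *     // ... submit work that signals 'fence' ...
     *
     *     vkWaitForFences(device, fence, true, ~0L); // no timeout
     *     vkResetFences(device, fence);              // back to the unsignaled state for reuse
     *     vkDestroyFence(device, fence, null);
     * }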
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkFenceCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pFence} must be a valid pointer to a {@code VkFence} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkFenceCreateInfo}

* * @param device the logical device that creates the fence. * @param pCreateInfo a pointer to an instance of the {@link VkFenceCreateInfo} structure which contains information about how the fence is to be created. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pFence points to a handle in which the resulting fence object is returned. */ @NativeType("VkResult") public static int vkCreateFence(VkDevice device, @NativeType("const VkFenceCreateInfo *") VkFenceCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkFence *") LongBuffer pFence) { if (CHECKS) { check(pFence, 1); } return nvkCreateFence(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pFence)); } // --- [ vkDestroyFence ] --- /** Unsafe version of: {@link #vkDestroyFence DestroyFence} */ public static void nvkDestroyFence(VkDevice device, long fence, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyFence; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), fence, pAllocator); } /** * Destroy a fence object. * *
C Specification
* *

To destroy a fence, call:

* *
     * void vkDestroyFence(
     *     VkDevice                                    device,
     *     VkFence                                     fence,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All queue submission commands that refer to {@code fence} must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code fence} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code fence} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code fence} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code fence} must be a valid {@code VkFence} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code fence} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code fence} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}
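A small cleanup sketch (illustrative, not from the generated Javadoc), assuming {@code device} and {@code fence} are valid handles created earlier and that no queue submission still references the fence:

    // Ensure no pending submission still refers to the fence, then destroy it.
    vkWaitForFences(device, fence, true, Long.MAX_VALUE); // or otherwise guarantee completion
    vkDestroyFence(device, fence, null);                  // NULL allocator: default allocation was used at creation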

* * @param device the logical device that destroys the fence. * @param fence the handle of the fence to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyFence(VkDevice device, @NativeType("VkFence") long fence, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyFence(device, fence, memAddressSafe(pAllocator)); } // --- [ vkResetFences ] --- /** * Unsafe version of: {@link #vkResetFences ResetFences} * * @param fenceCount the number of fences to reset. */ public static int nvkResetFences(VkDevice device, int fenceCount, long pFences) { long __functionAddress = device.getCapabilities().vkResetFences; return callPPI(__functionAddress, device.address(), fenceCount, pFences); } /** * Resets one or more fence objects. * *
C Specification
* *

To set the state of fences to unsignaled from the host, call:

* *
     * VkResult vkResetFences(
     *     VkDevice                                    device,
     *     uint32_t                                    fenceCount,
     *     const VkFence*                              pFences);
* *
Description
* *

If any member of {@code pFences} currently has its payload imported with temporary permanence, that fence's prior permanent payload is first restored. The remaining operations described therefore operate on the restored payload.

* *

When {@link #vkResetFences ResetFences} is executed on the host, it defines a fence unsignal operation for each fence, which resets the fence to the unsignaled state.

* *

If any member of {@code pFences} is already in the unsignaled state when {@link #vkResetFences ResetFences} is executed, then {@link #vkResetFences ResetFences} has no effect on that fence.

* *
Valid Usage
* *
    *
  • Each element of {@code pFences} must not be currently associated with any queue command that has not yet completed execution on that queue
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pFences} must be a valid pointer to an array of {@code fenceCount} valid {@code VkFence} handles
  • *
  • {@code fenceCount} must be greater than 0
  • *
  • Each element of {@code pFences} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to each member of {@code pFences} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
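A brief sketch of resetting a batch of fences before reuse (illustrative; {@code frameFence0} and {@code frameFence1} are hypothetical per-frame fence handles owned by {@code device}):

    try (MemoryStack stack = MemoryStack.stackPush()) {
        LongBuffer pFences = stack.longs(frameFence0, frameFence1);
        int err = vkResetFences(device, pFences);
        if (err != VK_SUCCESS) {
            throw new IllegalStateException("vkResetFences returned " + err);
        }
    }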
* * @param device the logical device that owns the fences. * @param pFences a pointer to an array of fence handles to reset. */ @NativeType("VkResult") public static int vkResetFences(VkDevice device, @NativeType("const VkFence *") LongBuffer pFences) { return nvkResetFences(device, pFences.remaining(), memAddress(pFences)); } /** * Resets one or more fence objects. * *
C Specification
* *

To set the state of fences to unsignaled from the host, call:

* *
     * VkResult vkResetFences(
     *     VkDevice                                    device,
     *     uint32_t                                    fenceCount,
     *     const VkFence*                              pFences);
* *
Description
* *

If any member of {@code pFences} currently has its payload imported with temporary permanence, that fence's prior permanent payload is first restored. The remaining operations described therefore operate on the restored payload.

* *

When {@link #vkResetFences ResetFences} is executed on the host, it defines a fence unsignal operation for each fence, which resets the fence to the unsignaled state.

* *

If any member of {@code pFences} is already in the unsignaled state when {@link #vkResetFences ResetFences} is executed, then {@link #vkResetFences ResetFences} has no effect on that fence.

* *
Valid Usage
* *
    *
  • Each element of {@code pFences} must not be currently associated with any queue command that has not yet completed execution on that queue
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pFences} must be a valid pointer to an array of {@code fenceCount} valid {@code VkFence} handles
  • *
  • {@code fenceCount} must be greater than 0
  • *
  • Each element of {@code pFences} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to each member of {@code pFences} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* * @param device the logical device that owns the fences. */ @NativeType("VkResult") public static int vkResetFences(VkDevice device, @NativeType("const VkFence *") long pFence) { MemoryStack stack = stackGet(); int stackPointer = stack.getPointer(); try { LongBuffer pFences = stack.longs(pFence); return nvkResetFences(device, 1, memAddress(pFences)); } finally { stack.setPointer(stackPointer); } } // --- [ vkGetFenceStatus ] --- /** * Return the status of a fence. * *
C Specification
* *

To query the status of a fence from the host, call:

* *
     * VkResult vkGetFenceStatus(
     *     VkDevice                                    device,
     *     VkFence                                     fence);
* *
Description
* *

Upon success, {@link #vkGetFenceStatus GetFenceStatus} returns the status of the fence object, with the following return codes:

* *
Fence Object Status Codes
  Status                                           Meaning
  {@link #VK_SUCCESS SUCCESS}                      The fence specified by {@code fence} is signaled.
  {@link #VK_NOT_READY NOT_READY}                  The fence specified by {@code fence} is unsignaled.
  {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}  The device has been lost. See Lost Device.
* *

If a queue submission command is pending execution, then the value returned by this command may immediately be out of date.

* *

If the device has been lost (see Lost Device), {@link #vkGetFenceStatus GetFenceStatus} may return any of the above status codes. If the device has been lost and {@link #vkGetFenceStatus GetFenceStatus} is called repeatedly, it will eventually return either {@link #VK_SUCCESS SUCCESS} or {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code fence} must be a valid {@code VkFence} handle
  • *
  • {@code fence} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_NOT_READY NOT_READY}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
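A non-blocking poll sketch (illustrative, assuming {@code device} and {@code fence} are valid handles):

    int status = vkGetFenceStatus(device, fence);
    if (status == VK_SUCCESS) {
        // the fence is signaled; the associated submissions have completed
    } else if (status == VK_NOT_READY) {
        // still pending; poll again later or fall back to vkWaitForFences
    } else {
        // VK_ERROR_DEVICE_LOST or an out-of-memory error
    }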
* * @param device the logical device that owns the fence. * @param fence the handle of the fence to query. */ @NativeType("VkResult") public static int vkGetFenceStatus(VkDevice device, @NativeType("VkFence") long fence) { long __functionAddress = device.getCapabilities().vkGetFenceStatus; return callPJI(__functionAddress, device.address(), fence); } // --- [ vkWaitForFences ] --- /** * Unsafe version of: {@link #vkWaitForFences WaitForFences} * * @param fenceCount the number of fences to wait on. */ public static int nvkWaitForFences(VkDevice device, int fenceCount, long pFences, int waitAll, long timeout) { long __functionAddress = device.getCapabilities().vkWaitForFences; return callPPJI(__functionAddress, device.address(), fenceCount, pFences, waitAll, timeout); } /** * Wait for one or more fences to become signaled. * *
C Specification
* *

To wait for one or more fences to enter the signaled state on the host, call:

* *
     * VkResult vkWaitForFences(
     *     VkDevice                                    device,
     *     uint32_t                                    fenceCount,
     *     const VkFence*                              pFences,
     *     VkBool32                                    waitAll,
     *     uint64_t                                    timeout);
* *
Description
* *

If the condition is satisfied when {@link #vkWaitForFences WaitForFences} is called, then {@link #vkWaitForFences WaitForFences} returns immediately. If the condition is not satisfied at the time {@link #vkWaitForFences WaitForFences} is called, then {@link #vkWaitForFences WaitForFences} will block and wait up to {@code timeout} nanoseconds for the condition to become satisfied.

* *

If {@code timeout} is zero, then {@link #vkWaitForFences WaitForFences} does not wait, but simply returns the current state of the fences. {@link #VK_TIMEOUT TIMEOUT} will be returned in this case if the condition is not satisfied, even though no actual wait was performed.

* *

If the specified timeout period expires before the condition is satisfied, {@link #vkWaitForFences WaitForFences} returns {@link #VK_TIMEOUT TIMEOUT}. If the condition is satisfied before {@code timeout} nanoseconds has expired, {@link #vkWaitForFences WaitForFences} returns {@link #VK_SUCCESS SUCCESS}.

* *

If device loss occurs (see Lost Device) before the timeout has expired, {@link #vkWaitForFences WaitForFences} must return in finite time with either {@link #VK_SUCCESS SUCCESS} or {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}.

* *
Note
* *

While we guarantee that {@link #vkWaitForFences WaitForFences} must return in finite time, no guarantees are made that it returns immediately upon device loss. However, the client can reasonably expect that the delay will be on the order of seconds and that calling {@link #vkWaitForFences WaitForFences} will not result in a permanently (or seemingly permanently) dead process.

*
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pFences} must be a valid pointer to an array of {@code fenceCount} valid {@code VkFence} handles
  • *
  • {@code fenceCount} must be greater than 0
  • *
  • Each element of {@code pFences} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_TIMEOUT TIMEOUT}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
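A bounded-wait sketch (illustrative; {@code pFences} is a {@code LongBuffer} of valid fence handles and the 16 ms timeout is an arbitrary example value):

    int result = vkWaitForFences(device, pFences, true, 16_000_000L); // ~16 ms in nanoseconds
    switch (result) {
        case VK_SUCCESS:
            break; // all fences signaled
        case VK_TIMEOUT:
            // not signaled within the timeout; decide whether to keep waiting
            break;
        default:
            // VK_ERROR_DEVICE_LOST or an out-of-memory error
            throw new IllegalStateException("vkWaitForFences returned " + result);
    }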
* * @param device the logical device that owns the fences. * @param pFences a pointer to an array of {@code fenceCount} fence handles. * @param waitAll the condition that must be satisfied to successfully unblock the wait. If {@code waitAll} is {@link #VK_TRUE TRUE}, then the condition is that all fences in {@code pFences} are signaled. Otherwise, the condition is that at least one fence in {@code pFences} is signaled. * @param timeout the timeout period in units of nanoseconds. {@code timeout} is adjusted to the closest value allowed by the implementation-dependent timeout accuracy, which may be substantially longer than one nanosecond, and may be longer than the requested period. */ @NativeType("VkResult") public static int vkWaitForFences(VkDevice device, @NativeType("const VkFence *") LongBuffer pFences, @NativeType("VkBool32") boolean waitAll, @NativeType("uint64_t") long timeout) { return nvkWaitForFences(device, pFences.remaining(), memAddress(pFences), waitAll ? 1 : 0, timeout); } /** * Wait for one or more fences to become signaled. * *
C Specification
* *

To wait for one or more fences to enter the signaled state on the host, call:

* *
     * VkResult vkWaitForFences(
     *     VkDevice                                    device,
     *     uint32_t                                    fenceCount,
     *     const VkFence*                              pFences,
     *     VkBool32                                    waitAll,
     *     uint64_t                                    timeout);
* *
Description
* *

If the condition is satisfied when {@link #vkWaitForFences WaitForFences} is called, then {@link #vkWaitForFences WaitForFences} returns immediately. If the condition is not satisfied at the time {@link #vkWaitForFences WaitForFences} is called, then {@link #vkWaitForFences WaitForFences} will block and wait up to {@code timeout} nanoseconds for the condition to become satisfied.

* *

If {@code timeout} is zero, then {@link #vkWaitForFences WaitForFences} does not wait, but simply returns the current state of the fences. {@link #VK_TIMEOUT TIMEOUT} will be returned in this case if the condition is not satisfied, even though no actual wait was performed.

* *

If the specified timeout period expires before the condition is satisfied, {@link #vkWaitForFences WaitForFences} returns {@link #VK_TIMEOUT TIMEOUT}. If the condition is satisfied before {@code timeout} nanoseconds has expired, {@link #vkWaitForFences WaitForFences} returns {@link #VK_SUCCESS SUCCESS}.

* *

If device loss occurs (see Lost Device) before the timeout has expired, {@link #vkWaitForFences WaitForFences} must return in finite time with either {@link #VK_SUCCESS SUCCESS} or {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}.

* *
Note
* *

While we guarantee that {@link #vkWaitForFences WaitForFences} must return in finite time, no guarantees are made that it returns immediately upon device loss. However, the client can reasonably expect that the delay will be on the order of seconds and that calling {@link #vkWaitForFences WaitForFences} will not result in a permanently (or seemingly permanently) dead process.

*
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pFences} must be a valid pointer to an array of {@code fenceCount} valid {@code VkFence} handles
  • *
  • {@code fenceCount} must be greater than 0
  • *
  • Each element of {@code pFences} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_TIMEOUT TIMEOUT}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
* * @param device the logical device that owns the fences. * @param waitAll the condition that must be satisfied to successfully unblock the wait. If {@code waitAll} is {@link #VK_TRUE TRUE}, then the condition is that all fences in {@code pFences} are signaled. Otherwise, the condition is that at least one fence in {@code pFences} is signaled. * @param timeout the timeout period in units of nanoseconds. {@code timeout} is adjusted to the closest value allowed by the implementation-dependent timeout accuracy, which may be substantially longer than one nanosecond, and may be longer than the requested period. */ @NativeType("VkResult") public static int vkWaitForFences(VkDevice device, @NativeType("const VkFence *") long pFence, @NativeType("VkBool32") boolean waitAll, @NativeType("uint64_t") long timeout) { MemoryStack stack = stackGet(); int stackPointer = stack.getPointer(); try { LongBuffer pFences = stack.longs(pFence); return nvkWaitForFences(device, 1, memAddress(pFences), waitAll ? 1 : 0, timeout); } finally { stack.setPointer(stackPointer); } } // --- [ vkCreateSemaphore ] --- /** Unsafe version of: {@link #vkCreateSemaphore CreateSemaphore} */ public static int nvkCreateSemaphore(VkDevice device, long pCreateInfo, long pAllocator, long pSemaphore) { long __functionAddress = device.getCapabilities().vkCreateSemaphore; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pSemaphore); } /** * Create a new queue semaphore object. * *
C Specification
* *

To create a semaphore, call:

* *
     * VkResult vkCreateSemaphore(
     *     VkDevice                                    device,
     *     const VkSemaphoreCreateInfo*                pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkSemaphore*                                pSemaphore);
* *
Description
* *

When created, the semaphore is in the unsignaled state.

* *
Valid Usage (Implicit)

  • {@code device} must be a valid {@code VkDevice} handle
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkSemaphoreCreateInfo} structure
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • {@code pSemaphore} must be a valid pointer to a {@code VkSemaphore} handle

Return Codes

On success, this command returns
  • {@link #VK_SUCCESS SUCCESS}

On failure, this command returns
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkSemaphoreCreateInfo}
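A minimal creation sketch (illustrative, not from the generated Javadoc), assuming a valid {@code device}; the semaphore name is a placeholder for whatever queue-ordering purpose it will serve:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkSemaphoreCreateInfo sci = VkSemaphoreCreateInfo.callocStack(stack)
            .sType(VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO);
        LongBuffer pSemaphore = stack.mallocLong(1);
        if (vkCreateSemaphore(device, sci, null, pSemaphore) != VK_SUCCESS) {
            throw new IllegalStateException("vkCreateSemaphore failed");
        }
        long imageAvailableSemaphore = pSemaphore.get(0); // created in the unsignaled state
    }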

* * @param device the logical device that creates the semaphore. * @param pCreateInfo a pointer to an instance of the {@link VkSemaphoreCreateInfo} structure which contains information about how the semaphore is to be created. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pSemaphore points to a handle in which the resulting semaphore object is returned. */ @NativeType("VkResult") public static int vkCreateSemaphore(VkDevice device, @NativeType("const VkSemaphoreCreateInfo *") VkSemaphoreCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkSemaphore *") LongBuffer pSemaphore) { if (CHECKS) { check(pSemaphore, 1); } return nvkCreateSemaphore(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pSemaphore)); } // --- [ vkDestroySemaphore ] --- /** Unsafe version of: {@link #vkDestroySemaphore DestroySemaphore} */ public static void nvkDestroySemaphore(VkDevice device, long semaphore, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroySemaphore; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), semaphore, pAllocator); } /** * Destroy a semaphore object. * *
C Specification
* *

To destroy a semaphore, call:

* *
     * void vkDestroySemaphore(
     *     VkDevice                                    device,
     *     VkSemaphore                                 semaphore,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted batches that refer to {@code semaphore} must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code semaphore} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code semaphore} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code semaphore} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code semaphore} must be a valid {@code VkSemaphore} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code semaphore} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code semaphore} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the semaphore. * @param semaphore the handle of the semaphore to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroySemaphore(VkDevice device, @NativeType("VkSemaphore") long semaphore, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroySemaphore(device, semaphore, memAddressSafe(pAllocator)); } // --- [ vkCreateEvent ] --- /** Unsafe version of: {@link #vkCreateEvent CreateEvent} */ public static int nvkCreateEvent(VkDevice device, long pCreateInfo, long pAllocator, long pEvent) { long __functionAddress = device.getCapabilities().vkCreateEvent; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pEvent); } /** * Create a new event object. * *
C Specification
* *

To create an event, call:

* *
     * VkResult vkCreateEvent(
     *     VkDevice                                    device,
     *     const VkEventCreateInfo*                    pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkEvent*                                    pEvent);
* *
Description
* *

When created, the event object is in the unsignaled state.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkEventCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pEvent} must be a valid pointer to a {@code VkEvent} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkEventCreateInfo}
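A minimal creation sketch (illustrative, not from the generated Javadoc), assuming a valid {@code device}:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkEventCreateInfo eci = VkEventCreateInfo.callocStack(stack)
            .sType(VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
        LongBuffer pEvent = stack.mallocLong(1);
        if (vkCreateEvent(device, eci, null, pEvent) != VK_SUCCESS) {
            throw new IllegalStateException("vkCreateEvent failed");
        }
        long event = pEvent.get(0); // created in the unsignaled state
    }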

* * @param device the logical device that creates the event. * @param pCreateInfo a pointer to an instance of the {@link VkEventCreateInfo} structure which contains information about how the event is to be created. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pEvent points to a handle in which the resulting event object is returned. */ @NativeType("VkResult") public static int vkCreateEvent(VkDevice device, @NativeType("const VkEventCreateInfo *") VkEventCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkEvent *") LongBuffer pEvent) { if (CHECKS) { check(pEvent, 1); } return nvkCreateEvent(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pEvent)); } // --- [ vkDestroyEvent ] --- /** Unsafe version of: {@link #vkDestroyEvent DestroyEvent} */ public static void nvkDestroyEvent(VkDevice device, long event, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyEvent; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), event, pAllocator); } /** * Destroy an event object. * *
C Specification
* *

To destroy an event, call:

* *
     * void vkDestroyEvent(
     *     VkDevice                                    device,
     *     VkEvent                                     event,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code event} must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code event} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code event} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code event} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code event} must be a valid {@code VkEvent} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code event} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code event} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the event. * @param event the handle of the event to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyEvent(VkDevice device, @NativeType("VkEvent") long event, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyEvent(device, event, memAddressSafe(pAllocator)); } // --- [ vkGetEventStatus ] --- /** * Retrieve the status of an event object. * *
C Specification
* *

To query the state of an event from the host, call:

* *
     * VkResult vkGetEventStatus(
     *     VkDevice                                    device,
     *     VkEvent                                     event);
* *
Description
* *

Upon success, {@link #vkGetEventStatus GetEventStatus} returns the state of the event object with the following return codes:

* *
Event Object Status Codes
  Status                               Meaning
  {@link #VK_EVENT_SET EVENT_SET}      The event specified by {@code event} is signaled.
  {@link #VK_EVENT_RESET EVENT_RESET}  The event specified by {@code event} is unsignaled.
* *

If a {@link #vkCmdSetEvent CmdSetEvent} or {@link #vkCmdResetEvent CmdResetEvent} command is in a command buffer that is in the pending state, then the value returned by this command may immediately be out of date.

* *

The state of an event can be updated by the host. The state of the event is immediately changed, and subsequent calls to {@link #vkGetEventStatus GetEventStatus} will return the new state. If an event is already in the requested state, then updating it to the same state has no effect.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code event} must be a valid {@code VkEvent} handle
  • *
  • {@code event} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_EVENT_SET EVENT_SET}
  • *
  • {@link #VK_EVENT_RESET EVENT_RESET}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
* * @param device the logical device that owns the event. * @param event the handle of the event to query. */ @NativeType("VkResult") public static int vkGetEventStatus(VkDevice device, @NativeType("VkEvent") long event) { long __functionAddress = device.getCapabilities().vkGetEventStatus; return callPJI(__functionAddress, device.address(), event); } // --- [ vkSetEvent ] --- /** * Set an event to signaled state. * *
C Specification
* *

To set the state of an event to signaled from the host, call:

* *
     * VkResult vkSetEvent(
     *     VkDevice                                    device,
     *     VkEvent                                     event);
* *
Description
* *

When {@link #vkSetEvent SetEvent} is executed on the host, it defines an event signal operation which sets the event to the signaled state.

* *

If {@code event} is already in the signaled state when {@link #vkSetEvent SetEvent} is executed, then {@link #vkSetEvent SetEvent} has no effect, and no event signal operation occurs.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code event} must be a valid {@code VkEvent} handle
  • *
  • {@code event} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code event} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* * @param device the logical device that owns the event. * @param event the event to set. */ @NativeType("VkResult") public static int vkSetEvent(VkDevice device, @NativeType("VkEvent") long event) { long __functionAddress = device.getCapabilities().vkSetEvent; return callPJI(__functionAddress, device.address(), event); } // --- [ vkResetEvent ] --- /** * Reset an event to non-signaled state. * *
C Specification
* *

To set the state of an event to unsignaled from the host, call:

* *
     * VkResult vkResetEvent(
     *     VkDevice                                    device,
     *     VkEvent                                     event);
* *
Description
* *

When {@link #vkResetEvent ResetEvent} is executed on the host, it defines an event unsignal operation which resets the event to the unsignaled state.

* *

If {@code event} is already in the unsignaled state when {@link #vkResetEvent ResetEvent} is executed, then {@link #vkResetEvent ResetEvent} has no effect, and no event unsignal operation occurs.

* *
Valid Usage
* *
    *
  • {@code event} must not be waited on by a {@link #vkCmdWaitEvents CmdWaitEvents} command that is currently executing
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code event} must be a valid {@code VkEvent} handle
  • *
  • {@code event} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code event} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
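A host-side round trip through the three host event commands above (illustrative; {@code event} is a handle created earlier, and host access to it is assumed to be externally synchronized):

    vkSetEvent(device, event);                              // event signal operation
    assert vkGetEventStatus(device, event) == VK_EVENT_SET; // state change is immediately visible to the host
    vkResetEvent(device, event);                            // event unsignal operation
    assert vkGetEventStatus(device, event) == VK_EVENT_RESET;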
* * @param device the logical device that owns the event. * @param event the event to reset. */ @NativeType("VkResult") public static int vkResetEvent(VkDevice device, @NativeType("VkEvent") long event) { long __functionAddress = device.getCapabilities().vkResetEvent; return callPJI(__functionAddress, device.address(), event); } // --- [ vkCreateQueryPool ] --- /** Unsafe version of: {@link #vkCreateQueryPool CreateQueryPool} */ public static int nvkCreateQueryPool(VkDevice device, long pCreateInfo, long pAllocator, long pQueryPool) { long __functionAddress = device.getCapabilities().vkCreateQueryPool; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pQueryPool); } /** * Create a new query pool object. * *
C Specification
* *

To create a query pool, call:

* *
     * VkResult vkCreateQueryPool(
     *     VkDevice                                    device,
     *     const VkQueryPoolCreateInfo*                pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkQueryPool*                                pQueryPool);
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkQueryPoolCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pQueryPool} must be a valid pointer to a {@code VkQueryPool} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkQueryPoolCreateInfo}
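A minimal creation sketch (illustrative, not from the generated Javadoc), assuming a valid {@code device}; the pool size of 16 is an arbitrary example:

    try (MemoryStack stack = MemoryStack.stackPush()) {
        VkQueryPoolCreateInfo qpci = VkQueryPoolCreateInfo.callocStack(stack)
            .sType(VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO)
            .queryType(VK_QUERY_TYPE_OCCLUSION)
            .queryCount(16);
        LongBuffer pQueryPool = stack.mallocLong(1);
        if (vkCreateQueryPool(device, qpci, null, pQueryPool) != VK_SUCCESS) {
            throw new IllegalStateException("vkCreateQueryPool failed");
        }
        long queryPool = pQueryPool.get(0);
    }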

* * @param device the logical device that creates the query pool. * @param pCreateInfo a pointer to an instance of the {@link VkQueryPoolCreateInfo} structure containing the number and type of queries to be managed by the pool. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pQueryPool a pointer to a {@code VkQueryPool} handle in which the resulting query pool object is returned. */ @NativeType("VkResult") public static int vkCreateQueryPool(VkDevice device, @NativeType("const VkQueryPoolCreateInfo *") VkQueryPoolCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkQueryPool *") LongBuffer pQueryPool) { if (CHECKS) { check(pQueryPool, 1); } return nvkCreateQueryPool(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pQueryPool)); } // --- [ vkDestroyQueryPool ] --- /** Unsafe version of: {@link #vkDestroyQueryPool DestroyQueryPool} */ public static void nvkDestroyQueryPool(VkDevice device, long queryPool, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyQueryPool; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), queryPool, pAllocator); } /** * Destroy a query pool object. * *
C Specification
* *

To destroy a query pool, call:

* *
     * void vkDestroyQueryPool(
     *     VkDevice                                    device,
     *     VkQueryPool                                 queryPool,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code queryPool} must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code queryPool} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code queryPool} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code queryPool} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code queryPool} must be a valid {@code VkQueryPool} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code queryPool} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code queryPool} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the query pool. * @param queryPool the query pool to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyQueryPool(VkDevice device, @NativeType("VkQueryPool") long queryPool, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyQueryPool(device, queryPool, memAddressSafe(pAllocator)); } // --- [ vkGetQueryPoolResults ] --- /** * Unsafe version of: {@link #vkGetQueryPoolResults GetQueryPoolResults} * * @param dataSize the size in bytes of the buffer pointed to by {@code pData}. */ public static int nvkGetQueryPoolResults(VkDevice device, long queryPool, int firstQuery, int queryCount, long dataSize, long pData, long stride, int flags) { long __functionAddress = device.getCapabilities().vkGetQueryPoolResults; return callPJPPJI(__functionAddress, device.address(), queryPool, firstQuery, queryCount, dataSize, pData, stride, flags); } /** * Copy results of queries in a query pool to a host memory region. * *
C Specification
* *

To retrieve status and results for a set of queries, call:

* *
     * VkResult vkGetQueryPoolResults(
     *     VkDevice                                    device,
     *     VkQueryPool                                 queryPool,
     *     uint32_t                                    firstQuery,
     *     uint32_t                                    queryCount,
     *     size_t                                      dataSize,
     *     void*                                       pData,
     *     VkDeviceSize                                stride,
     *     VkQueryResultFlags                          flags);
* *
Description
* *

If no bits are set in {@code flags}, and all requested queries are in the available state, results are written as an array of 32-bit unsigned integer values. The behavior when not all queries are available is described below.

* *

If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is not set and the result overflows a 32-bit value, the value may either wrap or saturate. Similarly, if {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is set and the result overflows a 64-bit value, the value may either wrap or saturate.

* *

If {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is set, Vulkan will wait for each query to be in the available state before retrieving the numerical results for that query. In this case, {@link #vkGetQueryPoolResults GetQueryPoolResults} is guaranteed to succeed and return {@link #VK_SUCCESS SUCCESS} if the queries become available in a finite time (i.e. if they have been issued and not reset). If queries will never finish (e.g. due to being reset but not issued), then {@link #vkGetQueryPoolResults GetQueryPoolResults} may not return in finite time.

* *

If {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} and {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} are both not set then no result values are written to {@code pData} for queries that are in the unavailable state at the time of the call, and {@link #vkGetQueryPoolResults GetQueryPoolResults} returns {@link #VK_NOT_READY NOT_READY}. However, availability state is still written to {@code pData} for those queries if {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} is set.

* *
Note
* *

Applications must take care to ensure that use of the {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} bit has the desired effect.

* *

For example, if a query has been used previously and a command buffer records the commands {@link #vkCmdResetQueryPool CmdResetQueryPool}, {@link #vkCmdBeginQuery CmdBeginQuery}, and {@link #vkCmdEndQuery CmdEndQuery} for that query, then the query will remain in the available state until the {@link #vkCmdResetQueryPool CmdResetQueryPool} command executes on a queue. Applications can use fences or events to ensure that a query has already been reset before checking for its results or availability status. Otherwise, a stale value could be returned from a previous use of the query.

* *

The above also applies when {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is used in combination with {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT}. In this case, the returned availability status may reflect the result of a previous use of the query unless the {@link #vkCmdResetQueryPool CmdResetQueryPool} command has been executed since the last use of the query.

*
* *
Note
* *

Applications can double-buffer query pool usage, with a pool per frame, and reset queries at the end of the frame in which they are read.

*
* *

If {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} is set, {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is not set, and the query's status is unavailable, an intermediate result value between zero and the final result value is written to {@code pData} for that query.

* *

{@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} must not be used if the pool's {@code queryType} is {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP}.

* *

If {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} is set, the final integer value written for each query is non-zero if the query's status was available or zero if the status was unavailable. When {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} is used, implementations must guarantee that if they return a non-zero availability value then the numerical results must be valid, assuming the results are not reset by a subsequent command.

* *
Note
* *

Satisfying this guarantee may require careful ordering by the application, e.g. to read the availability status before reading the results.

*
* *
Valid Usage
* *
    *
  • {@code firstQuery} must be less than the number of queries in {@code queryPool}
  • *
  • If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is not set in {@code flags} then {@code pData} and {@code stride} must be multiples of 4
  • *
  • If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is set in {@code flags} then {@code pData} and {@code stride} must be multiples of 8
  • *
  • The sum of {@code firstQuery} and {@code queryCount} must be less than or equal to the number of queries in {@code queryPool}
  • *
  • {@code dataSize} must be large enough to contain the result of each query, as described here
  • *
  • If the {@code queryType} used to create {@code queryPool} was {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP}, {@code flags} must not contain {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code queryPool} must be a valid {@code VkQueryPool} handle
  • *
  • {@code pData} must be a valid pointer to an array of {@code dataSize} bytes
  • *
  • {@code flags} must be a valid combination of {@code VkQueryResultFlagBits} values
  • *
  • {@code dataSize} must be greater than 0
  • *
  • {@code queryPool} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_NOT_READY NOT_READY}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
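A retrieval sketch using the {@code LongBuffer} overload defined later in this class (illustrative; {@code queryPool} is assumed to hold 16 occlusion queries that have all been issued):

    try (MemoryStack stack = MemoryStack.stackPush()) {
        LongBuffer results = stack.mallocLong(16);
        int err = vkGetQueryPoolResults(device, queryPool,
            0, 16,                                           // firstQuery, queryCount
            results,
            Long.BYTES,                                      // stride: one tightly packed uint64_t per query
            VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);
        if (err != VK_SUCCESS) {
            throw new IllegalStateException("vkGetQueryPoolResults returned " + err);
        }
        long samplesPassed = results.get(0); // result of the first query in the range
    }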
* * @param device the logical device that owns the query pool. * @param queryPool the query pool managing the queries containing the desired results. * @param firstQuery the initial query index. * @param queryCount the number of queries. {@code firstQuery} and {@code queryCount} together define a range of queries. For pipeline statistics queries, each query index in the pool contains one integer value for each bit that is enabled in {@link VkQueryPoolCreateInfo}{@code ::pipelineStatistics} when the pool is created. * @param pData a pointer to a user-allocated buffer where the results will be written * @param stride the stride in bytes between results for individual queries within {@code pData}. * @param flags a bitmask of {@code VkQueryResultFlagBits} specifying how and when results are returned. */ @NativeType("VkResult") public static int vkGetQueryPoolResults(VkDevice device, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int firstQuery, @NativeType("uint32_t") int queryCount, @NativeType("void *") ByteBuffer pData, @NativeType("VkDeviceSize") long stride, @NativeType("VkQueryResultFlags") int flags) { return nvkGetQueryPoolResults(device, queryPool, firstQuery, queryCount, pData.remaining(), memAddress(pData), stride, flags); } /** * Copy results of queries in a query pool to a host memory region. * *
C Specification
* *

To retrieve status and results for a set of queries, call:

* *
     * VkResult vkGetQueryPoolResults(
     *     VkDevice                                    device,
     *     VkQueryPool                                 queryPool,
     *     uint32_t                                    firstQuery,
     *     uint32_t                                    queryCount,
     *     size_t                                      dataSize,
     *     void*                                       pData,
     *     VkDeviceSize                                stride,
     *     VkQueryResultFlags                          flags);
* *
Description
* *

If no bits are set in {@code flags}, and all requested queries are in the available state, results are written as an array of 32-bit unsigned integer values. The behavior when not all queries are available is described below.

* *

If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is not set and the result overflows a 32-bit value, the value may either wrap or saturate. Similarly, if {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is set and the result overflows a 64-bit value, the value may either wrap or saturate.

* *

If {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is set, Vulkan will wait for each query to be in the available state before retrieving the numerical results for that query. In this case, {@link #vkGetQueryPoolResults GetQueryPoolResults} is guaranteed to succeed and return {@link #VK_SUCCESS SUCCESS} if the queries become available in a finite time (i.e. if they have been issued and not reset). If queries will never finish (e.g. due to being reset but not issued), then {@link #vkGetQueryPoolResults GetQueryPoolResults} may not return in finite time.

* *

If {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} and {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} are both not set then no result values are written to {@code pData} for queries that are in the unavailable state at the time of the call, and {@link #vkGetQueryPoolResults GetQueryPoolResults} returns {@link #VK_NOT_READY NOT_READY}. However, availability state is still written to {@code pData} for those queries if {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} is set.

* *
Note
* *

Applications must take care to ensure that use of the {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} bit has the desired effect.

* *

For example, if a query has been used previously and a command buffer records the commands {@link #vkCmdResetQueryPool CmdResetQueryPool}, {@link #vkCmdBeginQuery CmdBeginQuery}, and {@link #vkCmdEndQuery CmdEndQuery} for that query, then the query will remain in the available state until the {@link #vkCmdResetQueryPool CmdResetQueryPool} command executes on a queue. Applications can use fences or events to ensure that a query has already been reset before checking for its results or availability status. Otherwise, a stale value could be returned from a previous use of the query.

* *

The above also applies when {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is used in combination with {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT}. In this case, the returned availability status may reflect the result of a previous use of the query unless the {@link #vkCmdResetQueryPool CmdResetQueryPool} command has been executed since the last use of the query.

*
* *
Note
* *

Applications can double-buffer query pool usage, with a pool per frame, and reset queries at the end of the frame in which they are read.

*
* *

If {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} is set, {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is not set, and the query's status is unavailable, an intermediate result value between zero and the final result value is written to {@code pData} for that query.

* *

{@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} must not be used if the pool's {@code queryType} is {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP}.

* *

If {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} is set, the final integer value written for each query is non-zero if the query's status was available or zero if the status was unavailable. When {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} is used, implementations must guarantee that if they return a non-zero availability value then the numerical results must be valid, assuming the results are not reset by a subsequent command.

* *
Note
* *

Satisfying this guarantee may require careful ordering by the application, e.g. to read the availability status before reading the results.

*
* *
Valid Usage
* *
    *
  • {@code firstQuery} must be less than the number of queries in {@code queryPool}
  • *
  • If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is not set in {@code flags} then {@code pData} and {@code stride} must be multiples of 4
  • *
  • If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is set in {@code flags} then {@code pData} and {@code stride} must be multiples of 8
  • *
  • The sum of {@code firstQuery} and {@code queryCount} must be less than or equal to the number of queries in {@code queryPool}
  • *
  • {@code dataSize} must be large enough to contain the result of each query, as described here
  • *
  • If the {@code queryType} used to create {@code queryPool} was {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP}, {@code flags} must not contain {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code queryPool} must be a valid {@code VkQueryPool} handle
  • *
  • {@code pData} must be a valid pointer to an array of {@code dataSize} bytes
  • *
  • {@code flags} must be a valid combination of {@code VkQueryResultFlagBits} values
  • *
  • {@code dataSize} must be greater than 0
  • *
  • {@code queryPool} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_NOT_READY NOT_READY}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
* * @param device the logical device that owns the query pool. * @param queryPool the query pool managing the queries containing the desired results. * @param firstQuery the initial query index. * @param queryCount the number of queries. {@code firstQuery} and {@code queryCount} together define a range of queries. For pipeline statistics queries, each query index in the pool contains one integer value for each bit that is enabled in {@link VkQueryPoolCreateInfo}{@code ::pipelineStatistics} when the pool is created. * @param pData a pointer to a user-allocated buffer where the results will be written * @param stride the stride in bytes between results for individual queries within {@code pData}. * @param flags a bitmask of {@code VkQueryResultFlagBits} specifying how and when results are returned. */ @NativeType("VkResult") public static int vkGetQueryPoolResults(VkDevice device, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int firstQuery, @NativeType("uint32_t") int queryCount, @NativeType("void *") IntBuffer pData, @NativeType("VkDeviceSize") long stride, @NativeType("VkQueryResultFlags") int flags) { return nvkGetQueryPoolResults(device, queryPool, firstQuery, queryCount, pData.remaining() << 2, memAddress(pData), stride, flags); } /** * Copy results of queries in a query pool to a host memory region. * *
C Specification
* *

To retrieve status and results for a set of queries, call:

* *
     * VkResult vkGetQueryPoolResults(
     *     VkDevice                                    device,
     *     VkQueryPool                                 queryPool,
     *     uint32_t                                    firstQuery,
     *     uint32_t                                    queryCount,
     *     size_t                                      dataSize,
     *     void*                                       pData,
     *     VkDeviceSize                                stride,
     *     VkQueryResultFlags                          flags);
* *
Description
* *

If no bits are set in {@code flags}, and all requested queries are in the available state, results are written as an array of 32-bit unsigned integer values. The behavior when not all queries are available is described below.

* *

If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is not set and the result overflows a 32-bit value, the value may either wrap or saturate. Similarly, if {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is set and the result overflows a 64-bit value, the value may either wrap or saturate.

* *

If {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is set, Vulkan will wait for each query to be in the available state before retrieving the numerical results for that query. In this case, {@link #vkGetQueryPoolResults GetQueryPoolResults} is guaranteed to succeed and return {@link #VK_SUCCESS SUCCESS} if the queries become available in a finite time (i.e. if they have been issued and not reset). If queries will never finish (e.g. due to being reset but not issued), then {@link #vkGetQueryPoolResults GetQueryPoolResults} may not return in finite time.

* *

If {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} and {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} are both not set then no result values are written to {@code pData} for queries that are in the unavailable state at the time of the call, and {@link #vkGetQueryPoolResults GetQueryPoolResults} returns {@link #VK_NOT_READY NOT_READY}. However, availability state is still written to {@code pData} for those queries if {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} is set.

* *
Note
* *

Applications must take care to ensure that use of the {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} bit has the desired effect.

* *

For example, if a query has been used previously and a command buffer records the commands {@link #vkCmdResetQueryPool CmdResetQueryPool}, {@link #vkCmdBeginQuery CmdBeginQuery}, and {@link #vkCmdEndQuery CmdEndQuery} for that query, then the query will remain in the available state until the {@link #vkCmdResetQueryPool CmdResetQueryPool} command executes on a queue. Applications can use fences or events to ensure that a query has already been reset before checking for its results or availability status. Otherwise, a stale value could be returned from a previous use of the query.

* *

The above also applies when {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is used in combination with {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT}. In this case, the returned availability status may reflect the result of a previous use of the query unless the {@link #vkCmdResetQueryPool CmdResetQueryPool} command has been executed since the last use of the query.

*
* *
Note
* *

Applications can double-buffer query pool usage, with a pool per frame, and reset queries at the end of the frame in which they are read.

*
* *

If {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} is set, {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is not set, and the query's status is unavailable, an intermediate result value between zero and the final result value is written to {@code pData} for that query.

* *

{@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} must not be used if the pool's {@code queryType} is {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP}.

* *

If {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} is set, the final integer value written for each query is non-zero if the query's status was available or zero if the status was unavailable. When {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} is used, implementations must guarantee that if they return a non-zero availability value then the numerical results must be valid, assuming the results are not reset by a subsequent command.

* *
Note
* *

Satisfying this guarantee may require careful ordering by the application, e.g. to read the availability status before reading the results.

*
* *
Valid Usage
* *
    *
  • {@code firstQuery} must be less than the number of queries in {@code queryPool}
  • *
  • If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is not set in {@code flags} then {@code pData} and {@code stride} must be multiples of 4
  • *
  • If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is set in {@code flags} then {@code pData} and {@code stride} must be multiples of 8
  • *
  • The sum of {@code firstQuery} and {@code queryCount} must be less than or equal to the number of queries in {@code queryPool}
  • *
  • {@code dataSize} must be large enough to contain the result of each query, as described here
  • *
  • If the {@code queryType} used to create {@code queryPool} was {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP}, {@code flags} must not contain {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code queryPool} must be a valid {@code VkQueryPool} handle
  • *
  • {@code pData} must be a valid pointer to an array of {@code dataSize} bytes
  • *
  • {@code flags} must be a valid combination of {@code VkQueryResultFlagBits} values
  • *
  • {@code dataSize} must be greater than 0
  • *
  • {@code queryPool} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_NOT_READY NOT_READY}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST}
  • *
*
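For illustration, a minimal LWJGL usage sketch (not from the specification text) that reads {@code queryCount} 32-bit query results and waits for availability; {@code device}, {@code queryPool} and {@code queryCount} are placeholders assumed to exist already, and the stride of 4 matches the 32-bit result size implied by the {@code IntBuffer} overload.

     * try (MemoryStack stack = stackPush()) {
     *     IntBuffer results = stack.mallocInt(queryCount);
     *     // dataSize is derived from results.remaining(); stride is 4 bytes per 32-bit result
     *     int err = vkGetQueryPoolResults(device, queryPool, 0, queryCount, results, 4, VK_QUERY_RESULT_WAIT_BIT);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkGetQueryPoolResults returned " + err);
     *     }
     * }

For 64-bit results, the {@code LongBuffer} overload can be used instead, with a stride of 8 and {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} set in {@code flags}.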
* * @param device the logical device that owns the query pool. * @param queryPool the query pool managing the queries containing the desired results. * @param firstQuery the initial query index. * @param queryCount the number of queries. {@code firstQuery} and {@code queryCount} together define a range of queries. For pipeline statistics queries, each query index in the pool contains one integer value for each bit that is enabled in {@link VkQueryPoolCreateInfo}{@code ::pipelineStatistics} when the pool is created. * @param pData a pointer to a user-allocated buffer where the results will be written * @param stride the stride in bytes between results for individual queries within {@code pData}. * @param flags a bitmask of {@code VkQueryResultFlagBits} specifying how and when results are returned. */ @NativeType("VkResult") public static int vkGetQueryPoolResults(VkDevice device, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int firstQuery, @NativeType("uint32_t") int queryCount, @NativeType("void *") LongBuffer pData, @NativeType("VkDeviceSize") long stride, @NativeType("VkQueryResultFlags") int flags) { return nvkGetQueryPoolResults(device, queryPool, firstQuery, queryCount, pData.remaining() << 3, memAddress(pData), stride, flags); } // --- [ vkCreateBuffer ] --- /** Unsafe version of: {@link #vkCreateBuffer CreateBuffer} */ public static int nvkCreateBuffer(VkDevice device, long pCreateInfo, long pAllocator, long pBuffer) { long __functionAddress = device.getCapabilities().vkCreateBuffer; if (CHECKS) { VkBufferCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pBuffer); } /** * Create a new buffer object. * *
C Specification
* *

To create buffers, call:

* *
     * VkResult vkCreateBuffer(
     *     VkDevice                                    device,
     *     const VkBufferCreateInfo*                   pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkBuffer*                                   pBuffer);
* *
Valid Usage
* *
    *
  • If the {@code flags} member of {@code pCreateInfo} includes {@link #VK_BUFFER_CREATE_SPARSE_BINDING_BIT BUFFER_CREATE_SPARSE_BINDING_BIT}, creating this {@code VkBuffer} must not cause the total required sparse memory for all currently valid sparse resources on the device to exceed {@link VkPhysicalDeviceLimits}{@code ::sparseAddressSpaceSize}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkBufferCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pBuffer} must be a valid pointer to a {@code VkBuffer} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkBufferCreateInfo}
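For illustration, a minimal LWJGL sketch (not from the specification text) that creates a 64 KiB uniform buffer; {@code device} is assumed to exist, the size and usage flags are arbitrary placeholders, and error handling is reduced to a single check.

     * long buffer;
     * try (MemoryStack stack = stackPush()) {
     *     VkBufferCreateInfo info = VkBufferCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO)
     *         .size(65536)
     *         .usage(VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)
     *         .sharingMode(VK_SHARING_MODE_EXCLUSIVE);
     *     LongBuffer pBuffer = stack.mallocLong(1);
     *     if (vkCreateBuffer(device, info, null, pBuffer) != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateBuffer failed");
     *     }
     *     buffer = pBuffer.get(0);
     * }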

* * @param device the logical device that creates the buffer object. * @param pCreateInfo a pointer to an instance of the {@link VkBufferCreateInfo} structure containing parameters affecting creation of the buffer. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pBuffer points to a {@code VkBuffer} handle in which the resulting buffer object is returned. */ @NativeType("VkResult") public static int vkCreateBuffer(VkDevice device, @NativeType("const VkBufferCreateInfo *") VkBufferCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkBuffer *") LongBuffer pBuffer) { if (CHECKS) { check(pBuffer, 1); } return nvkCreateBuffer(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pBuffer)); } // --- [ vkDestroyBuffer ] --- /** Unsafe version of: {@link #vkDestroyBuffer DestroyBuffer} */ public static void nvkDestroyBuffer(VkDevice device, long buffer, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyBuffer; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), buffer, pAllocator); } /** * Destroy a buffer object. * *
C Specification
* *

To destroy a buffer, call:

* *
     * void vkDestroyBuffer(
     *     VkDevice                                    device,
     *     VkBuffer                                    buffer,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code buffer}, either directly or via a {@code VkBufferView}, must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code buffer} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code buffer} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code buffer} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code buffer} must be a valid {@code VkBuffer} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code buffer} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code buffer} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the buffer. * @param buffer the buffer to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyBuffer(VkDevice device, @NativeType("VkBuffer") long buffer, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyBuffer(device, buffer, memAddressSafe(pAllocator)); } // --- [ vkCreateBufferView ] --- /** Unsafe version of: {@link #vkCreateBufferView CreateBufferView} */ public static int nvkCreateBufferView(VkDevice device, long pCreateInfo, long pAllocator, long pView) { long __functionAddress = device.getCapabilities().vkCreateBufferView; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pView); } /** * Create a new buffer view object. * *
C Specification
* *

To create a buffer view, call:

* *
     * VkResult vkCreateBufferView(
     *     VkDevice                                    device,
     *     const VkBufferViewCreateInfo*               pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkBufferView*                               pView);
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkBufferViewCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pView} must be a valid pointer to a {@code VkBufferView} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkBufferViewCreateInfo}
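For illustration, a minimal LWJGL sketch (not from the specification text) that creates a view over an existing texel buffer; {@code device} and {@code buffer} are placeholders and the format is arbitrary.

     * long bufferView;
     * try (MemoryStack stack = stackPush()) {
     *     VkBufferViewCreateInfo info = VkBufferViewCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO)
     *         .buffer(buffer) // an existing VkBuffer created with a texel buffer usage flag
     *         .format(VK_FORMAT_R32_SFLOAT)
     *         .offset(0)
     *         .range(VK_WHOLE_SIZE);
     *     LongBuffer pView = stack.mallocLong(1);
     *     if (vkCreateBufferView(device, info, null, pView) != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateBufferView failed");
     *     }
     *     bufferView = pView.get(0);
     * }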

* * @param device the logical device that creates the buffer view. * @param pCreateInfo a pointer to an instance of the {@link VkBufferViewCreateInfo} structure containing parameters to be used to create the buffer. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pView points to a {@code VkBufferView} handle in which the resulting buffer view object is returned. */ @NativeType("VkResult") public static int vkCreateBufferView(VkDevice device, @NativeType("const VkBufferViewCreateInfo *") VkBufferViewCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkBufferView *") LongBuffer pView) { if (CHECKS) { check(pView, 1); } return nvkCreateBufferView(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pView)); } // --- [ vkDestroyBufferView ] --- /** Unsafe version of: {@link #vkDestroyBufferView DestroyBufferView} */ public static void nvkDestroyBufferView(VkDevice device, long bufferView, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyBufferView; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), bufferView, pAllocator); } /** * Destroy a buffer view object. * *
C Specification
* *

To destroy a buffer view, call:

* *
     * void vkDestroyBufferView(
     *     VkDevice                                    device,
     *     VkBufferView                                bufferView,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code bufferView} must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code bufferView} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code bufferView} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code bufferView} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code bufferView} must be a valid {@code VkBufferView} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code bufferView} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code bufferView} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the buffer view. * @param bufferView the buffer view to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyBufferView(VkDevice device, @NativeType("VkBufferView") long bufferView, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyBufferView(device, bufferView, memAddressSafe(pAllocator)); } // --- [ vkCreateImage ] --- /** Unsafe version of: {@link #vkCreateImage CreateImage} */ public static int nvkCreateImage(VkDevice device, long pCreateInfo, long pAllocator, long pImage) { long __functionAddress = device.getCapabilities().vkCreateImage; if (CHECKS) { VkImageCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pImage); } /** * Create a new image object. * *
C Specification
* *

To create images, call:

* *
     * VkResult vkCreateImage(
     *     VkDevice                                    device,
     *     const VkImageCreateInfo*                    pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkImage*                                    pImage);
* *
Valid Usage
* *
    *
  • If the {@code flags} member of {@code pCreateInfo} includes {@link #VK_IMAGE_CREATE_SPARSE_BINDING_BIT IMAGE_CREATE_SPARSE_BINDING_BIT}, creating this {@code VkImage} must not cause the total required sparse memory for all currently valid sparse resources on the device to exceed {@link VkPhysicalDeviceLimits}{@code ::sparseAddressSpaceSize}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkImageCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pImage} must be a valid pointer to a {@code VkImage} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkImageCreateInfo}
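For illustration, a minimal LWJGL sketch (not from the specification text) that creates a 256x256 2D sampled image; all parameter values are placeholders and {@code device} is assumed to exist.

     * long image;
     * try (MemoryStack stack = stackPush()) {
     *     VkImageCreateInfo info = VkImageCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO)
     *         .imageType(VK_IMAGE_TYPE_2D)
     *         .format(VK_FORMAT_R8G8B8A8_UNORM)
     *         .mipLevels(1)
     *         .arrayLayers(1)
     *         .samples(VK_SAMPLE_COUNT_1_BIT)
     *         .tiling(VK_IMAGE_TILING_OPTIMAL)
     *         .usage(VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT)
     *         .sharingMode(VK_SHARING_MODE_EXCLUSIVE)
     *         .initialLayout(VK_IMAGE_LAYOUT_UNDEFINED);
     *     info.extent().width(256).height(256).depth(1);
     *     LongBuffer pImage = stack.mallocLong(1);
     *     if (vkCreateImage(device, info, null, pImage) != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateImage failed");
     *     }
     *     image = pImage.get(0);
     * }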

* * @param device the logical device that creates the image. * @param pCreateInfo a pointer to an instance of the {@link VkImageCreateInfo} structure containing parameters to be used to create the image. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pImage points to a {@code VkImage} handle in which the resulting image object is returned. */ @NativeType("VkResult") public static int vkCreateImage(VkDevice device, @NativeType("const VkImageCreateInfo *") VkImageCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkImage *") LongBuffer pImage) { if (CHECKS) { check(pImage, 1); } return nvkCreateImage(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pImage)); } // --- [ vkDestroyImage ] --- /** Unsafe version of: {@link #vkDestroyImage DestroyImage} */ public static void nvkDestroyImage(VkDevice device, long image, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyImage; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), image, pAllocator); } /** * Destroy an image object. * *
C Specification
* *

To destroy an image, call:

* *
     * void vkDestroyImage(
     *     VkDevice                                    device,
     *     VkImage                                     image,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code image}, either directly or via a {@code VkImageView}, must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code image} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code image} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code image} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code image} must be a valid {@code VkImage} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code image} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code image} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the image. * @param image the image to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyImage(VkDevice device, @NativeType("VkImage") long image, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyImage(device, image, memAddressSafe(pAllocator)); } // --- [ vkGetImageSubresourceLayout ] --- /** Unsafe version of: {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout} */ public static void nvkGetImageSubresourceLayout(VkDevice device, long image, long pSubresource, long pLayout) { long __functionAddress = device.getCapabilities().vkGetImageSubresourceLayout; callPJPPV(__functionAddress, device.address(), image, pSubresource, pLayout); } /** * Retrieve information about an image subresource. * *
C Specification
* *

To query the host access layout of an image subresource for an image created with linear tiling, call:

* *
     * void vkGetImageSubresourceLayout(
     *     VkDevice                                    device,
     *     VkImage                                     image,
     *     const VkImageSubresource*                   pSubresource,
     *     VkSubresourceLayout*                        pLayout);
* *
Description
* *

If the {@code VkFormat} of {@code image} is a multi-planar format, {@link #vkGetImageSubresourceLayout GetImageSubresourceLayout} describes one plane of the image.

* *

{@link #vkGetImageSubresourceLayout GetImageSubresourceLayout} is invariant for the lifetime of a single image.

* *
Valid Usage
* *
    *
  • {@code image} must have been created with {@code tiling} equal to {@link #VK_IMAGE_TILING_LINEAR IMAGE_TILING_LINEAR}
  • *
  • The {@code aspectMask} member of {@code pSubresource} must only have a single bit set
  • *
  • The {@code mipLevel} member of {@code pSubresource} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • The {@code arrayLayer} member of {@code pSubresource} must be less than the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • If the {@code format} of {@code image} is a multi-planar format with two planes, the {@code aspectMask} member of {@code pSubresource} must be {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} or {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR}
  • *
  • If the {@code format} of {@code image} is a multi-planar format with three planes, the {@code aspectMask} member of {@code pSubresource} must be {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR}, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} or {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code image} must be a valid {@code VkImage} handle
  • *
  • {@code pSubresource} must be a valid pointer to a valid {@link VkImageSubresource} structure
  • *
  • {@code pLayout} must be a valid pointer to a {@link VkSubresourceLayout} structure
  • *
  • {@code image} must have been created, allocated, or retrieved from {@code device}
  • *
* *
See Also
* *

{@link VkImageSubresource}, {@link VkSubresourceLayout}
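For illustration, a minimal LWJGL sketch (not from the specification text) that queries the row pitch of mip level 0 of a linearly tiled color image; {@code device} and {@code linearImage} are placeholders.

     * try (MemoryStack stack = stackPush()) {
     *     VkImageSubresource subresource = VkImageSubresource.callocStack(stack)
     *         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
     *         .mipLevel(0)
     *         .arrayLayer(0);
     *     VkSubresourceLayout layout = VkSubresourceLayout.callocStack(stack);
     *     vkGetImageSubresourceLayout(device, linearImage, subresource, layout);
     *     long offset   = layout.offset();   // byte offset of the subresource within the image
     *     long rowPitch = layout.rowPitch(); // bytes between successive rows of texels
     * }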

* * @param device the logical device that owns the image. * @param image the image whose layout is being queried. * @param pSubresource a pointer to a {@link VkImageSubresource} structure selecting a specific image for the image subresource. * @param pLayout points to a {@link VkSubresourceLayout} structure in which the layout is returned. */ public static void vkGetImageSubresourceLayout(VkDevice device, @NativeType("VkImage") long image, @NativeType("const VkImageSubresource *") VkImageSubresource pSubresource, @NativeType("VkSubresourceLayout *") VkSubresourceLayout pLayout) { nvkGetImageSubresourceLayout(device, image, pSubresource.address(), pLayout.address()); } // --- [ vkCreateImageView ] --- /** Unsafe version of: {@link #vkCreateImageView CreateImageView} */ public static int nvkCreateImageView(VkDevice device, long pCreateInfo, long pAllocator, long pView) { long __functionAddress = device.getCapabilities().vkCreateImageView; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pView); } /** * Create an image view from an existing image. * *
C Specification
* *

To create an image view, call:

* *
     * VkResult vkCreateImageView(
     *     VkDevice                                    device,
     *     const VkImageViewCreateInfo*                pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkImageView*                                pView);
* *
Description
* *

Some of the image creation parameters are inherited by the view. In particular, image view creation inherits the implicit parameter {@code usage} specifying the allowed usages of the image view that, by default, takes the value of the corresponding {@code usage} parameter specified in {@link VkImageCreateInfo} at image creation time. This implicit parameter can be overridden by chaining a {@link VkImageViewUsageCreateInfoKHR} structure through the {@code pNext} member to {@link VkImageViewCreateInfo} as described later in this section.

* *

The remaining parameters are contained in {@code pCreateInfo}.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkImageViewCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pView} must be a valid pointer to a {@code VkImageView} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkImageViewCreateInfo}
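For illustration, a minimal LWJGL sketch (not from the specification text) that creates a 2D color view of an existing image; {@code device}, {@code image} and the format are placeholders, and the component mapping is left at the identity default of a zero-initialized struct.

     * long imageView;
     * try (MemoryStack stack = stackPush()) {
     *     VkImageViewCreateInfo info = VkImageViewCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO)
     *         .image(image)
     *         .viewType(VK_IMAGE_VIEW_TYPE_2D)
     *         .format(VK_FORMAT_R8G8B8A8_UNORM);
     *     info.subresourceRange()
     *         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
     *         .baseMipLevel(0)
     *         .levelCount(1)
     *         .baseArrayLayer(0)
     *         .layerCount(1);
     *     LongBuffer pView = stack.mallocLong(1);
     *     if (vkCreateImageView(device, info, null, pView) != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateImageView failed");
     *     }
     *     imageView = pView.get(0);
     * }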

* * @param device the logical device that creates the image view. * @param pCreateInfo a pointer to an instance of the {@link VkImageViewCreateInfo} structure containing parameters to be used to create the image view. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pView points to a {@code VkImageView} handle in which the resulting image view object is returned. */ @NativeType("VkResult") public static int vkCreateImageView(VkDevice device, @NativeType("const VkImageViewCreateInfo *") VkImageViewCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkImageView *") LongBuffer pView) { if (CHECKS) { check(pView, 1); } return nvkCreateImageView(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pView)); } // --- [ vkDestroyImageView ] --- /** Unsafe version of: {@link #vkDestroyImageView DestroyImageView} */ public static void nvkDestroyImageView(VkDevice device, long imageView, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyImageView; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), imageView, pAllocator); } /** * Destroy an image view object. * *
C Specification
* *

To destroy an image view, call:

* *
     * void vkDestroyImageView(
     *     VkDevice                                    device,
     *     VkImageView                                 imageView,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code imageView} must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code imageView} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code imageView} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code imageView} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code imageView} must be a valid {@code VkImageView} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code imageView} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code imageView} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the image view. * @param imageView the image view to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyImageView(VkDevice device, @NativeType("VkImageView") long imageView, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyImageView(device, imageView, memAddressSafe(pAllocator)); } // --- [ vkCreateShaderModule ] --- /** Unsafe version of: {@link #vkCreateShaderModule CreateShaderModule} */ public static int nvkCreateShaderModule(VkDevice device, long pCreateInfo, long pAllocator, long pShaderModule) { long __functionAddress = device.getCapabilities().vkCreateShaderModule; if (CHECKS) { VkShaderModuleCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pShaderModule); } /** * Creates a new shader module object. * *
C Specification
* *

To create a shader module, call:

* *
     * VkResult vkCreateShaderModule(
     *     VkDevice                                    device,
     *     const VkShaderModuleCreateInfo*             pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkShaderModule*                             pShaderModule);
* *
Description
* *

Once a shader module has been created, any entry points it contains can be used in pipeline shader stages as described in Compute Pipelines and Graphics Pipelines.

* *

If the shader stage fails to compile, {@link NVGLSLShader#VK_ERROR_INVALID_SHADER_NV ERROR_INVALID_SHADER_NV} will be generated and the compile log will be reported back to the application by {@link EXTDebugReport VK_EXT_debug_report} if enabled.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkShaderModuleCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pShaderModule} must be a valid pointer to a {@code VkShaderModule} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link NVGLSLShader#VK_ERROR_INVALID_SHADER_NV ERROR_INVALID_SHADER_NV}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkShaderModuleCreateInfo}
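For illustration, a minimal LWJGL sketch (not from the specification text) that wraps a SPIR-V binary in a shader module; {@code device} is assumed to exist and {@code spirv} stands for an application-loaded {@code ByteBuffer} holding the SPIR-V code (size a multiple of 4).

     * long shaderModule;
     * try (MemoryStack stack = stackPush()) {
     *     VkShaderModuleCreateInfo info = VkShaderModuleCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO)
     *         .pCode(spirv); // also sets codeSize from the buffer's remaining bytes
     *     LongBuffer pModule = stack.mallocLong(1);
     *     if (vkCreateShaderModule(device, info, null, pModule) != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateShaderModule failed");
     *     }
     *     shaderModule = pModule.get(0);
     * }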

* * @param device the logical device that creates the shader module. * @param pCreateInfo parameter is a pointer to an instance of the {@link VkShaderModuleCreateInfo} structure. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pShaderModule points to a {@code VkShaderModule} handle in which the resulting shader module object is returned. */ @NativeType("VkResult") public static int vkCreateShaderModule(VkDevice device, @NativeType("const VkShaderModuleCreateInfo *") VkShaderModuleCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkShaderModule *") LongBuffer pShaderModule) { if (CHECKS) { check(pShaderModule, 1); } return nvkCreateShaderModule(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pShaderModule)); } // --- [ vkDestroyShaderModule ] --- /** Unsafe version of: {@link #vkDestroyShaderModule DestroyShaderModule} */ public static void nvkDestroyShaderModule(VkDevice device, long shaderModule, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyShaderModule; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), shaderModule, pAllocator); } /** * Destroy a shader module module. * *
C Specification
* *

To destroy a shader module, call:

* *
     * void vkDestroyShaderModule(
     *     VkDevice                                    device,
     *     VkShaderModule                              shaderModule,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Description
* *

A shader module can be destroyed while pipelines created using its shaders are still in use.

* *
Valid Usage
* *
    *
  • If {@link VkAllocationCallbacks} were provided when {@code shaderModule} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code shaderModule} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code shaderModule} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code shaderModule} must be a valid {@code VkShaderModule} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code shaderModule} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code shaderModule} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the shader module. * @param shaderModule the handle of the shader module to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyShaderModule(VkDevice device, @NativeType("VkShaderModule") long shaderModule, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyShaderModule(device, shaderModule, memAddressSafe(pAllocator)); } // --- [ vkCreatePipelineCache ] --- /** Unsafe version of: {@link #vkCreatePipelineCache CreatePipelineCache} */ public static int nvkCreatePipelineCache(VkDevice device, long pCreateInfo, long pAllocator, long pPipelineCache) { long __functionAddress = device.getCapabilities().vkCreatePipelineCache; if (CHECKS) { VkPipelineCacheCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pPipelineCache); } /** * Creates a new pipeline cache. * *
C Specification
* *

To create pipeline cache objects, call:

* *
     * VkResult vkCreatePipelineCache(
     *     VkDevice                                    device,
     *     const VkPipelineCacheCreateInfo*            pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkPipelineCache*                            pPipelineCache);
* *
Description
* *
Note
* *

Applications can track and manage the total host memory size of a pipeline cache object using the {@code pAllocator}. Applications can limit the amount of data retrieved from a pipeline cache object in {@link #vkGetPipelineCacheData GetPipelineCacheData}. Implementations should not internally limit the total number of entries added to a pipeline cache object or the total host memory consumed.

*
* *

Once created, a pipeline cache can be passed to the {@link #vkCreateGraphicsPipelines CreateGraphicsPipelines} and {@link #vkCreateComputePipelines CreateComputePipelines} commands. If the pipeline cache passed into these commands is not {@link #VK_NULL_HANDLE NULL_HANDLE}, the implementation will query it for possible reuse opportunities and update it with new content. The use of the pipeline cache object in these commands is internally synchronized, and the same pipeline cache object can be used in multiple threads simultaneously.

* *
Note
* *

Implementations should make every effort to limit any critical sections to the actual accesses to the cache, which is expected to be significantly shorter than the duration of the {@link #vkCreateGraphicsPipelines CreateGraphicsPipelines} and {@link #vkCreateComputePipelines CreateComputePipelines} commands.

*
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkPipelineCacheCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pPipelineCache} must be a valid pointer to a {@code VkPipelineCache} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkPipelineCacheCreateInfo}
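For illustration, a minimal LWJGL sketch (not from the specification text) that creates a pipeline cache, optionally seeded with data saved by a previous run; {@code device} is assumed to exist and {@code previousData} is a placeholder {@code ByteBuffer} that may be {@code null}.

     * long pipelineCache;
     * try (MemoryStack stack = stackPush()) {
     *     VkPipelineCacheCreateInfo info = VkPipelineCacheCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO)
     *         .pInitialData(previousData); // null leaves the cache empty
     *     LongBuffer pCache = stack.mallocLong(1);
     *     if (vkCreatePipelineCache(device, info, null, pCache) != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreatePipelineCache failed");
     *     }
     *     pipelineCache = pCache.get(0);
     * }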

* * @param device the logical device that creates the pipeline cache object. * @param pCreateInfo a pointer to a {@link VkPipelineCacheCreateInfo} structure that contains the initial parameters for the pipeline cache object. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pPipelineCache a pointer to a {@code VkPipelineCache} handle in which the resulting pipeline cache object is returned. */ @NativeType("VkResult") public static int vkCreatePipelineCache(VkDevice device, @NativeType("const VkPipelineCacheCreateInfo *") VkPipelineCacheCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkPipelineCache *") LongBuffer pPipelineCache) { if (CHECKS) { check(pPipelineCache, 1); } return nvkCreatePipelineCache(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pPipelineCache)); } // --- [ vkDestroyPipelineCache ] --- /** Unsafe version of: {@link #vkDestroyPipelineCache DestroyPipelineCache} */ public static void nvkDestroyPipelineCache(VkDevice device, long pipelineCache, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyPipelineCache; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), pipelineCache, pAllocator); } /** * Destroy a pipeline cache object. * *
C Specification
* *

To destroy a pipeline cache, call:

* *
     * void vkDestroyPipelineCache(
     *     VkDevice                                    device,
     *     VkPipelineCache                             pipelineCache,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • If {@link VkAllocationCallbacks} were provided when {@code pipelineCache} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code pipelineCache} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code pipelineCache} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code pipelineCache} must be a valid {@code VkPipelineCache} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code pipelineCache} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code pipelineCache} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the pipeline cache object. * @param pipelineCache the handle of the pipeline cache to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyPipelineCache(VkDevice device, @NativeType("VkPipelineCache") long pipelineCache, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyPipelineCache(device, pipelineCache, memAddressSafe(pAllocator)); } // --- [ vkGetPipelineCacheData ] --- /** * Unsafe version of: {@link #vkGetPipelineCacheData GetPipelineCacheData} * * @param pDataSize a pointer to a value related to the amount of data in the pipeline cache, as described below. */ public static int nvkGetPipelineCacheData(VkDevice device, long pipelineCache, long pDataSize, long pData) { long __functionAddress = device.getCapabilities().vkGetPipelineCacheData; return callPJPPI(__functionAddress, device.address(), pipelineCache, pDataSize, pData); } /** * Get the data store from a pipeline cache. * *
C Specification
* *

Data can be retrieved from a pipeline cache object using the command:

* *
     * VkResult vkGetPipelineCacheData(
     *     VkDevice                                    device,
     *     VkPipelineCache                             pipelineCache,
     *     size_t*                                     pDataSize,
     *     void*                                       pData);
* *
Description
* *

If {@code pData} is {@code NULL}, then the maximum size of the data that can be retrieved from the pipeline cache, in bytes, is returned in {@code pDataSize}. Otherwise, {@code pDataSize} must point to a variable set by the user to the size of the buffer, in bytes, pointed to by {@code pData}, and on return the variable is overwritten with the amount of data actually written to {@code pData}.

* *

If {@code pDataSize} is less than the maximum size that can be retrieved by the pipeline cache, at most {@code pDataSize} bytes will be written to {@code pData}, and {@link #vkGetPipelineCacheData GetPipelineCacheData} will return {@link #VK_INCOMPLETE INCOMPLETE}. Any data written to {@code pData} is valid and can be provided as the {@code pInitialData} member of the {@link VkPipelineCacheCreateInfo} structure passed to {@link #vkCreatePipelineCache CreatePipelineCache}.

* *

Two calls to {@link #vkGetPipelineCacheData GetPipelineCacheData} with the same parameters must retrieve the same data unless a command that modifies the contents of the cache is called between them.

* *

Applications can store the data retrieved from the pipeline cache, and use these data, possibly in a future run of the application, to populate new pipeline cache objects. The results of pipeline compiles, however, may depend on the vendor ID, device ID, driver version, and other details of the device. To enable applications to detect when previously retrieved data is incompatible with the device, the initial bytes written to {@code pData} must be a header consisting of the following members:

* *
Layout for pipeline cache header version {@link #VK_PIPELINE_CACHE_HEADER_VERSION_ONE PIPELINE_CACHE_HEADER_VERSION_ONE}:

Offset | Size                            | Meaning
0      | 4                               | length in bytes of the entire pipeline cache header written as a stream of bytes, with the least significant byte first
4      | 4                               | a {@code VkPipelineCacheHeaderVersion} value written as a stream of bytes, with the least significant byte first
8      | 4                               | a vendor ID equal to {@link VkPhysicalDeviceProperties}{@code ::vendorID} written as a stream of bytes, with the least significant byte first
12     | 4                               | a device ID equal to {@link VkPhysicalDeviceProperties}{@code ::deviceID} written as a stream of bytes, with the least significant byte first
16     | {@link #VK_UUID_SIZE UUID_SIZE} | a pipeline cache ID equal to {@link VkPhysicalDeviceProperties}{@code ::pipelineCacheUUID}
* *

The first four bytes encode the length of the entire pipeline cache header, in bytes. This value includes all fields in the header including the pipeline cache version field and the size of the length field.

* *

The next four bytes encode the pipeline cache version, as described for {@code VkPipelineCacheHeaderVersion}. A consumer of the pipeline cache should use the cache version to interpret the remainder of the cache header.

* *

If {@code pDataSize} is less than what is necessary to store this header, nothing will be written to {@code pData} and zero will be written to {@code pDataSize}.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pipelineCache} must be a valid {@code VkPipelineCache} handle
  • *
  • {@code pDataSize} must be a valid pointer to a {@code size_t} value
  • *
  • If the value referenced by {@code pDataSize} is not 0, and {@code pData} is not {@code NULL}, {@code pData} must be a valid pointer to an array of {@code pDataSize} bytes
  • *
  • {@code pipelineCache} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
  • {@link #VK_INCOMPLETE INCOMPLETE}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
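For illustration, a minimal LWJGL sketch (not from the specification text) of the usual two-call pattern: query the size, allocate, then retrieve the data and sanity-check the header against the current device. {@code device}, {@code pipelineCache} and {@code props} (a populated {@link VkPhysicalDeviceProperties}) are placeholders, and the header reads assume a little-endian host.

     * try (MemoryStack stack = stackPush()) {
     *     PointerBuffer pSize = stack.mallocPointer(1);
     *     vkGetPipelineCacheData(device, pipelineCache, pSize, null);     // first call: size only
     *     ByteBuffer data = memAlloc((int)pSize.get(0));
     *     try {
     *         vkGetPipelineCacheData(device, pipelineCache, pSize, data); // second call: the data
     *         int headerVersion = data.getInt(4);
     *         int vendorID      = data.getInt(8);
     *         int deviceID      = data.getInt(12);
     *         boolean reusable = headerVersion == VK_PIPELINE_CACHE_HEADER_VERSION_ONE
     *             && vendorID == props.vendorID()
     *             && deviceID == props.deviceID();
     *         // if reusable, the bytes can be persisted and fed to vkCreatePipelineCache later
     *     } finally {
     *         memFree(data);
     *     }
     * }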
* * @param device the logical device that owns the pipeline cache. * @param pipelineCache the pipeline cache to retrieve data from. * @param pDataSize a pointer to a value related to the amount of data in the pipeline cache, as described below. * @param pData either {@code NULL} or a pointer to a buffer. */ @NativeType("VkResult") public static int vkGetPipelineCacheData(VkDevice device, @NativeType("VkPipelineCache") long pipelineCache, @NativeType("size_t *") PointerBuffer pDataSize, @Nullable @NativeType("void *") ByteBuffer pData) { if (CHECKS) { check(pDataSize, 1); checkSafe(pData, pDataSize.get(pDataSize.position())); } return nvkGetPipelineCacheData(device, pipelineCache, memAddress(pDataSize), memAddressSafe(pData)); } // --- [ vkMergePipelineCaches ] --- /** * Unsafe version of: {@link #vkMergePipelineCaches MergePipelineCaches} * * @param srcCacheCount the length of the {@code pSrcCaches} array. */ public static int nvkMergePipelineCaches(VkDevice device, long dstCache, int srcCacheCount, long pSrcCaches) { long __functionAddress = device.getCapabilities().vkMergePipelineCaches; return callPJPI(__functionAddress, device.address(), dstCache, srcCacheCount, pSrcCaches); } /** * Combine the data stores of pipeline caches. * *
C Specification
* *

Pipeline cache objects can be merged using the command:

* *
     * VkResult vkMergePipelineCaches(
     *     VkDevice                                    device,
     *     VkPipelineCache                             dstCache,
     *     uint32_t                                    srcCacheCount,
     *     const VkPipelineCache*                      pSrcCaches);
* *
Description
* *
Note
* *

The details of the merge operation are implementation dependent, but implementations should merge the contents of the specified pipeline caches and prune duplicate entries.

*
* *
Valid Usage
* *
    *
  • {@code dstCache} must not appear in the list of source caches
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code dstCache} must be a valid {@code VkPipelineCache} handle
  • *
  • {@code pSrcCaches} must be a valid pointer to an array of {@code srcCacheCount} valid {@code VkPipelineCache} handles
  • *
  • {@code srcCacheCount} must be greater than 0
  • *
  • {@code dstCache} must have been created, allocated, or retrieved from {@code device}
  • *
  • Each element of {@code pSrcCaches} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code dstCache} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
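For illustration, a minimal LWJGL sketch (not from the specification text) that merges two worker-thread caches into a main cache; {@code device}, {@code mainCache}, {@code workerCacheA} and {@code workerCacheB} are placeholders.

     * try (MemoryStack stack = stackPush()) {
     *     LongBuffer pSrcCaches = stack.longs(workerCacheA, workerCacheB);
     *     if (vkMergePipelineCaches(device, mainCache, pSrcCaches) != VK_SUCCESS) {
     *         throw new IllegalStateException("vkMergePipelineCaches failed");
     *     }
     * }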
* * @param device the logical device that owns the pipeline cache objects. * @param dstCache the handle of the pipeline cache to merge results into. * @param pSrcCaches an array of pipeline cache handles, which will be merged into {@code dstCache}. The previous contents of {@code dstCache} are included after the merge. */ @NativeType("VkResult") public static int vkMergePipelineCaches(VkDevice device, @NativeType("VkPipelineCache") long dstCache, @NativeType("const VkPipelineCache *") LongBuffer pSrcCaches) { return nvkMergePipelineCaches(device, dstCache, pSrcCaches.remaining(), memAddress(pSrcCaches)); } // --- [ vkCreateGraphicsPipelines ] --- /** * Unsafe version of: {@link #vkCreateGraphicsPipelines CreateGraphicsPipelines} * * @param createInfoCount the length of the {@code pCreateInfos} and {@code pPipelines} arrays. */ public static int nvkCreateGraphicsPipelines(VkDevice device, long pipelineCache, int createInfoCount, long pCreateInfos, long pAllocator, long pPipelines) { long __functionAddress = device.getCapabilities().vkCreateGraphicsPipelines; if (CHECKS) { VkGraphicsPipelineCreateInfo.validate(pCreateInfos, createInfoCount); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPJPPPI(__functionAddress, device.address(), pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); } /** * Create graphics pipelines. * *
C Specification
* *

To create graphics pipelines, call:

* *
     * VkResult vkCreateGraphicsPipelines(
     *     VkDevice                                    device,
     *     VkPipelineCache                             pipelineCache,
     *     uint32_t                                    createInfoCount,
     *     const VkGraphicsPipelineCreateInfo*         pCreateInfos,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkPipeline*                                 pPipelines);
* *
Description
* *

The {@link VkGraphicsPipelineCreateInfo} structure includes an array of shader create info structures containing all the desired active shader stages, as well as creation info to define all relevant fixed-function stages, and a pipeline layout.

* *
Valid Usage
* *
    *
  • If the {@code flags} member of any element of {@code pCreateInfos} contains the {@link #VK_PIPELINE_CREATE_DERIVATIVE_BIT PIPELINE_CREATE_DERIVATIVE_BIT} flag, and the {@code basePipelineIndex} member of that same element is not {@code -1}, {@code basePipelineIndex} must be less than the index into {@code pCreateInfos} that corresponds to that element
  • *
  • If the {@code flags} member of any element of {@code pCreateInfos} contains the {@link #VK_PIPELINE_CREATE_DERIVATIVE_BIT PIPELINE_CREATE_DERIVATIVE_BIT} flag, the base pipeline must have been created with the {@link #VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT} flag set
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code pipelineCache} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code pipelineCache} must be a valid {@code VkPipelineCache} handle
  • *
  • {@code pCreateInfos} must be a valid pointer to an array of {@code createInfoCount} valid {@link VkGraphicsPipelineCreateInfo} structures
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pPipelines} must be a valid pointer to an array of {@code createInfoCount} {@code VkPipeline} handles
  • *
  • {@code createInfoCount} must be greater than 0
  • *
  • If {@code pipelineCache} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link NVGLSLShader#VK_ERROR_INVALID_SHADER_NV ERROR_INVALID_SHADER_NV}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkGraphicsPipelineCreateInfo}
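For illustration, a compact LWJGL sketch (not from the specification text) of the call shape only; it assumes the shader stages ({@code stages}), the fixed-function state structs, {@code pipelineLayout}, {@code renderPass} and {@code pipelineCache} have been prepared elsewhere, since a complete graphics pipeline description is too long to reproduce here.

     * long graphicsPipeline;
     * try (MemoryStack stack = stackPush()) {
     *     VkGraphicsPipelineCreateInfo.Buffer infos = VkGraphicsPipelineCreateInfo.callocStack(1, stack);
     *     infos.get(0)
     *         .sType(VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO)
     *         .pStages(stages)                         // VkPipelineShaderStageCreateInfo.Buffer, prepared elsewhere
     *         .pVertexInputState(vertexInputState)
     *         .pInputAssemblyState(inputAssemblyState)
     *         .pViewportState(viewportState)
     *         .pRasterizationState(rasterizationState)
     *         .pMultisampleState(multisampleState)
     *         .pColorBlendState(colorBlendState)
     *         .layout(pipelineLayout)
     *         .renderPass(renderPass)
     *         .subpass(0)
     *         .basePipelineIndex(-1);
     *     LongBuffer pPipelines = stack.mallocLong(infos.remaining()); // one handle per create info
     *     if (vkCreateGraphicsPipelines(device, pipelineCache, infos, null, pPipelines) != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateGraphicsPipelines failed");
     *     }
     *     graphicsPipeline = pPipelines.get(0);
     * }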

* * @param device the logical device that creates the graphics pipelines. * @param pipelineCache either {@link #VK_NULL_HANDLE NULL_HANDLE}, indicating that pipeline caching is disabled; or the handle of a valid pipeline cache object, in which case use of that cache is enabled for the duration of the command. * @param pCreateInfos an array of {@link VkGraphicsPipelineCreateInfo} structures. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pPipelines a pointer to an array in which the resulting graphics pipeline objects are returned. */ @NativeType("VkResult") public static int vkCreateGraphicsPipelines(VkDevice device, @NativeType("VkPipelineCache") long pipelineCache, @NativeType("const VkGraphicsPipelineCreateInfo *") VkGraphicsPipelineCreateInfo.Buffer pCreateInfos, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkPipeline *") LongBuffer pPipelines) { if (CHECKS) { check(pPipelines, pCreateInfos.remaining()); } return nvkCreateGraphicsPipelines(device, pipelineCache, pCreateInfos.remaining(), pCreateInfos.address(), memAddressSafe(pAllocator), memAddress(pPipelines)); } // --- [ vkCreateComputePipelines ] --- /** * Unsafe version of: {@link #vkCreateComputePipelines CreateComputePipelines} * * @param createInfoCount the length of the {@code pCreateInfos} and {@code pPipelines} arrays. */ public static int nvkCreateComputePipelines(VkDevice device, long pipelineCache, int createInfoCount, long pCreateInfos, long pAllocator, long pPipelines) { long __functionAddress = device.getCapabilities().vkCreateComputePipelines; if (CHECKS) { VkComputePipelineCreateInfo.validate(pCreateInfos, createInfoCount); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPJPPPI(__functionAddress, device.address(), pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines); } /** * Creates a new compute pipeline object. * *
C Specification
* *

To create compute pipelines, call:

* *
     * VkResult vkCreateComputePipelines(
     *     VkDevice                                    device,
     *     VkPipelineCache                             pipelineCache,
     *     uint32_t                                    createInfoCount,
     *     const VkComputePipelineCreateInfo*          pCreateInfos,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkPipeline*                                 pPipelines);
* *
Valid Usage
* *
    *
  • If the {@code flags} member of any element of {@code pCreateInfos} contains the {@link #VK_PIPELINE_CREATE_DERIVATIVE_BIT PIPELINE_CREATE_DERIVATIVE_BIT} flag, and the {@code basePipelineIndex} member of that same element is not {@code -1}, {@code basePipelineIndex} must be less than the index into {@code pCreateInfos} that corresponds to that element
  • *
  • If the {@code flags} member of any element of {@code pCreateInfos} contains the {@link #VK_PIPELINE_CREATE_DERIVATIVE_BIT PIPELINE_CREATE_DERIVATIVE_BIT} flag, the base pipeline must have been created with the {@link #VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT} flag set
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code pipelineCache} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code pipelineCache} must be a valid {@code VkPipelineCache} handle
  • *
  • {@code pCreateInfos} must be a valid pointer to an array of {@code createInfoCount} valid {@link VkComputePipelineCreateInfo} structures
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pPipelines} must be a valid pointer to an array of {@code createInfoCount} {@code VkPipeline} handles
  • *
  • {@code createInfoCount} must be greater than 0
  • *
  • If {@code pipelineCache} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link NVGLSLShader#VK_ERROR_INVALID_SHADER_NV ERROR_INVALID_SHADER_NV}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkComputePipelineCreateInfo}

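* *
Code Example
* *

A minimal LWJGL sketch (editorial illustration, not part of the generated binding): it assumes a valid {@code VkDevice} {@code device}, a previously filled {@link VkPipelineShaderStageCreateInfo} {@code shaderStage} for a compute shader, and an existing {@code VkPipelineLayout} handle {@code pipelineLayout}; no pipeline cache or allocation callbacks are used. The {@code callocStack} helpers shown here are named {@code calloc(stack)} in newer LWJGL releases.

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // import java.nio.LongBuffer; and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkComputePipelineCreateInfo.Buffer createInfos = VkComputePipelineCreateInfo.callocStack(1, stack);
     *     createInfos.get(0)
     *         .sType(VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO)
     *         .stage(shaderStage)      // hypothetical, previously filled compute shader stage
     *         .layout(pipelineLayout); // hypothetical VkPipelineLayout created earlier
     *
     *     LongBuffer pPipelines = stack.mallocLong(1);
     *     int err = vkCreateComputePipelines(device, VK_NULL_HANDLE, createInfos, null, pPipelines);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateComputePipelines failed: " + err);
     *     }
     *     long computePipeline = pPipelines.get(0);
     * }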
* * @param device the logical device that creates the compute pipelines. * @param pipelineCache either {@link #VK_NULL_HANDLE NULL_HANDLE}, indicating that pipeline caching is disabled; or the handle of a valid pipeline cache object, in which case use of that cache is enabled for the duration of the command. * @param pCreateInfos an array of {@link VkComputePipelineCreateInfo} structures. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pPipelines a pointer to an array in which the resulting compute pipeline objects are returned. */ @NativeType("VkResult") public static int vkCreateComputePipelines(VkDevice device, @NativeType("VkPipelineCache") long pipelineCache, @NativeType("const VkComputePipelineCreateInfo *") VkComputePipelineCreateInfo.Buffer pCreateInfos, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkPipeline *") LongBuffer pPipelines) { if (CHECKS) { check(pPipelines, pCreateInfos.remaining()); } return nvkCreateComputePipelines(device, pipelineCache, pCreateInfos.remaining(), pCreateInfos.address(), memAddressSafe(pAllocator), memAddress(pPipelines)); } // --- [ vkDestroyPipeline ] --- /** Unsafe version of: {@link #vkDestroyPipeline DestroyPipeline} */ public static void nvkDestroyPipeline(VkDevice device, long pipeline, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyPipeline; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), pipeline, pAllocator); } /** * Destroy a pipeline object. * *
C Specification
* *

To destroy a graphics or compute pipeline, call:

* *
     * void vkDestroyPipeline(
     *     VkDevice                                    device,
     *     VkPipeline                                  pipeline,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code pipeline} must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code pipeline} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code pipeline} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code pipeline} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code pipeline} must be a valid {@code VkPipeline} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code pipeline} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code pipeline} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

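* *
Code Example
* *

A short LWJGL sketch (editorial illustration) of the teardown path, assuming {@code device} and {@code pipeline} are valid handles and no {@link VkAllocationCallbacks} were used at creation time. Waiting for the device to idle is a coarse but simple way to satisfy the completed-execution requirement; waiting on a fence from the last submission that used the pipeline also works.

* *
     * // Assumes a static import of org.lwjgl.vulkan.VK10.*.
     * vkDeviceWaitIdle(device);                  // ensure no submitted work still uses the pipeline
     * vkDestroyPipeline(device, pipeline, null); // null: no custom allocator was provided at creation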
* * @param device the logical device that destroys the pipeline. * @param pipeline the handle of the pipeline to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyPipeline(VkDevice device, @NativeType("VkPipeline") long pipeline, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyPipeline(device, pipeline, memAddressSafe(pAllocator)); } // --- [ vkCreatePipelineLayout ] --- /** Unsafe version of: {@link #vkCreatePipelineLayout CreatePipelineLayout} */ public static int nvkCreatePipelineLayout(VkDevice device, long pCreateInfo, long pAllocator, long pPipelineLayout) { long __functionAddress = device.getCapabilities().vkCreatePipelineLayout; if (CHECKS) { VkPipelineLayoutCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pPipelineLayout); } /** * Creates a new pipeline layout object. * *
C Specification
* *

To create a pipeline layout, call:

* *
     * VkResult vkCreatePipelineLayout(
     *     VkDevice                                    device,
     *     const VkPipelineLayoutCreateInfo*           pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkPipelineLayout*                           pPipelineLayout);
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkPipelineLayoutCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pPipelineLayout} must be a valid pointer to a {@code VkPipelineLayout} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkPipelineLayoutCreateInfo}

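* *
Code Example
* *

A minimal LWJGL sketch (editorial illustration), assuming a valid {@code VkDevice} {@code device} and an existing {@code VkDescriptorSetLayout} handle {@code descriptorSetLayout} (placeholder name); LWJGL derives {@code setLayoutCount} from the {@code pSetLayouts} buffer, and {@code callocStack} is the allocation helper of this LWJGL generation.

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // import java.nio.LongBuffer; and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkPipelineLayoutCreateInfo createInfo = VkPipelineLayoutCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO)
     *         .pSetLayouts(stack.longs(descriptorSetLayout)); // hypothetical set layout handle
     *
     *     LongBuffer pPipelineLayout = stack.mallocLong(1);
     *     int err = vkCreatePipelineLayout(device, createInfo, null, pPipelineLayout);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreatePipelineLayout failed: " + err);
     *     }
     *     long pipelineLayout = pPipelineLayout.get(0);
     * }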
* * @param device the logical device that creates the pipeline layout. * @param pCreateInfo a pointer to an instance of the {@link VkPipelineLayoutCreateInfo} structure specifying the state of the pipeline layout object. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pPipelineLayout points to a {@code VkPipelineLayout} handle in which the resulting pipeline layout object is returned. */ @NativeType("VkResult") public static int vkCreatePipelineLayout(VkDevice device, @NativeType("const VkPipelineLayoutCreateInfo *") VkPipelineLayoutCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkPipelineLayout *") LongBuffer pPipelineLayout) { if (CHECKS) { check(pPipelineLayout, 1); } return nvkCreatePipelineLayout(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pPipelineLayout)); } // --- [ vkDestroyPipelineLayout ] --- /** Unsafe version of: {@link #vkDestroyPipelineLayout DestroyPipelineLayout} */ public static void nvkDestroyPipelineLayout(VkDevice device, long pipelineLayout, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyPipelineLayout; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), pipelineLayout, pAllocator); } /** * Destroy a pipeline layout object. * *
C Specification
* *

To destroy a pipeline layout, call:

* *
     * void vkDestroyPipelineLayout(
     *     VkDevice                                    device,
     *     VkPipelineLayout                            pipelineLayout,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • If {@link VkAllocationCallbacks} were provided when {@code pipelineLayout} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code pipelineLayout} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code pipelineLayout} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code pipelineLayout} must be a valid {@code VkPipelineLayout} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code pipelineLayout} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code pipelineLayout} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the pipeline layout. * @param pipelineLayout the pipeline layout to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyPipelineLayout(VkDevice device, @NativeType("VkPipelineLayout") long pipelineLayout, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyPipelineLayout(device, pipelineLayout, memAddressSafe(pAllocator)); } // --- [ vkCreateSampler ] --- /** Unsafe version of: {@link #vkCreateSampler CreateSampler} */ public static int nvkCreateSampler(VkDevice device, long pCreateInfo, long pAllocator, long pSampler) { long __functionAddress = device.getCapabilities().vkCreateSampler; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pSampler); } /** * Create a new sampler object. * *
C Specification
* *

To create a sampler object, call:

* *
     * VkResult vkCreateSampler(
     *     VkDevice                                    device,
     *     const VkSamplerCreateInfo*                  pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkSampler*                                  pSampler);
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkSamplerCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pSampler} must be a valid pointer to a {@code VkSampler} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_TOO_MANY_OBJECTS ERROR_TOO_MANY_OBJECTS}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkSamplerCreateInfo}

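* *
Code Example
* *

A minimal LWJGL sketch (editorial illustration) of a linear-filtering, repeating sampler, assuming a valid {@code VkDevice} {@code device}; {@code callocStack} zero-initializes the structure, so members that are not set keep their default (0) values.

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // import java.nio.LongBuffer; and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkSamplerCreateInfo createInfo = VkSamplerCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO)
     *         .magFilter(VK_FILTER_LINEAR)
     *         .minFilter(VK_FILTER_LINEAR)
     *         .mipmapMode(VK_SAMPLER_MIPMAP_MODE_LINEAR)
     *         .addressModeU(VK_SAMPLER_ADDRESS_MODE_REPEAT)
     *         .addressModeV(VK_SAMPLER_ADDRESS_MODE_REPEAT)
     *         .addressModeW(VK_SAMPLER_ADDRESS_MODE_REPEAT)
     *         .maxLod(VK_LOD_CLAMP_NONE);
     *
     *     LongBuffer pSampler = stack.mallocLong(1);
     *     int err = vkCreateSampler(device, createInfo, null, pSampler);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateSampler failed: " + err);
     *     }
     *     long sampler = pSampler.get(0);
     * }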
* * @param device the logical device that creates the sampler. * @param pCreateInfo a pointer to an instance of the {@link VkSamplerCreateInfo} structure specifying the state of the sampler object. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pSampler points to a {@code VkSampler} handle in which the resulting sampler object is returned. */ @NativeType("VkResult") public static int vkCreateSampler(VkDevice device, @NativeType("const VkSamplerCreateInfo *") VkSamplerCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkSampler *") LongBuffer pSampler) { if (CHECKS) { check(pSampler, 1); } return nvkCreateSampler(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pSampler)); } // --- [ vkDestroySampler ] --- /** Unsafe version of: {@link #vkDestroySampler DestroySampler} */ public static void nvkDestroySampler(VkDevice device, long sampler, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroySampler; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), sampler, pAllocator); } /** * Destroy a sampler object. * *
C Specification
* *

To destroy a sampler, call:

* *
     * void vkDestroySampler(
     *     VkDevice                                    device,
     *     VkSampler                                   sampler,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code sampler} must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code sampler} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code sampler} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code sampler} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code sampler} must be a valid {@code VkSampler} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code sampler} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code sampler} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the sampler. * @param sampler the sampler to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroySampler(VkDevice device, @NativeType("VkSampler") long sampler, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroySampler(device, sampler, memAddressSafe(pAllocator)); } // --- [ vkCreateDescriptorSetLayout ] --- /** Unsafe version of: {@link #vkCreateDescriptorSetLayout CreateDescriptorSetLayout} */ public static int nvkCreateDescriptorSetLayout(VkDevice device, long pCreateInfo, long pAllocator, long pSetLayout) { long __functionAddress = device.getCapabilities().vkCreateDescriptorSetLayout; if (CHECKS) { VkDescriptorSetLayoutCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pSetLayout); } /** * Create a new descriptor set layout. * *
C Specification
* *

To create descriptor set layout objects, call:

* *
     * VkResult vkCreateDescriptorSetLayout(
     *     VkDevice                                    device,
     *     const VkDescriptorSetLayoutCreateInfo*      pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkDescriptorSetLayout*                      pSetLayout);
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkDescriptorSetLayoutCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pSetLayout} must be a valid pointer to a {@code VkDescriptorSetLayout} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkDescriptorSetLayoutCreateInfo}

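* *
Code Example
* *

A minimal LWJGL sketch (editorial illustration) declaring a single uniform-buffer binding visible to the vertex stage, assuming a valid {@code VkDevice} {@code device}; {@code bindingCount} is derived from the {@code pBindings} buffer.

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // import java.nio.LongBuffer; and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkDescriptorSetLayoutBinding.Buffer bindings = VkDescriptorSetLayoutBinding.callocStack(1, stack);
     *     bindings.get(0)
     *         .binding(0)
     *         .descriptorType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
     *         .descriptorCount(1)
     *         .stageFlags(VK_SHADER_STAGE_VERTEX_BIT);
     *
     *     VkDescriptorSetLayoutCreateInfo createInfo = VkDescriptorSetLayoutCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO)
     *         .pBindings(bindings);
     *
     *     LongBuffer pSetLayout = stack.mallocLong(1);
     *     int err = vkCreateDescriptorSetLayout(device, createInfo, null, pSetLayout);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateDescriptorSetLayout failed: " + err);
     *     }
     *     long descriptorSetLayout = pSetLayout.get(0);
     * }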
* * @param device the logical device that creates the descriptor set layout. * @param pCreateInfo a pointer to an instance of the {@link VkDescriptorSetLayoutCreateInfo} structure specifying the state of the descriptor set layout object. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pSetLayout points to a {@code VkDescriptorSetLayout} handle in which the resulting descriptor set layout object is returned. */ @NativeType("VkResult") public static int vkCreateDescriptorSetLayout(VkDevice device, @NativeType("const VkDescriptorSetLayoutCreateInfo *") VkDescriptorSetLayoutCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkDescriptorSetLayout *") LongBuffer pSetLayout) { if (CHECKS) { check(pSetLayout, 1); } return nvkCreateDescriptorSetLayout(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pSetLayout)); } // --- [ vkDestroyDescriptorSetLayout ] --- /** Unsafe version of: {@link #vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout} */ public static void nvkDestroyDescriptorSetLayout(VkDevice device, long descriptorSetLayout, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyDescriptorSetLayout; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), descriptorSetLayout, pAllocator); } /** * Destroy a descriptor set layout object. * *
C Specification
* *

To destroy a descriptor set layout, call:

* *
     * void vkDestroyDescriptorSetLayout(
     *     VkDevice                                    device,
     *     VkDescriptorSetLayout                       descriptorSetLayout,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • If {@link VkAllocationCallbacks} were provided when {@code descriptorSetLayout} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code descriptorSetLayout} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code descriptorSetLayout} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code descriptorSetLayout} must be a valid {@code VkDescriptorSetLayout} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code descriptorSetLayout} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code descriptorSetLayout} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the descriptor set layout. * @param descriptorSetLayout the descriptor set layout to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyDescriptorSetLayout(VkDevice device, @NativeType("VkDescriptorSetLayout") long descriptorSetLayout, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyDescriptorSetLayout(device, descriptorSetLayout, memAddressSafe(pAllocator)); } // --- [ vkCreateDescriptorPool ] --- /** Unsafe version of: {@link #vkCreateDescriptorPool CreateDescriptorPool} */ public static int nvkCreateDescriptorPool(VkDevice device, long pCreateInfo, long pAllocator, long pDescriptorPool) { long __functionAddress = device.getCapabilities().vkCreateDescriptorPool; if (CHECKS) { VkDescriptorPoolCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pDescriptorPool); } /** * Creates a descriptor pool object. * *
C Specification
* *

To create a descriptor pool object, call:

* *
     * VkResult vkCreateDescriptorPool(
     *     VkDevice                                    device,
     *     const VkDescriptorPoolCreateInfo*           pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkDescriptorPool*                           pDescriptorPool);
* *
Description
* *

{@code pAllocator} controls host memory allocation as described in the Memory Allocation chapter.

* *

The created descriptor pool is returned in {@code pDescriptorPool}.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkDescriptorPoolCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pDescriptorPool} must be a valid pointer to a {@code VkDescriptorPool} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkDescriptorPoolCreateInfo}

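* *
Code Example
* *

A minimal LWJGL sketch (editorial illustration), assuming a valid {@code VkDevice} {@code device}; the pool sizes and {@code maxSets} value are arbitrary placeholders, and the {@link #VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT} flag is only needed if individual sets will later be freed with {@link #vkFreeDescriptorSets FreeDescriptorSets}.

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // import java.nio.LongBuffer; and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkDescriptorPoolSize.Buffer poolSizes = VkDescriptorPoolSize.callocStack(1, stack);
     *     poolSizes.get(0)
     *         .type(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
     *         .descriptorCount(16);
     *
     *     VkDescriptorPoolCreateInfo createInfo = VkDescriptorPoolCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO)
     *         .flags(VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT) // only if sets are freed individually
     *         .maxSets(16)
     *         .pPoolSizes(poolSizes);
     *
     *     LongBuffer pDescriptorPool = stack.mallocLong(1);
     *     int err = vkCreateDescriptorPool(device, createInfo, null, pDescriptorPool);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateDescriptorPool failed: " + err);
     *     }
     *     long descriptorPool = pDescriptorPool.get(0);
     * }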
* * @param device the logical device that creates the descriptor pool. * @param pCreateInfo a pointer to an instance of the {@link VkDescriptorPoolCreateInfo} structure specifying the state of the descriptor pool object. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pDescriptorPool points to a {@code VkDescriptorPool} handle in which the resulting descriptor pool object is returned. */ @NativeType("VkResult") public static int vkCreateDescriptorPool(VkDevice device, @NativeType("const VkDescriptorPoolCreateInfo *") VkDescriptorPoolCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkDescriptorPool *") LongBuffer pDescriptorPool) { if (CHECKS) { check(pDescriptorPool, 1); } return nvkCreateDescriptorPool(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pDescriptorPool)); } // --- [ vkDestroyDescriptorPool ] --- /** Unsafe version of: {@link #vkDestroyDescriptorPool DestroyDescriptorPool} */ public static void nvkDestroyDescriptorPool(VkDevice device, long descriptorPool, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyDescriptorPool; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), descriptorPool, pAllocator); } /** * Destroy a descriptor pool object. * *
C Specification
* *

To destroy a descriptor pool, call:

* *
     * void vkDestroyDescriptorPool(
     *     VkDevice                                    device,
     *     VkDescriptorPool                            descriptorPool,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Description
* *

When a pool is destroyed, all descriptor sets allocated from the pool are implicitly freed and become invalid. Descriptor sets allocated from a given pool do not need to be freed before destroying that descriptor pool.

* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code descriptorPool} (via any allocated descriptor sets) must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code descriptorPool} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code descriptorPool} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code descriptorPool} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code descriptorPool} must be a valid {@code VkDescriptorPool} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code descriptorPool} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code descriptorPool} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the descriptor pool. * @param descriptorPool the descriptor pool to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyDescriptorPool(VkDevice device, @NativeType("VkDescriptorPool") long descriptorPool, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyDescriptorPool(device, descriptorPool, memAddressSafe(pAllocator)); } // --- [ vkResetDescriptorPool ] --- /** * Resets a descriptor pool object. * *
C Specification
* *

To return all descriptor sets allocated from a given pool to the pool, rather than freeing individual descriptor sets, call:

* *
     * VkResult vkResetDescriptorPool(
     *     VkDevice                                    device,
     *     VkDescriptorPool                            descriptorPool,
     *     VkDescriptorPoolResetFlags                  flags);
* *
Description
* *

Resetting a descriptor pool recycles all of the resources from all of the descriptor sets allocated from the descriptor pool back to the descriptor pool, and the descriptor sets are implicitly freed.

* *
Valid Usage
* *
    *
  • All uses of {@code descriptorPool} (via any allocated descriptor sets) must have completed execution
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code descriptorPool} must be a valid {@code VkDescriptorPool} handle
  • *
  • {@code flags} must be 0
  • *
  • {@code descriptorPool} must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code descriptorPool} must be externally synchronized
  • *
  • Host access to any {@code VkDescriptorSet} objects allocated from {@code descriptorPool} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
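* *
Code Example
* *

A short LWJGL sketch (editorial illustration), assuming valid {@code device} and {@code descriptorPool} handles and that no submitted work referencing sets from the pool is still executing.

* *
     * // Assumes a static import of org.lwjgl.vulkan.VK10.*.
     * // All VkDescriptorSet handles previously allocated from the pool become invalid here.
     * int err = vkResetDescriptorPool(device, descriptorPool, 0); // flags is reserved and must be 0
     * if (err != VK_SUCCESS) {
     *     throw new IllegalStateException("vkResetDescriptorPool failed: " + err);
     * }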
* * @param device the logical device that owns the descriptor pool. * @param descriptorPool the descriptor pool to be reset. * @param flags reserved for future use. */ @NativeType("VkResult") public static int vkResetDescriptorPool(VkDevice device, @NativeType("VkDescriptorPool") long descriptorPool, @NativeType("VkDescriptorPoolResetFlags") int flags) { long __functionAddress = device.getCapabilities().vkResetDescriptorPool; return callPJI(__functionAddress, device.address(), descriptorPool, flags); } // --- [ vkAllocateDescriptorSets ] --- /** Unsafe version of: {@link #vkAllocateDescriptorSets AllocateDescriptorSets} */ public static int nvkAllocateDescriptorSets(VkDevice device, long pAllocateInfo, long pDescriptorSets) { long __functionAddress = device.getCapabilities().vkAllocateDescriptorSets; if (CHECKS) { VkDescriptorSetAllocateInfo.validate(pAllocateInfo); } return callPPPI(__functionAddress, device.address(), pAllocateInfo, pDescriptorSets); } /** * Allocate one or more descriptor sets. * *
C Specification
* *

To allocate descriptor sets from a descriptor pool, call:

* *
     * VkResult vkAllocateDescriptorSets(
     *     VkDevice                                    device,
     *     const VkDescriptorSetAllocateInfo*          pAllocateInfo,
     *     VkDescriptorSet*                            pDescriptorSets);
* *
Description
* *

The allocated descriptor sets are returned in {@code pDescriptorSets}.

* *

When a descriptor set is allocated, the initial state is largely uninitialized and all descriptors are undefined. However, the descriptor set can be bound in a command buffer without causing errors or exceptions. All entries that are statically used by a pipeline in a drawing or dispatching command must have been populated before the descriptor set is bound for use by that command. Entries that are not statically used by a pipeline can have uninitialized descriptors or descriptors of resources that have been destroyed, and executing a draw or dispatch with such a descriptor set bound does not cause undefined behavior. This means applications need not populate unused entries with dummy descriptors.

* *

If a call to {@link #vkAllocateDescriptorSets AllocateDescriptorSets} would cause the total number of descriptor sets allocated from the pool to exceed the value of {@link VkDescriptorPoolCreateInfo}{@code ::maxSets} used to create {@code pAllocateInfo}->{@code descriptorPool}, then the allocation may fail due to lack of space in the descriptor pool. Similarly, the allocation may fail due to lack of space if the call to {@link #vkAllocateDescriptorSets AllocateDescriptorSets} would cause the number of any given descriptor type to exceed the sum of all the {@code descriptorCount} members of each element of {@link VkDescriptorPoolCreateInfo}{@code ::pPoolSizes} with a {@code type} equal to that type. If the allocation fails due to no more space in the descriptor pool, and not because of system or device memory exhaustion, then {@link KHRMaintenance1#VK_ERROR_OUT_OF_POOL_MEMORY_KHR ERROR_OUT_OF_POOL_MEMORY_KHR} must be returned.

* *

{@link #vkAllocateDescriptorSets AllocateDescriptorSets} can be used to create multiple descriptor sets. If the creation of any of those descriptor sets fails, then the implementation must destroy all successfully created descriptor set objects from this command, set all entries of the {@code pDescriptorSets} array to {@link #VK_NULL_HANDLE NULL_HANDLE} and return the error.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pAllocateInfo} must be a valid pointer to a valid {@link VkDescriptorSetAllocateInfo} structure
  • *
  • {@code pDescriptorSets} must be a valid pointer to an array of {@code pAllocateInfo}::descriptorSetCount {@code VkDescriptorSet} handles
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code pAllocateInfo}::descriptorPool must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
  • {@link #VK_ERROR_FRAGMENTED_POOL ERROR_FRAGMENTED_POOL}
  • *
  • {@link KHRMaintenance1#VK_ERROR_OUT_OF_POOL_MEMORY_KHR ERROR_OUT_OF_POOL_MEMORY_KHR}
  • *
*
* *
See Also
* *

{@link VkDescriptorSetAllocateInfo}

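* *
Code Example
* *

A minimal LWJGL sketch (editorial illustration) allocating one set, assuming valid {@code device}, {@code descriptorPool} and {@code descriptorSetLayout} handles (placeholder names); {@code descriptorSetCount} is derived from the {@code pSetLayouts} buffer.

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // import java.nio.LongBuffer; and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkDescriptorSetAllocateInfo allocateInfo = VkDescriptorSetAllocateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO)
     *         .descriptorPool(descriptorPool)                 // hypothetical VkDescriptorPool handle
     *         .pSetLayouts(stack.longs(descriptorSetLayout)); // one layout per set to allocate
     *
     *     LongBuffer pDescriptorSets = stack.mallocLong(1);
     *     int err = vkAllocateDescriptorSets(device, allocateInfo, pDescriptorSets);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkAllocateDescriptorSets failed: " + err);
     *     }
     *     long descriptorSet = pDescriptorSets.get(0);
     * }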
* * @param device the logical device that owns the descriptor pool. * @param pAllocateInfo a pointer to an instance of the {@link VkDescriptorSetAllocateInfo} structure describing parameters of the allocation. * @param pDescriptorSets a pointer to an array of {@code VkDescriptorSet} handles in which the resulting descriptor set objects are returned. */ @NativeType("VkResult") public static int vkAllocateDescriptorSets(VkDevice device, @NativeType("const VkDescriptorSetAllocateInfo *") VkDescriptorSetAllocateInfo pAllocateInfo, @NativeType("VkDescriptorSet *") LongBuffer pDescriptorSets) { if (CHECKS) { check(pDescriptorSets, pAllocateInfo.descriptorSetCount()); } return nvkAllocateDescriptorSets(device, pAllocateInfo.address(), memAddress(pDescriptorSets)); } // --- [ vkFreeDescriptorSets ] --- /** * Unsafe version of: {@link #vkFreeDescriptorSets FreeDescriptorSets} * * @param descriptorSetCount the number of elements in the {@code pDescriptorSets} array. */ public static int nvkFreeDescriptorSets(VkDevice device, long descriptorPool, int descriptorSetCount, long pDescriptorSets) { long __functionAddress = device.getCapabilities().vkFreeDescriptorSets; return callPJPI(__functionAddress, device.address(), descriptorPool, descriptorSetCount, pDescriptorSets); } /** * Free one or more descriptor sets. * *
C Specification
* *

To free allocated descriptor sets, call:

* *
     * VkResult vkFreeDescriptorSets(
     *     VkDevice                                    device,
     *     VkDescriptorPool                            descriptorPool,
     *     uint32_t                                    descriptorSetCount,
     *     const VkDescriptorSet*                      pDescriptorSets);
* *
Description
* *

After a successful call to {@link #vkFreeDescriptorSets FreeDescriptorSets}, all descriptor sets in {@code pDescriptorSets} are invalid.

* *
Valid Usage
* *
    *
  • All submitted commands that refer to any element of {@code pDescriptorSets} must have completed execution
  • *
  • {@code pDescriptorSets} must be a valid pointer to an array of {@code descriptorSetCount} {@code VkDescriptorSet} handles, each element of which must either be a valid handle or {@link #VK_NULL_HANDLE NULL_HANDLE}
  • *
  • Each valid handle in {@code pDescriptorSets} must have been allocated from {@code descriptorPool}
  • *
  • {@code descriptorPool} must have been created with the {@link #VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT} flag
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code descriptorPool} must be a valid {@code VkDescriptorPool} handle
  • *
  • {@code descriptorSetCount} must be greater than 0
  • *
  • {@code descriptorPool} must have been created, allocated, or retrieved from {@code device}
  • *
  • Each element of {@code pDescriptorSets} that is a valid handle must have been created, allocated, or retrieved from {@code descriptorPool}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code descriptorPool} must be externally synchronized
  • *
  • Host access to each member of {@code pDescriptorSets} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
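* *
Code Example
* *

A short LWJGL sketch (editorial illustration), assuming valid {@code device} and {@code descriptorPool} handles, two previously allocated set handles {@code descriptorSetA} and {@code descriptorSetB} (placeholder names), and a pool created with the {@link #VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT} flag.

* *
     * // Assumes: import org.lwjgl.system.MemoryStack; import java.nio.LongBuffer;
     * // and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     LongBuffer pDescriptorSets = stack.longs(descriptorSetA, descriptorSetB); // hypothetical handles
     *     int err = vkFreeDescriptorSets(device, descriptorPool, pDescriptorSets);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkFreeDescriptorSets failed: " + err);
     *     }
     * }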
* * @param device the logical device that owns the descriptor pool. * @param descriptorPool the descriptor pool from which the descriptor sets were allocated. * @param pDescriptorSets an array of handles to {@code VkDescriptorSet} objects. */ @NativeType("VkResult") public static int vkFreeDescriptorSets(VkDevice device, @NativeType("VkDescriptorPool") long descriptorPool, @NativeType("const VkDescriptorSet *") LongBuffer pDescriptorSets) { return nvkFreeDescriptorSets(device, descriptorPool, pDescriptorSets.remaining(), memAddress(pDescriptorSets)); } /** * Free one or more descriptor sets. * *
C Specification
* *

To free allocated descriptor sets, call:

* *
     * VkResult vkFreeDescriptorSets(
     *     VkDevice                                    device,
     *     VkDescriptorPool                            descriptorPool,
     *     uint32_t                                    descriptorSetCount,
     *     const VkDescriptorSet*                      pDescriptorSets);
* *
Description
* *

After a successful call to {@link #vkFreeDescriptorSets FreeDescriptorSets}, all descriptor sets in {@code pDescriptorSets} are invalid.

* *
Valid Usage
* *
    *
  • All submitted commands that refer to any element of {@code pDescriptorSets} must have completed execution
  • *
  • {@code pDescriptorSets} must be a valid pointer to an array of {@code descriptorSetCount} {@code VkDescriptorSet} handles, each element of which must either be a valid handle or {@link #VK_NULL_HANDLE NULL_HANDLE}
  • *
  • Each valid handle in {@code pDescriptorSets} must have been allocated from {@code descriptorPool}
  • *
  • {@code descriptorPool} must have been created with the {@link #VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT} flag
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code descriptorPool} must be a valid {@code VkDescriptorPool} handle
  • *
  • {@code descriptorSetCount} must be greater than 0
  • *
  • {@code descriptorPool} must have been created, allocated, or retrieved from {@code device}
  • *
  • Each element of {@code pDescriptorSets} that is a valid handle must have been created, allocated, or retrieved from {@code descriptorPool}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code descriptorPool} must be externally synchronized
  • *
  • Host access to each member of {@code pDescriptorSets} must be externally synchronized
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* * @param device the logical device that owns the descriptor pool. * @param descriptorPool the descriptor pool from which the descriptor sets were allocated. */ @NativeType("VkResult") public static int vkFreeDescriptorSets(VkDevice device, @NativeType("VkDescriptorPool") long descriptorPool, @NativeType("const VkDescriptorSet *") long pDescriptorSet) { MemoryStack stack = stackGet(); int stackPointer = stack.getPointer(); try { LongBuffer pDescriptorSets = stack.longs(pDescriptorSet); return nvkFreeDescriptorSets(device, descriptorPool, 1, memAddress(pDescriptorSets)); } finally { stack.setPointer(stackPointer); } } // --- [ vkUpdateDescriptorSets ] --- /** * Unsafe version of: {@link #vkUpdateDescriptorSets UpdateDescriptorSets} * * @param descriptorWriteCount the number of elements in the {@code pDescriptorWrites} array. * @param descriptorCopyCount the number of elements in the {@code pDescriptorCopies} array. */ public static void nvkUpdateDescriptorSets(VkDevice device, int descriptorWriteCount, long pDescriptorWrites, int descriptorCopyCount, long pDescriptorCopies) { long __functionAddress = device.getCapabilities().vkUpdateDescriptorSets; if (CHECKS) { if (pDescriptorWrites != NULL) { VkWriteDescriptorSet.validate(pDescriptorWrites, descriptorWriteCount); } } callPPPV(__functionAddress, device.address(), descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies); } /** * Update the contents of a descriptor set object. * *
C Specification
* *

Once allocated, descriptor sets can be updated with a combination of write and copy operations. To update descriptor sets, call:

* *
     * void vkUpdateDescriptorSets(
     *     VkDevice                                    device,
     *     uint32_t                                    descriptorWriteCount,
     *     const VkWriteDescriptorSet*                 pDescriptorWrites,
     *     uint32_t                                    descriptorCopyCount,
     *     const VkCopyDescriptorSet*                  pDescriptorCopies);
* *
Description
* *

The operations described by {@code pDescriptorWrites} are performed first, followed by the operations described by {@code pDescriptorCopies}. Within each array, the operations are performed in the order they appear in the array.

* *

Each element in the {@code pDescriptorWrites} array describes an operation updating the descriptor set using descriptors for resources specified in the structure.

* *

Each element in the {@code pDescriptorCopies} array is a {@link VkCopyDescriptorSet} structure describing an operation copying descriptors between sets.

* *

If the {@code dstSet} member of any element of {@code pDescriptorWrites} or {@code pDescriptorCopies} is bound, accessed, or modified by any command that was recorded to a command buffer which is currently in the recording or executable state, that command buffer becomes invalid.

* *
Valid Usage
* *
    *
  • The {@code dstSet} member of each element of {@code pDescriptorWrites} or {@code pDescriptorCopies} must not be used by any command that was recorded to a command buffer which is in the pending state.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code descriptorWriteCount} is not 0, {@code pDescriptorWrites} must be a valid pointer to an array of {@code descriptorWriteCount} valid {@link VkWriteDescriptorSet} structures
  • *
  • If {@code descriptorCopyCount} is not 0, {@code pDescriptorCopies} must be a valid pointer to an array of {@code descriptorCopyCount} valid {@link VkCopyDescriptorSet} structures
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code pDescriptorWrites[].dstSet} must be externally synchronized
  • *
  • Host access to {@code pDescriptorCopies[].dstSet} must be externally synchronized
  • *
* *
See Also
* *

{@link VkCopyDescriptorSet}, {@link VkWriteDescriptorSet}

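* *
Code Example
* *

A minimal LWJGL sketch (editorial illustration) of a single write, assuming valid {@code device} and {@code descriptorSet} handles and a {@code VkBuffer} handle {@code uniformBuffer} (placeholder names). In this LWJGL generation {@code descriptorCount} is derived from the {@code pBufferInfo} buffer; newer LWJGL releases require setting it explicitly.

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkDescriptorBufferInfo.Buffer bufferInfo = VkDescriptorBufferInfo.callocStack(1, stack);
     *     bufferInfo.get(0)
     *         .buffer(uniformBuffer) // hypothetical VkBuffer handle
     *         .offset(0)
     *         .range(VK_WHOLE_SIZE);
     *
     *     VkWriteDescriptorSet.Buffer writes = VkWriteDescriptorSet.callocStack(1, stack);
     *     writes.get(0)
     *         .sType(VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET)
     *         .dstSet(descriptorSet) // hypothetical VkDescriptorSet handle
     *         .dstBinding(0)
     *         .descriptorType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
     *         .pBufferInfo(bufferInfo); // on newer LWJGL releases also call .descriptorCount(1)
     *
     *     vkUpdateDescriptorSets(device, writes, null); // writes only, no copies
     * }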
* * @param device the logical device that updates the descriptor sets. * @param pDescriptorWrites a pointer to an array of {@link VkWriteDescriptorSet} structures describing the descriptor sets to write to. * @param pDescriptorCopies a pointer to an array of {@link VkCopyDescriptorSet} structures describing the descriptor sets to copy between. */ public static void vkUpdateDescriptorSets(VkDevice device, @Nullable @NativeType("const VkWriteDescriptorSet *") VkWriteDescriptorSet.Buffer pDescriptorWrites, @Nullable @NativeType("const VkCopyDescriptorSet *") VkCopyDescriptorSet.Buffer pDescriptorCopies) { nvkUpdateDescriptorSets(device, remainingSafe(pDescriptorWrites), memAddressSafe(pDescriptorWrites), remainingSafe(pDescriptorCopies), memAddressSafe(pDescriptorCopies)); } // --- [ vkCreateFramebuffer ] --- /** Unsafe version of: {@link #vkCreateFramebuffer CreateFramebuffer} */ public static int nvkCreateFramebuffer(VkDevice device, long pCreateInfo, long pAllocator, long pFramebuffer) { long __functionAddress = device.getCapabilities().vkCreateFramebuffer; if (CHECKS) { VkFramebufferCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pFramebuffer); } /** * Create a new framebuffer object. * *
C Specification
* *

To create a framebuffer, call:

* *
     * VkResult vkCreateFramebuffer(
     *     VkDevice                                    device,
     *     const VkFramebufferCreateInfo*              pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkFramebuffer*                              pFramebuffer);
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkFramebufferCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pFramebuffer} must be a valid pointer to a {@code VkFramebuffer} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkFramebufferCreateInfo}

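* *
Code Example
* *

A minimal LWJGL sketch (editorial illustration), assuming valid {@code device} and {@code renderPass} handles, a compatible {@code VkImageView} handle {@code colorImageView}, and {@code width}/{@code height} values matching the attachment extent (all placeholder names).

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // import java.nio.LongBuffer; and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkFramebufferCreateInfo createInfo = VkFramebufferCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO)
     *         .renderPass(renderPass)                    // hypothetical VkRenderPass handle
     *         .pAttachments(stack.longs(colorImageView)) // hypothetical VkImageView handle
     *         .width(width)
     *         .height(height)
     *         .layers(1);
     *
     *     LongBuffer pFramebuffer = stack.mallocLong(1);
     *     int err = vkCreateFramebuffer(device, createInfo, null, pFramebuffer);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateFramebuffer failed: " + err);
     *     }
     *     long framebuffer = pFramebuffer.get(0);
     * }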
* * @param device the logical device that creates the framebuffer. * @param pCreateInfo points to a {@link VkFramebufferCreateInfo} structure which describes additional information about framebuffer creation. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pFramebuffer points to a {@code VkFramebuffer} handle in which the resulting framebuffer object is returned. */ @NativeType("VkResult") public static int vkCreateFramebuffer(VkDevice device, @NativeType("const VkFramebufferCreateInfo *") VkFramebufferCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkFramebuffer *") LongBuffer pFramebuffer) { if (CHECKS) { check(pFramebuffer, 1); } return nvkCreateFramebuffer(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pFramebuffer)); } // --- [ vkDestroyFramebuffer ] --- /** Unsafe version of: {@link #vkDestroyFramebuffer DestroyFramebuffer} */ public static void nvkDestroyFramebuffer(VkDevice device, long framebuffer, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyFramebuffer; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), framebuffer, pAllocator); } /** * Destroy a framebuffer object. * *
C Specification
* *

To destroy a framebuffer, call:

* *
     * void vkDestroyFramebuffer(
     *     VkDevice                                    device,
     *     VkFramebuffer                               framebuffer,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code framebuffer} must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code framebuffer} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code framebuffer} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code framebuffer} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code framebuffer} must be a valid {@code VkFramebuffer} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code framebuffer} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code framebuffer} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the framebuffer. * @param framebuffer the handle of the framebuffer to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyFramebuffer(VkDevice device, @NativeType("VkFramebuffer") long framebuffer, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyFramebuffer(device, framebuffer, memAddressSafe(pAllocator)); } // --- [ vkCreateRenderPass ] --- /** Unsafe version of: {@link #vkCreateRenderPass CreateRenderPass} */ public static int nvkCreateRenderPass(VkDevice device, long pCreateInfo, long pAllocator, long pRenderPass) { long __functionAddress = device.getCapabilities().vkCreateRenderPass; if (CHECKS) { VkRenderPassCreateInfo.validate(pCreateInfo); if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pRenderPass); } /** * Create a new render pass object. * *
C Specification
* *

To create a render pass, call:

* *
     * VkResult vkCreateRenderPass(
     *     VkDevice                                    device,
     *     const VkRenderPassCreateInfo*               pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkRenderPass*                               pRenderPass);
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkRenderPassCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pRenderPass} must be a valid pointer to a {@code VkRenderPass} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkRenderPassCreateInfo}

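* *
Code Example
* *

A minimal LWJGL sketch (editorial illustration) of a single-subpass render pass with one color attachment, assuming a valid {@code VkDevice} {@code device}; the attachment format and layouts are arbitrary placeholders and should match the images actually rendered to.

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // import java.nio.LongBuffer; and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkAttachmentDescription.Buffer attachments = VkAttachmentDescription.callocStack(1, stack);
     *     attachments.get(0)
     *         .format(VK_FORMAT_B8G8R8A8_UNORM) // assumed color attachment format
     *         .samples(VK_SAMPLE_COUNT_1_BIT)
     *         .loadOp(VK_ATTACHMENT_LOAD_OP_CLEAR)
     *         .storeOp(VK_ATTACHMENT_STORE_OP_STORE)
     *         .stencilLoadOp(VK_ATTACHMENT_LOAD_OP_DONT_CARE)
     *         .stencilStoreOp(VK_ATTACHMENT_STORE_OP_DONT_CARE)
     *         .initialLayout(VK_IMAGE_LAYOUT_UNDEFINED)
     *         .finalLayout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
     *
     *     VkAttachmentReference.Buffer colorRef = VkAttachmentReference.callocStack(1, stack);
     *     colorRef.get(0)
     *         .attachment(0)
     *         .layout(VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
     *
     *     VkSubpassDescription.Buffer subpasses = VkSubpassDescription.callocStack(1, stack);
     *     subpasses.get(0)
     *         .pipelineBindPoint(VK_PIPELINE_BIND_POINT_GRAPHICS)
     *         .colorAttachmentCount(1)
     *         .pColorAttachments(colorRef);
     *
     *     VkRenderPassCreateInfo createInfo = VkRenderPassCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO)
     *         .pAttachments(attachments)
     *         .pSubpasses(subpasses);
     *
     *     LongBuffer pRenderPass = stack.mallocLong(1);
     *     int err = vkCreateRenderPass(device, createInfo, null, pRenderPass);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateRenderPass failed: " + err);
     *     }
     *     long renderPass = pRenderPass.get(0);
     * }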
* * @param device the logical device that creates the render pass. * @param pCreateInfo a pointer to an instance of the {@link VkRenderPassCreateInfo} structure that describes the parameters of the render pass. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pRenderPass points to a {@code VkRenderPass} handle in which the resulting render pass object is returned. */ @NativeType("VkResult") public static int vkCreateRenderPass(VkDevice device, @NativeType("const VkRenderPassCreateInfo *") VkRenderPassCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkRenderPass *") LongBuffer pRenderPass) { if (CHECKS) { check(pRenderPass, 1); } return nvkCreateRenderPass(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pRenderPass)); } // --- [ vkDestroyRenderPass ] --- /** Unsafe version of: {@link #vkDestroyRenderPass DestroyRenderPass} */ public static void nvkDestroyRenderPass(VkDevice device, long renderPass, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyRenderPass; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), renderPass, pAllocator); } /** * Destroy a render pass object. * *
C Specification
* *

To destroy a render pass, call:

* *
     * void vkDestroyRenderPass(
     *     VkDevice                                    device,
     *     VkRenderPass                                renderPass,
     *     const VkAllocationCallbacks*                pAllocator);
* *
Valid Usage
* *
    *
  • All submitted commands that refer to {@code renderPass} must have completed execution
  • *
  • If {@link VkAllocationCallbacks} were provided when {@code renderPass} was created, a compatible set of callbacks must be provided here
  • *
  • If no {@link VkAllocationCallbacks} were provided when {@code renderPass} was created, {@code pAllocator} must be {@code NULL}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • If {@code renderPass} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code renderPass} must be a valid {@code VkRenderPass} handle
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • If {@code renderPass} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code renderPass} must be externally synchronized
  • *
* *
See Also
* *

{@link VkAllocationCallbacks}

* * @param device the logical device that destroys the render pass. * @param renderPass the handle of the render pass to destroy. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. */ public static void vkDestroyRenderPass(VkDevice device, @NativeType("VkRenderPass") long renderPass, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) { nvkDestroyRenderPass(device, renderPass, memAddressSafe(pAllocator)); } // --- [ vkGetRenderAreaGranularity ] --- /** Unsafe version of: {@link #vkGetRenderAreaGranularity GetRenderAreaGranularity} */ public static void nvkGetRenderAreaGranularity(VkDevice device, long renderPass, long pGranularity) { long __functionAddress = device.getCapabilities().vkGetRenderAreaGranularity; callPJPV(__functionAddress, device.address(), renderPass, pGranularity); } /** * Returns the granularity for optimal render area. * *
C Specification
* *

To query the render area granularity, call:

* *
     * void vkGetRenderAreaGranularity(
     *     VkDevice                                    device,
     *     VkRenderPass                                renderPass,
     *     VkExtent2D*                                 pGranularity);
* *
Description
* *

The conditions leading to an optimal {@code renderArea} are:

* *
    *
  • the {@code offset.x} member in {@code renderArea} is a multiple of the {@code width} member of the returned {@link VkExtent2D} (the horizontal granularity).
  • *
  • the {@code offset.y} member in {@code renderArea} is a multiple of the {@code height} of the returned {@link VkExtent2D} (the vertical granularity).
  • *
  • either the {@code extent.width} member in {@code renderArea} is a multiple of the horizontal granularity or {@code offset.x}+{@code extent.width} is equal to the {@code width} of the {@code framebuffer} in the {@link VkRenderPassBeginInfo}.
  • *
  • either the {@code extent.height} member in {@code renderArea} is a multiple of the vertical granularity or {@code offset.y}+{@code extent.height} is equal to the {@code height} of the {@code framebuffer} in the {@link VkRenderPassBeginInfo}.
  • *
* *

Subpass dependencies are not affected by the render area, and apply to the entire image subresources attached to the framebuffer as specified in the description of automatic layout transitions. Similarly, pipeline barriers are valid even if their effect extends outside the render area.

* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code renderPass} must be a valid {@code VkRenderPass} handle
  • *
  • {@code pGranularity} must be a valid pointer to a {@link VkExtent2D} structure
  • *
  • {@code renderPass} must have been created, allocated, or retrieved from {@code device}
  • *
* *
See Also
* *

{@link VkExtent2D}

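* *
Code Example
* *

A short LWJGL sketch (editorial illustration), assuming valid {@code device} and {@code renderPass} handles.

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkExtent2D granularity = VkExtent2D.callocStack(stack);
     *     vkGetRenderAreaGranularity(device, renderPass, granularity);
     *     // Align renderArea.offset to these values (and its extent to them, or to the
     *     // framebuffer edge) to satisfy the optimal render area conditions listed above.
     *     int gw = granularity.width();
     *     int gh = granularity.height();
     * }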
* * @param device the logical device that owns the render pass. * @param renderPass a handle to a render pass. * @param pGranularity points to a {@link VkExtent2D} structure in which the granularity is returned. */ public static void vkGetRenderAreaGranularity(VkDevice device, @NativeType("VkRenderPass") long renderPass, @NativeType("VkExtent2D *") VkExtent2D pGranularity) { nvkGetRenderAreaGranularity(device, renderPass, pGranularity.address()); } // --- [ vkCreateCommandPool ] --- /** Unsafe version of: {@link #vkCreateCommandPool CreateCommandPool} */ public static int nvkCreateCommandPool(VkDevice device, long pCreateInfo, long pAllocator, long pCommandPool) { long __functionAddress = device.getCapabilities().vkCreateCommandPool; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo, pAllocator, pCommandPool); } /** * Create a new command pool object. * *
C Specification
* *

To create a command pool, call:

* *
     * VkResult vkCreateCommandPool(
     *     VkDevice                                    device,
     *     const VkCommandPoolCreateInfo*              pCreateInfo,
     *     const VkAllocationCallbacks*                pAllocator,
     *     VkCommandPool*                              pCommandPool);
* *
Valid Usage (Implicit)
* *
    *
  • {@code device} must be a valid {@code VkDevice} handle
  • *
  • {@code pCreateInfo} must be a valid pointer to a valid {@link VkCommandPoolCreateInfo} structure
  • *
  • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
  • *
  • {@code pCommandPool} must be a valid pointer to a {@code VkCommandPool} handle
  • *
* *
Return Codes
* *
*
On success, this command returns
*
    *
  • {@link #VK_SUCCESS SUCCESS}
  • *
*
On failure, this command returns
*
    *
  • {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}
  • *
  • {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
  • *
*
* *
See Also
* *

{@link VkAllocationCallbacks}, {@link VkCommandPoolCreateInfo}

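* *
Code Example
* *

A minimal LWJGL sketch (editorial illustration), assuming a valid {@code VkDevice} {@code device} and a queue family index {@code graphicsQueueFamilyIndex} (placeholder name) obtained earlier from {@link #vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties}.

* *
     * // Assumes: import org.lwjgl.vulkan.*; import org.lwjgl.system.MemoryStack;
     * // import java.nio.LongBuffer; and a static import of org.lwjgl.vulkan.VK10.*.
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkCommandPoolCreateInfo createInfo = VkCommandPoolCreateInfo.callocStack(stack)
     *         .sType(VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO)
     *         .flags(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT) // allow per-buffer resets
     *         .queueFamilyIndex(graphicsQueueFamilyIndex);            // hypothetical queue family index
     *
     *     LongBuffer pCommandPool = stack.mallocLong(1);
     *     int err = vkCreateCommandPool(device, createInfo, null, pCommandPool);
     *     if (err != VK_SUCCESS) {
     *         throw new IllegalStateException("vkCreateCommandPool failed: " + err);
     *     }
     *     long commandPool = pCommandPool.get(0);
     * }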
* * @param device the logical device that creates the command pool. * @param pCreateInfo contains information used to create the command pool. * @param pAllocator controls host memory allocation as described in the Memory Allocation chapter. * @param pCommandPool points to a {@code VkCommandPool} handle in which the created pool is returned. */ @NativeType("VkResult") public static int vkCreateCommandPool(VkDevice device, @NativeType("const VkCommandPoolCreateInfo *") VkCommandPoolCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkCommandPool *") LongBuffer pCommandPool) { if (CHECKS) { check(pCommandPool, 1); } return nvkCreateCommandPool(device, pCreateInfo.address(), memAddressSafe(pAllocator), memAddress(pCommandPool)); } // --- [ vkDestroyCommandPool ] --- /** Unsafe version of: {@link #vkDestroyCommandPool DestroyCommandPool} */ public static void nvkDestroyCommandPool(VkDevice device, long commandPool, long pAllocator) { long __functionAddress = device.getCapabilities().vkDestroyCommandPool; if (CHECKS) { if (pAllocator != NULL) { VkAllocationCallbacks.validate(pAllocator); } } callPJPV(__functionAddress, device.address(), commandPool, pAllocator); } /** * Destroy a command pool object. * *
     * C Specification
     *
     * To destroy a command pool, call:
     *
     *     void vkDestroyCommandPool(
     *         VkDevice                                    device,
     *         VkCommandPool                               commandPool,
     *         const VkAllocationCallbacks*                pAllocator);
     *
     * Description
     *
     * When a pool is destroyed, all command buffers allocated from the pool are freed.
     *
     * Any primary command buffer allocated from another {@code VkCommandPool} that is in the recording or executable state and has a secondary command buffer allocated from {@code commandPool} recorded into it, becomes invalid.
     *
     * Valid Usage
     *   • All {@code VkCommandBuffer} objects allocated from {@code commandPool} must not be in the pending state
     *   • If {@link VkAllocationCallbacks} were provided when {@code commandPool} was created, a compatible set of callbacks must be provided here
     *   • If no {@link VkAllocationCallbacks} were provided when {@code commandPool} was created, {@code pAllocator} must be {@code NULL}
     *
     * Valid Usage (Implicit)
     *   • {@code device} must be a valid {@code VkDevice} handle
     *   • If {@code commandPool} is not {@link #VK_NULL_HANDLE NULL_HANDLE}, {@code commandPool} must be a valid {@code VkCommandPool} handle
     *   • If {@code pAllocator} is not {@code NULL}, {@code pAllocator} must be a valid pointer to a valid {@link VkAllocationCallbacks} structure
     *   • If {@code commandPool} is a valid handle, it must have been created, allocated, or retrieved from {@code device}
     *
     * Host Synchronization
     *   • Host access to {@code commandPool} must be externally synchronized
     *
     * See Also
     *   {@link VkAllocationCallbacks}

     *
     * @param device      the logical device that destroys the command pool.
     * @param commandPool the handle of the command pool to destroy.
     * @param pAllocator  controls host memory allocation as described in the Memory Allocation chapter.
     */
    public static void vkDestroyCommandPool(VkDevice device, @NativeType("VkCommandPool") long commandPool, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator) {
        nvkDestroyCommandPool(device, commandPool, memAddressSafe(pAllocator));
    }

    // --- [ vkResetCommandPool ] ---

    /**
     * Reset a command pool.
     *
     * C Specification
     *
     * To reset a command pool, call:
     *
     *     VkResult vkResetCommandPool(
     *         VkDevice                                    device,
     *         VkCommandPool                               commandPool,
     *         VkCommandPoolResetFlags                     flags);
     *
     * Description
     *
     * Resetting a command pool recycles all of the resources from all of the command buffers allocated from the command pool back to the command pool. All command buffers that have been allocated from the command pool are put in the initial state.
     *
     * Any primary command buffer allocated from another {@code VkCommandPool} that is in the recording or executable state and has a secondary command buffer allocated from {@code commandPool} recorded into it, becomes invalid.
     *
     * Valid Usage
     *   • All {@code VkCommandBuffer} objects allocated from {@code commandPool} must not be in the pending state
     *
     * Valid Usage (Implicit)
     *   • {@code device} must be a valid {@code VkDevice} handle
     *   • {@code commandPool} must be a valid {@code VkCommandPool} handle
     *   • {@code flags} must be a valid combination of {@code VkCommandPoolResetFlagBits} values
     *   • {@code commandPool} must have been created, allocated, or retrieved from {@code device}
     *
     * Host Synchronization
     *   • Host access to {@code commandPool} must be externally synchronized
     *
     * Return Codes
     *   • On success: {@link #VK_SUCCESS SUCCESS}
     *   • On failure: {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}, {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
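     *
     * A usage sketch (assumption: every command buffer allocated from {@code commandPool} has finished executing, e.g. after waiting on a fence):
     *
     *     // recycle all command buffers allocated from the pool back to the initial state
     *     int err = vkResetCommandPool(device, commandPool, 0);
     *     // or additionally return the pool's internal allocations to the system:
     *     // int err = vkResetCommandPool(device, commandPool, VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT);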
     *
     * @param device      the logical device that owns the command pool.
     * @param commandPool the command pool to reset.
     * @param flags       a bitmask of {@code VkCommandPoolResetFlagBits} controlling the reset operation.
     */
    @NativeType("VkResult")
    public static int vkResetCommandPool(VkDevice device, @NativeType("VkCommandPool") long commandPool, @NativeType("VkCommandPoolResetFlags") int flags) {
        long __functionAddress = device.getCapabilities().vkResetCommandPool;
        return callPJI(__functionAddress, device.address(), commandPool, flags);
    }

    // --- [ vkAllocateCommandBuffers ] ---

    /** Unsafe version of: {@link #vkAllocateCommandBuffers AllocateCommandBuffers} */
    public static int nvkAllocateCommandBuffers(VkDevice device, long pAllocateInfo, long pCommandBuffers) {
        long __functionAddress = device.getCapabilities().vkAllocateCommandBuffers;
        return callPPPI(__functionAddress, device.address(), pAllocateInfo, pCommandBuffers);
    }

    /**
     * Allocate command buffers from an existing command pool.
     *
     * C Specification
     *
     * To allocate command buffers, call:
     *
     *     VkResult vkAllocateCommandBuffers(
     *         VkDevice                                    device,
     *         const VkCommandBufferAllocateInfo*          pAllocateInfo,
     *         VkCommandBuffer*                            pCommandBuffers);
     *
     * Description
     *
     * {@link #vkAllocateCommandBuffers AllocateCommandBuffers} can be used to create multiple command buffers. If the creation of any of those command buffers fails, the implementation must destroy all successfully created command buffer objects from this command, set all entries of the {@code pCommandBuffers} array to {@code NULL} and return the error.
     *
     * When command buffers are first allocated, they are in the initial state.
     *
     * Valid Usage (Implicit)
     *   • {@code device} must be a valid {@code VkDevice} handle
     *   • {@code pAllocateInfo} must be a valid pointer to a valid {@link VkCommandBufferAllocateInfo} structure
     *   • {@code pCommandBuffers} must be a valid pointer to an array of {@code pAllocateInfo}::commandBufferCount {@code VkCommandBuffer} handles
     *
     * Host Synchronization
     *   • Host access to {@code pAllocateInfo}::commandPool must be externally synchronized
     *
     * Return Codes
     *   • On success: {@link #VK_SUCCESS SUCCESS}
     *   • On failure: {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}, {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
     *
     * See Also
     *   {@link VkCommandBufferAllocateInfo}
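     *
     * A usage sketch (not part of the generated bindings; assumes {@code device} and {@code commandPool} handles created earlier, and uses {@code callocStack}, spelled {@code calloc(stack)} in newer LWJGL releases):
     *
     *     try (MemoryStack stack = stackPush()) {
     *         VkCommandBufferAllocateInfo allocInfo = VkCommandBufferAllocateInfo.callocStack(stack)
     *             .sType(VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO)
     *             .commandPool(commandPool)
     *             .level(VK_COMMAND_BUFFER_LEVEL_PRIMARY)
     *             .commandBufferCount(1);
     *         PointerBuffer pCommandBuffers = stack.mallocPointer(1);
     *         int err = vkAllocateCommandBuffers(device, allocInfo, pCommandBuffers);
     *         // wrap the returned handle together with its dispatching device
     *         VkCommandBuffer commandBuffer = new VkCommandBuffer(pCommandBuffers.get(0), device);
     *     }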

     *
     * @param device          the logical device that owns the command pool.
     * @param pAllocateInfo   a pointer to an instance of the {@link VkCommandBufferAllocateInfo} structure describing parameters of the allocation.
     * @param pCommandBuffers a pointer to an array of {@code VkCommandBuffer} handles in which the resulting command buffer objects are returned. The array must be at least the length specified by the {@code commandBufferCount} member of {@code pAllocateInfo}. Each allocated command buffer begins in the initial state.
     */
    @NativeType("VkResult")
    public static int vkAllocateCommandBuffers(VkDevice device, @NativeType("const VkCommandBufferAllocateInfo *") VkCommandBufferAllocateInfo pAllocateInfo, @NativeType("VkCommandBuffer *") PointerBuffer pCommandBuffers) {
        if (CHECKS) {
            check(pCommandBuffers, pAllocateInfo.commandBufferCount());
        }
        return nvkAllocateCommandBuffers(device, pAllocateInfo.address(), memAddress(pCommandBuffers));
    }

    // --- [ vkFreeCommandBuffers ] ---

    /**
     * Unsafe version of: {@link #vkFreeCommandBuffers FreeCommandBuffers}
     *
     * @param commandBufferCount the length of the {@code pCommandBuffers} array.
     */
    public static void nvkFreeCommandBuffers(VkDevice device, long commandPool, int commandBufferCount, long pCommandBuffers) {
        long __functionAddress = device.getCapabilities().vkFreeCommandBuffers;
        callPJPV(__functionAddress, device.address(), commandPool, commandBufferCount, pCommandBuffers);
    }

    /**
     * Free command buffers.
     *
     * C Specification
     *
     * To free command buffers, call:
     *
     *     void vkFreeCommandBuffers(
     *         VkDevice                                    device,
     *         VkCommandPool                               commandPool,
     *         uint32_t                                    commandBufferCount,
     *         const VkCommandBuffer*                      pCommandBuffers);
     *
     * Description
     *
     * Any primary command buffer that is in the recording or executable state and has any element of {@code pCommandBuffers} recorded into it, becomes invalid.
     *
     * Valid Usage
     *   • All elements of {@code pCommandBuffers} must not be in the pending state
     *   • {@code pCommandBuffers} must be a valid pointer to an array of {@code commandBufferCount} {@code VkCommandBuffer} handles, each element of which must either be a valid handle or {@code NULL}
     *
     * Valid Usage (Implicit)
     *   • {@code device} must be a valid {@code VkDevice} handle
     *   • {@code commandPool} must be a valid {@code VkCommandPool} handle
     *   • {@code commandBufferCount} must be greater than 0
     *   • {@code commandPool} must have been created, allocated, or retrieved from {@code device}
     *   • Each element of {@code pCommandBuffers} that is a valid handle must have been created, allocated, or retrieved from {@code commandPool}
     *
     * Host Synchronization
     *   • Host access to {@code commandPool} must be externally synchronized
     *   • Host access to each member of {@code pCommandBuffers} must be externally synchronized
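     *
     * A usage sketch (assumes {@code commandBuffer} is no longer pending execution):
     *
     *     try (MemoryStack stack = stackPush()) {
     *         vkFreeCommandBuffers(device, commandPool, stack.pointers(commandBuffer));
     *     }
     *     // or, for a single command buffer, the convenience overload further below:
     *     // vkFreeCommandBuffers(device, commandPool, commandBuffer);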
     *
     * @param device          the logical device that owns the command pool.
     * @param commandPool     the command pool from which the command buffers were allocated.
     * @param pCommandBuffers an array of handles of command buffers to free.
     */
    public static void vkFreeCommandBuffers(VkDevice device, @NativeType("VkCommandPool") long commandPool, @NativeType("const VkCommandBuffer *") PointerBuffer pCommandBuffers) {
        nvkFreeCommandBuffers(device, commandPool, pCommandBuffers.remaining(), memAddress(pCommandBuffers));
    }

    /**
     * Free command buffers.
     *
     * Single command buffer convenience overload of {@link #vkFreeCommandBuffers FreeCommandBuffers}; the C specification, valid usage, and host synchronization requirements listed above apply here as well.
     *
     * @param device         the logical device that owns the command pool.
     * @param commandPool    the command pool from which the command buffer was allocated.
     * @param pCommandBuffer the handle of the command buffer to free.
     */
    public static void vkFreeCommandBuffers(VkDevice device, @NativeType("VkCommandPool") long commandPool, @NativeType("const VkCommandBuffer *") VkCommandBuffer pCommandBuffer) {
        MemoryStack stack = stackGet();
        int stackPointer = stack.getPointer();
        try {
            PointerBuffer pCommandBuffers = stack.pointers(pCommandBuffer);
            nvkFreeCommandBuffers(device, commandPool, 1, memAddress(pCommandBuffers));
        } finally {
            stack.setPointer(stackPointer);
        }
    }

    // --- [ vkBeginCommandBuffer ] ---

    /** Unsafe version of: {@link #vkBeginCommandBuffer BeginCommandBuffer} */
    public static int nvkBeginCommandBuffer(VkCommandBuffer commandBuffer, long pBeginInfo) {
        long __functionAddress = commandBuffer.getCapabilities().vkBeginCommandBuffer;
        return callPPI(__functionAddress, commandBuffer.address(), pBeginInfo);
    }

    /**
     * Start recording a command buffer.
     *
     * C Specification
     *
     * To begin recording a command buffer, call:
     *
     *     VkResult vkBeginCommandBuffer(
     *         VkCommandBuffer                             commandBuffer,
     *         const VkCommandBufferBeginInfo*             pBeginInfo);
     *
     * Valid Usage
     *   • {@code commandBuffer} must not be in the recording or pending state
     *   • If {@code commandBuffer} was allocated from a {@code VkCommandPool} which did not have the {@link #VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT} flag set, {@code commandBuffer} must be in the initial state
     *   • If {@code commandBuffer} is a secondary command buffer, the {@code pInheritanceInfo} member of {@code pBeginInfo} must be a valid {@link VkCommandBufferInheritanceInfo} structure
     *   • If {@code commandBuffer} is a secondary command buffer and either the {@code occlusionQueryEnable} member of the {@code pInheritanceInfo} member of {@code pBeginInfo} is {@link #VK_FALSE FALSE}, or the precise occlusion queries feature is not enabled, the {@code queryFlags} member of the {@code pInheritanceInfo} member of {@code pBeginInfo} must not contain {@link #VK_QUERY_CONTROL_PRECISE_BIT QUERY_CONTROL_PRECISE_BIT}
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code pBeginInfo} must be a valid pointer to a valid {@link VkCommandBufferBeginInfo} structure
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Return Codes
     *   • On success: {@link #VK_SUCCESS SUCCESS}
     *   • On failure: {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}, {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
     *
     * See Also
     *   {@link VkCommandBufferBeginInfo}
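     *
     * A recording sketch (assumes {@code commandBuffer} was allocated as shown earlier and is in the initial state; {@code callocStack} is spelled {@code calloc(stack)} in newer LWJGL releases):
     *
     *     try (MemoryStack stack = stackPush()) {
     *         VkCommandBufferBeginInfo beginInfo = VkCommandBufferBeginInfo.callocStack(stack)
     *             .sType(VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO)
     *             .flags(VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT);
     *         int err = vkBeginCommandBuffer(commandBuffer, beginInfo);
     *         // ... record commands here ...
     *         err = vkEndCommandBuffer(commandBuffer);
     *     }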

     *
     * @param commandBuffer the handle of the command buffer which is to be put in the recording state.
     * @param pBeginInfo    an instance of the {@link VkCommandBufferBeginInfo} structure, which defines additional information about how the command buffer begins recording.
     */
    @NativeType("VkResult")
    public static int vkBeginCommandBuffer(VkCommandBuffer commandBuffer, @NativeType("const VkCommandBufferBeginInfo *") VkCommandBufferBeginInfo pBeginInfo) {
        return nvkBeginCommandBuffer(commandBuffer, pBeginInfo.address());
    }

    // --- [ vkEndCommandBuffer ] ---

    /**
     * Finish recording a command buffer.
     *
     * C Specification
     *
     * To complete recording of a command buffer, call:
     *
     *     VkResult vkEndCommandBuffer(
     *         VkCommandBuffer                             commandBuffer);
     *
     * Description
     *
     * If there was an error during recording, the application will be notified by an unsuccessful return code returned by {@link #vkEndCommandBuffer EndCommandBuffer}. If the application wishes to further use the command buffer, the command buffer must be reset. The command buffer must have been in the recording state, and is moved to the executable state.
     *
     * Valid Usage
     *   • {@code commandBuffer} must be in the recording state
     *   • If {@code commandBuffer} is a primary command buffer, there must not be an active render pass instance
     *   • All queries made active during the recording of {@code commandBuffer} must have been made inactive
     *   • If {@code commandBuffer} is a secondary command buffer, there must not be an outstanding {@link EXTDebugMarker#vkCmdDebugMarkerBeginEXT CmdDebugMarkerBeginEXT} command recorded to {@code commandBuffer} that has not previously been ended by a call to {@link EXTDebugMarker#vkCmdDebugMarkerEndEXT CmdDebugMarkerEndEXT}
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Return Codes
     *   • On success: {@link #VK_SUCCESS SUCCESS}
     *   • On failure: {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}, {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
     *
     * @param commandBuffer the command buffer to complete recording.
     */
    @NativeType("VkResult")
    public static int vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
        long __functionAddress = commandBuffer.getCapabilities().vkEndCommandBuffer;
        return callPI(__functionAddress, commandBuffer.address());
    }

    // --- [ vkResetCommandBuffer ] ---

    /**
     * Reset a command buffer to the initial state.
     *
     * C Specification
     *
     * To reset command buffers, call:
     *
     *     VkResult vkResetCommandBuffer(
     *         VkCommandBuffer                             commandBuffer,
     *         VkCommandBufferResetFlags                   flags);
     *
     * Description
     *
     * Any primary command buffer that is in the recording or executable state and has {@code commandBuffer} recorded into it, becomes invalid.
     *
     * Valid Usage
     *   • {@code commandBuffer} must not be in the pending state
     *   • {@code commandBuffer} must have been allocated from a pool that was created with the {@link #VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT} flag
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code flags} must be a valid combination of {@code VkCommandBufferResetFlagBits} values
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *
     * Return Codes
     *   • On success: {@link #VK_SUCCESS SUCCESS}
     *   • On failure: {@link #VK_ERROR_OUT_OF_HOST_MEMORY ERROR_OUT_OF_HOST_MEMORY}, {@link #VK_ERROR_OUT_OF_DEVICE_MEMORY ERROR_OUT_OF_DEVICE_MEMORY}
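     *
     * A usage sketch (assumes the pool was created with {@link #VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT} and the buffer is not pending):
     *
     *     // return the command buffer to the initial state and release its resources
     *     int err = vkResetCommandBuffer(commandBuffer, VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT);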
     *
     * @param commandBuffer the command buffer to reset. The command buffer can be in any state other than pending, and is moved into the initial state.
     * @param flags         a bitmask of {@code VkCommandBufferResetFlagBits} controlling the reset operation.
     */
    @NativeType("VkResult")
    public static int vkResetCommandBuffer(VkCommandBuffer commandBuffer, @NativeType("VkCommandBufferResetFlags") int flags) {
        long __functionAddress = commandBuffer.getCapabilities().vkResetCommandBuffer;
        return callPI(__functionAddress, commandBuffer.address(), flags);
    }

    // --- [ vkCmdBindPipeline ] ---

    /**
     * Bind a pipeline object to a command buffer.
     *
     * C Specification
     *
     * Once a pipeline has been created, it can be bound to the command buffer using the command:
     *
     *     void vkCmdBindPipeline(
     *         VkCommandBuffer                             commandBuffer,
     *         VkPipelineBindPoint                         pipelineBindPoint,
     *         VkPipeline                                  pipeline);
     *
     * Description
     *
     * Once bound, a pipeline binding affects subsequent graphics or compute commands in the command buffer until a different pipeline is bound to the bind point. The pipeline bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} controls the behavior of {@link #vkCmdDispatch CmdDispatch} and {@link #vkCmdDispatchIndirect CmdDispatchIndirect}. The pipeline bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} controls the behavior of all drawing commands. No other commands are affected by the pipeline state.
     *
     * Valid Usage
     *   • If {@code pipelineBindPoint} is {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}, the {@code VkCommandPool} that {@code commandBuffer} was allocated from must support compute operations
     *   • If {@code pipelineBindPoint} is {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, the {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
     *   • If {@code pipelineBindPoint} is {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}, {@code pipeline} must be a compute pipeline
     *   • If {@code pipelineBindPoint} is {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, {@code pipeline} must be a graphics pipeline
     *   • If the variable multisample rate feature is not supported, {@code pipeline} is a graphics pipeline, the current subpass has no attachments, and this is not the first call to this function with a graphics pipeline after transitioning to the current subpass, then the sample count specified by this pipeline must match that set in the previous pipeline
     *   • If {@link VkPhysicalDeviceSampleLocationsPropertiesEXT}{@code ::variableSampleLocations} is {@link #VK_FALSE FALSE}, and {@code pipeline} is a graphics pipeline created with a {@link VkPipelineSampleLocationsStateCreateInfoEXT} structure having its {@code sampleLocationsEnable} member set to {@link #VK_TRUE TRUE} but without {@link EXTSampleLocations#VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT} enabled, then the current render pass instance must have been begun by specifying a {@link VkRenderPassSampleLocationsBeginInfoEXT} structure whose {@code pPostSubpassSampleLocations} member contains an element with a {@code subpassIndex} matching the current subpass index, and the {@code sampleLocationsInfo} member of that element must match the {@code sampleLocationsInfo} specified in {@link VkPipelineSampleLocationsStateCreateInfoEXT} when the pipeline was created
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code pipelineBindPoint} must be a valid {@code VkPipelineBindPoint} value
     *   • {@code pipeline} must be a valid {@code VkPipeline} handle
     *   • {@code commandBuffer} must be in the recording state
     *   • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics or compute operations
     *   • Both {@code commandBuffer} and {@code pipeline} must have been created, allocated, or retrieved from the same {@code VkDevice}
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Command Properties
     *   Command Buffer Levels: Primary, Secondary; Render Pass Scope: Both; Supported Queue Types: Graphics, Compute
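     *
     * A recording sketch (assumes {@code commandBuffer} is in the recording state and {@code graphicsPipeline}/{@code computePipeline} are previously created pipeline handles):
     *
     *     vkCmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, graphicsPipeline);
     *     // the compute bind point is independent; binding one does not disturb the other
     *     vkCmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline);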
     *
     * @param commandBuffer     the command buffer that the pipeline will be bound to.
     * @param pipelineBindPoint a {@code VkPipelineBindPoint} value specifying whether to bind to the compute or graphics bind point. Binding one does not disturb the other.
     * @param pipeline          the pipeline to be bound.
     */
    public static void vkCmdBindPipeline(VkCommandBuffer commandBuffer, @NativeType("VkPipelineBindPoint") int pipelineBindPoint, @NativeType("VkPipeline") long pipeline) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdBindPipeline;
        callPJV(__functionAddress, commandBuffer.address(), pipelineBindPoint, pipeline);
    }

    // --- [ vkCmdSetViewport ] ---

    /**
     * Unsafe version of: {@link #vkCmdSetViewport CmdSetViewport}
     *
     * @param viewportCount the number of viewports whose parameters are updated by the command.
     */
    public static void nvkCmdSetViewport(VkCommandBuffer commandBuffer, int firstViewport, int viewportCount, long pViewports) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdSetViewport;
        callPPV(__functionAddress, commandBuffer.address(), firstViewport, viewportCount, pViewports);
    }

    /**
     * Set the viewport on a command buffer.
     *
     * C Specification
     *
     * If the bound pipeline state object was not created with the {@link #VK_DYNAMIC_STATE_VIEWPORT DYNAMIC_STATE_VIEWPORT} dynamic state enabled, viewport transformation parameters are specified using the {@code pViewports} member of {@link VkPipelineViewportStateCreateInfo} in the pipeline state object. If the pipeline state object was created with the {@link #VK_DYNAMIC_STATE_VIEWPORT DYNAMIC_STATE_VIEWPORT} dynamic state enabled, the viewport transformation parameters are dynamically set and changed with the command:
     *
     *     void vkCmdSetViewport(
     *         VkCommandBuffer                             commandBuffer,
     *         uint32_t                                    firstViewport,
     *         uint32_t                                    viewportCount,
     *         const VkViewport*                           pViewports);
     *
     * Description
     *
     * The viewport parameters taken from element i of {@code pViewports} replace the current state for the viewport index firstViewport + i, for i in [0, viewportCount).
     *
     * Valid Usage
     *   • The currently bound graphics pipeline must have been created with the {@link #VK_DYNAMIC_STATE_VIEWPORT DYNAMIC_STATE_VIEWPORT} dynamic state enabled
     *   • {@code firstViewport} must be less than {@link VkPhysicalDeviceLimits}{@code ::maxViewports}
     *   • The sum of {@code firstViewport} and {@code viewportCount} must be between 1 and {@link VkPhysicalDeviceLimits}{@code ::maxViewports}, inclusive
     *   • If the multiple viewports feature is not enabled, {@code firstViewport} must be 0
     *   • If the multiple viewports feature is not enabled, {@code viewportCount} must be 1
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code pViewports} must be a valid pointer to an array of {@code viewportCount} {@link VkViewport} structures
     *   • {@code commandBuffer} must be in the recording state
     *   • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
     *   • {@code viewportCount} must be greater than 0
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Command Properties
     *   Command Buffer Levels: Primary, Secondary; Render Pass Scope: Both; Supported Queue Types: Graphics
     *
     * See Also
     *   {@link VkViewport}
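     *
     * A sketch for a pipeline created with {@link #VK_DYNAMIC_STATE_VIEWPORT DYNAMIC_STATE_VIEWPORT} enabled ({@code width}/{@code height} are assumed swapchain dimensions; {@code callocStack} is spelled {@code calloc(stack)} in newer LWJGL releases):
     *
     *     try (MemoryStack stack = stackPush()) {
     *         VkViewport.Buffer viewport = VkViewport.callocStack(1, stack)
     *             .x(0.0f).y(0.0f)
     *             .width(width).height(height)
     *             .minDepth(0.0f).maxDepth(1.0f);
     *         vkCmdSetViewport(commandBuffer, 0, viewport);
     *     }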

     *
     * @param commandBuffer the command buffer into which the command will be recorded.
     * @param firstViewport the index of the first viewport whose parameters are updated by the command.
     * @param pViewports    a pointer to an array of {@link VkViewport} structures specifying viewport parameters.
     */
    public static void vkCmdSetViewport(VkCommandBuffer commandBuffer, @NativeType("uint32_t") int firstViewport, @NativeType("const VkViewport *") VkViewport.Buffer pViewports) {
        nvkCmdSetViewport(commandBuffer, firstViewport, pViewports.remaining(), pViewports.address());
    }

    // --- [ vkCmdSetScissor ] ---

    /**
     * Unsafe version of: {@link #vkCmdSetScissor CmdSetScissor}
     *
     * @param scissorCount the number of scissors whose rectangles are updated by the command.
     */
    public static void nvkCmdSetScissor(VkCommandBuffer commandBuffer, int firstScissor, int scissorCount, long pScissors) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdSetScissor;
        callPPV(__functionAddress, commandBuffer.address(), firstScissor, scissorCount, pScissors);
    }

    /**
     * Set the dynamic scissor rectangles on a command buffer.
     *
     * C Specification
     *
     * The scissor test determines if a fragment's framebuffer coordinates (xf, yf) lie within the scissor rectangle corresponding to the viewport index (see Controlling the Viewport) used by the primitive that generated the fragment. If the pipeline state object is created without {@link #VK_DYNAMIC_STATE_SCISSOR DYNAMIC_STATE_SCISSOR} enabled then the scissor rectangles are set by the {@link VkPipelineViewportStateCreateInfo} state of the pipeline state object. Otherwise, to dynamically set the scissor rectangles call:
     *
     *     void vkCmdSetScissor(
     *         VkCommandBuffer                             commandBuffer,
     *         uint32_t                                    firstScissor,
     *         uint32_t                                    scissorCount,
     *         const VkRect2D*                             pScissors);
     *
     * Description
     *
     * The scissor rectangles taken from element i of {@code pScissors} replace the current state for the scissor index firstScissor + i, for i in [0, scissorCount).
     *
     * Each scissor rectangle is described by a {@link VkRect2D} structure, with the {@code offset.x} and {@code offset.y} values determining the upper left corner of the scissor rectangle, and the {@code extent.width} and {@code extent.height} values determining the size in pixels.
     *
     * Valid Usage
     *   • The currently bound graphics pipeline must have been created with the {@link #VK_DYNAMIC_STATE_SCISSOR DYNAMIC_STATE_SCISSOR} dynamic state enabled
     *   • {@code firstScissor} must be less than {@link VkPhysicalDeviceLimits}{@code ::maxViewports}
     *   • The sum of {@code firstScissor} and {@code scissorCount} must be between 1 and {@link VkPhysicalDeviceLimits}{@code ::maxViewports}, inclusive
     *   • If the multiple viewports feature is not enabled, {@code firstScissor} must be 0
     *   • If the multiple viewports feature is not enabled, {@code scissorCount} must be 1
     *   • The {@code x} and {@code y} members of {@code offset} must be greater than or equal to 0
     *   • Evaluation of (offset.x + extent.width) must not cause a signed integer addition overflow
     *   • Evaluation of (offset.y + extent.height) must not cause a signed integer addition overflow
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code pScissors} must be a valid pointer to an array of {@code scissorCount} {@link VkRect2D} structures
     *   • {@code commandBuffer} must be in the recording state
     *   • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
     *   • {@code scissorCount} must be greater than 0
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Command Properties
     *   Command Buffer Levels: Primary, Secondary; Render Pass Scope: Both; Supported Queue Types: Graphics
     *
     * See Also
     *   {@link VkRect2D}
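     *
     * A sketch for a pipeline created with {@link #VK_DYNAMIC_STATE_SCISSOR DYNAMIC_STATE_SCISSOR} enabled ({@code width}/{@code height} are assumed framebuffer dimensions):
     *
     *     try (MemoryStack stack = stackPush()) {
     *         VkRect2D.Buffer scissor = VkRect2D.callocStack(1, stack);
     *         scissor.offset().set(0, 0);
     *         scissor.extent().set(width, height);
     *         vkCmdSetScissor(commandBuffer, 0, scissor);
     *     }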

     *
     * @param commandBuffer the command buffer into which the command will be recorded.
     * @param firstScissor  the index of the first scissor whose state is updated by the command.
     * @param pScissors     a pointer to an array of {@link VkRect2D} structures defining scissor rectangles.
     */
    public static void vkCmdSetScissor(VkCommandBuffer commandBuffer, @NativeType("uint32_t") int firstScissor, @NativeType("const VkRect2D *") VkRect2D.Buffer pScissors) {
        nvkCmdSetScissor(commandBuffer, firstScissor, pScissors.remaining(), pScissors.address());
    }

    // --- [ vkCmdSetLineWidth ] ---

    /**
     * Set the dynamic line width state.
     *
     * C Specification
     *
     * The line width is specified by the {@link VkPipelineRasterizationStateCreateInfo}{@code ::lineWidth} property of the currently active pipeline, if the pipeline was not created with {@link #VK_DYNAMIC_STATE_LINE_WIDTH DYNAMIC_STATE_LINE_WIDTH} enabled.
     *
     * Otherwise, the line width is set by calling {@link #vkCmdSetLineWidth CmdSetLineWidth}:
     *
     *     void vkCmdSetLineWidth(
     *         VkCommandBuffer                             commandBuffer,
     *         float                                       lineWidth);
     *
     * Valid Usage
     *   • The currently bound graphics pipeline must have been created with the {@link #VK_DYNAMIC_STATE_LINE_WIDTH DYNAMIC_STATE_LINE_WIDTH} dynamic state enabled
     *   • If the wide lines feature is not enabled, {@code lineWidth} must be {@code 1.0}
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code commandBuffer} must be in the recording state
     *   • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Command Properties
     *   Command Buffer Levels: Primary, Secondary; Render Pass Scope: Both; Supported Queue Types: Graphics
     *
     * @param commandBuffer the command buffer into which the command will be recorded.
     * @param lineWidth     the width of rasterized line segments.
     */
    public static void vkCmdSetLineWidth(VkCommandBuffer commandBuffer, float lineWidth) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdSetLineWidth;
        callPV(__functionAddress, commandBuffer.address(), lineWidth);
    }

    // --- [ vkCmdSetDepthBias ] ---

    /**
     * Set the depth bias dynamic state.
     *
     * C Specification
     *
     * The depth values of all fragments generated by the rasterization of a polygon can be offset by a single value that is computed for that polygon. This behavior is controlled by the {@code depthBiasEnable}, {@code depthBiasConstantFactor}, {@code depthBiasClamp}, and {@code depthBiasSlopeFactor} members of {@link VkPipelineRasterizationStateCreateInfo}, or by the corresponding parameters to the {@link #vkCmdSetDepthBias CmdSetDepthBias} command if depth bias state is dynamic.
     *
     *     void vkCmdSetDepthBias(
     *         VkCommandBuffer                             commandBuffer,
     *         float                                       depthBiasConstantFactor,
     *         float                                       depthBiasClamp,
     *         float                                       depthBiasSlopeFactor);
     *
     * Description
     *
     * If {@code depthBiasEnable} is {@link #VK_FALSE FALSE}, no depth bias is applied and the fragment's depth values are unchanged.
     *
     * {@code depthBiasSlopeFactor} scales the maximum depth slope of the polygon, and {@code depthBiasConstantFactor} scales an implementation-dependent constant that relates to the usable resolution of the depth buffer. The resulting values are summed to produce the depth bias value which is then clamped to a minimum or maximum value specified by {@code depthBiasClamp}. {@code depthBiasSlopeFactor}, {@code depthBiasConstantFactor}, and {@code depthBiasClamp} can each be positive, negative, or zero.
     *
     * The maximum depth slope m of a triangle is
     *
     *     m = sqrt((∂zf/∂xf)² + (∂zf/∂yf)²)
     *
     * where (xf, yf, zf) is a point on the triangle. m may be approximated as
     *
     *     m = max(abs(∂zf/∂xf), abs(∂zf/∂yf))
     *
     * The minimum resolvable difference r is an implementation-dependent parameter that depends on the depth buffer representation. It is the smallest difference in framebuffer coordinate z values that is guaranteed to remain distinct throughout polygon rasterization and in the depth buffer. All pairs of fragments generated by the rasterization of two polygons with otherwise identical vertices, but zf values that differ by r, will have distinct depth values.
     *
     * For fixed-point depth buffer representations, r is constant throughout the range of the entire depth buffer. For floating-point depth buffers, there is no single minimum resolvable difference. In this case, the minimum resolvable difference for a given polygon is dependent on the maximum exponent, e, in the range of z values spanned by the primitive. If n is the number of bits in the floating-point mantissa, the minimum resolvable difference, r, for the given primitive is defined as
     *
     *     r = 2^(e − n)
     *
     * If a triangle is rasterized using the {@link NVFillRectangle#VK_POLYGON_MODE_FILL_RECTANGLE_NV POLYGON_MODE_FILL_RECTANGLE_NV} polygon mode, then this minimum resolvable difference may not be resolvable for samples outside of the triangle, where the depth is extrapolated.
     *
     * If no depth buffer is present, r is undefined.
     *
     * The bias value o for a polygon is
     *
     *             m × depthBiasSlopeFactor + r × depthBiasConstantFactor                      if depthBiasClamp = 0 or NaN
     *     o = min(m × depthBiasSlopeFactor + r × depthBiasConstantFactor, depthBiasClamp)     if depthBiasClamp > 0
     *         max(m × depthBiasSlopeFactor + r × depthBiasConstantFactor, depthBiasClamp)     if depthBiasClamp < 0
     *
     * m is computed as described above. If the depth buffer uses a fixed-point representation, m is a function of depth values in the range [0,1], and o is applied to depth values in the same range.
     *
     * For fixed-point depth buffers, fragment depth values are always limited to the range [0,1] by clamping after depth bias addition is performed. Fragment depth values are clamped even when the depth buffer uses a floating-point representation.
     *
     * Valid Usage
     *   • The currently bound graphics pipeline must have been created with the {@link #VK_DYNAMIC_STATE_DEPTH_BIAS DYNAMIC_STATE_DEPTH_BIAS} dynamic state enabled
     *   • If the depth bias clamping feature is not enabled, {@code depthBiasClamp} must be {@code 0.0}
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code commandBuffer} must be in the recording state
     *   • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Command Properties
     *   Command Buffer Levels: Primary, Secondary; Render Pass Scope: Both; Supported Queue Types: Graphics
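     *
     * A sketch for a pipeline created with {@link #VK_DYNAMIC_STATE_DEPTH_BIAS DYNAMIC_STATE_DEPTH_BIAS} enabled (the factor values are illustrative, e.g. for shadow-map rendering, and are not prescribed by the specification):
     *
     *     // depthBiasConstantFactor = 1.25, depthBiasClamp = 0.0, depthBiasSlopeFactor = 1.75
     *     vkCmdSetDepthBias(commandBuffer, 1.25f, 0.0f, 1.75f);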
     *
     * @param commandBuffer           the command buffer into which the command will be recorded.
     * @param depthBiasConstantFactor a scalar factor controlling the constant depth value added to each fragment.
     * @param depthBiasClamp          the maximum (or minimum) depth bias of a fragment.
     * @param depthBiasSlopeFactor    a scalar factor applied to a fragment’s slope in depth bias calculations.
     */
    public static void vkCmdSetDepthBias(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdSetDepthBias;
        callPV(__functionAddress, commandBuffer.address(), depthBiasConstantFactor, depthBiasClamp, depthBiasSlopeFactor);
    }

    // --- [ vkCmdSetBlendConstants ] ---

    /** Unsafe version of: {@link #vkCmdSetBlendConstants CmdSetBlendConstants} */
    public static void nvkCmdSetBlendConstants(VkCommandBuffer commandBuffer, long blendConstants) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdSetBlendConstants;
        callPPV(__functionAddress, commandBuffer.address(), blendConstants);
    }

    /**
     * Set the values of blend constants.
     *
     * C Specification
     *
     * Otherwise, to dynamically set and change the blend constant, call:
     *
     *     void vkCmdSetBlendConstants(
     *         VkCommandBuffer                             commandBuffer,
     *         const float                                 blendConstants[4]);
     *
     * Valid Usage
     *   • The currently bound graphics pipeline must have been created with the {@link #VK_DYNAMIC_STATE_BLEND_CONSTANTS DYNAMIC_STATE_BLEND_CONSTANTS} dynamic state enabled
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code commandBuffer} must be in the recording state
     *   • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Command Properties
     *   Command Buffer Levels: Primary, Secondary; Render Pass Scope: Both; Supported Queue Types: Graphics
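     *
     * A sketch for a pipeline created with {@link #VK_DYNAMIC_STATE_BLEND_CONSTANTS DYNAMIC_STATE_BLEND_CONSTANTS} enabled (the RGBA values are illustrative; exactly four floats are required):
     *
     *     try (MemoryStack stack = stackPush()) {
     *         vkCmdSetBlendConstants(commandBuffer, stack.floats(1.0f, 0.5f, 0.25f, 1.0f));
     *     }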
     *
     * @param commandBuffer  the command buffer into which the command will be recorded.
     * @param blendConstants an array of four values specifying the R, G, B, and A components of the blend constant color used in blending, depending on the blend factor.
     */
    public static void vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, @NativeType("const float *") FloatBuffer blendConstants) {
        if (CHECKS) {
            check(blendConstants, 4);
        }
        nvkCmdSetBlendConstants(commandBuffer, memAddress(blendConstants));
    }

    // --- [ vkCmdSetDepthBounds ] ---

    /**
     * Set the depth bounds test values for a command buffer.
     *
     * C Specification
     *
     * The depth bounds test conditionally disables coverage of a sample based on the outcome of a comparison between the value za in the depth attachment at location (xf, yf) (for the appropriate sample) and a range of values. The test is enabled or disabled by the {@code depthBoundsTestEnable} member of {@link VkPipelineDepthStencilStateCreateInfo}: If the pipeline state object is created without the {@link #VK_DYNAMIC_STATE_DEPTH_BOUNDS DYNAMIC_STATE_DEPTH_BOUNDS} dynamic state enabled, then the range of values used in the depth bounds test is defined by the {@code minDepthBounds} and {@code maxDepthBounds} members of the {@link VkPipelineDepthStencilStateCreateInfo} structure. Otherwise, to dynamically set the depth bounds range values call:
     *
     *     void vkCmdSetDepthBounds(
     *         VkCommandBuffer                             commandBuffer,
     *         float                                       minDepthBounds,
     *         float                                       maxDepthBounds);
     *
     * Valid Usage
     *   • The currently bound graphics pipeline must have been created with the {@link #VK_DYNAMIC_STATE_DEPTH_BOUNDS DYNAMIC_STATE_DEPTH_BOUNDS} dynamic state enabled
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code commandBuffer} must be in the recording state
     *   • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Command Properties
     *   Command Buffer Levels: Primary, Secondary; Render Pass Scope: Both; Supported Queue Types: Graphics
     *
     * @param commandBuffer  the command buffer into which the command will be recorded.
     * @param minDepthBounds the lower bound of the range of depth values used in the depth bounds test.
     * @param maxDepthBounds the upper bound of the range.
     */
    public static void vkCmdSetDepthBounds(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdSetDepthBounds;
        callPV(__functionAddress, commandBuffer.address(), minDepthBounds, maxDepthBounds);
    }

    // --- [ vkCmdSetStencilCompareMask ] ---

    /**
     * Set the stencil compare mask dynamic state.
     *
     * C Specification
     *
     * If the pipeline state object is created with the {@link #VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK DYNAMIC_STATE_STENCIL_COMPARE_MASK} dynamic state enabled, then to dynamically set the stencil compare mask call:
     *
     *     void vkCmdSetStencilCompareMask(
     *         VkCommandBuffer                             commandBuffer,
     *         VkStencilFaceFlags                          faceMask,
     *         uint32_t                                    compareMask);
     *
     * Valid Usage
     *   • The currently bound graphics pipeline must have been created with the {@link #VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK DYNAMIC_STATE_STENCIL_COMPARE_MASK} dynamic state enabled
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code faceMask} must be a valid combination of {@code VkStencilFaceFlagBits} values
     *   • {@code faceMask} must not be 0
     *   • {@code commandBuffer} must be in the recording state
     *   • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Command Properties
     *   Command Buffer Levels: Primary, Secondary; Render Pass Scope: Both; Supported Queue Types: Graphics
     *
     * @param commandBuffer the command buffer into which the command will be recorded.
     * @param faceMask      a bitmask of {@code VkStencilFaceFlagBits} specifying the set of stencil state for which to update the compare mask.
     * @param compareMask   the new value to use as the stencil compare mask.
     */
    public static void vkCmdSetStencilCompareMask(VkCommandBuffer commandBuffer, @NativeType("VkStencilFaceFlags") int faceMask, @NativeType("uint32_t") int compareMask) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdSetStencilCompareMask;
        callPV(__functionAddress, commandBuffer.address(), faceMask, compareMask);
    }

    // --- [ vkCmdSetStencilWriteMask ] ---

    /**
     * Set the stencil write mask dynamic state.
     *
     * C Specification
     *
     * If the pipeline state object is created with the {@link #VK_DYNAMIC_STATE_STENCIL_WRITE_MASK DYNAMIC_STATE_STENCIL_WRITE_MASK} dynamic state enabled, then to dynamically set the stencil write mask call:
     *
     *     void vkCmdSetStencilWriteMask(
     *         VkCommandBuffer                             commandBuffer,
     *         VkStencilFaceFlags                          faceMask,
     *         uint32_t                                    writeMask);
     *
     * Valid Usage
     *   • The currently bound graphics pipeline must have been created with the {@link #VK_DYNAMIC_STATE_STENCIL_WRITE_MASK DYNAMIC_STATE_STENCIL_WRITE_MASK} dynamic state enabled
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code faceMask} must be a valid combination of {@code VkStencilFaceFlagBits} values
     *   • {@code faceMask} must not be 0
     *   • {@code commandBuffer} must be in the recording state
     *   • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Command Properties
     *   Command Buffer Levels: Primary, Secondary; Render Pass Scope: Both; Supported Queue Types: Graphics
     *
     * @param commandBuffer the command buffer into which the command will be recorded.
     * @param faceMask      a bitmask of {@code VkStencilFaceFlagBits} specifying the set of stencil state for which to update the write mask, as described above for {@link #vkCmdSetStencilCompareMask CmdSetStencilCompareMask}.
     * @param writeMask     the new value to use as the stencil write mask.
     */
    public static void vkCmdSetStencilWriteMask(VkCommandBuffer commandBuffer, @NativeType("VkStencilFaceFlags") int faceMask, @NativeType("uint32_t") int writeMask) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdSetStencilWriteMask;
        callPV(__functionAddress, commandBuffer.address(), faceMask, writeMask);
    }

    // --- [ vkCmdSetStencilReference ] ---

    /**
     * Set the stencil reference dynamic state.
     *
     * C Specification
     *
     * If the pipeline state object is created with the {@link #VK_DYNAMIC_STATE_STENCIL_REFERENCE DYNAMIC_STATE_STENCIL_REFERENCE} dynamic state enabled, then to dynamically set the stencil reference value call:
     *
     *     void vkCmdSetStencilReference(
     *         VkCommandBuffer                             commandBuffer,
     *         VkStencilFaceFlags                          faceMask,
     *         uint32_t                                    reference);
     *
     * Valid Usage
     *   • The currently bound graphics pipeline must have been created with the {@link #VK_DYNAMIC_STATE_STENCIL_REFERENCE DYNAMIC_STATE_STENCIL_REFERENCE} dynamic state enabled
     *
     * Valid Usage (Implicit)
     *   • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
     *   • {@code faceMask} must be a valid combination of {@code VkStencilFaceFlagBits} values
     *   • {@code faceMask} must not be 0
     *   • {@code commandBuffer} must be in the recording state
     *   • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
     *
     * Host Synchronization
     *   • Host access to {@code commandBuffer} must be externally synchronized
     *   • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
     *
     * Command Properties
     *   Command Buffer Levels: Primary, Secondary; Render Pass Scope: Both; Supported Queue Types: Graphics
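     *
     * A sketch setting all three stencil dynamic states for both faces (the mask and reference values are illustrative; the corresponding dynamic states must be enabled on the bound pipeline):
     *
     *     int faces = VK_STENCIL_FACE_FRONT_BIT | VK_STENCIL_FACE_BACK_BIT;
     *     vkCmdSetStencilCompareMask(commandBuffer, faces, 0xFF);
     *     vkCmdSetStencilWriteMask(commandBuffer, faces, 0xFF);
     *     vkCmdSetStencilReference(commandBuffer, faces, 1);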
     *
     * @param commandBuffer the command buffer into which the command will be recorded.
     * @param faceMask      a bitmask of {@code VkStencilFaceFlagBits} specifying the set of stencil state for which to update the reference value, as described above for {@link #vkCmdSetStencilCompareMask CmdSetStencilCompareMask}.
     * @param reference     the new value to use as the stencil reference value.
     */
    public static void vkCmdSetStencilReference(VkCommandBuffer commandBuffer, @NativeType("VkStencilFaceFlags") int faceMask, @NativeType("uint32_t") int reference) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdSetStencilReference;
        callPV(__functionAddress, commandBuffer.address(), faceMask, reference);
    }

    // --- [ vkCmdBindDescriptorSets ] ---

    /**
     * Unsafe version of: {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets}
     *
     * @param descriptorSetCount the number of elements in the {@code pDescriptorSets} array.
     * @param dynamicOffsetCount the number of dynamic offsets in the {@code pDynamicOffsets} array.
     */
    public static void nvkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, int pipelineBindPoint, long layout, int firstSet, int descriptorSetCount, long pDescriptorSets, int dynamicOffsetCount, long pDynamicOffsets) {
        long __functionAddress = commandBuffer.getCapabilities().vkCmdBindDescriptorSets;
        callPJPPV(__functionAddress, commandBuffer.address(), pipelineBindPoint, layout, firstSet, descriptorSetCount, pDescriptorSets, dynamicOffsetCount, pDynamicOffsets);
    }

    /**
     * Binds descriptor sets to a command buffer.
     *
C Specification
* *

To bind one or more descriptor sets to a command buffer, call:

* *
     * void vkCmdBindDescriptorSets(
     *     VkCommandBuffer                             commandBuffer,
     *     VkPipelineBindPoint                         pipelineBindPoint,
     *     VkPipelineLayout                            layout,
     *     uint32_t                                    firstSet,
     *     uint32_t                                    descriptorSetCount,
     *     const VkDescriptorSet*                      pDescriptorSets,
     *     uint32_t                                    dynamicOffsetCount,
     *     const uint32_t*                             pDynamicOffsets);
* *
Description
* *

{@link #vkCmdBindDescriptorSets CmdBindDescriptorSets} causes the sets numbered [{@code firstSet}.. {@code firstSet}+{@code descriptorSetCount}-1] to use the bindings stored in {@code pDescriptorSets}[0..{@code descriptorSetCount}-1] for subsequent rendering commands (either compute or graphics, according to the {@code pipelineBindPoint}). Any bindings that were previously applied via these sets are no longer valid.

* *

Once bound, a descriptor set affects rendering of subsequent graphics or compute commands in the command buffer until a different set is bound to the same set number, or else until the set is disturbed as described in Pipeline Layout Compatibility.

* *

A compatible descriptor set must be bound for all set numbers that any shaders in a pipeline access, at the time that a draw or dispatch command is recorded to execute using that pipeline. However, if none of the shaders in a pipeline statically use any bindings with a particular set number, then no descriptor set need be bound for that set number, even if the pipeline layout includes a non-trivial descriptor set layout for that set number.

* *

If any of the sets being bound include dynamic uniform or storage buffers, then {@code pDynamicOffsets} includes one element for each array element in each dynamic descriptor type binding in each set. Values are taken from {@code pDynamicOffsets} in an order such that all entries for set N come before set N+1; within a set, entries are ordered by the binding numbers in the descriptor set layouts; and within a binding array, elements are in order. {@code dynamicOffsetCount} must equal the total number of dynamic descriptors in the sets being bound.

* *

The effective offset used for dynamic uniform and storage buffer bindings is the sum of the relative offset taken from {@code pDynamicOffsets}, and the base address of the buffer plus base offset in the descriptor set. The length of the dynamic uniform and storage buffer bindings is the buffer range as specified in the descriptor set.

* *

Each of the {@code pDescriptorSets} must be compatible with the pipeline layout specified by {@code layout}. The layout used to program the bindings must also be compatible with the pipeline used in subsequent graphics or compute commands, as defined in the Pipeline Layout Compatibility section.

* *

The descriptor set contents bound by a call to {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets} may be consumed during host execution of the command, or during shader execution of the resulting draws, or any time in between. Thus, the contents must not be altered (overwritten by an update command, or freed) between when the command is recorded and when the command completes executing on the queue. The contents of {@code pDynamicOffsets} are consumed immediately during execution of {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets}. Once all pending uses have completed, it is legal to update and reuse a descriptor set.

* *
Valid Usage
* *
    *
  • Each element of {@code pDescriptorSets} must have been allocated with a {@code VkDescriptorSetLayout} that matches (is the same as, or identically defined as) the {@code VkDescriptorSetLayout} at set n in {@code layout}, where n is the sum of {@code firstSet} and the index into {@code pDescriptorSets}
  • *
  • {@code dynamicOffsetCount} must be equal to the total number of dynamic descriptors in {@code pDescriptorSets}
  • *
  • The sum of {@code firstSet} and {@code descriptorSetCount} must be less than or equal to {@link VkPipelineLayoutCreateInfo}{@code ::setLayoutCount} provided when {@code layout} was created
  • *
  • {@code pipelineBindPoint} must be supported by the {@code commandBuffer}’s parent {@code VkCommandPool}’s queue family
  • *
  • Each element of {@code pDynamicOffsets} must satisfy the required alignment for the corresponding descriptor binding’s descriptor type
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code pipelineBindPoint} must be a valid {@code VkPipelineBindPoint} value
  • *
  • {@code layout} must be a valid {@code VkPipelineLayout} handle
  • *
  • {@code pDescriptorSets} must be a valid pointer to an array of {@code descriptorSetCount} valid {@code VkDescriptorSet} handles
  • *
  • If {@code dynamicOffsetCount} is not 0, {@code pDynamicOffsets} must be a valid pointer to an array of {@code dynamicOffsetCount} {@code uint32_t} values
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics or compute operations
  • *
  • {@code descriptorSetCount} must be greater than 0
  • *
  • Each of {@code commandBuffer}, {@code layout}, and the elements of {@code pDescriptorSets} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary Secondary     | Both              | Graphics Compute      |
 *
 * @param commandBuffer     the command buffer that the descriptor sets will be bound to.
 * @param pipelineBindPoint a {@code VkPipelineBindPoint} indicating whether the descriptors will be used by graphics pipelines or compute pipelines. There is a separate set of bind points for each of graphics and compute, so binding one does not disturb the other.
 * @param layout            a {@code VkPipelineLayout} object used to program the bindings.
 * @param firstSet          the set number of the first descriptor set to be bound.
 * @param pDescriptorSets   an array of handles to {@code VkDescriptorSet} objects describing the descriptor sets to write to.
 * @param pDynamicOffsets   a pointer to an array of {@code uint32_t} values specifying dynamic offsets.
 */
public static void vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, @NativeType("VkPipelineBindPoint") int pipelineBindPoint, @NativeType("VkPipelineLayout") long layout, @NativeType("uint32_t") int firstSet, @NativeType("const VkDescriptorSet *") LongBuffer pDescriptorSets, @Nullable @NativeType("const uint32_t *") IntBuffer pDynamicOffsets) {
    nvkCmdBindDescriptorSets(commandBuffer, pipelineBindPoint, layout, firstSet, pDescriptorSets.remaining(), memAddress(pDescriptorSets), remainingSafe(pDynamicOffsets), memAddressSafe(pDynamicOffsets));
}

// --- [ vkCmdBindIndexBuffer ] ---

/**
 * Bind an index buffer to a command buffer.
 *
C Specification
* *

To bind an index buffer to a command buffer, call:

* *
     * void vkCmdBindIndexBuffer(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    buffer,
     *     VkDeviceSize                                offset,
     *     VkIndexType                                 indexType);
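
As a minimal sketch using the LWJGL wrapper below, the following binds a buffer of 16-bit indices at byte offset 0; {@code cmd} and {@code indexBuffer} are hypothetical handles created elsewhere.

     * // Interpret the contents of indexBuffer, starting at offset 0, as uint16 indices.
     * vkCmdBindIndexBuffer(cmd, indexBuffer, 0L, VK_INDEX_TYPE_UINT16);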
* *
Valid Usage
* *
    *
  • {@code offset} must be less than the size of {@code buffer}
  • *
  • The sum of {@code offset} and the base address of the range of the {@code VkDeviceMemory} object that is backing {@code buffer} must be a multiple of the size of the type indicated by {@code indexType}
  • *
  • {@code buffer} must have been created with the {@link #VK_BUFFER_USAGE_INDEX_BUFFER_BIT BUFFER_USAGE_INDEX_BUFFER_BIT} flag
  • *
  • If {@code buffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code buffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code indexType} must be a valid {@code VkIndexType} value
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • Both of {@code buffer}, and {@code commandBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary Secondary     | Both              | Graphics              |
 *
 * @param commandBuffer the command buffer into which the command is recorded.
 * @param buffer        the buffer being bound.
 * @param offset        the starting offset in bytes within {@code buffer} used in index buffer address calculations.
 * @param indexType     a {@code VkIndexType} value specifying whether indices are treated as 16 bits or 32 bits.
 */
public static void vkCmdBindIndexBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long buffer, @NativeType("VkDeviceSize") long offset, @NativeType("VkIndexType") int indexType) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdBindIndexBuffer;
    callPJJV(__functionAddress, commandBuffer.address(), buffer, offset, indexType);
}

// --- [ vkCmdBindVertexBuffers ] ---

/**
 * Unsafe version of: {@link #vkCmdBindVertexBuffers CmdBindVertexBuffers}
 *
 * @param bindingCount the number of vertex input bindings whose state is updated by the command.
 */
public static void nvkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, int firstBinding, int bindingCount, long pBuffers, long pOffsets) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdBindVertexBuffers;
    callPPPV(__functionAddress, commandBuffer.address(), firstBinding, bindingCount, pBuffers, pOffsets);
}

/**
 * Bind vertex buffers to a command buffer.
 *
C Specification
* *

To bind vertex buffers to a command buffer for use in subsequent draw commands, call:

* *
     * void vkCmdBindVertexBuffers(
     *     VkCommandBuffer                             commandBuffer,
     *     uint32_t                                    firstBinding,
     *     uint32_t                                    bindingCount,
     *     const VkBuffer*                             pBuffers,
     *     const VkDeviceSize*                         pOffsets);
* *
Description
* *

The values taken from elements i of {@code pBuffers} and {@code pOffsets} replace the current state for the vertex input binding firstBinding + i, for i in [0, bindingCount). The vertex input binding is updated to start at the offset indicated by {@code pOffsets}[i] from the start of the buffer {@code pBuffers}[i]. All vertex input attributes that use each of these bindings will use these updated addresses in their address calculations for subsequent draw commands.
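
For illustration, a sketch that updates bindings 0 and 1 from two buffers through the LWJGL wrapper; {@code cmd}, {@code positionBuffer} and {@code instanceBuffer} are hypothetical handles, and the 1024-byte offset is an arbitrary example value.

     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     LongBuffer buffers = stack.longs(positionBuffer, instanceBuffer);
     *     LongBuffer offsets = stack.longs(0L, 1024L);      // one offset per buffer
     *     vkCmdBindVertexBuffers(cmd, 0, buffers, offsets); // firstBinding = 0
     * }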

* *
Valid Usage
* *
    *
  • {@code firstBinding} must be less than {@link VkPhysicalDeviceLimits}{@code ::maxVertexInputBindings}
  • *
  • The sum of {@code firstBinding} and {@code bindingCount} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxVertexInputBindings}
  • *
  • All elements of {@code pOffsets} must be less than the size of the corresponding element in {@code pBuffers}
  • *
  • All elements of {@code pBuffers} must have been created with the {@link #VK_BUFFER_USAGE_VERTEX_BUFFER_BIT BUFFER_USAGE_VERTEX_BUFFER_BIT} flag
  • *
  • Each element of {@code pBuffers} that is non-sparse must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code pBuffers} must be a valid pointer to an array of {@code bindingCount} valid {@code VkBuffer} handles
  • *
  • {@code pOffsets} must be a valid pointer to an array of {@code bindingCount} {@code VkDeviceSize} values
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • {@code bindingCount} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and the elements of {@code pBuffers} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary Secondary     | Both              | Graphics              |
 *
 * @param commandBuffer the command buffer into which the command is recorded.
 * @param firstBinding  the index of the first vertex input binding whose state is updated by the command.
 * @param pBuffers      a pointer to an array of buffer handles.
 * @param pOffsets      a pointer to an array of buffer offsets.
 */
public static void vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, @NativeType("uint32_t") int firstBinding, @NativeType("const VkBuffer *") LongBuffer pBuffers, @NativeType("const VkDeviceSize *") LongBuffer pOffsets) {
    if (CHECKS) {
        check(pOffsets, pBuffers.remaining());
    }
    nvkCmdBindVertexBuffers(commandBuffer, firstBinding, pBuffers.remaining(), memAddress(pBuffers), memAddress(pOffsets));
}

// --- [ vkCmdDraw ] ---

/**
 * Draw primitives.
 *
C Specification
* *

To record a non-indexed draw, call:

* *
     * void vkCmdDraw(
     *     VkCommandBuffer                             commandBuffer,
     *     uint32_t                                    vertexCount,
     *     uint32_t                                    instanceCount,
     *     uint32_t                                    firstVertex,
     *     uint32_t                                    firstInstance);
* *
Description
* *

When the command is executed, primitives are assembled using the current primitive topology and {@code vertexCount} consecutive vertex indices with the first {@code vertexIndex} value equal to {@code firstVertex}. The primitives are drawn {@code instanceCount} times with {@code instanceIndex} starting with {@code firstInstance} and increasing sequentially for each instance. The assembled primitives execute the currently bound graphics pipeline.
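
A minimal sketch of recording a non-indexed draw with the LWJGL wrapper, assuming a graphics pipeline and vertex buffers have already been bound to the hypothetical command buffer {@code cmd}:

     * // One instance of a single triangle: 3 vertices, starting at vertex 0, instance 0.
     * vkCmdDraw(cmd, 3, 1, 0, 0);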

* *
Valid Usage
* *
    *
  • The current render pass must be compatible with the {@code renderPass} member of the {@link VkGraphicsPipelineCreateInfo} structure specified when creating the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}.
  • *
  • The subpass index of the current render pass must be equal to the {@code subpass} member of the {@link VkGraphicsPipelineCreateInfo} structure specified when creating the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}.
  • *
  • For each set n that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, a descriptor set must have been bound to n at {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, with a {@code VkPipelineLayout} that is compatible for set n, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • For each push constant that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, a push constant value must have been set for {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, with a {@code VkPipelineLayout} that is compatible for push constants, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • Descriptors in each bound descriptor set, specified via {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets}, must be valid if they are statically used by the currently bound {@code VkPipeline} object, specified via {@link #vkCmdBindPipeline CmdBindPipeline}
  • *
  • All vertex input bindings accessed via vertex input variables declared in the vertex shader entry point’s interface must have valid buffers bound
  • *
  • For a given vertex buffer binding, any attribute data fetched must be entirely contained within the corresponding vertex buffer binding, as described in the “Vertex Input Description” section
  • *
  • A valid graphics pipeline must be bound to the current command buffer with {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}
  • *
  • If the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} requires any dynamic state, that state must have been set on the current command buffer
  • *
  • Every input attachment used by the current subpass must be bound to the pipeline via a descriptor set
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used to sample from any {@code VkImage} with a {@code VkImageView} of the type {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, {@link #VK_IMAGE_VIEW_TYPE_1D_ARRAY IMAGE_VIEW_TYPE_1D_ARRAY}, {@link #VK_IMAGE_VIEW_TYPE_2D_ARRAY IMAGE_VIEW_TYPE_2D_ARRAY} or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions with {@code ImplicitLod}, {@code Dref} or {@code Proj} in their name, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions that includes a LOD bias or any offset values, in any shader stage
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} accesses a uniform buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} accesses a storage buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • Any {@code VkImageView} being sampled with {@link #VK_FILTER_LINEAR FILTER_LINEAR} as a result of this command must be of a format which supports linear filtering, as specified by the {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Image subresources used as attachments in the current render pass must not be accessed in any way other than as an attachment by this command.
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must be of a format which supports cubic filtering, as specified by the {@link IMGFilterCubic#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must not have a {@code VkImageViewType} of {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}
  • *
  • If the draw is recorded in a render pass instance with multiview enabled, the maximum instance index must be less than or equal to {@link VkPhysicalDeviceMultiviewPropertiesKHX}{@code ::maxMultiviewInstanceIndex}.
  • *
  • If the currently bound graphics pipeline was created with {@link VkPipelineSampleLocationsStateCreateInfoEXT}{@code ::sampleLocationsEnable} set to {@link #VK_TRUE TRUE} and the current subpass has a depth/stencil attachment, then that attachment must have been created with the {@link EXTSampleLocations#VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT} bit set
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called inside of a render pass instance
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary Secondary     | Inside            | Graphics              | Graphics
 *
 * @param commandBuffer the command buffer into which the command is recorded.
 * @param vertexCount   the number of vertices to draw.
 * @param instanceCount the number of instances to draw.
 * @param firstVertex   the index of the first vertex to draw.
 * @param firstInstance the instance ID of the first instance to draw.
 */
public static void vkCmdDraw(VkCommandBuffer commandBuffer, @NativeType("uint32_t") int vertexCount, @NativeType("uint32_t") int instanceCount, @NativeType("uint32_t") int firstVertex, @NativeType("uint32_t") int firstInstance) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdDraw;
    callPV(__functionAddress, commandBuffer.address(), vertexCount, instanceCount, firstVertex, firstInstance);
}

// --- [ vkCmdDrawIndexed ] ---

/**
 * Issue an indexed draw into a command buffer.
 *
C Specification
* *

To record an indexed draw, call:

* *
     * void vkCmdDrawIndexed(
     *     VkCommandBuffer                             commandBuffer,
     *     uint32_t                                    indexCount,
     *     uint32_t                                    instanceCount,
     *     uint32_t                                    firstIndex,
     *     int32_t                                     vertexOffset,
     *     uint32_t                                    firstInstance);
* *
Description
* *

When the command is executed, primitives are assembled using the current primitive topology and {@code indexCount} vertices whose indices are retrieved from the index buffer. The index buffer is treated as an array of tightly packed unsigned integers of size defined by the {@link #vkCmdBindIndexBuffer CmdBindIndexBuffer}{@code ::indexType} parameter with which the buffer was bound.

* *

The first vertex index is at an offset of {@code firstIndex} * {@code indexSize} + {@code offset} within the currently bound index buffer, where {@code offset} is the offset specified by {@link #vkCmdBindIndexBuffer CmdBindIndexBuffer} and {@code indexSize} is the byte size of the type specified by {@code indexType}. Subsequent index values are retrieved from consecutive locations in the index buffer. Indices are first compared to the primitive restart value, then zero extended to 32 bits (if the {@code indexType} is {@link #VK_INDEX_TYPE_UINT16 INDEX_TYPE_UINT16}) and have {@code vertexOffset} added to them, before being supplied as the {@code vertexIndex} value.

* *

The primitives are drawn {@code instanceCount} times with {@code instanceIndex} starting with {@code firstInstance} and increasing sequentially for each instance. The assembled primitives execute the currently bound graphics pipeline.
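
For example, assuming an index buffer and vertex buffers are already bound to the hypothetical command buffer {@code cmd}, a single instance of a 12-triangle cube could be recorded as sketched below:

     * // 36 indices, 1 instance, firstIndex 0, vertexOffset 0, firstInstance 0.
     * vkCmdDrawIndexed(cmd, 36, 1, 0, 0, 0);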

* *
Valid Usage
* *
    *
  • The current render pass must be compatible with the {@code renderPass} member of the {@link VkGraphicsPipelineCreateInfo} structure specified when creating the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}.
  • *
  • The subpass index of the current render pass must be equal to the {@code subpass} member of the {@link VkGraphicsPipelineCreateInfo} structure specified when creating the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}.
  • *
  • For each set n that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, a descriptor set must have been bound to n at {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, with a {@code VkPipelineLayout} that is compatible for set n, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • For each push constant that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, a push constant value must have been set for {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, with a {@code VkPipelineLayout} that is compatible for push constants, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • Descriptors in each bound descriptor set, specified via {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets}, must be valid if they are statically used by the currently bound {@code VkPipeline} object, specified via {@link #vkCmdBindPipeline CmdBindPipeline}
  • *
  • All vertex input bindings accessed via vertex input variables declared in the vertex shader entry point’s interface must have valid buffers bound
  • *
  • For a given vertex buffer binding, any attribute data fetched must be entirely contained within the corresponding vertex buffer binding, as described in the “Vertex Input Description” section
  • *
  • A valid graphics pipeline must be bound to the current command buffer with {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}
  • *
  • If the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} requires any dynamic state, that state must have been set on the current command buffer
  • *
  • (indexSize * (firstIndex + indexCount) + offset) must be less than or equal to the size of the currently bound index buffer, with indexSize being based on the type specified by {@code indexType}, where the index buffer, {@code indexType}, and {@code offset} are specified via {@link #vkCmdBindIndexBuffer CmdBindIndexBuffer}
  • *
  • Every input attachment used by the current subpass must be bound to the pipeline via a descriptor set
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used to sample from any {@code VkImage} with a {@code VkImageView} of the type {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, {@link #VK_IMAGE_VIEW_TYPE_1D_ARRAY IMAGE_VIEW_TYPE_1D_ARRAY}, {@link #VK_IMAGE_VIEW_TYPE_2D_ARRAY IMAGE_VIEW_TYPE_2D_ARRAY} or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions with {@code ImplicitLod}, {@code Dref} or {@code Proj} in their name, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions that includes a LOD bias or any offset values, in any shader stage
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} accesses a uniform buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} accesses a storage buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • Any {@code VkImageView} being sampled with {@link #VK_FILTER_LINEAR FILTER_LINEAR} as a result of this command must be of a format which supports linear filtering, as specified by the {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Image subresources used as attachments in the current render pass must not be accessed in any way other than as an attachment by this command.
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must be of a format which supports cubic filtering, as specified by the {@link IMGFilterCubic#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must not have a {@code VkImageViewType} of {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}
  • *
  • If the draw is recorded in a render pass instance with multiview enabled, the maximum instance index must be less than or equal to {@link VkPhysicalDeviceMultiviewPropertiesKHX}{@code ::maxMultiviewInstanceIndex}.
  • *
  • If the currently bound graphics pipeline was created with {@link VkPipelineSampleLocationsStateCreateInfoEXT}{@code ::sampleLocationsEnable} set to {@link #VK_TRUE TRUE} and the current subpass has a depth/stencil attachment, then that attachment must have been created with the {@link EXTSampleLocations#VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT} bit set
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called inside of a render pass instance
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary Secondary     | Inside            | Graphics              | Graphics
 *
 * @param commandBuffer the command buffer into which the command is recorded.
 * @param indexCount    the number of vertices to draw.
 * @param instanceCount the number of instances to draw.
 * @param firstIndex    the base index within the index buffer.
 * @param vertexOffset  the value added to the vertex index before indexing into the vertex buffer.
 * @param firstInstance the instance ID of the first instance to draw.
 */
public static void vkCmdDrawIndexed(VkCommandBuffer commandBuffer, @NativeType("uint32_t") int indexCount, @NativeType("uint32_t") int instanceCount, @NativeType("uint32_t") int firstIndex, @NativeType("int32_t") int vertexOffset, @NativeType("uint32_t") int firstInstance) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdDrawIndexed;
    callPV(__functionAddress, commandBuffer.address(), indexCount, instanceCount, firstIndex, vertexOffset, firstInstance);
}

// --- [ vkCmdDrawIndirect ] ---

/**
 * Issue an indirect draw into a command buffer.
 *
C Specification
* *

To record a non-indexed indirect draw, call:

* *
     * void vkCmdDrawIndirect(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    buffer,
     *     VkDeviceSize                                offset,
     *     uint32_t                                    drawCount,
     *     uint32_t                                    stride);
* *
Description
* *

{@link #vkCmdDrawIndirect CmdDrawIndirect} behaves similarly to {@link #vkCmdDraw CmdDraw} except that the parameters are read by the device from a buffer during execution. {@code drawCount} draws are executed by the command, with parameters taken from {@code buffer} starting at {@code offset} and increasing by {@code stride} bytes for each successive draw. The parameters of each draw are encoded in an array of {@link VkDrawIndirectCommand} structures. If {@code drawCount} is less than or equal to one, {@code stride} is ignored.
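
As a sketch, the call below consumes two tightly packed {@link VkDrawIndirectCommand} records from the start of a hypothetical {@code indirectBuffer} (created with {@link #VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT BUFFER_USAGE_INDIRECT_BUFFER_BIT}); a {@code drawCount} greater than 1 assumes the multi-draw indirect feature is enabled.

     * // Two draws, parameters packed back to back at offset 0.
     * vkCmdDrawIndirect(cmd, indirectBuffer, 0L, 2, VkDrawIndirectCommand.SIZEOF);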

* *
Valid Usage
* *
    *
  • If {@code buffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code buffer} must have been created with the {@link #VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT BUFFER_USAGE_INDIRECT_BUFFER_BIT} bit set
  • *
  • {@code offset} must be a multiple of 4
  • *
  • If {@code drawCount} is greater than 1, {@code stride} must be a multiple of 4 and must be greater than or equal to {@code sizeof}({@link VkDrawIndirectCommand})
  • *
  • If the multi-draw indirect feature is not enabled, {@code drawCount} must be 0 or 1
  • *
  • If the drawIndirectFirstInstance feature is not enabled, all the {@code firstInstance} members of the {@link VkDrawIndirectCommand} structures accessed by this command must be 0
  • *
  • The current render pass must be compatible with the {@code renderPass} member of the {@link VkGraphicsPipelineCreateInfo} structure specified when creating the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}.
  • *
  • The subpass index of the current render pass must be equal to the {@code subpass} member of the {@link VkGraphicsPipelineCreateInfo} structure specified when creating the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}.
  • *
  • For each set n that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, a descriptor set must have been bound to n at {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, with a {@code VkPipelineLayout} that is compatible for set n, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • For each push constant that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, a push constant value must have been set for {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, with a {@code VkPipelineLayout} that is compatible for push constants, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • Descriptors in each bound descriptor set, specified via {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets}, must be valid if they are statically used by the currently bound {@code VkPipeline} object, specified via {@link #vkCmdBindPipeline CmdBindPipeline}
  • *
  • All vertex input bindings accessed via vertex input variables declared in the vertex shader entry point’s interface must have valid buffers bound
  • *
  • A valid graphics pipeline must be bound to the current command buffer with {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}
  • *
  • If the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} requires any dynamic state, that state must have been set on the current command buffer
  • *
  • If {@code drawCount} is equal to 1, (offset + sizeof({@link VkDrawIndirectCommand})) must be less than or equal to the size of {@code buffer}
  • *
  • If {@code drawCount} is greater than 1, (stride × (drawCount - 1) + offset + sizeof({@link VkDrawIndirectCommand})) must be less than or equal to the size of {@code buffer}
  • *
  • {@code drawCount} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxDrawIndirectCount}
  • *
  • Every input attachment used by the current subpass must be bound to the pipeline via a descriptor set
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used to sample from any {@code VkImage} with a {@code VkImageView} of the type {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, {@link #VK_IMAGE_VIEW_TYPE_1D_ARRAY IMAGE_VIEW_TYPE_1D_ARRAY}, {@link #VK_IMAGE_VIEW_TYPE_2D_ARRAY IMAGE_VIEW_TYPE_2D_ARRAY} or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions with {@code ImplicitLod}, {@code Dref} or {@code Proj} in their name, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions that includes a LOD bias or any offset values, in any shader stage
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} accesses a uniform buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} accesses a storage buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • Any {@code VkImageView} being sampled with {@link #VK_FILTER_LINEAR FILTER_LINEAR} as a result of this command must be of a format which supports linear filtering, as specified by the {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Image subresources used as attachments in the current render pass must not be accessed in any way other than as an attachment by this command.
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must be of a format which supports cubic filtering, as specified by the {@link IMGFilterCubic#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must not have a {@code VkImageViewType} of {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}
  • *
  • If the draw is recorded in a render pass instance with multiview enabled, the maximum instance index must be less than or equal to {@link VkPhysicalDeviceMultiviewPropertiesKHX}{@code ::maxMultiviewInstanceIndex}.
  • *
  • If the currently bound graphics pipeline was created with {@link VkPipelineSampleLocationsStateCreateInfoEXT}{@code ::sampleLocationsEnable} set to {@link #VK_TRUE TRUE} and the current subpass has a depth/stencil attachment, then that attachment must have been created with the {@link EXTSampleLocations#VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT} bit set
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code buffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called inside of a render pass instance
  • *
  • Both of {@code buffer}, and {@code commandBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary Secondary     | Inside            | Graphics              | Graphics
 *
 * @param commandBuffer the command buffer into which the command is recorded.
 * @param buffer        the buffer containing draw parameters.
 * @param offset        the byte offset into {@code buffer} where parameters begin.
 * @param drawCount     the number of draws to execute, and can be zero.
 * @param stride        the byte stride between successive sets of draw parameters.
 */
public static void vkCmdDrawIndirect(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long buffer, @NativeType("VkDeviceSize") long offset, @NativeType("uint32_t") int drawCount, @NativeType("uint32_t") int stride) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdDrawIndirect;
    callPJJV(__functionAddress, commandBuffer.address(), buffer, offset, drawCount, stride);
}

// --- [ vkCmdDrawIndexedIndirect ] ---

/**
 * Perform an indexed indirect draw.
 *
C Specification
* *

To record an indexed indirect draw, call:

* *
     * void vkCmdDrawIndexedIndirect(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    buffer,
     *     VkDeviceSize                                offset,
     *     uint32_t                                    drawCount,
     *     uint32_t                                    stride);
* *
Description
* *

{@link #vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect} behaves similarly to {@link #vkCmdDrawIndexed CmdDrawIndexed} except that the parameters are read by the device from a buffer during execution. {@code drawCount} draws are executed by the command, with parameters taken from {@code buffer} starting at {@code offset} and increasing by {@code stride} bytes for each successive draw. The parameters of each draw are encoded in an array of {@link VkDrawIndexedIndirectCommand} structures. If {@code drawCount} is less than or equal to one, {@code stride} is ignored.
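
Analogously to the non-indexed case, a sketch that reads two packed {@link VkDrawIndexedIndirectCommand} records from a hypothetical {@code indirectBuffer}, again assuming the multi-draw indirect feature for a {@code drawCount} above 1:

     * vkCmdDrawIndexedIndirect(cmd, indirectBuffer, 0L, 2, VkDrawIndexedIndirectCommand.SIZEOF);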

* *
Valid Usage
* *
    *
  • If {@code buffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code buffer} must have been created with the {@link #VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT BUFFER_USAGE_INDIRECT_BUFFER_BIT} bit set
  • *
  • {@code offset} must be a multiple of 4
  • *
  • If {@code drawCount} is greater than 1, {@code stride} must be a multiple of 4 and must be greater than or equal to {@code sizeof}({@link VkDrawIndexedIndirectCommand})
  • *
  • If the multi-draw indirect feature is not enabled, {@code drawCount} must be 0 or 1
  • *
  • If the drawIndirectFirstInstance feature is not enabled, all the {@code firstInstance} members of the {@link VkDrawIndexedIndirectCommand} structures accessed by this command must be 0
  • *
  • The current render pass must be compatible with the {@code renderPass} member of the {@link VkGraphicsPipelineCreateInfo} structure specified when creating the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}.
  • *
  • The subpass index of the current render pass must be equal to the {@code subpass} member of the {@link VkGraphicsPipelineCreateInfo} structure specified when creating the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}.
  • *
  • For each set n that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, a descriptor set must have been bound to n at {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, with a {@code VkPipelineLayout} that is compatible for set n, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • For each push constant that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, a push constant value must have been set for {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}, with a {@code VkPipelineLayout} that is compatible for push constants, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • Descriptors in each bound descriptor set, specified via {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets}, must be valid if they are statically used by the currently bound {@code VkPipeline} object, specified via {@link #vkCmdBindPipeline CmdBindPipeline}
  • *
  • All vertex input bindings accessed via vertex input variables declared in the vertex shader entry point’s interface must have valid buffers bound
  • *
  • A valid graphics pipeline must be bound to the current command buffer with {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS}
  • *
  • If the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} requires any dynamic state, that state must have been set on the current command buffer
  • *
  • If {@code drawCount} is equal to 1, (offset + sizeof({@link VkDrawIndexedIndirectCommand})) must be less than or equal to the size of {@code buffer}
  • *
  • If {@code drawCount} is greater than 1, (stride × (drawCount - 1) + offset + sizeof({@link VkDrawIndexedIndirectCommand})) must be less than or equal to the size of {@code buffer}
  • *
  • {@code drawCount} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxDrawIndirectCount}
  • *
  • Every input attachment used by the current subpass must be bound to the pipeline via a descriptor set
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used to sample from any {@code VkImage} with a {@code VkImageView} of the type {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, {@link #VK_IMAGE_VIEW_TYPE_1D_ARRAY IMAGE_VIEW_TYPE_1D_ARRAY}, {@link #VK_IMAGE_VIEW_TYPE_2D_ARRAY IMAGE_VIEW_TYPE_2D_ARRAY} or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions with {@code ImplicitLod}, {@code Dref} or {@code Proj} in their name, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions that includes a LOD bias or any offset values, in any shader stage
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} accesses a uniform buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_GRAPHICS PIPELINE_BIND_POINT_GRAPHICS} accesses a storage buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • Any {@code VkImageView} being sampled with {@link #VK_FILTER_LINEAR FILTER_LINEAR} as a result of this command must be of a format which supports linear filtering, as specified by the {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Image subresources used as attachments in the current render pass must not be accessed in any way other than as an attachment by this command.
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must be of a format which supports cubic filtering, as specified by the {@link IMGFilterCubic#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must not have a {@code VkImageViewType} of {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}
  • *
  • If the draw is recorded in a render pass instance with multiview enabled, the maximum instance index must be less than or equal to {@link VkPhysicalDeviceMultiviewPropertiesKHX}{@code ::maxMultiviewInstanceIndex}.
  • *
  • If the currently bound graphics pipeline was created with {@link VkPipelineSampleLocationsStateCreateInfoEXT}{@code ::sampleLocationsEnable} set to {@link #VK_TRUE TRUE} and the current subpass has a depth/stencil attachment, then that attachment must have been created with the {@link EXTSampleLocations#VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT} bit set
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code buffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called inside of a render pass instance
  • *
  • Both of {@code buffer}, and {@code commandBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary Secondary     | Inside            | Graphics              | Graphics
 *
 * @param commandBuffer the command buffer into which the command is recorded.
 * @param buffer        the buffer containing draw parameters.
 * @param offset        the byte offset into {@code buffer} where parameters begin.
 * @param drawCount     the number of draws to execute, and can be zero.
 * @param stride        the byte stride between successive sets of draw parameters.
 */
public static void vkCmdDrawIndexedIndirect(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long buffer, @NativeType("VkDeviceSize") long offset, @NativeType("uint32_t") int drawCount, @NativeType("uint32_t") int stride) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdDrawIndexedIndirect;
    callPJJV(__functionAddress, commandBuffer.address(), buffer, offset, drawCount, stride);
}

// --- [ vkCmdDispatch ] ---

/**
 * Dispatch compute work items.
 *
C Specification
* *

To record a dispatch, call:

* *
     * void vkCmdDispatch(
     *     VkCommandBuffer                             commandBuffer,
     *     uint32_t                                    groupCountX,
     *     uint32_t                                    groupCountY,
     *     uint32_t                                    groupCountZ);
* *
Description
* *

When the command is executed, a global workgroup consisting of groupCountX × groupCountY × groupCountZ local workgroups is assembled.
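
For example, a compute shader declared with a 16×16 local workgroup size can cover a {@code width}×{@code height} image by rounding the group counts up; {@code cmd}, {@code width} and {@code height} are hypothetical values defined elsewhere.

     * int groupsX = (width  + 15) / 16; // ceil(width / 16)
     * int groupsY = (height + 15) / 16; // ceil(height / 16)
     * vkCmdDispatch(cmd, groupsX, groupsY, 1);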

* *
Valid Usage
* *
    *
  • {@code groupCountX} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxComputeWorkGroupCount}[0]
  • *
  • {@code groupCountY} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxComputeWorkGroupCount}[1]
  • *
  • {@code groupCountZ} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxComputeWorkGroupCount}[2]
  • *
  • For each set n that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}, a descriptor set must have been bound to n at {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}, with a {@code VkPipelineLayout} that is compatible for set n, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • Descriptors in each bound descriptor set, specified via {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets}, must be valid if they are statically used by the currently bound {@code VkPipeline} object, specified via {@link #vkCmdBindPipeline CmdBindPipeline}
  • *
  • A valid compute pipeline must be bound to the current command buffer with {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}
  • *
  • For each push constant that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}, a push constant value must have been set for {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}, with a {@code VkPipelineLayout} that is compatible for push constants with the one used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} uses unnormalized coordinates, it must not be used to sample from any {@code VkImage} with a {@code VkImageView} of the type {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, {@link #VK_IMAGE_VIEW_TYPE_1D_ARRAY IMAGE_VIEW_TYPE_1D_ARRAY}, {@link #VK_IMAGE_VIEW_TYPE_2D_ARRAY IMAGE_VIEW_TYPE_2D_ARRAY} or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions with {@code ImplicitLod}, {@code Dref} or {@code Proj} in their name, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions that includes a LOD bias or any offset values, in any shader stage
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} accesses a uniform buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} accesses a storage buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • Any {@code VkImageView} being sampled with {@link #VK_FILTER_LINEAR FILTER_LINEAR} as a result of this command must be of a format which supports linear filtering, as specified by the {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must be of a format which supports cubic filtering, as specified by the {@link IMGFilterCubic#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must not have a {@code VkImageViewType} of {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* *
  • Command Buffer Levels: Primary, Secondary
  • Render Pass Scope: Outside
  • Supported Queue Types: Compute
  • Pipeline Type: Compute
* * @param commandBuffer the command buffer into which the command will be recorded. * @param groupCountX the number of local workgroups to dispatch in the X dimension. * @param groupCountY the number of local workgroups to dispatch in the Y dimension. * @param groupCountZ the number of local workgroups to dispatch in the Z dimension. */ public static void vkCmdDispatch(VkCommandBuffer commandBuffer, @NativeType("uint32_t") int groupCountX, @NativeType("uint32_t") int groupCountY, @NativeType("uint32_t") int groupCountZ) { long __functionAddress = commandBuffer.getCapabilities().vkCmdDispatch; callPV(__functionAddress, commandBuffer.address(), groupCountX, groupCountY, groupCountZ); } // --- [ vkCmdDispatchIndirect ] --- /** * Dispatch compute work items using indirect parameters. * *
C Specification
* *

To record an indirect command dispatch, call:

* *
     * void vkCmdDispatchIndirect(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    buffer,
     *     VkDeviceSize                                offset);
* *
Description
* *

{@link #vkCmdDispatchIndirect CmdDispatchIndirect} behaves similarly to {@link #vkCmdDispatch CmdDispatch} except that the parameters are read by the device from a buffer during execution. The parameters of the dispatch are encoded in a {@link VkDispatchIndirectCommand} structure taken from {@code buffer} starting at {@code offset}.
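
As a purely illustrative, hedged LWJGL sketch, the snippet below first writes the three workgroup counts into an already mapped, host-visible indirect buffer and then records the indirect dispatch. The names {@code mapped}, {@code indirectBuffer}, {@code computePipeline}, {@code commandBuffer} and the group counts are assumptions of the sketch; the memory mapping, barriers and descriptor set binding are elided.

     * // Fill the 12-byte VkDispatchIndirectCommand at offset 0 of the mapped indirect buffer.
     * VkDispatchIndirectCommand.create(memAddress(mapped))
     *     .x(groupCountX)
     *     .y(groupCountY)
     *     .z(groupCountZ);
     *
     * // Record the indirect dispatch; the offset (0 here) must be a multiple of 4 and the
     * // VkDispatchIndirectCommand must lie entirely inside indirectBuffer.
     * vkCmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline);
     * vkCmdDispatchIndirect(commandBuffer, indirectBuffer, 0L);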

* *
Valid Usage
* *
    *
  • If {@code buffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • For each set n that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}, a descriptor set must have been bound to n at {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}, with a {@code VkPipelineLayout} that is compatible for set n, with the {@code VkPipelineLayout} used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • Descriptors in each bound descriptor set, specified via {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets}, must be valid if they are statically used by the currently bound {@code VkPipeline} object, specified via {@link #vkCmdBindPipeline CmdBindPipeline}
  • *
  • A valid compute pipeline must be bound to the current command buffer with {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}
  • *
  • {@code buffer} must have been created with the {@link #VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT BUFFER_USAGE_INDIRECT_BUFFER_BIT} bit set
  • *
  • {@code offset} must be a multiple of 4
  • *
  • The sum of {@code offset} and the size of {@link VkDispatchIndirectCommand} must be less than or equal to the size of {@code buffer}
  • *
  • For each push constant that is statically used by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}, a push constant value must have been set for {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE}, with a {@code VkPipelineLayout} that is compatible for push constants with the one used to create the current {@code VkPipeline}, as described in the “Pipeline Layout Compatibility” section
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} uses unnormalized coordinates, it must not be used to sample from any {@code VkImage} with a {@code VkImageView} of the type {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, {@link #VK_IMAGE_VIEW_TYPE_1D_ARRAY IMAGE_VIEW_TYPE_1D_ARRAY}, {@link #VK_IMAGE_VIEW_TYPE_2D_ARRAY IMAGE_VIEW_TYPE_2D_ARRAY} or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions with {@code ImplicitLod}, {@code Dref} or {@code Proj} in their name, in any shader stage
  • *
  • If any {@code VkSampler} object that is accessed from a shader by the {@code VkPipeline} currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} uses unnormalized coordinates, it must not be used with any of the SPIR-V {@code OpImageSample*} or {@code OpImageSparseSample*} instructions that includes a LOD bias or any offset values, in any shader stage
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} accesses a uniform buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • If the robust buffer access feature is not enabled, and any shader stage in the {@code VkPipeline} object currently bound to {@link #VK_PIPELINE_BIND_POINT_COMPUTE PIPELINE_BIND_POINT_COMPUTE} accesses a storage buffer, it must not access values outside of the range of that buffer specified in the currently bound descriptor set
  • *
  • Any {@code VkImageView} being sampled with {@link #VK_FILTER_LINEAR FILTER_LINEAR} as a result of this command must be of a format which supports linear filtering, as specified by the {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must be of a format which supports cubic filtering, as specified by the {@link IMGFilterCubic#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • Any {@code VkImageView} being sampled with {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG} as a result of this command must not have a {@code VkImageViewType} of {@link #VK_IMAGE_VIEW_TYPE_3D IMAGE_VIEW_TYPE_3D}, {@link #VK_IMAGE_VIEW_TYPE_CUBE IMAGE_VIEW_TYPE_CUBE}, or {@link #VK_IMAGE_VIEW_TYPE_CUBE_ARRAY IMAGE_VIEW_TYPE_CUBE_ARRAY}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code buffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • Both of {@code buffer}, and {@code commandBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* *
  • Command Buffer Levels: Primary, Secondary
  • Render Pass Scope: Outside
  • Supported Queue Types: Compute
  • Pipeline Type: Compute
* * @param commandBuffer the command buffer into which the command will be recorded. * @param buffer the buffer containing dispatch parameters. * @param offset the byte offset into {@code buffer} where parameters begin. */ public static void vkCmdDispatchIndirect(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long buffer, @NativeType("VkDeviceSize") long offset) { long __functionAddress = commandBuffer.getCapabilities().vkCmdDispatchIndirect; callPJJV(__functionAddress, commandBuffer.address(), buffer, offset); } // --- [ vkCmdCopyBuffer ] --- /** * Unsafe version of: {@link #vkCmdCopyBuffer CmdCopyBuffer} * * @param regionCount the number of regions to copy. */ public static void nvkCmdCopyBuffer(VkCommandBuffer commandBuffer, long srcBuffer, long dstBuffer, int regionCount, long pRegions) { long __functionAddress = commandBuffer.getCapabilities().vkCmdCopyBuffer; callPJJPV(__functionAddress, commandBuffer.address(), srcBuffer, dstBuffer, regionCount, pRegions); } /** * Copy data between buffer regions. * *
C Specification
* *

To copy data between buffer objects, call:

* *
     * void vkCmdCopyBuffer(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    srcBuffer,
     *     VkBuffer                                    dstBuffer,
     *     uint32_t                                    regionCount,
     *     const VkBufferCopy*                         pRegions);
* *
Description
* *

Each region in {@code pRegions} is copied from the source buffer to the same region of the destination buffer. {@code srcBuffer} and {@code dstBuffer} can be the same buffer or alias the same memory, but the result is undefined if the copy regions overlap in memory.
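
For example, a minimal LWJGL sketch of a single-region copy from a staging buffer to a device-local buffer could look as follows. The handles ({@code commandBuffer}, {@code stagingBuffer}, {@code deviceBuffer}) and {@code byteSize} are assumptions of the sketch, and {@code VkBufferCopy.calloc(count, stack)} assumes a recent LWJGL release (older releases use {@code callocStack}).

     * try (MemoryStack stack = stackPush()) {
     *     VkBufferCopy.Buffer region = VkBufferCopy.calloc(1, stack)
     *         .srcOffset(0)    // must be less than the size of stagingBuffer
     *         .dstOffset(0)    // must be less than the size of deviceBuffer
     *         .size(byteSize); // must fit in both buffers at the given offsets
     *     // stagingBuffer needs TRANSFER_SRC usage, deviceBuffer needs TRANSFER_DST usage.
     *     vkCmdCopyBuffer(commandBuffer, stagingBuffer, deviceBuffer, region);
     * }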

* *
Valid Usage
* *
    *
  • The {@code size} member of each element of {@code pRegions} must be greater than 0
  • *
  • The {@code srcOffset} member of each element of {@code pRegions} must be less than the size of {@code srcBuffer}
  • *
  • The {@code dstOffset} member of each element of {@code pRegions} must be less than the size of {@code dstBuffer}
  • *
  • The {@code size} member of each element of {@code pRegions} must be less than or equal to the size of {@code srcBuffer} minus {@code srcOffset}
  • *
  • The {@code size} member of each element of {@code pRegions} must be less than or equal to the size of {@code dstBuffer} minus {@code dstOffset}
  • *
  • The union of the source regions, and the union of the destination regions, specified by the elements of {@code pRegions}, must not overlap in memory
  • *
  • {@code srcBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_SRC_BIT BUFFER_USAGE_TRANSFER_SRC_BIT} usage flag
  • *
  • If {@code srcBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code srcBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code dstBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code pRegions} must be a valid pointer to an array of {@code regionCount} {@link VkBufferCopy} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code regionCount} must be greater than 0
  • *
  • Each of {@code commandBuffer}, {@code dstBuffer}, and {@code srcBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* *
  • Command Buffer Levels: Primary, Secondary
  • Render Pass Scope: Outside
  • Supported Queue Types: Transfer, Graphics, Compute
  • Pipeline Type: Transfer
* *
See Also
* *

{@link VkBufferCopy}

* * @param commandBuffer the command buffer into which the command will be recorded. * @param srcBuffer the source buffer. * @param dstBuffer the destination buffer. * @param pRegions a pointer to an array of {@link VkBufferCopy} structures specifying the regions to copy. */ public static void vkCmdCopyBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long srcBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("const VkBufferCopy *") VkBufferCopy.Buffer pRegions) { nvkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, pRegions.remaining(), pRegions.address()); } // --- [ vkCmdCopyImage ] --- /** * Unsafe version of: {@link #vkCmdCopyImage CmdCopyImage} * * @param regionCount the number of regions to copy. */ public static void nvkCmdCopyImage(VkCommandBuffer commandBuffer, long srcImage, int srcImageLayout, long dstImage, int dstImageLayout, int regionCount, long pRegions) { long __functionAddress = commandBuffer.getCapabilities().vkCmdCopyImage; callPJJPV(__functionAddress, commandBuffer.address(), srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); } /** * Copy data between images. * *
C Specification
* *

To copy data between image objects, call:

* *
     * void vkCmdCopyImage(
     *     VkCommandBuffer                             commandBuffer,
     *     VkImage                                     srcImage,
     *     VkImageLayout                               srcImageLayout,
     *     VkImage                                     dstImage,
     *     VkImageLayout                               dstImageLayout,
     *     uint32_t                                    regionCount,
     *     const VkImageCopy*                          pRegions);
* *
Description
* *

Each region in {@code pRegions} is copied from the source image to the same region of the destination image. {@code srcImage} and {@code dstImage} can be the same image or alias the same memory.

* *

The formats of {@code srcImage} and {@code dstImage} must be compatible. Formats are considered compatible if their element size is the same between both formats. For example, {@link #VK_FORMAT_R8G8B8A8_UNORM FORMAT_R8G8B8A8_UNORM} is compatible with {@link #VK_FORMAT_R32_UINT FORMAT_R32_UINT} because both texels are 4 bytes in size. Depth/stencil formats must match exactly.

* *

If the format of {@code srcImage} or {@code dstImage} is a multi-planar image format, regions of each plane to be copied must be specified separately using the {@code srcSubresource} and {@code dstSubresource} members of the {@link VkImageCopy} structure. In this case, the {@code aspectMask} of the {@code srcSubresource} or {@code dstSubresource} that refers to the multi-planar image must be {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR}, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR}, or {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR}. For the purposes of {@link #vkCmdCopyImage CmdCopyImage}, each plane of a multi-planar image is treated as having the format listed in the “Compatible formats of planes of multi-planar formats” section for the plane identified by the {@code aspectMask} of the corresponding subresource. This applies both to {@code VkFormat} and to coordinates used in the copy, which correspond to texels in the plane rather than how these texels map to coordinates in the image as a whole.

* *
Note
* *

For example, the {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR} plane of a {@link KHRSamplerYcbcrConversion#VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR} image is compatible with an image of format {@link #VK_FORMAT_R8G8_UNORM FORMAT_R8G8_UNORM} and (less usefully) with the {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR} plane of an image of format {@link KHRSamplerYcbcrConversion#VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR}, as each texel is 2 bytes in size.

*
* *

{@link #vkCmdCopyImage CmdCopyImage} allows copying between size-compatible compressed and uncompressed internal formats. Formats are size-compatible if the element size of the uncompressed format is equal to the element size (compressed texel block size) of the compressed format. Such a copy does not perform on-the-fly compression or decompression. When copying from an uncompressed format to a compressed format, each texel of uncompressed data of the source image is copied as a raw value to the corresponding compressed texel block of the destination image. When copying from a compressed format to an uncompressed format, each compressed texel block of the source image is copied as a raw value to the corresponding texel of uncompressed data in the destination image. Thus, for example, it is legal to copy between a 128-bit uncompressed format and a compressed format which has a 128-bit sized compressed texel block representing 4×4 texels (using 8 bits per texel), or between a 64-bit uncompressed format and a compressed format which has a 64-bit sized compressed texel block representing 4×4 texels (using 4 bits per texel).

* *

When copying between compressed and uncompressed formats the {@code extent} members represent the texel dimensions of the source image and not the destination. When copying from a compressed image to an uncompressed image the image texel dimensions written to the uncompressed image will be the source extent divided by the compressed texel block dimensions. When copying from an uncompressed image to a compressed image the image texel dimensions written to the compressed image will be the source extent multiplied by the compressed texel block dimensions. In both cases the number of bytes read and the number of bytes written will be identical.
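
As a purely illustrative worked example: {@link #VK_FORMAT_BC1_RGB_UNORM_BLOCK FORMAT_BC1_RGB_UNORM_BLOCK} uses 64-bit 4×4 compressed texel blocks, so it is size-compatible with a 64-bit uncompressed format such as {@link #VK_FORMAT_R16G16B16A16_UNORM FORMAT_R16G16B16A16_UNORM}. Copying with an {@code extent} of {16, 16, 1} from the BC1 source writes a 4×4 (16/4 by 16/4) texel region of the uncompressed destination; 16 blocks × 8 bytes are read and 16 texels × 8 bytes are written, 128 bytes each way. In the opposite direction, an {@code extent} of {4, 4, 1} taken from the uncompressed source fills a 16×16 texel (16 compressed texel block) region of the BC1 destination, again 128 bytes on each side.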

* *

Copying to or from block-compressed images is typically done in multiples of the compressed texel block size. For this reason the {@code extent} must be a multiple of the compressed texel block dimension. There is one exception to this rule which is required to handle compressed images created with dimensions that are not a multiple of the compressed texel block dimensions: if the {@code srcImage} is compressed, then:

* *
    *
  • If {@code extent.width} is not a multiple of the compressed texel block width, then (extent.width + srcOffset.x) must equal the image subresource width.
  • *
  • If {@code extent.height} is not a multiple of the compressed texel block height, then (extent.height + srcOffset.y) must equal the image subresource height.
  • *
  • If {@code extent.depth} is not a multiple of the compressed texel block depth, then (extent.depth + srcOffset.z) must equal the image subresource depth.
  • *
* *

Similarly, if the {@code dstImage} is compressed, then:

* *
    *
  • If {@code extent.width} is not a multiple of the compressed texel block width, then (extent.width + dstOffset.x) must equal the image subresource width.
  • *
  • If {@code extent.height} is not a multiple of the compressed texel block height, then (extent.height + dstOffset.y) must equal the image subresource height.
  • *
  • If {@code extent.depth} is not a multiple of the compressed texel block depth, then (extent.depth + dstOffset.z) must equal the image subresource depth.
  • *
* *

This allows the last compressed texel block of the image in each non-multiple dimension to be included as a source or destination of the copy.

* *

"{@code _422}" image formats that are not multi-planar are treated as having a 2×1 compressed texel block for the purposes of these rules.

* *

{@link #vkCmdCopyImage CmdCopyImage} can be used to copy image data between multisample images, but both images must have the same number of samples.
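
A minimal LWJGL sketch of a single-region color copy between two size-compatible images is shown below; the handles, the {@code width} and {@code height} values and the preceding layout transitions are assumptions of the sketch (stack allocation as in the {@link #vkCmdCopyBuffer CmdCopyBuffer} sketch above).

     * try (MemoryStack stack = stackPush()) {
     *     VkImageCopy.Buffer region = VkImageCopy.calloc(1, stack);
     *     region.srcSubresource(sr -> sr
     *         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
     *         .mipLevel(0)
     *         .baseArrayLayer(0)
     *         .layerCount(1));
     *     region.dstSubresource(sr -> sr
     *         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
     *         .mipLevel(0)
     *         .baseArrayLayer(0)
     *         .layerCount(1));
     *     region.extent(e -> e.width(width).height(height).depth(1)); // texel dimensions of the copied region
     *     // srcOffset and dstOffset stay {0,0,0} because calloc zero-initializes the struct.
     *     vkCmdCopyImage(commandBuffer,
     *         srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
     *         dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
     *         region);
     * }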

* *
Valid Usage
* *
    *
  • The source region specified by each element of {@code pRegions} must be a region that is contained within {@code srcImage} if the {@code srcImage}’s {@code VkFormat} is not a multi-planar format, and must be a region that is contained within the plane being copied if the {@code srcImage}’s {@code VkFormat} is a multi-planar format
  • *
  • The destination region specified by each element of {@code pRegions} must be a region that is contained within {@code dstImage} if the {@code dstImage}’s {@code VkFormat} is not a multi-planar format, and must be a region that is contained within the plane being copied to if the {@code dstImage}’s {@code VkFormat} is a multi-planar format
  • *
  • The union of all source regions, and the union of all destination regions, specified by the elements of {@code pRegions}, must not overlap in memory
  • *
  • {@code srcImage} must use a format that supports {@link KHRMaintenance1#VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR}, which is indicated by {@link VkFormatProperties}{@code ::linearTilingFeatures} (for linearly tiled images) or {@link VkFormatProperties}{@code ::optimalTilingFeatures} (for optimally tiled images) - as returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code srcImage} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_SRC_BIT IMAGE_USAGE_TRANSFER_SRC_BIT} usage flag
  • *
  • If {@code srcImage} is non-sparse then the image or disjoint plane to be copied must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code srcImageLayout} must specify the layout of the image subresources of {@code srcImage} specified in {@code pRegions} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code srcImageLayout} must be {@link #VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL}, {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}, or {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}
  • *
  • {@code dstImage} must use a format that supports {@link KHRMaintenance1#VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR FORMAT_FEATURE_TRANSFER_DST_BIT_KHR}, which is indicated by {@link VkFormatProperties}{@code ::linearTilingFeatures} (for linearly tiled images) or {@link VkFormatProperties}{@code ::optimalTilingFeatures} (for optimally tiled images) - as returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code dstImage} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_DST_BIT IMAGE_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstImage} is non-sparse then the image or disjoint plane that is the destination of the copy must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstImageLayout} must specify the layout of the image subresources of {@code dstImage} specified in {@code pRegions} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code dstImageLayout} must be {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}, {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}, or {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}
  • *
  • If the {@code VkFormat} of each of {@code srcImage} and {@code dstImage} is not a multi-planar format, the {@code VkFormat} of each of {@code srcImage} and {@code dstImage} must be compatible, as defined below
  • *
  • In a copy to or from a plane of a multi-planar image, the {@code VkFormat} of the image and plane must be compatible according to the description of compatible planes for the plane being copied
  • *
  • When a copy is performed to or from an image with a multi-planar format, the {@code aspectMask} of the {@code srcSubresource} and/or {@code dstSubresource} that refers to the multi-planar image must be {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR}, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR}, or {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} (with {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR} valid only for a {@code VkFormat} with three planes)
  • *
  • The sample count of {@code srcImage} and {@code dstImage} must match
  • *
  • The {@code srcSubresource.mipLevel} member of each element of {@code pRegions} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code srcImage} was created
  • *
  • The {@code dstSubresource.mipLevel} member of each element of {@code pRegions} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code dstImage} was created
  • *
  • The {@code srcSubresource.baseArrayLayer} + {@code srcSubresource.layerCount} of each element of {@code pRegions} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code srcImage} was created
  • *
  • The {@code dstSubresource.baseArrayLayer} + {@code dstSubresource.layerCount} of each element of {@code pRegions} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code dstImage} was created
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code srcImage} must be a valid {@code VkImage} handle
  • *
  • {@code srcImageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code dstImage} must be a valid {@code VkImage} handle
  • *
  • {@code dstImageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code pRegions} must be a valid pointer to an array of {@code regionCount} valid {@link VkImageCopy} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code regionCount} must be greater than 0
  • *
  • Each of {@code commandBuffer}, {@code dstImage}, and {@code srcImage} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* *
  • Command Buffer Levels: Primary, Secondary
  • Render Pass Scope: Outside
  • Supported Queue Types: Transfer, Graphics, Compute
  • Pipeline Type: Transfer
* *
See Also
* *

{@link VkImageCopy}

* * @param commandBuffer the command buffer into which the command will be recorded. * @param srcImage the source image. * @param srcImageLayout the current layout of the source image subresource. * @param dstImage the destination image. * @param dstImageLayout the current layout of the destination image subresource. * @param pRegions a pointer to an array of {@link VkImageCopy} structures specifying the regions to copy. */ public static void vkCmdCopyImage(VkCommandBuffer commandBuffer, @NativeType("VkImage") long srcImage, @NativeType("VkImageLayout") int srcImageLayout, @NativeType("VkImage") long dstImage, @NativeType("VkImageLayout") int dstImageLayout, @NativeType("const VkImageCopy *") VkImageCopy.Buffer pRegions) { nvkCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, pRegions.remaining(), pRegions.address()); } // --- [ vkCmdBlitImage ] --- /** * Unsafe version of: {@link #vkCmdBlitImage CmdBlitImage} * * @param regionCount the number of regions to blit. */ public static void nvkCmdBlitImage(VkCommandBuffer commandBuffer, long srcImage, int srcImageLayout, long dstImage, int dstImageLayout, int regionCount, long pRegions, int filter) { long __functionAddress = commandBuffer.getCapabilities().vkCmdBlitImage; callPJJPV(__functionAddress, commandBuffer.address(), srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter); } /** * Copy regions of an image, potentially performing format conversion. * *
C Specification
* *

To copy regions of a source image into a destination image, potentially performing format conversion, arbitrary scaling, and filtering, call:

* *
     * void vkCmdBlitImage(
     *     VkCommandBuffer                             commandBuffer,
     *     VkImage                                     srcImage,
     *     VkImageLayout                               srcImageLayout,
     *     VkImage                                     dstImage,
     *     VkImageLayout                               dstImageLayout,
     *     uint32_t                                    regionCount,
     *     const VkImageBlit*                          pRegions,
     *     VkFilter                                    filter);
* *
Description
* *

{@link #vkCmdBlitImage CmdBlitImage} must not be used for multisampled source or destination images. Use {@link #vkCmdResolveImage CmdResolveImage} for this purpose.

* *

As the sizes of the source and destination extents can differ in any dimension, texels in the source extent are scaled and filtered to the destination extent. Scaling occurs via the following operations:

* *
    *
  • For each destination texel, the integer coordinate of that texel is converted to an unnormalized texture coordinate, using the effective inverse of the equations described in unnormalized to integer conversion:

    u_base = i + ½
    v_base = j + ½
    w_base = k + ½

  • These base coordinates are then offset by the first destination offset:

    u_offset = u_base - x_dst0
    v_offset = v_base - y_dst0
    w_offset = w_base - z_dst0
    a_offset = a - baseArrayCount_dst

  • The scale is determined from the source and destination regions, and applied to the offset coordinates:

    scale_u = (x_src1 - x_src0) / (x_dst1 - x_dst0)
    scale_v = (y_src1 - y_src0) / (y_dst1 - y_dst0)
    scale_w = (z_src1 - z_src0) / (z_dst1 - z_dst0)
    u_scaled = u_offset * scale_u
    v_scaled = v_offset * scale_v
    w_scaled = w_offset * scale_w

  • Finally the source offset is added to the scaled coordinates, to determine the final unnormalized coordinates used to sample from {@code srcImage}:

    u = u_scaled + x_src0
    v = v_scaled + y_src0
    w = w_scaled + z_src0
    q = mipLevel
    a = a_offset + baseArrayCount_src
* *

These coordinates are used to sample from the source image, as described in the Image Operations chapter, with the filter mode equal to that of {@code filter}, a mipmap mode of {@link #VK_SAMPLER_MIPMAP_MODE_NEAREST SAMPLER_MIPMAP_MODE_NEAREST} and an address mode of {@link #VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE}. Implementations must clamp at the edge of the source image, and may additionally clamp to the edge of the source region.

* *
Note
* *

Due to allowable rounding errors in the generation of the source texture coordinates, it is not always possible to guarantee exactly which source texels will be sampled for a given blit. As rounding errors are implementation dependent, the exact results of a blitting operation are also implementation dependent.

*
* *

Blits are done layer by layer starting with the {@code baseArrayLayer} member of {@code srcSubresource} for the source and {@code dstSubresource} for the destination. {@code layerCount} layers are blitted to the destination image.

* *

3D textures are blitted slice by slice. Slices in the source region bounded by {@code srcOffsets}[0].{@code z} and {@code srcOffsets}[1].{@code z} are copied to slices in the destination region bounded by {@code dstOffsets}[0].{@code z} and {@code dstOffsets}[1].{@code z}. For each destination slice, a source z coordinate is linearly interpolated between {@code srcOffsets}[0].{@code z} and {@code srcOffsets}[1].{@code z}. If the {@code filter} parameter is {@link #VK_FILTER_LINEAR FILTER_LINEAR} then the value sampled from the source image is taken by doing linear filtering using the interpolated z coordinate. If the {@code filter} parameter is {@link #VK_FILTER_NEAREST FILTER_NEAREST} then the value sampled from the source image is taken from the single nearest slice (with undefined rounding mode).

* *

The following filtering and conversion rules apply:

* *
    *
  • Integer formats can only be converted to other integer formats with the same signedness.
  • *
  • No format conversion is supported between depth/stencil images. The formats must match.
  • *
  • Format conversions on unorm, snorm, unscaled and packed float formats of the copied aspect of the image are performed by first converting the pixels to float values.
  • *
  • For sRGB source formats, nonlinear RGB values are converted to linear representation prior to filtering.
  • *
  • After filtering, the float values are first clamped and then cast to the destination image format. In case of sRGB destination format, linear RGB values are converted to nonlinear representation before writing the pixel to the image.
  • *
* *

Signed and unsigned integers are converted by first clamping to the representable range of the destination format, then casting the value.
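
Putting these rules together, a hedged LWJGL sketch of the common mip-generation pattern (blitting mip level 0 of a color image into mip level 1 at half the size) might look like this; the handles, the source dimensions (assumed to be at least 2) and the per-subresource layout transitions and barriers are assumptions of the sketch.

     * try (MemoryStack stack = stackPush()) {
     *     VkImageBlit.Buffer blit = VkImageBlit.calloc(1, stack);
     *     blit.srcSubresource(sr -> sr
     *         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
     *         .mipLevel(0)
     *         .baseArrayLayer(0)
     *         .layerCount(1));
     *     blit.srcOffsets(1).set(srcWidth, srcHeight, 1);         // srcOffsets[0] stays {0,0,0}
     *     blit.dstSubresource(sr -> sr
     *         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
     *         .mipLevel(1)
     *         .baseArrayLayer(0)
     *         .layerCount(1));
     *     blit.dstOffsets(1).set(srcWidth / 2, srcHeight / 2, 1); // dstOffsets[0] stays {0,0,0}
     *     vkCmdBlitImage(commandBuffer,
     *         image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
     *         image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
     *         blit, VK_FILTER_LINEAR);
     * }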

* *
Valid Usage
* *
    *
  • The source region specified by each element of {@code pRegions} must be a region that is contained within {@code srcImage}
  • *
  • The destination region specified by each element of {@code pRegions} must be a region that is contained within {@code dstImage}
  • *
  • The union of all destination regions, specified by the elements of {@code pRegions}, must not overlap in memory with any texel that may be sampled during the blit operation
  • *
  • {@code srcImage} must use a format that supports {@link #VK_FORMAT_FEATURE_BLIT_SRC_BIT FORMAT_FEATURE_BLIT_SRC_BIT}, which is indicated by {@link VkFormatProperties}{@code ::linearTilingFeatures} (for linearly tiled images) or {@link VkFormatProperties}{@code ::optimalTilingFeatures} (for optimally tiled images) - as returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code srcImage} must not use a format listed in the “Formats requiring sampler Y'CBCR conversion for {@link #VK_IMAGE_ASPECT_COLOR_BIT IMAGE_ASPECT_COLOR_BIT} image views” table
  • *
  • {@code srcImage} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_SRC_BIT IMAGE_USAGE_TRANSFER_SRC_BIT} usage flag
  • *
  • If {@code srcImage} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code srcImageLayout} must specify the layout of the image subresources of {@code srcImage} specified in {@code pRegions} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code srcImageLayout} must be {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}, {@link #VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL} or {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}
  • *
  • {@code dstImage} must use a format that supports {@link #VK_FORMAT_FEATURE_BLIT_DST_BIT FORMAT_FEATURE_BLIT_DST_BIT}, which is indicated by {@link VkFormatProperties}{@code ::linearTilingFeatures} (for linearly tiled images) or {@link VkFormatProperties}{@code ::optimalTilingFeatures} (for optimally tiled images) - as returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code dstImage} must not use a format listed in the “Formats requiring sampler Y'CBCR conversion for {@link #VK_IMAGE_ASPECT_COLOR_BIT IMAGE_ASPECT_COLOR_BIT} image views” table
  • *
  • {@code dstImage} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_DST_BIT IMAGE_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstImage} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstImageLayout} must specify the layout of the image subresources of {@code dstImage} specified in {@code pRegions} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code dstImageLayout} must be {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}, {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL} or {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}
  • *
  • The sample count of {@code srcImage} and {@code dstImage} must both be equal to {@link #VK_SAMPLE_COUNT_1_BIT SAMPLE_COUNT_1_BIT}
  • *
  • If either of {@code srcImage} or {@code dstImage} was created with a signed integer {@code VkFormat}, the other must also have been created with a signed integer {@code VkFormat}
  • *
  • If either of {@code srcImage} or {@code dstImage} was created with an unsigned integer {@code VkFormat}, the other must also have been created with an unsigned integer {@code VkFormat}
  • *
  • If either of {@code srcImage} or {@code dstImage} was created with a depth/stencil format, the other must have exactly the same format
  • *
  • If {@code srcImage} was created with a depth/stencil format, {@code filter} must be {@link #VK_FILTER_NEAREST FILTER_NEAREST}
  • *
  • {@code srcImage} must have been created with a {@code samples} value of {@link #VK_SAMPLE_COUNT_1_BIT SAMPLE_COUNT_1_BIT}
  • *
  • {@code dstImage} must have been created with a {@code samples} value of {@link #VK_SAMPLE_COUNT_1_BIT SAMPLE_COUNT_1_BIT}
  • *
  • If {@code filter} is {@link #VK_FILTER_LINEAR FILTER_LINEAR}, {@code srcImage} must be of a format which supports linear filtering, as specified by the {@link #VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • If {@code filter} is {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG}, {@code srcImage} must be of a format which supports cubic filtering, as specified by the {@link IMGFilterCubic#VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} (for a linear image) or {@link VkFormatProperties}{@code ::optimalTilingFeatures}(for an optimally tiled image) returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • If {@code filter} is {@link IMGFilterCubic#VK_FILTER_CUBIC_IMG FILTER_CUBIC_IMG}, {@code srcImage} must have a {@code VkImageType} of {@link #VK_IMAGE_TYPE_3D IMAGE_TYPE_3D}
  • *
  • The {@code srcSubresource.mipLevel} member of each element of {@code pRegions} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code srcImage} was created
  • *
  • The {@code dstSubresource.mipLevel} member of each element of {@code pRegions} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code dstImage} was created
  • *
  • The {@code srcSubresource.baseArrayLayer} + {@code srcSubresource.layerCount} of each element of {@code pRegions} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code srcImage} was created
  • *
  • The {@code dstSubresource.baseArrayLayer} + {@code dstSubresource.layerCount} of each element of {@code pRegions} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code dstImage} was created
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code srcImage} must be a valid {@code VkImage} handle
  • *
  • {@code srcImageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code dstImage} must be a valid {@code VkImage} handle
  • *
  • {@code dstImageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code pRegions} must be a valid pointer to an array of {@code regionCount} valid {@link VkImageBlit} structures
  • *
  • {@code filter} must be a valid {@code VkFilter} value
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code regionCount} must be greater than 0
  • *
  • Each of {@code commandBuffer}, {@code dstImage}, and {@code srcImage} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* *
  • Command Buffer Levels: Primary, Secondary
  • Render Pass Scope: Outside
  • Supported Queue Types: Graphics
  • Pipeline Type: Transfer
* *
See Also
* *

{@link VkImageBlit}

* * @param commandBuffer the command buffer into which the command will be recorded. * @param srcImage the source image. * @param srcImageLayout the layout of the source image subresources for the blit. * @param dstImage the destination image. * @param dstImageLayout the layout of the destination image subresources for the blit. * @param pRegions a pointer to an array of {@link VkImageBlit} structures specifying the regions to blit. * @param filter a {@code VkFilter} specifying the filter to apply if the blits require scaling. */ public static void vkCmdBlitImage(VkCommandBuffer commandBuffer, @NativeType("VkImage") long srcImage, @NativeType("VkImageLayout") int srcImageLayout, @NativeType("VkImage") long dstImage, @NativeType("VkImageLayout") int dstImageLayout, @NativeType("const VkImageBlit *") VkImageBlit.Buffer pRegions, @NativeType("VkFilter") int filter) { nvkCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, pRegions.remaining(), pRegions.address(), filter); } // --- [ vkCmdCopyBufferToImage ] --- /** * Unsafe version of: {@link #vkCmdCopyBufferToImage CmdCopyBufferToImage} * * @param regionCount the number of regions to copy. */ public static void nvkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, long srcBuffer, long dstImage, int dstImageLayout, int regionCount, long pRegions) { long __functionAddress = commandBuffer.getCapabilities().vkCmdCopyBufferToImage; callPJJPV(__functionAddress, commandBuffer.address(), srcBuffer, dstImage, dstImageLayout, regionCount, pRegions); } /** * Copy data from a buffer into an image. * *
C Specification
* *

To copy data from a buffer object to an image object, call:

* *
     * void vkCmdCopyBufferToImage(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    srcBuffer,
     *     VkImage                                     dstImage,
     *     VkImageLayout                               dstImageLayout,
     *     uint32_t                                    regionCount,
     *     const VkBufferImageCopy*                    pRegions);
* *
Description
* *

Each region in {@code pRegions} is copied from the specified region of the source buffer to the specified region of the destination image.

* *

If the format of {@code dstImage} is a multi-planar image format, regions of each plane to be a target of a copy must be specified separately using the {@code pRegions} member of the {@link VkBufferImageCopy} structure. In this case, the {@code aspectMask} of {@code imageSubresource} must be {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR}, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR}, or {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR}. For the purposes of {@link #vkCmdCopyBufferToImage CmdCopyBufferToImage}, each plane of a multi-planar image is treated as having the format listed in the “Compatible formats of planes of multi-planar formats” section for the plane identified by the {@code aspectMask} of the corresponding subresource. This applies both to {@code VkFormat} and to coordinates used in the copy, which correspond to texels in the plane rather than how these texels map to coordinates in the image as a whole.
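
A typical use of this command is uploading texel data from a staging buffer into a freshly created texture. Below is a minimal, hedged LWJGL sketch; the handles and dimensions are assumptions, and {@code textureImage} is assumed to have already been transitioned to {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}.

     * try (MemoryStack stack = stackPush()) {
     *     VkBufferImageCopy.Buffer region = VkBufferImageCopy.calloc(1, stack)
     *         .bufferOffset(0)
     *         .bufferRowLength(0)    // 0 means tightly packed rows (imageExtent.width)
     *         .bufferImageHeight(0); // 0 means tightly packed layers (imageExtent.height)
     *     region.imageSubresource(sr -> sr
     *         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
     *         .mipLevel(0)
     *         .baseArrayLayer(0)
     *         .layerCount(1));
     *     region.imageExtent(e -> e.width(width).height(height).depth(1));
     *     vkCmdCopyBufferToImage(commandBuffer, stagingBuffer,
     *         textureImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, region);
     * }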

* *
Valid Usage
* *
    *
  • The buffer region specified by each element of {@code pRegions} must be a region that is contained within {@code srcBuffer}
  • *
  • The image region specified by each element of {@code pRegions} must be a region that is contained within {@code dstImage} if the {@code dstImage}’s {@code VkFormat} is not a multi-planar format, and must be a region that is contained within the plane being copied to if the {@code dstImage}’s {@code VkFormat} is a multi-planar format
  • *
  • The union of all source regions, and the union of all destination regions, specified by the elements of {@code pRegions}, must not overlap in memory
  • *
  • {@code srcBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_SRC_BIT BUFFER_USAGE_TRANSFER_SRC_BIT} usage flag
  • *
  • {@code dstImage} must use a format that supports {@link KHRMaintenance1#VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR FORMAT_FEATURE_TRANSFER_DST_BIT_KHR}, which is indicated by {@link VkFormatProperties}{@code ::linearTilingFeatures} (for linearly tiled images) or {@link VkFormatProperties}{@code ::optimalTilingFeatures} (for optimally tiled images) - as returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • If {@code srcBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstImage} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_DST_BIT IMAGE_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstImage} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstImage} must have a sample count equal to {@link #VK_SAMPLE_COUNT_1_BIT SAMPLE_COUNT_1_BIT}
  • *
  • {@code dstImageLayout} must specify the layout of the image subresources of {@code dstImage} specified in {@code pRegions} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code dstImageLayout} must be {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}, {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}, or {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}
  • *
  • The {@code imageSubresource.mipLevel} member of each element of {@code pRegions} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code dstImage} was created
  • *
  • The {@code imageSubresource.baseArrayLayer} + {@code imageSubresource.layerCount} of each element of {@code pRegions} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code dstImage} was created
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code srcBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code dstImage} must be a valid {@code VkImage} handle
  • *
  • {@code dstImageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code pRegions} must be a valid pointer to an array of {@code regionCount} valid {@link VkBufferImageCopy} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code regionCount} must be greater than 0
  • *
  • Each of {@code commandBuffer}, {@code dstImage}, and {@code srcBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* *
  • Command Buffer Levels: Primary, Secondary
  • Render Pass Scope: Outside
  • Supported Queue Types: Transfer, Graphics, Compute
  • Pipeline Type: Transfer
* *
See Also
* *

{@link VkBufferImageCopy}

* * @param commandBuffer the command buffer into which the command will be recorded. * @param srcBuffer the source buffer. * @param dstImage the destination image. * @param dstImageLayout the layout of the destination image subresources for the copy. * @param pRegions a pointer to an array of {@link VkBufferImageCopy} structures specifying the regions to copy. */ public static void vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long srcBuffer, @NativeType("VkImage") long dstImage, @NativeType("VkImageLayout") int dstImageLayout, @NativeType("const VkBufferImageCopy *") VkBufferImageCopy.Buffer pRegions) { nvkCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, pRegions.remaining(), pRegions.address()); } // --- [ vkCmdCopyImageToBuffer ] --- /** * Unsafe version of: {@link #vkCmdCopyImageToBuffer CmdCopyImageToBuffer} * * @param regionCount the number of regions to copy. */ public static void nvkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, long srcImage, int srcImageLayout, long dstBuffer, int regionCount, long pRegions) { long __functionAddress = commandBuffer.getCapabilities().vkCmdCopyImageToBuffer; callPJJPV(__functionAddress, commandBuffer.address(), srcImage, srcImageLayout, dstBuffer, regionCount, pRegions); } /** * Copy image data into a buffer. * *
C Specification
* *

To copy data from an image object to a buffer object, call:

* *
     * void vkCmdCopyImageToBuffer(
     *     VkCommandBuffer                             commandBuffer,
     *     VkImage                                     srcImage,
     *     VkImageLayout                               srcImageLayout,
     *     VkBuffer                                    dstBuffer,
     *     uint32_t                                    regionCount,
     *     const VkBufferImageCopy*                    pRegions);
* *
Description
* *

Each region in {@code pRegions} is copied from the specified region of the source image to the specified region of the destination buffer.

* *

If the {@code VkFormat} of {@code srcImage} is a multi-planar image format, regions of each plane to be a source of a copy must be specified separately using the {@code pRegions} member of the {@link VkBufferImageCopy} structure. In this case, the {@code aspectMask} of {@code imageSubresource} must be {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR}, {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_1_BIT_KHR IMAGE_ASPECT_PLANE_1_BIT_KHR}, or {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_2_BIT_KHR IMAGE_ASPECT_PLANE_2_BIT_KHR}. For the purposes of {@link #vkCmdCopyBufferToImage CmdCopyBufferToImage}, each plane of a multi-planar image is treated as having the format listed in the “Compatible formats of planes of multi-planar formats” section for the plane identified by the {@code aspectMask} of the corresponding subresource. This applies both to {@code VkFormat} and to coordinates used in the copy, which correspond to texels in the plane rather than how these texels map to coordinates in the image as a whole.
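
Conversely, this command is the usual way to read an image back into host-visible memory (for example, for a screenshot). A hedged LWJGL sketch, with the handles and dimensions assumed and {@code srcImage} assumed to already be in {@link #VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL}:

     * try (MemoryStack stack = stackPush()) {
     *     VkBufferImageCopy.Buffer region = VkBufferImageCopy.calloc(1, stack);
     *     region.imageSubresource(sr -> sr
     *         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
     *         .mipLevel(0)
     *         .baseArrayLayer(0)
     *         .layerCount(1));
     *     region.imageExtent(e -> e.width(width).height(height).depth(1));
     *     // bufferOffset, bufferRowLength and bufferImageHeight stay 0: tightly packed output.
     *     vkCmdCopyImageToBuffer(commandBuffer,
     *         srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
     *         readbackBuffer, region);
     * }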

* *
Valid Usage
* *
    *
  • The image region specified by each element of {@code pRegions} must be a region that is contained within {@code srcImage} if the {@code srcImage}’s {@code VkFormat} is not a multi-planar format, and must be a region that is contained within the plane being copied if the {@code srcImage}’s {@code VkFormat} is a multi-planar format
  • *
  • The buffer region specified by each element of {@code pRegions} must be a region that is contained within {@code dstBuffer}
  • *
  • The union of all source regions, and the union of all destination regions, specified by the elements of {@code pRegions}, must not overlap in memory
  • *
  • {@code srcImage} must use a format that supports {@link KHRMaintenance1#VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR}, which is indicated by {@link VkFormatProperties}{@code ::linearTilingFeatures} (for linearly tiled images) or {@link VkFormatProperties}{@code ::optimalTilingFeatures} (for optimally tiled images) - as returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code srcImage} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_SRC_BIT IMAGE_USAGE_TRANSFER_SRC_BIT} usage flag
  • *
  • If {@code srcImage} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code srcImage} must have a sample count equal to {@link #VK_SAMPLE_COUNT_1_BIT SAMPLE_COUNT_1_BIT}
  • *
  • {@code srcImageLayout} must specify the layout of the image subresources of {@code srcImage} specified in {@code pRegions} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code srcImageLayout} must be {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}, {@link #VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL} or {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}
  • *
  • {@code dstBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • The {@code imageSubresource.mipLevel} member of each element of {@code pRegions} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code srcImage} was created
  • *
  • The {@code imageSubresource.baseArrayLayer} + {@code imageSubresource.layerCount} of each element of {@code pRegions} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code srcImage} was created
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code srcImage} must be a valid {@code VkImage} handle
  • *
  • {@code srcImageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code dstBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code pRegions} must be a valid pointer to an array of {@code regionCount} valid {@link VkBufferImageCopy} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code regionCount} must be greater than 0
  • *
  • Each of {@code commandBuffer}, {@code dstBuffer}, and {@code srcImage} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Transfer, Graphics, Compute | Transfer
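* *
Example
* *

A minimal, illustrative sketch (not part of the generated documentation) of recording a full-image copy into a read-back buffer with these bindings. The {@code commandBuffer}, {@code srcImage}, {@code readbackBuffer}, {@code width} and {@code height} values are hypothetical and assumed to have been created elsewhere, with the image already in {@link #VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL}. For a multi-planar source, {@code aspectMask} would instead select a single plane, e.g. {@link KHRSamplerYcbcrConversion#VK_IMAGE_ASPECT_PLANE_0_BIT_KHR IMAGE_ASPECT_PLANE_0_BIT_KHR}.

* *
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkBufferImageCopy.Buffer region = VkBufferImageCopy.callocStack(1, stack)
     *         .bufferOffset(0)
     *         .bufferRowLength(0)      // 0 means tightly packed according to imageExtent
     *         .bufferImageHeight(0);
     *     region.imageSubresource()
     *         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
     *         .mipLevel(0)
     *         .baseArrayLayer(0)
     *         .layerCount(1);
     *     region.imageOffset().set(0, 0, 0);
     *     region.imageExtent().set(width, height, 1);
     *
     *     vkCmdCopyImageToBuffer(commandBuffer, srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, readbackBuffer, region);
     * }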
* *
See Also
* *

{@link VkBufferImageCopy}

 *
 * @param commandBuffer  the command buffer into which the command will be recorded.
 * @param srcImage       the source image.
 * @param srcImageLayout the layout of the source image subresources for the copy.
 * @param dstBuffer      the destination buffer.
 * @param pRegions       a pointer to an array of {@link VkBufferImageCopy} structures specifying the regions to copy.
 */
public static void vkCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, @NativeType("VkImage") long srcImage, @NativeType("VkImageLayout") int srcImageLayout, @NativeType("VkBuffer") long dstBuffer, @NativeType("const VkBufferImageCopy *") VkBufferImageCopy.Buffer pRegions) {
    nvkCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, pRegions.remaining(), pRegions.address());
}

// --- [ vkCmdUpdateBuffer ] ---

/**
 * Unsafe version of: {@link #vkCmdUpdateBuffer CmdUpdateBuffer}
 *
 * @param dataSize the number of bytes to update, and must be a multiple of 4.
 */
public static void nvkCmdUpdateBuffer(VkCommandBuffer commandBuffer, long dstBuffer, long dstOffset, long dataSize, long pData) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdUpdateBuffer;
    callPJJJPV(__functionAddress, commandBuffer.address(), dstBuffer, dstOffset, dataSize, pData);
}

/**
 * Update a buffer's contents from host memory.
 *
C Specification
* *

To update buffer data inline in a command buffer, call:

* *
     * void vkCmdUpdateBuffer(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    dstBuffer,
     *     VkDeviceSize                                dstOffset,
     *     VkDeviceSize                                dataSize,
     *     const void*                                 pData);
* *
Description
* *

{@code dataSize} must be less than or equal to 65536 bytes. For larger updates, applications can use buffer to buffer copies.

* *
Note
* *

Buffer updates performed with {@link #vkCmdUpdateBuffer CmdUpdateBuffer} first copy the data into command buffer memory when the command is recorded (which requires additional storage and may incur an additional allocation), and then copy the data from the command buffer into {@code dstBuffer} when the command is executed on a device.

* *

The additional cost of this functionality compared to buffer to buffer copies means it is only recommended for very small amounts of data, and is why it is limited to only 65536 bytes.

* *

Applications can work around this by issuing multiple {@link #vkCmdUpdateBuffer CmdUpdateBuffer} commands to different ranges of the same buffer, but it is strongly recommended that they do not.

*
* *

The source data is copied from the user pointer to the command buffer when the command is called.

* *

{@link #vkCmdUpdateBuffer CmdUpdateBuffer} is only allowed outside of a render pass. This command is treated as a "{@code transfer}" operation for the purposes of synchronization barriers. {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} must be specified in {@code usage} of {@link VkBufferCreateInfo} in order for the buffer to be compatible with {@link #vkCmdUpdateBuffer CmdUpdateBuffer}.

* *
Valid Usage
* *
    *
  • {@code dstOffset} must be less than the size of {@code dstBuffer}
  • *
  • {@code dataSize} must be less than or equal to the size of {@code dstBuffer} minus {@code dstOffset}
  • *
  • {@code dstBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstOffset} must be a multiple of 4
  • *
  • {@code dataSize} must be less than or equal to 65536
  • *
  • {@code dataSize} must be a multiple of 4
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code dstBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code pData} must be a valid pointer to an array of {@code dataSize} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code dataSize} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code dstBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Transfer, Graphics, Compute | Transfer
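* *
Example
* *

A minimal, illustrative sketch of a small inline update, assuming hypothetical {@code commandBuffer} and {@code uniformBuffer} handles created elsewhere and the usual {@code MemoryStack} idiom. The data is consumed at record time, but a pipeline barrier is still required before the device reads the updated range.

* *
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     ByteBuffer data = stack.malloc(16);     // dataSize is taken from data.remaining() = 16 bytes
     *     data.putFloat(0, 1.0f)
     *         .putFloat(4, 0.5f)
     *         .putFloat(8, 0.25f)
     *         .putFloat(12, 1.0f);
     *
     *     vkCmdUpdateBuffer(commandBuffer, uniformBuffer, 0L, data); // dstOffset = 0, a multiple of 4
     * } // the stack copy may be released immediately; the bytes now live in the command buffer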
 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param dstBuffer     a handle to the buffer to be updated.
 * @param dstOffset     the byte offset into the buffer to start updating, and must be a multiple of 4.
 * @param pData         a pointer to the source data for the buffer update, and must be at least {@code dataSize} bytes in size.
 */
public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") ByteBuffer pData) {
    nvkCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, pData.remaining(), memAddress(pData));
}

/**
 * Update a buffer's contents from host memory.
 *
C Specification
* *

To update buffer data inline in a command buffer, call:

* *
     * void vkCmdUpdateBuffer(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    dstBuffer,
     *     VkDeviceSize                                dstOffset,
     *     VkDeviceSize                                dataSize,
     *     const void*                                 pData);
* *
Description
* *

{@code dataSize} must be less than or equal to 65536 bytes. For larger updates, applications can use buffer to buffer copies.

* *
Note
* *

Buffer updates performed with {@link #vkCmdUpdateBuffer CmdUpdateBuffer} first copy the data into command buffer memory when the command is recorded (which requires additional storage and may incur an additional allocation), and then copy the data from the command buffer into {@code dstBuffer} when the command is executed on a device.

* *

The additional cost of this functionality compared to buffer to buffer copies means it is only recommended for very small amounts of data, and is why it is limited to only 65536 bytes.

* *

Applications can work around this by issuing multiple {@link #vkCmdUpdateBuffer CmdUpdateBuffer} commands to different ranges of the same buffer, but it is strongly recommended that they do not.

*
* *

The source data is copied from the user pointer to the command buffer when the command is called.

* *

{@link #vkCmdUpdateBuffer CmdUpdateBuffer} is only allowed outside of a render pass. This command is treated as a "{@code transfer}" operation for the purposes of synchronization barriers. {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} must be specified in {@code usage} of {@link VkBufferCreateInfo} in order for the buffer to be compatible with {@link #vkCmdUpdateBuffer CmdUpdateBuffer}.

* *
Valid Usage
* *
    *
  • {@code dstOffset} must be less than the size of {@code dstBuffer}
  • *
  • {@code dataSize} must be less than or equal to the size of {@code dstBuffer} minus {@code dstOffset}
  • *
  • {@code dstBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstOffset} must be a multiple of 4
  • *
  • {@code dataSize} must be less than or equal to 65536
  • *
  • {@code dataSize} must be a multiple of 4
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code dstBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code pData} must be a valid pointer to an array of {@code dataSize} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code dataSize} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code dstBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Transfer, Graphics, Compute | Transfer
 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param dstBuffer     a handle to the buffer to be updated.
 * @param dstOffset     the byte offset into the buffer to start updating, and must be a multiple of 4.
 * @param pData         a pointer to the source data for the buffer update, and must be at least {@code dataSize} bytes in size.
 */
public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") ShortBuffer pData) {
    nvkCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, pData.remaining() << 1, memAddress(pData));
}

/**
 * Update a buffer's contents from host memory.
 *
C Specification
* *

To update buffer data inline in a command buffer, call:

* *
     * void vkCmdUpdateBuffer(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    dstBuffer,
     *     VkDeviceSize                                dstOffset,
     *     VkDeviceSize                                dataSize,
     *     const void*                                 pData);
* *
Description
* *

{@code dataSize} must be less than or equal to 65536 bytes. For larger updates, applications can use buffer to buffer copies.

* *
Note
* *

Buffer updates performed with {@link #vkCmdUpdateBuffer CmdUpdateBuffer} first copy the data into command buffer memory when the command is recorded (which requires additional storage and may incur an additional allocation), and then copy the data from the command buffer into {@code dstBuffer} when the command is executed on a device.

* *

The additional cost of this functionality compared to buffer to buffer copies means it is only recommended for very small amounts of data, and is why it is limited to only 65536 bytes.

* *

Applications can work around this by issuing multiple {@link #vkCmdUpdateBuffer CmdUpdateBuffer} commands to different ranges of the same buffer, but it is strongly recommended that they do not.

*
* *

The source data is copied from the user pointer to the command buffer when the command is called.

* *

{@link #vkCmdUpdateBuffer CmdUpdateBuffer} is only allowed outside of a render pass. This command is treated as a "{@code transfer}" operation for the purposes of synchronization barriers. {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} must be specified in {@code usage} of {@link VkBufferCreateInfo} in order for the buffer to be compatible with {@link #vkCmdUpdateBuffer CmdUpdateBuffer}.

* *
Valid Usage
* *
    *
  • {@code dstOffset} must be less than the size of {@code dstBuffer}
  • *
  • {@code dataSize} must be less than or equal to the size of {@code dstBuffer} minus {@code dstOffset}
  • *
  • {@code dstBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstOffset} must be a multiple of 4
  • *
  • {@code dataSize} must be less than or equal to 65536
  • *
  • {@code dataSize} must be a multiple of 4
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code dstBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code pData} must be a valid pointer to an array of {@code dataSize} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code dataSize} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code dstBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Transfer, Graphics, Compute | Transfer
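* *
Example
* *

The typed overloads derive {@code dataSize} from the number of remaining elements multiplied by the element size; for an {@code IntBuffer} this is {@code remaining() << 2} bytes. A small hypothetical sketch, with {@code commandBuffer} and {@code paramsBuffer} created elsewhere:

* *
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     IntBuffer params = stack.ints(64, 64, 1, 0);              // 4 ints -> dataSize = 16 bytes
     *     vkCmdUpdateBuffer(commandBuffer, paramsBuffer, 0L, params);
     * }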
 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param dstBuffer     a handle to the buffer to be updated.
 * @param dstOffset     the byte offset into the buffer to start updating, and must be a multiple of 4.
 * @param pData         a pointer to the source data for the buffer update, and must be at least {@code dataSize} bytes in size.
 */
public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") IntBuffer pData) {
    nvkCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, pData.remaining() << 2, memAddress(pData));
}

/**
 * Update a buffer's contents from host memory.
 *
C Specification
* *

To update buffer data inline in a command buffer, call:

* *
     * void vkCmdUpdateBuffer(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    dstBuffer,
     *     VkDeviceSize                                dstOffset,
     *     VkDeviceSize                                dataSize,
     *     const void*                                 pData);
* *
Description
* *

{@code dataSize} must be less than or equal to 65536 bytes. For larger updates, applications can use buffer to buffer copies.

* *
Note
* *

Buffer updates performed with {@link #vkCmdUpdateBuffer CmdUpdateBuffer} first copy the data into command buffer memory when the command is recorded (which requires additional storage and may incur an additional allocation), and then copy the data from the command buffer into {@code dstBuffer} when the command is executed on a device.

* *

The additional cost of this functionality compared to buffer to buffer copies means it is only recommended for very small amounts of data, and is why it is limited to only 65536 bytes.

* *

Applications can work around this by issuing multiple {@link #vkCmdUpdateBuffer CmdUpdateBuffer} commands to different ranges of the same buffer, but it is strongly recommended that they do not.

*
* *

The source data is copied from the user pointer to the command buffer when the command is called.

* *

{@link #vkCmdUpdateBuffer CmdUpdateBuffer} is only allowed outside of a render pass. This command is treated as a "{@code transfer}" operation for the purposes of synchronization barriers. {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} must be specified in {@code usage} of {@link VkBufferCreateInfo} in order for the buffer to be compatible with {@link #vkCmdUpdateBuffer CmdUpdateBuffer}.

* *
Valid Usage
* *
    *
  • {@code dstOffset} must be less than the size of {@code dstBuffer}
  • *
  • {@code dataSize} must be less than or equal to the size of {@code dstBuffer} minus {@code dstOffset}
  • *
  • {@code dstBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstOffset} must be a multiple of 4
  • *
  • {@code dataSize} must be less than or equal to 65536
  • *
  • {@code dataSize} must be a multiple of 4
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code dstBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code pData} must be a valid pointer to an array of {@code dataSize} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code dataSize} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code dstBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Transfer, Graphics, Compute | Transfer
 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param dstBuffer     a handle to the buffer to be updated.
 * @param dstOffset     the byte offset into the buffer to start updating, and must be a multiple of 4.
 * @param pData         a pointer to the source data for the buffer update, and must be at least {@code dataSize} bytes in size.
 */
public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") LongBuffer pData) {
    nvkCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, pData.remaining() << 3, memAddress(pData));
}

/**
 * Update a buffer's contents from host memory.
 *
C Specification
* *

To update buffer data inline in a command buffer, call:

* *
     * void vkCmdUpdateBuffer(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    dstBuffer,
     *     VkDeviceSize                                dstOffset,
     *     VkDeviceSize                                dataSize,
     *     const void*                                 pData);
* *
Description
* *

{@code dataSize} must be less than or equal to 65536 bytes. For larger updates, applications can use buffer to buffer copies.

* *
Note
* *

Buffer updates performed with {@link #vkCmdUpdateBuffer CmdUpdateBuffer} first copy the data into command buffer memory when the command is recorded (which requires additional storage and may incur an additional allocation), and then copy the data from the command buffer into {@code dstBuffer} when the command is executed on a device.

* *

The additional cost of this functionality compared to buffer to buffer copies means it is only recommended for very small amounts of data, and is why it is limited to only 65536 bytes.

* *

Applications can work around this by issuing multiple {@link #vkCmdUpdateBuffer CmdUpdateBuffer} commands to different ranges of the same buffer, but it is strongly recommended that they do not.

*
* *

The source data is copied from the user pointer to the command buffer when the command is called.

* *

{@link #vkCmdUpdateBuffer CmdUpdateBuffer} is only allowed outside of a render pass. This command is treated as a "{@code transfer}" operation for the purposes of synchronization barriers. {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} must be specified in {@code usage} of {@link VkBufferCreateInfo} in order for the buffer to be compatible with {@link #vkCmdUpdateBuffer CmdUpdateBuffer}.

* *
Valid Usage
* *
    *
  • {@code dstOffset} must be less than the size of {@code dstBuffer}
  • *
  • {@code dataSize} must be less than or equal to the size of {@code dstBuffer} minus {@code dstOffset}
  • *
  • {@code dstBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstOffset} must be a multiple of 4
  • *
  • {@code dataSize} must be less than or equal to 65536
  • *
  • {@code dataSize} must be a multiple of 4
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code dstBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code pData} must be a valid pointer to an array of {@code dataSize} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code dataSize} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code dstBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Transfer, Graphics, Compute | Transfer
 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param dstBuffer     a handle to the buffer to be updated.
 * @param dstOffset     the byte offset into the buffer to start updating, and must be a multiple of 4.
 * @param pData         a pointer to the source data for the buffer update, and must be at least {@code dataSize} bytes in size.
 */
public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") FloatBuffer pData) {
    nvkCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, pData.remaining() << 2, memAddress(pData));
}

/**
 * Update a buffer's contents from host memory.
 *
C Specification
* *

To update buffer data inline in a command buffer, call:

* *
     * void vkCmdUpdateBuffer(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    dstBuffer,
     *     VkDeviceSize                                dstOffset,
     *     VkDeviceSize                                dataSize,
     *     const void*                                 pData);
* *
Description
* *

{@code dataSize} must be less than or equal to 65536 bytes. For larger updates, applications can use buffer to buffer copies.

* *
Note
* *

Buffer updates performed with {@link #vkCmdUpdateBuffer CmdUpdateBuffer} first copy the data into command buffer memory when the command is recorded (which requires additional storage and may incur an additional allocation), and then copy the data from the command buffer into {@code dstBuffer} when the command is executed on a device.

* *

The additional cost of this functionality compared to buffer to buffer copies means it is only recommended for very small amounts of data, and is why it is limited to only 65536 bytes.

* *

Applications can work around this by issuing multiple {@link #vkCmdUpdateBuffer CmdUpdateBuffer} commands to different ranges of the same buffer, but it is strongly recommended that they do not.

*
* *

The source data is copied from the user pointer to the command buffer when the command is called.

* *

{@link #vkCmdUpdateBuffer CmdUpdateBuffer} is only allowed outside of a render pass. This command is treated as a "{@code transfer}" operation for the purposes of synchronization barriers. {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} must be specified in {@code usage} of {@link VkBufferCreateInfo} in order for the buffer to be compatible with {@link #vkCmdUpdateBuffer CmdUpdateBuffer}.

* *
Valid Usage
* *
    *
  • {@code dstOffset} must be less than the size of {@code dstBuffer}
  • *
  • {@code dataSize} must be less than or equal to the size of {@code dstBuffer} minus {@code dstOffset}
  • *
  • {@code dstBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstOffset} must be a multiple of 4
  • *
  • {@code dataSize} must be less than or equal to 65536
  • *
  • {@code dataSize} must be a multiple of 4
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code dstBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code pData} must be a valid pointer to an array of {@code dataSize} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code dataSize} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code dstBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Transfer, Graphics, Compute | Transfer
 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param dstBuffer     a handle to the buffer to be updated.
 * @param dstOffset     the byte offset into the buffer to start updating, and must be a multiple of 4.
 * @param pData         a pointer to the source data for the buffer update, and must be at least {@code dataSize} bytes in size.
 */
public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") DoubleBuffer pData) {
    nvkCmdUpdateBuffer(commandBuffer, dstBuffer, dstOffset, pData.remaining() << 3, memAddress(pData));
}

// --- [ vkCmdFillBuffer ] ---

/**
 * Fill a region of a buffer with a fixed value.
 *
C Specification
* *

To clear buffer data, call:

* *
     * void vkCmdFillBuffer(
     *     VkCommandBuffer                             commandBuffer,
     *     VkBuffer                                    dstBuffer,
     *     VkDeviceSize                                dstOffset,
     *     VkDeviceSize                                size,
     *     uint32_t                                    data);
* *
Description
* *

{@link #vkCmdFillBuffer CmdFillBuffer} is treated as a "{@code transfer}" operation for the purposes of synchronization barriers. {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} must be specified in {@code usage} of {@link VkBufferCreateInfo} in order for the buffer to be compatible with {@link #vkCmdFillBuffer CmdFillBuffer}.

* *
Valid Usage
* *
    *
  • {@code dstOffset} must be less than the size of {@code dstBuffer}
  • *
  • {@code dstOffset} must be a multiple of 4
  • *
  • If {@code size} is not equal to {@link #VK_WHOLE_SIZE WHOLE_SIZE}, {@code size} must be greater than 0
  • *
  • If {@code size} is not equal to {@link #VK_WHOLE_SIZE WHOLE_SIZE}, {@code size} must be less than or equal to the size of {@code dstBuffer} minus {@code dstOffset}
  • *
  • If {@code size} is not equal to {@link #VK_WHOLE_SIZE WHOLE_SIZE}, {@code size} must be a multiple of 4
  • *
  • {@code dstBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code dstBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • Both of {@code commandBuffer}, and {@code dstBuffer} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Transfer, Graphics, Compute | Transfer
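* *
Example
* *

A minimal, illustrative sketch, assuming a hypothetical {@code counterBuffer} handle created elsewhere with the {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag:

* *
     * // Zero the entire buffer; WHOLE_SIZE fills from dstOffset to the end, rounded down to a multiple of 4.
     * vkCmdFillBuffer(commandBuffer, counterBuffer, 0L, VK_WHOLE_SIZE, 0);
     *
     * // Fill 256 bytes starting at byte offset 64 with the 32-bit pattern 0xFFFFFFFF.
     * vkCmdFillBuffer(commandBuffer, counterBuffer, 64L, 256L, 0xFFFFFFFF);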
 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param dstBuffer     the buffer to be filled.
 * @param dstOffset     the byte offset into the buffer at which to start filling, and must be a multiple of 4.
 * @param size          the number of bytes to fill, and must be either a multiple of 4, or {@link #VK_WHOLE_SIZE WHOLE_SIZE} to fill the range from {@code offset} to the end of the buffer. If {@link #VK_WHOLE_SIZE WHOLE_SIZE} is used and the remaining size of the buffer is not a multiple of 4, then the nearest smaller multiple is used.
 * @param data          the 4-byte word written repeatedly to the buffer to fill {@code size} bytes of data. The data word is written to memory according to the host endianness.
 */
public static void vkCmdFillBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("VkDeviceSize") long size, @NativeType("uint32_t") int data) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdFillBuffer;
    callPJJJV(__functionAddress, commandBuffer.address(), dstBuffer, dstOffset, size, data);
}

// --- [ vkCmdClearColorImage ] ---

/**
 * Unsafe version of: {@link #vkCmdClearColorImage CmdClearColorImage}
 *
 * @param rangeCount the number of image subresource range structures in {@code pRanges}.
 */
public static void nvkCmdClearColorImage(VkCommandBuffer commandBuffer, long image, int imageLayout, long pColor, int rangeCount, long pRanges) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdClearColorImage;
    callPJPPV(__functionAddress, commandBuffer.address(), image, imageLayout, pColor, rangeCount, pRanges);
}

/**
 * Clear regions of a color image.
 *
C Specification
* *

To clear one or more subranges of a color image, call:

* *
     * void vkCmdClearColorImage(
     *     VkCommandBuffer                             commandBuffer,
     *     VkImage                                     image,
     *     VkImageLayout                               imageLayout,
     *     const VkClearColorValue*                    pColor,
     *     uint32_t                                    rangeCount,
     *     const VkImageSubresourceRange*              pRanges);
* *
Description
* *

Each specified range in {@code pRanges} is cleared to the value specified by {@code pColor}.

* *
Valid Usage
* *
    *
  • {@code image} must use a format that supports {@link KHRMaintenance1#VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR FORMAT_FEATURE_TRANSFER_DST_BIT_KHR}, which is indicated by {@link VkFormatProperties}{@code ::linearTilingFeatures} (for linearly tiled images) or {@link VkFormatProperties}{@code ::optimalTilingFeatures} (for optimally tiled images) - as returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code image} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_DST_BIT IMAGE_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • {@code image} must not use a format listed in the “Formats requiring sampler Y'CBCR conversion for {@link #VK_IMAGE_ASPECT_COLOR_BIT IMAGE_ASPECT_COLOR_BIT} image views” table
  • *
  • If {@code image} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code imageLayout} must specify the layout of the image subresource ranges of {@code image} specified in {@code pRanges} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code imageLayout} must be {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}, {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}, or {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}
  • *
  • The {@link VkImageSubresourceRange}{@code ::baseMipLevel} members of the elements of the {@code pRanges} array must each be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • For each {@link VkImageSubresourceRange} element of {@code pRanges}, if the {@code levelCount} member is not {@link #VK_REMAINING_MIP_LEVELS REMAINING_MIP_LEVELS}, then {@code baseMipLevel + levelCount} must be less than or equal to the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • The {@link VkImageSubresourceRange}{@code ::baseArrayLayer} members of the elements of the {@code pRanges} array must each be less than the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • For each {@link VkImageSubresourceRange} element of {@code pRanges}, if the {@code layerCount} member is not {@link #VK_REMAINING_ARRAY_LAYERS REMAINING_ARRAY_LAYERS}, then {@code baseArrayLayer + layerCount} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • {@code image} must not have a compressed or depth/stencil format
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code image} must be a valid {@code VkImage} handle
  • *
  • {@code imageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code pColor} must be a valid pointer to a valid {@link VkClearColorValue} union
  • *
  • {@code pRanges} must be a valid pointer to an array of {@code rangeCount} valid {@link VkImageSubresourceRange} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code rangeCount} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code image} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Graphics, Compute | Transfer
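* *
Example
* *

A minimal, illustrative sketch that clears every mip level and array layer of a hypothetical {@code storageImage} (created elsewhere and already in {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}) to opaque black:

* *
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkClearColorValue clearColor = VkClearColorValue.callocStack(stack);
     *     clearColor.float32(0, 0.0f)
     *               .float32(1, 0.0f)
     *               .float32(2, 0.0f)
     *               .float32(3, 1.0f);
     *
     *     VkImageSubresourceRange.Buffer ranges = VkImageSubresourceRange.callocStack(1, stack)
     *         .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
     *         .baseMipLevel(0)
     *         .levelCount(VK_REMAINING_MIP_LEVELS)
     *         .baseArrayLayer(0)
     *         .layerCount(VK_REMAINING_ARRAY_LAYERS);
     *
     *     vkCmdClearColorImage(commandBuffer, storageImage, VK_IMAGE_LAYOUT_GENERAL, clearColor, ranges);
     * }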
* *
See Also
* *

{@link VkClearColorValue}, {@link VkImageSubresourceRange}

 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param image         the image to be cleared.
 * @param imageLayout   specifies the current layout of the image subresource ranges to be cleared, and must be {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}, {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL} or {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}.
 * @param pColor        a pointer to a {@link VkClearColorValue} structure that contains the values the image subresource ranges will be cleared to (see the “Clear Values” section below).
 * @param pRanges       points to an array of {@link VkImageSubresourceRange} structures that describe a range of mipmap levels, array layers, and aspects to be cleared, as described in Image Views. The {@code aspectMask} of all image subresource ranges must only include {@link #VK_IMAGE_ASPECT_COLOR_BIT IMAGE_ASPECT_COLOR_BIT}.
 */
public static void vkCmdClearColorImage(VkCommandBuffer commandBuffer, @NativeType("VkImage") long image, @NativeType("VkImageLayout") int imageLayout, @NativeType("const VkClearColorValue *") VkClearColorValue pColor, @NativeType("const VkImageSubresourceRange *") VkImageSubresourceRange.Buffer pRanges) {
    nvkCmdClearColorImage(commandBuffer, image, imageLayout, pColor.address(), pRanges.remaining(), pRanges.address());
}

/**
 * Clear regions of a color image.
 *
C Specification
* *

To clear one or more subranges of a color image, call:

* *
     * void vkCmdClearColorImage(
     *     VkCommandBuffer                             commandBuffer,
     *     VkImage                                     image,
     *     VkImageLayout                               imageLayout,
     *     const VkClearColorValue*                    pColor,
     *     uint32_t                                    rangeCount,
     *     const VkImageSubresourceRange*              pRanges);
* *
Description
* *

Each specified range in {@code pRanges} is cleared to the value specified by {@code pColor}.

* *
Valid Usage
* *
    *
  • {@code image} must use a format that supports {@link KHRMaintenance1#VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR FORMAT_FEATURE_TRANSFER_DST_BIT_KHR}, which is indicated by {@link VkFormatProperties}{@code ::linearTilingFeatures} (for linearly tiled images) or {@link VkFormatProperties}{@code ::optimalTilingFeatures} (for optimally tiled images) - as returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code image} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_DST_BIT IMAGE_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • {@code image} must not use a format listed in the “Formats requiring sampler Y'CBCR conversion for {@link #VK_IMAGE_ASPECT_COLOR_BIT IMAGE_ASPECT_COLOR_BIT} image views” table
  • *
  • If {@code image} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code imageLayout} must specify the layout of the image subresource ranges of {@code image} specified in {@code pRanges} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code imageLayout} must be {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}, {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}, or {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}
  • *
  • The {@link VkImageSubresourceRange}{@code ::baseMipLevel} members of the elements of the {@code pRanges} array must each be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • For each {@link VkImageSubresourceRange} element of {@code pRanges}, if the {@code levelCount} member is not {@link #VK_REMAINING_MIP_LEVELS REMAINING_MIP_LEVELS}, then {@code baseMipLevel + levelCount} must be less than or equal to the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • The {@link VkImageSubresourceRange}{@code ::baseArrayLayer} members of the elements of the {@code pRanges} array must each be less than the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • For each {@link VkImageSubresourceRange} element of {@code pRanges}, if the {@code layerCount} member is not {@link #VK_REMAINING_ARRAY_LAYERS REMAINING_ARRAY_LAYERS}, then {@code baseArrayLayer + layerCount} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • {@code image} must not have a compressed or depth/stencil format
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code image} must be a valid {@code VkImage} handle
  • *
  • {@code imageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code pColor} must be a valid pointer to a valid {@link VkClearColorValue} union
  • *
  • {@code pRanges} must be a valid pointer to an array of {@code rangeCount} valid {@link VkImageSubresourceRange} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code rangeCount} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code image} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Graphics, Compute | Transfer
* *
See Also
* *

{@link VkClearColorValue}, {@link VkImageSubresourceRange}

 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param image         the image to be cleared.
 * @param imageLayout   specifies the current layout of the image subresource ranges to be cleared, and must be {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}, {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL} or {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}.
 * @param pColor        a pointer to a {@link VkClearColorValue} structure that contains the values the image subresource ranges will be cleared to (see the “Clear Values” section below).
 */
public static void vkCmdClearColorImage(VkCommandBuffer commandBuffer, @NativeType("VkImage") long image, @NativeType("VkImageLayout") int imageLayout, @NativeType("const VkClearColorValue *") VkClearColorValue pColor, @NativeType("const VkImageSubresourceRange *") VkImageSubresourceRange pRange) {
    nvkCmdClearColorImage(commandBuffer, image, imageLayout, pColor.address(), 1, pRange.address());
}

// --- [ vkCmdClearDepthStencilImage ] ---

/**
 * Unsafe version of: {@link #vkCmdClearDepthStencilImage CmdClearDepthStencilImage}
 *
 * @param rangeCount the number of image subresource range structures in {@code pRanges}.
 */
public static void nvkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, long image, int imageLayout, long pDepthStencil, int rangeCount, long pRanges) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdClearDepthStencilImage;
    callPJPPV(__functionAddress, commandBuffer.address(), image, imageLayout, pDepthStencil, rangeCount, pRanges);
}

/**
 * Fill regions of a combined depth/stencil image.
 *
C Specification
* *

To clear one or more subranges of a depth/stencil image, call:

* *
     * void vkCmdClearDepthStencilImage(
     *     VkCommandBuffer                             commandBuffer,
     *     VkImage                                     image,
     *     VkImageLayout                               imageLayout,
     *     const VkClearDepthStencilValue*             pDepthStencil,
     *     uint32_t                                    rangeCount,
     *     const VkImageSubresourceRange*              pRanges);
* *
Valid Usage
* *
    *
  • {@code image} must use a format that supports {@link KHRMaintenance1#VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR FORMAT_FEATURE_TRANSFER_DST_BIT_KHR}, which is indicated by {@link VkFormatProperties}{@code ::linearTilingFeatures} (for linearly tiled images) or {@link VkFormatProperties}{@code ::optimalTilingFeatures} (for optimally tiled images) - as returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code image} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_DST_BIT IMAGE_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code image} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code imageLayout} must specify the layout of the image subresource ranges of {@code image} specified in {@code pRanges} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code imageLayout} must be either of {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL} or {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}
  • *
  • The {@link VkImageSubresourceRange}{@code ::baseMipLevel} members of the elements of the {@code pRanges} array must each be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • For each {@link VkImageSubresourceRange} element of {@code pRanges}, if the {@code levelCount} member is not {@link #VK_REMAINING_MIP_LEVELS REMAINING_MIP_LEVELS}, then {@code baseMipLevel + levelCount} must be less than or equal to the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • The {@link VkImageSubresourceRange}{@code ::baseArrayLayer} members of the elements of the {@code pRanges} array must each be less than the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • For each {@link VkImageSubresourceRange} element of {@code pRanges}, if the {@code layerCount} member is not {@link #VK_REMAINING_ARRAY_LAYERS REMAINING_ARRAY_LAYERS}, then {@code baseArrayLayer + layerCount} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • {@code image} must have a depth/stencil format
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code image} must be a valid {@code VkImage} handle
  • *
  • {@code imageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code pDepthStencil} must be a valid pointer to a valid {@link VkClearDepthStencilValue} structure
  • *
  • {@code pRanges} must be a valid pointer to an array of {@code rangeCount} valid {@link VkImageSubresourceRange} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code rangeCount} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code image} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Graphics | Transfer
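* *
Example
* *

A minimal, illustrative sketch that clears the first mip level and layer of a hypothetical {@code depthImage} (created elsewhere with a combined depth/stencil format and already in {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}):

* *
     * try (MemoryStack stack = MemoryStack.stackPush()) {
     *     VkClearDepthStencilValue clearValue = VkClearDepthStencilValue.callocStack(stack)
     *         .depth(1.0f)
     *         .stencil(0);
     *
     *     VkImageSubresourceRange.Buffer ranges = VkImageSubresourceRange.callocStack(1, stack)
     *         .aspectMask(VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) // include the stencil bit only if the format has a stencil component
     *         .baseMipLevel(0)
     *         .levelCount(1)
     *         .baseArrayLayer(0)
     *         .layerCount(1);
     *
     *     vkCmdClearDepthStencilImage(commandBuffer, depthImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, clearValue, ranges);
     * }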
* *
See Also
* *

{@link VkClearDepthStencilValue}, {@link VkImageSubresourceRange}

 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param image         the image to be cleared.
 * @param imageLayout   specifies the current layout of the image subresource ranges to be cleared, and must be {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL} or {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}.
 * @param pDepthStencil a pointer to a {@link VkClearDepthStencilValue} structure that contains the values the depth and stencil image subresource ranges will be cleared to (see the “Clear Values” section below).
 * @param pRanges       points to an array of {@link VkImageSubresourceRange} structures that describe a range of mipmap levels, array layers, and aspects to be cleared, as described in Image Views. The {@code aspectMask} of each image subresource range in {@code pRanges} can include {@link #VK_IMAGE_ASPECT_DEPTH_BIT IMAGE_ASPECT_DEPTH_BIT} if the image format has a depth component, and {@link #VK_IMAGE_ASPECT_STENCIL_BIT IMAGE_ASPECT_STENCIL_BIT} if the image format has a stencil component.
 */
public static void vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, @NativeType("VkImage") long image, @NativeType("VkImageLayout") int imageLayout, @NativeType("const VkClearDepthStencilValue *") VkClearDepthStencilValue pDepthStencil, @NativeType("const VkImageSubresourceRange *") VkImageSubresourceRange.Buffer pRanges) {
    nvkCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil.address(), pRanges.remaining(), pRanges.address());
}

/**
 * Fill regions of a combined depth/stencil image.
 *
C Specification
* *

To clear one or more subranges of a depth/stencil image, call:

* *
     * void vkCmdClearDepthStencilImage(
     *     VkCommandBuffer                             commandBuffer,
     *     VkImage                                     image,
     *     VkImageLayout                               imageLayout,
     *     const VkClearDepthStencilValue*             pDepthStencil,
     *     uint32_t                                    rangeCount,
     *     const VkImageSubresourceRange*              pRanges);
* *
Valid Usage
* *
    *
  • {@code image} must use a format that supports {@link KHRMaintenance1#VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR FORMAT_FEATURE_TRANSFER_DST_BIT_KHR}, which is indicated by {@link VkFormatProperties}{@code ::linearTilingFeatures} (for linearly tiled images) or {@link VkFormatProperties}{@code ::optimalTilingFeatures} (for optimally tiled images) - as returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code image} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_DST_BIT IMAGE_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code image} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code imageLayout} must specify the layout of the image subresource ranges of {@code image} specified in {@code pRanges} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code imageLayout} must be either of {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL} or {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}
  • *
  • The {@link VkImageSubresourceRange}{@code ::baseMipLevel} members of the elements of the {@code pRanges} array must each be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • For each {@link VkImageSubresourceRange} element of {@code pRanges}, if the {@code levelCount} member is not {@link #VK_REMAINING_MIP_LEVELS REMAINING_MIP_LEVELS}, then {@code baseMipLevel + levelCount} must be less than or equal to the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • The {@link VkImageSubresourceRange}{@code ::baseArrayLayer} members of the elements of the {@code pRanges} array must each be less than the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • For each {@link VkImageSubresourceRange} element of {@code pRanges}, if the {@code layerCount} member is not {@link #VK_REMAINING_ARRAY_LAYERS REMAINING_ARRAY_LAYERS}, then {@code baseArrayLayer + layerCount} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code image} was created
  • *
  • {@code image} must have a depth/stencil format
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code image} must be a valid {@code VkImage} handle
  • *
  • {@code imageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code pDepthStencil} must be a valid pointer to a valid {@link VkClearDepthStencilValue} structure
  • *
  • {@code pRanges} must be a valid pointer to an array of {@code rangeCount} valid {@link VkImageSubresourceRange} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code rangeCount} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code image} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary | Outside | Graphics | Transfer
* *
See Also
* *

{@link VkClearDepthStencilValue}, {@link VkImageSubresourceRange}

 *
 * @param commandBuffer the command buffer into which the command will be recorded.
 * @param image         the image to be cleared.
 * @param imageLayout   specifies the current layout of the image subresource ranges to be cleared, and must be {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL} or {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL}.
 * @param pDepthStencil a pointer to a {@link VkClearDepthStencilValue} structure that contains the values the depth and stencil image subresource ranges will be cleared to (see the “Clear Values” section below).
 */
public static void vkCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, @NativeType("VkImage") long image, @NativeType("VkImageLayout") int imageLayout, @NativeType("const VkClearDepthStencilValue *") VkClearDepthStencilValue pDepthStencil, @NativeType("const VkImageSubresourceRange *") VkImageSubresourceRange pRange) {
    nvkCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil.address(), 1, pRange.address());
}

// --- [ vkCmdClearAttachments ] ---

/**
 * Unsafe version of: {@link #vkCmdClearAttachments CmdClearAttachments}
 *
 * @param attachmentCount the number of entries in the {@code pAttachments} array.
 * @param rectCount       the number of entries in the {@code pRects} array.
 */
public static void nvkCmdClearAttachments(VkCommandBuffer commandBuffer, int attachmentCount, long pAttachments, int rectCount, long pRects) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdClearAttachments;
    callPPPV(__functionAddress, commandBuffer.address(), attachmentCount, pAttachments, rectCount, pRects);
}

/**
 * Clear regions within currently bound framebuffer attachments.
 *
C Specification
* *

To clear one or more regions of color and depth/stencil attachments inside a render pass instance, call:

* *
     * void vkCmdClearAttachments(
     *     VkCommandBuffer                             commandBuffer,
     *     uint32_t                                    attachmentCount,
     *     const VkClearAttachment*                    pAttachments,
     *     uint32_t                                    rectCount,
     *     const VkClearRect*                          pRects);
* *
Description
* *

{@link #vkCmdClearAttachments CmdClearAttachments} can clear multiple regions of each attachment used in the current subpass of a render pass instance. This command must be called only inside a render pass instance, and implicitly selects the images to clear based on the current framebuffer attachments and the command parameters.

* *
Valid Usage
* *
    *
  • If the {@code aspectMask} member of any element of {@code pAttachments} contains {@link #VK_IMAGE_ASPECT_COLOR_BIT IMAGE_ASPECT_COLOR_BIT}, the {@code colorAttachment} member of that element must refer to a valid color attachment in the current subpass
  • *
  • The rectangular region specified by each element of {@code pRects} must be contained within the render area of the current render pass instance
  • *
  • The layers specified by each element of {@code pRects} must be contained within every attachment that {@code pAttachments} refers to
  • *
  • If the render pass instance this is recorded in uses multiview, then {@code baseArrayLayer} must be zero and {@code layerCount} must be one.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code pAttachments} must be a valid pointer to an array of {@code attachmentCount} valid {@link VkClearAttachment} structures
  • *
  • {@code pRects} must be a valid pointer to an array of {@code rectCount} {@link VkClearRect} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called inside of a render pass instance
  • *
  • {@code attachmentCount} must be greater than 0
  • *
  • {@code rectCount} must be greater than 0
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Inside            | Graphics              | Graphics
* *
See Also
* *

{@link VkClearAttachment}, {@link VkClearRect}
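As an illustration (not part of the generated binding), the sketch below clears color attachment 0 of the current subpass to opaque black. The attachment index and the render-area width/height are hypothetical, and the {@code callocStack} helpers assume an LWJGL 3.1-era stack API.

    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.*;
    import static org.lwjgl.vulkan.VK10.*;

    public final class ClearAttachmentsExample {
        /** Clears color attachment 0 of the current subpass to opaque black over the given region. */
        public static void recordClear(VkCommandBuffer cmd, int width, int height) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                VkClearAttachment.Buffer attachment = VkClearAttachment.callocStack(1, stack)
                    .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
                    .colorAttachment(0);
                attachment.clearValue().color()
                    .float32(0, 0.0f)
                    .float32(1, 0.0f)
                    .float32(2, 0.0f)
                    .float32(3, 1.0f);

                VkClearRect.Buffer rect = VkClearRect.callocStack(1, stack)
                    .baseArrayLayer(0)
                    .layerCount(1);
                rect.rect().offset().set(0, 0);
                rect.rect().extent().set(width, height);

                // Must be recorded inside a render pass instance; the rect must lie within the render area.
                vkCmdClearAttachments(cmd, attachment, rect);
            }
        }
    }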

* * @param commandBuffer the command buffer into which the command will be recorded. * @param pAttachments a pointer to an array of {@link VkClearAttachment} structures defining the attachments to clear and the clear values to use. * @param pRects points to an array of {@link VkClearRect} structures defining regions within each selected attachment to clear. */ public static void vkCmdClearAttachments(VkCommandBuffer commandBuffer, @NativeType("const VkClearAttachment *") VkClearAttachment.Buffer pAttachments, @NativeType("const VkClearRect *") VkClearRect.Buffer pRects) { nvkCmdClearAttachments(commandBuffer, pAttachments.remaining(), pAttachments.address(), pRects.remaining(), pRects.address()); } // --- [ vkCmdResolveImage ] --- /** * Unsafe version of: {@link #vkCmdResolveImage CmdResolveImage} * * @param regionCount the number of regions to resolve. */ public static void nvkCmdResolveImage(VkCommandBuffer commandBuffer, long srcImage, int srcImageLayout, long dstImage, int dstImageLayout, int regionCount, long pRegions) { long __functionAddress = commandBuffer.getCapabilities().vkCmdResolveImage; callPJJPV(__functionAddress, commandBuffer.address(), srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions); } /** * Resolve regions of an image. * *
C Specification
* *

To resolve a multisample image to a non-multisample image, call:

* *
     * void vkCmdResolveImage(
     *     VkCommandBuffer                             commandBuffer,
     *     VkImage                                     srcImage,
     *     VkImageLayout                               srcImageLayout,
     *     VkImage                                     dstImage,
     *     VkImageLayout                               dstImageLayout,
     *     uint32_t                                    regionCount,
     *     const VkImageResolve*                       pRegions);
* *
Description
* *

During the resolve the samples corresponding to each pixel location in the source are converted to a single sample before being written to the destination. If the source formats are floating-point or normalized types, the sample values for each pixel are resolved in an implementation-dependent manner. If the source formats are integer types, a single sample's value is selected for each pixel.

* *

{@code srcOffset} and {@code dstOffset} select the initial {@code x}, {@code y}, and {@code z} offsets in texels of the sub-regions of the source and destination image data. {@code extent} is the size in texels of the source image to resolve in {@code width}, {@code height} and {@code depth}.

* *

Resolves are done layer by layer, starting with the {@code baseArrayLayer} member of {@code srcSubresource} for the source and {@code dstSubresource} for the destination. {@code layerCount} layers are resolved to the destination image.

* *
Valid Usage
* *
    *
  • The source region specified by each element of {@code pRegions} must be a region that is contained within {@code srcImage}
  • *
  • The destination region specified by each element of {@code pRegions} must be a region that is contained within {@code dstImage}
  • *
  • The union of all source regions, and the union of all destination regions, specified by the elements of {@code pRegions}, must not overlap in memory
  • *
  • If {@code srcImage} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code srcImage} must have a sample count equal to any valid sample count value other than {@link #VK_SAMPLE_COUNT_1_BIT SAMPLE_COUNT_1_BIT}
  • *
  • If {@code dstImage} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstImage} must have a sample count equal to {@link #VK_SAMPLE_COUNT_1_BIT SAMPLE_COUNT_1_BIT}
  • *
  • {@code srcImageLayout} must specify the layout of the image subresources of {@code srcImage} specified in {@code pRegions} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code srcImageLayout} must be {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}, {@link #VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL} or {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}
  • *
  • {@code dstImageLayout} must specify the layout of the image subresources of {@code dstImage} specified in {@code pRegions} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code dstImageLayout} must be {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}, {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL} or {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}
  • *
  • If {@code dstImage} was created with {@code tiling} equal to {@link #VK_IMAGE_TILING_LINEAR IMAGE_TILING_LINEAR}, {@code dstImage} must have been created with a {@code format} that supports being a color attachment, as specified by the {@link #VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT FORMAT_FEATURE_COLOR_ATTACHMENT_BIT} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • If {@code dstImage} was created with {@code tiling} equal to {@link #VK_IMAGE_TILING_OPTIMAL IMAGE_TILING_OPTIMAL}, {@code dstImage} must have been created with a {@code format} that supports being a color attachment, as specified by the {@link #VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT FORMAT_FEATURE_COLOR_ATTACHMENT_BIT} flag in {@link VkFormatProperties}{@code ::optimalTilingFeatures} returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code srcImage} and {@code dstImage} must have been created with the same image format
  • *
  • The {@code srcSubresource.mipLevel} member of each element of {@code pRegions} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code srcImage} was created
  • *
  • The {@code dstSubresource.mipLevel} member of each element of {@code pRegions} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code dstImage} was created
  • *
  • The {@code srcSubresource.baseArrayLayer} + {@code srcSubresource.layerCount} of each element of {@code pRegions} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code srcImage} was created
  • *
  • The {@code dstSubresource.baseArrayLayer} + {@code dstSubresource.layerCount} of each element of {@code pRegions} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code dstImage} was created
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code srcImage} must be a valid {@code VkImage} handle
  • *
  • {@code srcImageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code dstImage} must be a valid {@code VkImage} handle
  • *
  • {@code dstImageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code pRegions} must be a valid pointer to an array of {@code regionCount} valid {@link VkImageResolve} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code regionCount} must be greater than 0
  • *
  • Each of {@code commandBuffer}, {@code dstImage}, and {@code srcImage} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Outside           | Graphics              | Transfer
* *
See Also
* *

{@link VkImageResolve}
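For reference, a minimal LWJGL sketch follows (not part of the generated binding). The multisampled source image, single-sampled destination image, and their dimensions are hypothetical, the images are assumed to already be in the layouts used below, and the {@code callocStack} helpers assume an LWJGL 3.1-era stack API.

    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.*;
    import static org.lwjgl.vulkan.VK10.*;

    public final class ResolveImageExample {
        /** Resolves mip level 0, layer 0 of a multisampled color image into a single-sampled image of the same format. */
        public static void recordResolve(VkCommandBuffer cmd, long msaaImage, long resolveImage, int width, int height) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                VkImageResolve.Buffer region = VkImageResolve.callocStack(1, stack);
                region.srcSubresource()
                    .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
                    .mipLevel(0)
                    .baseArrayLayer(0)
                    .layerCount(1);
                region.dstSubresource()
                    .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
                    .mipLevel(0)
                    .baseArrayLayer(0)
                    .layerCount(1);
                region.srcOffset().set(0, 0, 0);
                region.dstOffset().set(0, 0, 0);
                region.extent().set(width, height, 1);

                // srcImage must be multisampled and dstImage single-sampled; both must be in the layouts given here.
                vkCmdResolveImage(cmd,
                    msaaImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                    resolveImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                    region);
            }
        }
    }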

* * @param commandBuffer the command buffer into which the command will be recorded. * @param srcImage the source image. * @param srcImageLayout the layout of the source image subresources for the resolve. * @param dstImage the destination image. * @param dstImageLayout the layout of the destination image subresources for the resolve. * @param pRegions a pointer to an array of {@link VkImageResolve} structures specifying the regions to resolve. */ public static void vkCmdResolveImage(VkCommandBuffer commandBuffer, @NativeType("VkImage") long srcImage, @NativeType("VkImageLayout") int srcImageLayout, @NativeType("VkImage") long dstImage, @NativeType("VkImageLayout") int dstImageLayout, @NativeType("const VkImageResolve *") VkImageResolve.Buffer pRegions) { nvkCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, pRegions.remaining(), pRegions.address()); } /** * Resolve regions of an image. * *
C Specification
* *

To resolve a multisample image to a non-multisample image, call:

* *
     * void vkCmdResolveImage(
     *     VkCommandBuffer                             commandBuffer,
     *     VkImage                                     srcImage,
     *     VkImageLayout                               srcImageLayout,
     *     VkImage                                     dstImage,
     *     VkImageLayout                               dstImageLayout,
     *     uint32_t                                    regionCount,
     *     const VkImageResolve*                       pRegions);
* *
Description
* *

During the resolve the samples corresponding to each pixel location in the source are converted to a single sample before being written to the destination. If the source formats are floating-point or normalized types, the sample values for each pixel are resolved in an implementation-dependent manner. If the source formats are integer types, a single sample's value is selected for each pixel.

* *

{@code srcOffset} and {@code dstOffset} select the initial {@code x}, {@code y}, and {@code z} offsets in texels of the sub-regions of the source and destination image data. {@code extent} is the size in texels of the source image to resolve in {@code width}, {@code height} and {@code depth}.

* *

Resolves are done layer by layer, starting with the {@code baseArrayLayer} member of {@code srcSubresource} for the source and {@code dstSubresource} for the destination. {@code layerCount} layers are resolved to the destination image.

* *
Valid Usage
* *
    *
  • The source region specified by each element of {@code pRegions} must be a region that is contained within {@code srcImage}
  • *
  • The destination region specified by each element of {@code pRegions} must be a region that is contained within {@code dstImage}
  • *
  • The union of all source regions, and the union of all destination regions, specified by the elements of {@code pRegions}, must not overlap in memory
  • *
  • If {@code srcImage} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code srcImage} must have a sample count equal to any valid sample count value other than {@link #VK_SAMPLE_COUNT_1_BIT SAMPLE_COUNT_1_BIT}
  • *
  • If {@code dstImage} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • {@code dstImage} must have a sample count equal to {@link #VK_SAMPLE_COUNT_1_BIT SAMPLE_COUNT_1_BIT}
  • *
  • {@code srcImageLayout} must specify the layout of the image subresources of {@code srcImage} specified in {@code pRegions} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code srcImageLayout} must be {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}, {@link #VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL} or {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}
  • *
  • {@code dstImageLayout} must specify the layout of the image subresources of {@code dstImage} specified in {@code pRegions} at the time this command is executed on a {@code VkDevice}
  • *
  • {@code dstImageLayout} must be {@link KHRSharedPresentableImage#VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR IMAGE_LAYOUT_SHARED_PRESENT_KHR}, {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL} or {@link #VK_IMAGE_LAYOUT_GENERAL IMAGE_LAYOUT_GENERAL}
  • *
  • If {@code dstImage} was created with {@code tiling} equal to {@link #VK_IMAGE_TILING_LINEAR IMAGE_TILING_LINEAR}, {@code dstImage} must have been created with a {@code format} that supports being a color attachment, as specified by the {@link #VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT FORMAT_FEATURE_COLOR_ATTACHMENT_BIT} flag in {@link VkFormatProperties}{@code ::linearTilingFeatures} returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • If {@code dstImage} was created with {@code tiling} equal to {@link #VK_IMAGE_TILING_OPTIMAL IMAGE_TILING_OPTIMAL}, {@code dstImage} must have been created with a {@code format} that supports being a color attachment, as specified by the {@link #VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT FORMAT_FEATURE_COLOR_ATTACHMENT_BIT} flag in {@link VkFormatProperties}{@code ::optimalTilingFeatures} returned by {@link #vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties}
  • *
  • {@code srcImage} and {@code dstImage} must have been created with the same image format
  • *
  • The {@code srcSubresource.mipLevel} member of each element of {@code pRegions} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code srcImage} was created
  • *
  • The {@code dstSubresource.mipLevel} member of each element of {@code pRegions} must be less than the {@code mipLevels} specified in {@link VkImageCreateInfo} when {@code dstImage} was created
  • *
  • The {@code srcSubresource.baseArrayLayer} + {@code srcSubresource.layerCount} of each element of {@code pRegions} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code srcImage} was created
  • *
  • The {@code dstSubresource.baseArrayLayer} + {@code dstSubresource.layerCount} of each element of {@code pRegions} must be less than or equal to the {@code arrayLayers} specified in {@link VkImageCreateInfo} when {@code dstImage} was created
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code srcImage} must be a valid {@code VkImage} handle
  • *
  • {@code srcImageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code dstImage} must be a valid {@code VkImage} handle
  • *
  • {@code dstImageLayout} must be a valid {@code VkImageLayout} value
  • *
  • {@code pRegions} must be a valid pointer to an array of {@code regionCount} valid {@link VkImageResolve} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code regionCount} must be greater than 0
  • *
  • Each of {@code commandBuffer}, {@code dstImage}, and {@code srcImage} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Outside           | Graphics              | Transfer
* *
See Also
* *

{@link VkImageResolve}

* * @param commandBuffer the command buffer into which the command will be recorded. * @param srcImage the source image. * @param srcImageLayout the layout of the source image subresources for the resolve. * @param dstImage the destination image. * @param dstImageLayout the layout of the destination image subresources for the resolve. */ public static void vkCmdResolveImage(VkCommandBuffer commandBuffer, @NativeType("VkImage") long srcImage, @NativeType("VkImageLayout") int srcImageLayout, @NativeType("VkImage") long dstImage, @NativeType("VkImageLayout") int dstImageLayout, @NativeType("const VkImageResolve *") VkImageResolve pRegion) { nvkCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, 1, pRegion.address()); } // --- [ vkCmdSetEvent ] --- /** * Set an event object to signaled state. * *
C Specification
* *

To set the state of an event to signaled from a device, call:

* *
     * void vkCmdSetEvent(
     *     VkCommandBuffer                             commandBuffer,
     *     VkEvent                                     event,
     *     VkPipelineStageFlags                        stageMask);
* *
Description
* *

When {@link #vkCmdSetEvent CmdSetEvent} is submitted to a queue, it defines an execution dependency on commands that were submitted before it, and defines an event signal operation which sets the event to the signaled state.

* *

The first synchronization scope includes every command previously submitted to the same queue, including those in the same command buffer and batch. The synchronization scope is limited to operations on the pipeline stages determined by the source stage mask specified by {@code stageMask}.

* *

The second synchronization scope includes only the event signal operation.

* *

If {@code event} is already in the signaled state when {@link #vkCmdSetEvent CmdSetEvent} is executed on the device, then {@link #vkCmdSetEvent CmdSetEvent} has no effect, no event signal operation occurs, and no execution dependency is generated.

* *
Valid Usage
* *
    *
  • {@code stageMask} must not include {@link #VK_PIPELINE_STAGE_HOST_BIT PIPELINE_STAGE_HOST_BIT}
  • *
  • If the geometry shaders feature is not enabled, {@code stageMask} must not contain {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT}
  • *
  • If the tessellation shaders feature is not enabled, {@code stageMask} must not contain {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT} or {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT}
  • *
  • {@code commandBuffer}’s current device mask must include exactly one physical device.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code event} must be a valid {@code VkEvent} handle
  • *
  • {@code stageMask} must be a valid combination of {@code VkPipelineStageFlagBits} values
  • *
  • {@code stageMask} must not be 0
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • Both of {@code commandBuffer}, and {@code event} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Outside           | Graphics, Compute     | —
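A minimal usage sketch follows (not part of the generated binding). The command buffer and event handles are hypothetical; the transfer stage is chosen only for illustration.

    import org.lwjgl.vulkan.VkCommandBuffer;
    import static org.lwjgl.vulkan.VK10.*;

    public final class SetEventExample {
        /** Signals the event once all transfer work recorded so far has completed. */
        public static void recordSignal(VkCommandBuffer cmd, long event) {
            // Must be recorded outside a render pass instance; stageMask must not contain PIPELINE_STAGE_HOST_BIT.
            vkCmdSetEvent(cmd, event, VK_PIPELINE_STAGE_TRANSFER_BIT);
        }
    }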
* * @param commandBuffer the command buffer into which the command is recorded. * @param event the event that will be signaled. * @param stageMask specifies the source stage mask used to determine when the {@code event} is signaled. */ public static void vkCmdSetEvent(VkCommandBuffer commandBuffer, @NativeType("VkEvent") long event, @NativeType("VkPipelineStageFlags") int stageMask) { long __functionAddress = commandBuffer.getCapabilities().vkCmdSetEvent; callPJV(__functionAddress, commandBuffer.address(), event, stageMask); } // --- [ vkCmdResetEvent ] --- /** * Reset an event object to non-signaled state. * *
C Specification
* *

To set the state of an event to unsignaled from a device, call:

* *
     * void vkCmdResetEvent(
     *     VkCommandBuffer                             commandBuffer,
     *     VkEvent                                     event,
     *     VkPipelineStageFlags                        stageMask);
* *
Description
* *

When {@link #vkCmdResetEvent CmdResetEvent} is submitted to a queue, it defines an execution dependency on commands that were submitted before it, and defines an event unsignal operation which resets the event to the unsignaled state.

* *

The first synchronization scope includes every command previously submitted to the same queue, including those in the same command buffer and batch. The synchronization scope is limited to operations on the pipeline stages determined by the source stage mask specified by {@code stageMask}.

* *

The second synchronization scope includes only the event unsignal operation.

* *

If {@code event} is already in the unsignaled state when {@link #vkCmdResetEvent CmdResetEvent} is executed on the device, then {@link #vkCmdResetEvent CmdResetEvent} has no effect, no event unsignal operation occurs, and no execution dependency is generated.

* *
Valid Usage
* *
    *
  • {@code stageMask} must not include {@link #VK_PIPELINE_STAGE_HOST_BIT PIPELINE_STAGE_HOST_BIT}
  • *
  • If the geometry shaders feature is not enabled, {@code stageMask} must not contain {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT}
  • *
  • If the tessellation shaders feature is not enabled, {@code stageMask} must not contain {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT} or {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT}
  • *
  • When this command executes, {@code event} must not be waited on by a {@link #vkCmdWaitEvents CmdWaitEvents} command that is currently executing
  • *
  • {@code commandBuffer}’s current device mask must include exactly one physical device.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code event} must be a valid {@code VkEvent} handle
  • *
  • {@code stageMask} must be a valid combination of {@code VkPipelineStageFlagBits} values
  • *
  • {@code stageMask} must not be 0
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • Both of {@code commandBuffer}, and {@code event} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Outside           | Graphics, Compute     | —
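A minimal usage sketch follows (not part of the generated binding). The command buffer and event handles are hypothetical; the compute-shader stage is chosen only for illustration.

    import org.lwjgl.vulkan.VkCommandBuffer;
    import static org.lwjgl.vulkan.VK10.*;

    public final class ResetEventExample {
        /** Unsignals the event once all compute work recorded so far has completed. */
        public static void recordReset(VkCommandBuffer cmd, long event) {
            // The event must not be waited on by a vkCmdWaitEvents command that is still executing.
            vkCmdResetEvent(cmd, event, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT);
        }
    }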
* * @param commandBuffer the command buffer into which the command is recorded. * @param event the event that will be unsignaled. * @param stageMask a bitmask of {@code VkPipelineStageFlagBits} specifying the source stage mask used to determine when the {@code event} is unsignaled. */ public static void vkCmdResetEvent(VkCommandBuffer commandBuffer, @NativeType("VkEvent") long event, @NativeType("VkPipelineStageFlags") int stageMask) { long __functionAddress = commandBuffer.getCapabilities().vkCmdResetEvent; callPJV(__functionAddress, commandBuffer.address(), event, stageMask); } // --- [ vkCmdWaitEvents ] --- /** * Unsafe version of: {@link #vkCmdWaitEvents CmdWaitEvents} * * @param eventCount the length of the {@code pEvents} array. * @param memoryBarrierCount the length of the {@code pMemoryBarriers} array. * @param bufferMemoryBarrierCount the length of the {@code pBufferMemoryBarriers} array. * @param imageMemoryBarrierCount the length of the {@code pImageMemoryBarriers} array. */ public static void nvkCmdWaitEvents(VkCommandBuffer commandBuffer, int eventCount, long pEvents, int srcStageMask, int dstStageMask, int memoryBarrierCount, long pMemoryBarriers, int bufferMemoryBarrierCount, long pBufferMemoryBarriers, int imageMemoryBarrierCount, long pImageMemoryBarriers) { long __functionAddress = commandBuffer.getCapabilities().vkCmdWaitEvents; callPPPPPV(__functionAddress, commandBuffer.address(), eventCount, pEvents, srcStageMask, dstStageMask, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } /** * Wait for one or more events and insert a set of memory. * *
C Specification
* *

To wait for one or more events to enter the signaled state on a device, call:

* *
     * void vkCmdWaitEvents(
     *     VkCommandBuffer                             commandBuffer,
     *     uint32_t                                    eventCount,
     *     const VkEvent*                              pEvents,
     *     VkPipelineStageFlags                        srcStageMask,
     *     VkPipelineStageFlags                        dstStageMask,
     *     uint32_t                                    memoryBarrierCount,
     *     const VkMemoryBarrier*                      pMemoryBarriers,
     *     uint32_t                                    bufferMemoryBarrierCount,
     *     const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
     *     uint32_t                                    imageMemoryBarrierCount,
     *     const VkImageMemoryBarrier*                 pImageMemoryBarriers);
* *
Description
* *

When {@link #vkCmdWaitEvents CmdWaitEvents} is submitted to a queue, it defines a memory dependency between prior event signal operations on the same queue or the host, and subsequent commands. {@link #vkCmdWaitEvents CmdWaitEvents} must not be used to wait on event signal operations occurring on other queues.

* *

The first synchronization scope only includes event signal operations that operate on members of {@code pEvents}, and the operations that happened-before the event signal operations. Event signal operations performed by {@link #vkCmdSetEvent CmdSetEvent} that were previously submitted to the same queue are included in the first synchronization scope, if the logically latest pipeline stage in their {@code stageMask} parameter is logically earlier than or equal to the logically latest pipeline stage in {@code srcStageMask}. Event signal operations performed by {@link #vkSetEvent SetEvent} are only included in the first synchronization scope if {@link #VK_PIPELINE_STAGE_HOST_BIT PIPELINE_STAGE_HOST_BIT} is included in {@code srcStageMask}.

* *

The second synchronization scope includes commands subsequently submitted to the same queue, including those in the same command buffer and batch. The second synchronization scope is limited to operations on the pipeline stages determined by the destination stage mask specified by {@code dstStageMask}.

* *

The first access scope is limited to access in the pipeline stages determined by the source stage mask specified by {@code srcStageMask}. Within that, the first access scope only includes the first access scopes defined by elements of the {@code pMemoryBarriers}, {@code pBufferMemoryBarriers} and {@code pImageMemoryBarriers} arrays, which each define a set of memory barriers. If no memory barriers are specified, then the first access scope includes no accesses.

* *

The second access scope is limited to access in the pipeline stages determined by the destination stage mask specified by {@code dstStageMask}. Within that, the second access scope only includes the second access scopes defined by elements of the {@code pMemoryBarriers}, {@code pBufferMemoryBarriers} and {@code pImageMemoryBarriers} arrays, which each define a set of memory barriers. If no memory barriers are specified, then the second access scope includes no accesses.

* *
Note
* *

{@link #vkCmdWaitEvents CmdWaitEvents} is used with {@link #vkCmdSetEvent CmdSetEvent} to define a memory dependency between two sets of action commands, roughly in the same way as pipeline barriers, but split into two commands such that work between the two may execute unhindered.

*
* *
Note
* *

Applications should be careful to avoid race conditions when using events. There is no direct ordering guarantee between a {@link #vkCmdResetEvent CmdResetEvent} command and a {@link #vkCmdWaitEvents CmdWaitEvents} command submitted after it, so some other execution dependency must be included between these commands (e.g. a semaphore).

*
* *
Valid Usage
* *
    *
  • {@code srcStageMask} must be the bitwise OR of the {@code stageMask} parameter used in previous calls to {@link #vkCmdSetEvent CmdSetEvent} with any of the members of {@code pEvents} and {@link #VK_PIPELINE_STAGE_HOST_BIT PIPELINE_STAGE_HOST_BIT} if any of the members of {@code pEvents} was set using {@link #vkSetEvent SetEvent}
  • *
  • If the geometry shaders feature is not enabled, {@code srcStageMask} must not contain {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT}
  • *
  • If the geometry shaders feature is not enabled, {@code dstStageMask} must not contain {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT}
  • *
  • If the tessellation shaders feature is not enabled, {@code srcStageMask} must not contain {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT} or {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT}
  • *
  • If the tessellation shaders feature is not enabled, {@code dstStageMask} must not contain {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT} or {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT}
  • *
  • If {@code pEvents} includes one or more events that will be signaled by {@link #vkSetEvent SetEvent} after {@code commandBuffer} has been submitted to a queue, then {@link #vkCmdWaitEvents CmdWaitEvents} must not be called inside a render pass instance
  • *
  • Any pipeline stage included in {@code srcStageMask} or {@code dstStageMask} must be supported by the capabilities of the queue family specified by the {@code queueFamilyIndex} member of the {@link VkCommandPoolCreateInfo} structure that was used to create the {@code VkCommandPool} that {@code commandBuffer} was allocated from, as specified in the table of supported pipeline stages.
  • *
  • Each element of {@code pMemoryBarriers}, {@code pBufferMemoryBarriers} or {@code pImageMemoryBarriers} must not have any access flag included in its {@code srcAccessMask} member if that bit is not supported by any of the pipeline stages in {@code srcStageMask}, as specified in the table of supported access types.
  • *
  • Each element of {@code pMemoryBarriers}, {@code pBufferMemoryBarriers} or {@code pImageMemoryBarriers} must not have any access flag included in its {@code dstAccessMask} member if that bit is not supported by any of the pipeline stages in {@code dstStageMask}, as specified in the table of supported access types.
  • *
  • {@code commandBuffer}’s current device mask must include exactly one physical device.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code pEvents} must be a valid pointer to an array of {@code eventCount} valid {@code VkEvent} handles
  • *
  • {@code srcStageMask} must be a valid combination of {@code VkPipelineStageFlagBits} values
  • *
  • {@code srcStageMask} must not be 0
  • *
  • {@code dstStageMask} must be a valid combination of {@code VkPipelineStageFlagBits} values
  • *
  • {@code dstStageMask} must not be 0
  • *
  • If {@code memoryBarrierCount} is not 0, {@code pMemoryBarriers} must be a valid pointer to an array of {@code memoryBarrierCount} valid {@link VkMemoryBarrier} structures
  • *
  • If {@code bufferMemoryBarrierCount} is not 0, {@code pBufferMemoryBarriers} must be a valid pointer to an array of {@code bufferMemoryBarrierCount} valid {@link VkBufferMemoryBarrier} structures
  • *
  • If {@code imageMemoryBarrierCount} is not 0, {@code pImageMemoryBarriers} must be a valid pointer to an array of {@code imageMemoryBarrierCount} valid {@link VkImageMemoryBarrier} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • {@code eventCount} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and the elements of {@code pEvents} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Both              | Graphics, Compute     | —
* *
See Also
* *

{@link VkBufferMemoryBarrier}, {@link VkImageMemoryBarrier}, {@link VkMemoryBarrier}
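As an illustration (not part of the generated binding), the sketch below waits on an event that a matching {@link #vkCmdSetEvent CmdSetEvent} is assumed to have signaled at the transfer stage, and uses a global memory barrier to make transfer writes visible to fragment-shader reads. The handles are hypothetical, and the {@code callocStack} helper assumes an LWJGL 3.1-era stack API.

    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.*;
    import static org.lwjgl.vulkan.VK10.*;

    public final class WaitEventsExample {
        /** Waits for an event signaled at the transfer stage and makes transfer writes visible to fragment-shader reads. */
        public static void recordWait(VkCommandBuffer cmd, long event) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                VkMemoryBarrier.Buffer memoryBarrier = VkMemoryBarrier.callocStack(1, stack)
                    .sType(VK_STRUCTURE_TYPE_MEMORY_BARRIER)
                    .srcAccessMask(VK_ACCESS_TRANSFER_WRITE_BIT)
                    .dstAccessMask(VK_ACCESS_SHADER_READ_BIT);

                // srcStageMask must cover the stageMask used by the matching vkCmdSetEvent call.
                vkCmdWaitEvents(cmd,
                    stack.longs(event),
                    VK_PIPELINE_STAGE_TRANSFER_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                    memoryBarrier, null, null);
            }
        }
    }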

* * @param commandBuffer the command buffer into which the command is recorded. * @param pEvents an array of event object handles to wait on. * @param srcStageMask a bitmask of {@code VkPipelineStageFlagBits} specifying the source stage mask. * @param dstStageMask a bitmask of {@code VkPipelineStageFlagBits} specifying the destination stage mask. * @param pMemoryBarriers a pointer to an array of {@link VkMemoryBarrier} structures. * @param pBufferMemoryBarriers a pointer to an array of {@link VkBufferMemoryBarrier} structures. * @param pImageMemoryBarriers a pointer to an array of {@link VkImageMemoryBarrier} structures. */ public static void vkCmdWaitEvents(VkCommandBuffer commandBuffer, @NativeType("const VkEvent *") LongBuffer pEvents, @NativeType("VkPipelineStageFlags") int srcStageMask, @NativeType("VkPipelineStageFlags") int dstStageMask, @Nullable @NativeType("const VkMemoryBarrier *") VkMemoryBarrier.Buffer pMemoryBarriers, @Nullable @NativeType("const VkBufferMemoryBarrier *") VkBufferMemoryBarrier.Buffer pBufferMemoryBarriers, @Nullable @NativeType("const VkImageMemoryBarrier *") VkImageMemoryBarrier.Buffer pImageMemoryBarriers) { nvkCmdWaitEvents(commandBuffer, pEvents.remaining(), memAddress(pEvents), srcStageMask, dstStageMask, remainingSafe(pMemoryBarriers), memAddressSafe(pMemoryBarriers), remainingSafe(pBufferMemoryBarriers), memAddressSafe(pBufferMemoryBarriers), remainingSafe(pImageMemoryBarriers), memAddressSafe(pImageMemoryBarriers)); } // --- [ vkCmdPipelineBarrier ] --- /** * Unsafe version of: {@link #vkCmdPipelineBarrier CmdPipelineBarrier} * * @param memoryBarrierCount the length of the {@code pMemoryBarriers} array. * @param bufferMemoryBarrierCount the length of the {@code pBufferMemoryBarriers} array. * @param imageMemoryBarrierCount the length of the {@code pImageMemoryBarriers} array. */ public static void nvkCmdPipelineBarrier(VkCommandBuffer commandBuffer, int srcStageMask, int dstStageMask, int dependencyFlags, int memoryBarrierCount, long pMemoryBarriers, int bufferMemoryBarrierCount, long pBufferMemoryBarriers, int imageMemoryBarrierCount, long pImageMemoryBarriers) { long __functionAddress = commandBuffer.getCapabilities().vkCmdPipelineBarrier; callPPPPV(__functionAddress, commandBuffer.address(), srcStageMask, dstStageMask, dependencyFlags, memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount, pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers); } /** * Insert a memory dependency. * *
C Specification
* *

To record a pipeline barrier, call:

* *
     * void vkCmdPipelineBarrier(
     *     VkCommandBuffer                             commandBuffer,
     *     VkPipelineStageFlags                        srcStageMask,
     *     VkPipelineStageFlags                        dstStageMask,
     *     VkDependencyFlags                           dependencyFlags,
     *     uint32_t                                    memoryBarrierCount,
     *     const VkMemoryBarrier*                      pMemoryBarriers,
     *     uint32_t                                    bufferMemoryBarrierCount,
     *     const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
     *     uint32_t                                    imageMemoryBarrierCount,
     *     const VkImageMemoryBarrier*                 pImageMemoryBarriers);
* *
Description
* *

When {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is submitted to a queue, it defines a memory dependency between commands that were submitted before it, and those submitted after it.

* *

If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} was recorded outside a render pass instance, the first synchronization scope includes every command submitted to the same queue before it, including those in the same command buffer and batch. If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} was recorded inside a render pass instance, the first synchronization scope includes only commands submitted before it within the same subpass. In either case, the first synchronization scope is limited to operations on the pipeline stages determined by the source stage mask specified by {@code srcStageMask}.

* *

If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} was recorded outside a render pass instance, the second synchronization scope includes every command submitted to the same queue after it, including those in the same command buffer and batch. If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} was recorded inside a render pass instance, the second synchronization scope includes only commands submitted after it within the same subpass. In either case, the second synchronization scope is limited to operations on the pipeline stages determined by the destination stage mask specified by {@code dstStageMask}.

* *

The first access scope is limited to access in the pipeline stages determined by the source stage mask specified by {@code srcStageMask}. Within that, the first access scope only includes the first access scopes defined by elements of the {@code pMemoryBarriers}, {@code pBufferMemoryBarriers} and {@code pImageMemoryBarriers} arrays, which each define a set of memory barriers. If no memory barriers are specified, then the first access scope includes no accesses.

* *

The second access scope is limited to access in the pipeline stages determined by the destination stage mask specified by {@code dstStageMask}. Within that, the second access scope only includes the second access scopes defined by elements of the {@code pMemoryBarriers}, {@code pBufferMemoryBarriers} and {@code pImageMemoryBarriers} arrays, which each define a set of memory barriers. If no memory barriers are specified, then the second access scope includes no accesses.

* *

If {@code dependencyFlags} includes {@link #VK_DEPENDENCY_BY_REGION_BIT DEPENDENCY_BY_REGION_BIT}, then any dependency between framebuffer-space pipeline stages is framebuffer-local - otherwise it is framebuffer-global.

* *
Valid Usage
* *
    *
  • If the geometry shaders feature is not enabled, {@code srcStageMask} must not contain {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT}
  • *
  • If the geometry shaders feature is not enabled, {@code dstStageMask} must not contain {@link #VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT PIPELINE_STAGE_GEOMETRY_SHADER_BIT}
  • *
  • If the tessellation shaders feature is not enabled, {@code srcStageMask} must not contain {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT} or {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT}
  • *
  • If the tessellation shaders feature is not enabled, {@code dstStageMask} must not contain {@link #VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT} or {@link #VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT}
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, the render pass must have been created with a {@link VkSubpassDependency} instance in {@code pDependencies} that expresses a dependency from the current subpass to itself.
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, {@code srcStageMask} must contain a subset of the bit values in the {@code srcStageMask} member of that instance of {@link VkSubpassDependency}
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, {@code dstStageMask} must contain a subset of the bit values in the {@code dstStageMask} member of that instance of {@link VkSubpassDependency}
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, the {@code srcAccessMask} of any element of {@code pMemoryBarriers} or {@code pImageMemoryBarriers} must contain a subset of the bit values in the {@code srcAccessMask} member of that instance of {@link VkSubpassDependency}
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, the {@code dstAccessMask} of any element of {@code pMemoryBarriers} or {@code pImageMemoryBarriers} must contain a subset of the bit values in the {@code dstAccessMask} member of that instance of {@link VkSubpassDependency}
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, {@code dependencyFlags} must be equal to the {@code dependencyFlags} member of that instance of {@link VkSubpassDependency}
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, {@code bufferMemoryBarrierCount} must be 0
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, the {@code image} member of any element of {@code pImageMemoryBarriers} must be equal to one of the elements of {@code pAttachments} that the current {@code framebuffer} was created with, that is also referred to by one of the elements of the {@code pColorAttachments}, {@code pResolveAttachments} or {@code pDepthStencilAttachment} members of the {@link VkSubpassDescription} instance that the current subpass was created with
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, the {@code oldLayout} and {@code newLayout} members of any element of {@code pImageMemoryBarriers} must be equal to the {@code layout} member of an element of the {@code pColorAttachments}, {@code pResolveAttachments} or {@code pDepthStencilAttachment} members of the {@link VkSubpassDescription} instance that the current subpass was created with, that refers to the same {@code image}
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, the {@code oldLayout} and {@code newLayout} members of an element of {@code pImageMemoryBarriers} must be equal
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called within a render pass instance, the {@code srcQueueFamilyIndex} and {@code dstQueueFamilyIndex} members of any element of {@code pImageMemoryBarriers} must be {@link #VK_QUEUE_FAMILY_IGNORED QUEUE_FAMILY_IGNORED}
  • *
  • Any pipeline stage included in {@code srcStageMask} or {@code dstStageMask} must be supported by the capabilities of the queue family specified by the {@code queueFamilyIndex} member of the {@link VkCommandPoolCreateInfo} structure that was used to create the {@code VkCommandPool} that {@code commandBuffer} was allocated from, as specified in the table of supported pipeline stages.
  • *
  • Each element of {@code pMemoryBarriers}, {@code pBufferMemoryBarriers} and {@code pImageMemoryBarriers} must not have any access flag included in its {@code srcAccessMask} member if that bit is not supported by any of the pipeline stages in {@code srcStageMask}, as specified in the table of supported access types.
  • *
  • Each element of {@code pMemoryBarriers}, {@code pBufferMemoryBarriers} and {@code pImageMemoryBarriers} must not have any access flag included in its {@code dstAccessMask} member if that bit is not supported by any of the pipeline stages in {@code dstStageMask}, as specified in the table of supported access types.
  • *
  • If {@link #vkCmdPipelineBarrier CmdPipelineBarrier} is called outside of a render pass instance, {@code dependencyFlags} must not include {@link KHXMultiview#VK_DEPENDENCY_VIEW_LOCAL_BIT_KHX DEPENDENCY_VIEW_LOCAL_BIT_KHX}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code srcStageMask} must be a valid combination of {@code VkPipelineStageFlagBits} values
  • *
  • {@code srcStageMask} must not be 0
  • *
  • {@code dstStageMask} must be a valid combination of {@code VkPipelineStageFlagBits} values
  • *
  • {@code dstStageMask} must not be 0
  • *
  • {@code dependencyFlags} must be a valid combination of {@code VkDependencyFlagBits} values
  • *
  • If {@code memoryBarrierCount} is not 0, {@code pMemoryBarriers} must be a valid pointer to an array of {@code memoryBarrierCount} valid {@link VkMemoryBarrier} structures
  • *
  • If {@code bufferMemoryBarrierCount} is not 0, {@code pBufferMemoryBarriers} must be a valid pointer to an array of {@code bufferMemoryBarrierCount} valid {@link VkBufferMemoryBarrier} structures
  • *
  • If {@code imageMemoryBarrierCount} is not 0, {@code pImageMemoryBarriers} must be a valid pointer to an array of {@code imageMemoryBarrierCount} valid {@link VkImageMemoryBarrier} structures
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types        | Pipeline Type
Primary, Secondary    | Both              | Transfer, Graphics, Compute  | —
* *
See Also
* *

{@link VkBufferMemoryBarrier}, {@link VkImageMemoryBarrier}, {@link VkMemoryBarrier}
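For reference, a minimal LWJGL sketch of a common pipeline barrier follows (not part of the generated binding): an image layout transition from {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL} to {@link #VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL} after a transfer write. The handles are hypothetical, and the {@code callocStack} helper assumes an LWJGL 3.1-era stack API.

    import org.lwjgl.system.MemoryStack;
    import org.lwjgl.vulkan.*;
    import static org.lwjgl.vulkan.VK10.*;

    public final class PipelineBarrierExample {
        /** Transitions a color image from TRANSFER_DST_OPTIMAL to SHADER_READ_ONLY_OPTIMAL after a transfer write. */
        public static void recordTransition(VkCommandBuffer cmd, long image) {
            try (MemoryStack stack = MemoryStack.stackPush()) {
                VkImageMemoryBarrier.Buffer barrier = VkImageMemoryBarrier.callocStack(1, stack)
                    .sType(VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER)
                    .srcAccessMask(VK_ACCESS_TRANSFER_WRITE_BIT)
                    .dstAccessMask(VK_ACCESS_SHADER_READ_BIT)
                    .oldLayout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL)
                    .newLayout(VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
                    .srcQueueFamilyIndex(VK_QUEUE_FAMILY_IGNORED)
                    .dstQueueFamilyIndex(VK_QUEUE_FAMILY_IGNORED)
                    .image(image);
                barrier.subresourceRange()
                    .aspectMask(VK_IMAGE_ASPECT_COLOR_BIT)
                    .baseMipLevel(0)
                    .levelCount(VK_REMAINING_MIP_LEVELS)
                    .baseArrayLayer(0)
                    .layerCount(VK_REMAINING_ARRAY_LAYERS);

                // No global memory or buffer barriers are needed for this transition, so those arrays are null.
                vkCmdPipelineBarrier(cmd,
                    VK_PIPELINE_STAGE_TRANSFER_BIT,
                    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
                    0, // dependencyFlags
                    null, null, barrier);
            }
        }
    }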

* * @param commandBuffer the command buffer into which the command is recorded. * @param srcStageMask a bitmask of {@code VkPipelineStageFlagBits} specifying the source stage mask. * @param dstStageMask a bitmask of {@code VkPipelineStageFlagBits} specifying the destination stage mask. * @param dependencyFlags a bitmask of {@code VkDependencyFlagBits} specifying how execution and memory dependencies are formed. * @param pMemoryBarriers a pointer to an array of {@link VkMemoryBarrier} structures. * @param pBufferMemoryBarriers a pointer to an array of {@link VkBufferMemoryBarrier} structures. * @param pImageMemoryBarriers a pointer to an array of {@link VkImageMemoryBarrier} structures. */ public static void vkCmdPipelineBarrier(VkCommandBuffer commandBuffer, @NativeType("VkPipelineStageFlags") int srcStageMask, @NativeType("VkPipelineStageFlags") int dstStageMask, @NativeType("VkDependencyFlags") int dependencyFlags, @Nullable @NativeType("const VkMemoryBarrier *") VkMemoryBarrier.Buffer pMemoryBarriers, @Nullable @NativeType("const VkBufferMemoryBarrier *") VkBufferMemoryBarrier.Buffer pBufferMemoryBarriers, @Nullable @NativeType("const VkImageMemoryBarrier *") VkImageMemoryBarrier.Buffer pImageMemoryBarriers) { nvkCmdPipelineBarrier(commandBuffer, srcStageMask, dstStageMask, dependencyFlags, remainingSafe(pMemoryBarriers), memAddressSafe(pMemoryBarriers), remainingSafe(pBufferMemoryBarriers), memAddressSafe(pBufferMemoryBarriers), remainingSafe(pImageMemoryBarriers), memAddressSafe(pImageMemoryBarriers)); } // --- [ vkCmdBeginQuery ] --- /** * Begin a query. * *
C Specification
* *

To begin a query, call:

* *
     * void vkCmdBeginQuery(
     *     VkCommandBuffer                             commandBuffer,
     *     VkQueryPool                                 queryPool,
     *     uint32_t                                    query,
     *     VkQueryControlFlags                         flags);
* *
Description
* *

If the {@code queryType} of the pool is {@link #VK_QUERY_TYPE_OCCLUSION QUERY_TYPE_OCCLUSION} and {@code flags} contains {@link #VK_QUERY_CONTROL_PRECISE_BIT QUERY_CONTROL_PRECISE_BIT}, an implementation must return a result that matches the actual number of samples passed. This is described in more detail in Occlusion Queries.

* *

After beginning a query, that query is considered active within the command buffer it was called in until that same query is ended. Queries active in a primary command buffer when secondary command buffers are executed are considered active for those secondary command buffers.

* *
Valid Usage
* *
    *
  • The query identified by {@code queryPool} and {@code query} must currently not be active
  • *
  • The query identified by {@code queryPool} and {@code query} must be unavailable
  • *
  • If the precise occlusion queries feature is not enabled, or the {@code queryType} used to create {@code queryPool} was not {@link #VK_QUERY_TYPE_OCCLUSION QUERY_TYPE_OCCLUSION}, {@code flags} must not contain {@link #VK_QUERY_CONTROL_PRECISE_BIT QUERY_CONTROL_PRECISE_BIT}
  • *
  • {@code queryPool} must have been created with a {@code queryType} that differs from that of any other queries that have been made active, and are currently still active within {@code commandBuffer}
  • *
  • {@code query} must be less than the number of queries in {@code queryPool}
  • *
  • If the {@code queryType} used to create {@code queryPool} was {@link #VK_QUERY_TYPE_OCCLUSION QUERY_TYPE_OCCLUSION}, the {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • If the {@code queryType} used to create {@code queryPool} was {@link #VK_QUERY_TYPE_PIPELINE_STATISTICS QUERY_TYPE_PIPELINE_STATISTICS} and any of the {@code pipelineStatistics} indicate graphics operations, the {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • If the {@code queryType} used to create {@code queryPool} was {@link #VK_QUERY_TYPE_PIPELINE_STATISTICS QUERY_TYPE_PIPELINE_STATISTICS} and any of the {@code pipelineStatistics} indicate compute operations, the {@code VkCommandPool} that {@code commandBuffer} was allocated from must support compute operations
  • *
  • All queries used by the command must not be active
  • *
  • All queries used by the command must be unavailable
  • *
  • If {@link #vkCmdBeginQuery CmdBeginQuery} is called within a render pass instance, the sum of {@code query} and the number of bits set in the current subpass’s view mask must be less than or equal to the number of queries in {@code queryPool}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code queryPool} must be a valid {@code VkQueryPool} handle
  • *
  • {@code flags} must be a valid combination of {@code VkQueryControlFlagBits} values
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • Both of {@code commandBuffer}, and {@code queryPool} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
* * * * *
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Both              | Graphics, Compute     | —
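A minimal usage sketch follows (not part of the generated binding), bracketing hypothetical draw commands with an occlusion query at index 0. The query pool handle is hypothetical, it is assumed to have been created with {@link #VK_QUERY_TYPE_OCCLUSION QUERY_TYPE_OCCLUSION}, and the queries are assumed to have been reset beforehand.

    import org.lwjgl.vulkan.VkCommandBuffer;
    import static org.lwjgl.vulkan.VK10.*;

    public final class OcclusionQueryExample {
        /** Records draw commands bracketed by an occlusion query using query index 0. */
        public static void recordQuery(VkCommandBuffer cmd, long occlusionQueryPool) {
            // Precise results require the precise occlusion queries feature and an occlusion query pool.
            vkCmdBeginQuery(cmd, occlusionQueryPool, 0, VK_QUERY_CONTROL_PRECISE_BIT);

            // ... record the draw commands whose samples should be counted ...

            vkCmdEndQuery(cmd, occlusionQueryPool, 0);
        }
    }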
* * @param commandBuffer the command buffer into which this command will be recorded. * @param queryPool the query pool that will manage the results of the query. * @param query the query index within the query pool that will contain the results. * @param flags a bitmask of {@code VkQueryControlFlagBits} specifying constraints on the types of queries that can be performed. */ public static void vkCmdBeginQuery(VkCommandBuffer commandBuffer, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int query, @NativeType("VkQueryControlFlags") int flags) { long __functionAddress = commandBuffer.getCapabilities().vkCmdBeginQuery; callPJV(__functionAddress, commandBuffer.address(), queryPool, query, flags); } // --- [ vkCmdEndQuery ] --- /** * Ends a query. * *
C Specification
* *

To end a query after the set of desired draw or dispatch commands is executed, call:

* *
     * void vkCmdEndQuery(
     *     VkCommandBuffer                             commandBuffer,
     *     VkQueryPool                                 queryPool,
     *     uint32_t                                    query);
* *
Description
* *

As queries operate asynchronously, ending a query does not immediately set the query's status to available. A query is considered finished when the final results of the query are ready to be retrieved by {@link #vkGetQueryPoolResults GetQueryPoolResults} and {@link #vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults}, and this is when the query's status is set to available.

* *

Once a query is ended the query must finish in finite time, unless the state of the query is changed using other commands, e.g. by issuing a reset of the query.
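A minimal usage sketch (not part of the machine-generated class) may help here: it records one occlusion query around a block of draw calls, assuming the {@code VkCommandBuffer}, the occlusion query pool and the draw commands are set up elsewhere and that the query index has already been reset with {@link #vkCmdResetQueryPool CmdResetQueryPool}.

import org.lwjgl.vulkan.VkCommandBuffer;

import static org.lwjgl.vulkan.VK10.*;

public final class OcclusionQueryExample {

    /** Records one occlusion query around a block of draw calls. */
    public static void recordOcclusionQuery(VkCommandBuffer cmd, long queryPool, int query) {
        // Begin the query; pass VK_QUERY_CONTROL_PRECISE_BIT instead of 0 for an exact sample count.
        vkCmdBeginQuery(cmd, queryPool, query, 0);

        // ... record the draw calls whose visible samples should be counted ...

        // End the query; its result becomes available a finite time after the batch executes.
        vkCmdEndQuery(cmd, queryPool, query);
    }

    private OcclusionQueryExample() {}
}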

Valid Usage
  • The query identified by {@code queryPool} and {@code query} must currently be active
  • *
  • {@code query} must be less than the number of queries in {@code queryPool}
  • *
  • All queries used by the command must be active
  • *
  • If {@link #vkCmdEndQuery CmdEndQuery} is called within a render pass instance, the sum of {@code query} and the number of bits set in the current subpass’s view mask must be less than or equal to the number of queries in {@code queryPool}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code queryPool} must be a valid {@code VkQueryPool} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • Both of {@code commandBuffer}, and {@code queryPool} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Both              | Graphics, Compute     | -
@param commandBuffer the command buffer into which this command will be recorded.
@param queryPool     the query pool that is managing the results of the query.
@param query         the query index within the query pool where the result is stored.

public static void vkCmdEndQuery(VkCommandBuffer commandBuffer, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int query) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdEndQuery;
    callPJV(__functionAddress, commandBuffer.address(), queryPool, query);
}

// --- [ vkCmdResetQueryPool ] ---

Reset queries in a query pool.
C Specification
* *

To reset a range of queries in a query pool, call:

* *
     * void vkCmdResetQueryPool(
     *     VkCommandBuffer                             commandBuffer,
     *     VkQueryPool                                 queryPool,
     *     uint32_t                                    firstQuery,
     *     uint32_t                                    queryCount);
* *
Description
* *

When executed on a queue, this command sets the status of query indices [firstQuery, firstQuery + queryCount - 1] to unavailable.
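As a small illustration (a sketch, not part of the generated binding), a typical pattern is to reset a whole frame's worth of queries at the start of the command buffer, outside of any render pass instance, before the first {@link #vkCmdBeginQuery CmdBeginQuery} or {@link #vkCmdWriteTimestamp CmdWriteTimestamp}:

import org.lwjgl.vulkan.VkCommandBuffer;

import static org.lwjgl.vulkan.VK10.*;

public final class QueryResetExample {

    /** Resets queries [0, queryCount) of the pool to the unavailable state. */
    public static void resetAllQueries(VkCommandBuffer cmd, long queryPool, int queryCount) {
        // Must be recorded outside of a render pass instance.
        vkCmdResetQueryPool(cmd, queryPool, 0 /* firstQuery */, queryCount);
    }

    private QueryResetExample() {}
}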

Valid Usage
  • {@code firstQuery} must be less than the number of queries in {@code queryPool}
  • *
  • The sum of {@code firstQuery} and {@code queryCount} must be less than or equal to the number of queries in {@code queryPool}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code queryPool} must be a valid {@code VkQueryPool} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • Both of {@code commandBuffer}, and {@code queryPool} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Outside           | Graphics, Compute     | -
@param commandBuffer the command buffer into which this command will be recorded.
@param queryPool     the handle of the query pool managing the queries being reset.
@param firstQuery    the initial query index to reset.
@param queryCount    the number of queries to reset.

public static void vkCmdResetQueryPool(VkCommandBuffer commandBuffer, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int firstQuery, @NativeType("uint32_t") int queryCount) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdResetQueryPool;
    callPJV(__functionAddress, commandBuffer.address(), queryPool, firstQuery, queryCount);
}

// --- [ vkCmdWriteTimestamp ] ---

Write a device timestamp into a query object.
C Specification
* *

To request a timestamp, call:

* *
     * void vkCmdWriteTimestamp(
     *     VkCommandBuffer                             commandBuffer,
     *     VkPipelineStageFlagBits                     pipelineStage,
     *     VkQueryPool                                 queryPool,
     *     uint32_t                                    query);
* *
Description
* *

{@link #vkCmdWriteTimestamp CmdWriteTimestamp} latches the value of the timer when all previous commands have completed executing as far as the specified pipeline stage, and writes the timestamp value to memory. When the timestamp value is written, the availability status of the query is set to available.

* *
Note
* *

If an implementation is unable to detect completion and latch the timer at any specific stage of the pipeline, it may instead do so at any logically later stage.

*
* *

{@link #vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults} can then be called to copy the timestamp value from the query pool into buffer memory, with ordering and synchronization behavior equivalent to how other queries operate. Timestamp values can also be retrieved from the query pool using {@link #vkGetQueryPoolResults GetQueryPoolResults}. As with other queries, the query must be reset using {@link #vkCmdResetQueryPool CmdResetQueryPool} before requesting the timestamp value be written to it.

* *

While {@link #vkCmdWriteTimestamp CmdWriteTimestamp} can be called inside or outside of a render pass instance, {@link #vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults} must only be called outside of a render pass instance.

* *

Timestamps may only be meaningfully compared if they are written by commands submitted to the same queue.

* *
Note
* *

An example of such a comparison is determining the execution time of a sequence of commands.

*
* *

If {@link #vkCmdWriteTimestamp CmdWriteTimestamp} is called while executing a render pass instance that has multiview enabled, the timestamp uses N consecutive query indices in the query pool (starting at {@code query}) where N is the number of bits set in the view mask of the subpass the command is executed in. The resulting query values are determined by an implementation-dependent choice of one of the following behaviors:

* *
    *
  • The first query is a timestamp value and (if more than one bit is set in the view mask) zero is written to the remaining queries. If two timestamps are written in the same subpass, the sum of the execution time of all views between those commands is the difference between the first query written by each command.
  • *
  • All N queries are timestamp values. If two timestamps are written in the same subpass, the sum of the execution time of all views between those commands is the sum of the difference between corresponding queries written by each command. The difference between corresponding queries may be the execution time of a single view.
  • *
* *

In either case, the application can sum the differences between all N queries to determine the total execution time.
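For reference, the sketch below (not part of the generated binding) brackets a compute dispatch with two timestamps. It assumes the pool was created with {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP}, queries 0 and 1 have been reset, a compute pipeline is already bound, and the queue family reports a non-zero {@code timestampValidBits}.

import org.lwjgl.vulkan.VkCommandBuffer;

import static org.lwjgl.vulkan.VK10.*;

public final class TimestampExample {

    /** Writes timestamps before and after a dispatch so its GPU time can be measured. */
    public static void recordTimedDispatch(VkCommandBuffer cmd, long queryPool, int groupCountX) {
        // Latch the timer before the dispatch starts executing.
        vkCmdWriteTimestamp(cmd, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, queryPool, 0);

        vkCmdDispatch(cmd, groupCountX, 1, 1);

        // Latch the timer once all preceding work has drained the pipeline.
        vkCmdWriteTimestamp(cmd, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, queryPool, 1);

        // After submission, elapsed time = (ts1 - ts0) * VkPhysicalDeviceLimits::timestampPeriod nanoseconds,
        // retrieved with vkGetQueryPoolResults or vkCmdCopyQueryPoolResults.
    }

    private TimestampExample() {}
}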

Valid Usage
  • {@code queryPool} must have been created with a {@code queryType} of {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP}
  • *
  • The query identified by {@code queryPool} and {@code query} must be unavailable
  • *
  • The command pool’s queue family must support a non-zero {@code timestampValidBits}
  • *
  • All queries used by the command must be unavailable
  • *
  • If {@link #vkCmdWriteTimestamp CmdWriteTimestamp} is called within a render pass instance, the sum of {@code query} and the number of bits set in the current subpass’s view mask must be less than or equal to the number of queries in {@code queryPool}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code pipelineStage} must be a valid {@code VkPipelineStageFlagBits} value
  • *
  • {@code queryPool} must be a valid {@code VkQueryPool} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • Both of {@code commandBuffer}, and {@code queryPool} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types       | Pipeline Type
Primary, Secondary    | Both              | Transfer, Graphics, Compute | Transfer
@param commandBuffer the command buffer into which the command will be recorded.
@param pipelineStage one of the {@code VkPipelineStageFlagBits}, specifying a stage of the pipeline.
@param queryPool     the query pool that will manage the timestamp.
@param query         the query within the query pool that will contain the timestamp.

public static void vkCmdWriteTimestamp(VkCommandBuffer commandBuffer, @NativeType("VkPipelineStageFlagBits") int pipelineStage, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int query) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdWriteTimestamp;
    callPJV(__functionAddress, commandBuffer.address(), pipelineStage, queryPool, query);
}

// --- [ vkCmdCopyQueryPoolResults ] ---

Copy the results of queries in a query pool to a buffer object.
C Specification
* *

To copy query statuses and numerical results directly to buffer memory, call:

* *
     * void vkCmdCopyQueryPoolResults(
     *     VkCommandBuffer                             commandBuffer,
     *     VkQueryPool                                 queryPool,
     *     uint32_t                                    firstQuery,
     *     uint32_t                                    queryCount,
     *     VkBuffer                                    dstBuffer,
     *     VkDeviceSize                                dstOffset,
     *     VkDeviceSize                                stride,
     *     VkQueryResultFlags                          flags);
* *
Description
* *

{@link #vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults} is guaranteed to see the effect of previous uses of {@link #vkCmdResetQueryPool CmdResetQueryPool} in the same queue, without any additional synchronization. Thus, the results will always reflect the most recent use of the query.

* *

{@code flags} has the same possible values described above for the {@code flags} parameter of {@link #vkGetQueryPoolResults GetQueryPoolResults}, but the different style of execution causes some subtle behavioral differences. Because {@link #vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults} executes in order with respect to other query commands, there is less ambiguity about which use of a query is being requested.

* *

If no bits are set in {@code flags}, results for all requested queries in the available state are written as 32-bit unsigned integer values, and nothing is written for queries in the unavailable state.

* *

If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is set, the results are written as an array of 64-bit unsigned integer values as described for {@link #vkGetQueryPoolResults GetQueryPoolResults}.

* *

If {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is set, the implementation will wait for each query's status to be in the available state before retrieving the numerical results for that query. This is guaranteed to reflect the most recent use of the query on the same queue, assuming that the query is not being simultaneously used by other queues. If the query does not become available in a finite amount of time (e.g. due to not issuing a query since the last reset), a {@link #VK_ERROR_DEVICE_LOST ERROR_DEVICE_LOST} error may occur.

* *

Similarly, if {@link #VK_QUERY_RESULT_WITH_AVAILABILITY_BIT QUERY_RESULT_WITH_AVAILABILITY_BIT} is set and {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is not set, the availability is guaranteed to reflect the most recent use of the query on the same queue, assuming that the query is not being simultaneously used by other queues. As with {@link #vkGetQueryPoolResults GetQueryPoolResults}, implementations must guarantee that if they return a non-zero availability value, then the numerical results are valid.

* *

If {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} is set, {@link #VK_QUERY_RESULT_WAIT_BIT QUERY_RESULT_WAIT_BIT} is not set, and the query's status is unavailable, an intermediate result value between zero and the final result value is written for that query.

* *

{@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT} must not be used if the pool's {@code queryType} is {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP}.

* *

{@link #vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults} is considered to be a transfer operation, and its writes to buffer memory must be synchronized using {@link #VK_PIPELINE_STAGE_TRANSFER_BIT PIPELINE_STAGE_TRANSFER_BIT} and {@link #VK_ACCESS_TRANSFER_WRITE_BIT ACCESS_TRANSFER_WRITE_BIT} before using the results.
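Putting the above together, a minimal sketch (not part of the generated binding) that copies 64-bit results into a transfer-destination buffer might look as follows; the query pool, buffer and command buffer are assumed to be created elsewhere.

import org.lwjgl.vulkan.VkCommandBuffer;

import static org.lwjgl.vulkan.VK10.*;

public final class CopyQueryResultsExample {

    /** Copies queryCount 64-bit query results into dstBuffer, which must have TRANSFER_DST usage. */
    public static void copyResults(VkCommandBuffer cmd, long queryPool, int queryCount, long dstBuffer) {
        // Must be recorded outside of a render pass instance.
        vkCmdCopyQueryPoolResults(
            cmd,
            queryPool,
            0,          // firstQuery
            queryCount,
            dstBuffer,
            0L,         // dstOffset, must be a multiple of 8 for 64-bit results
            8L,         // stride in bytes between results
            VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT);

        // Later reads of dstBuffer must be synchronized against PIPELINE_STAGE_TRANSFER_BIT /
        // ACCESS_TRANSFER_WRITE_BIT, e.g. with a buffer memory barrier.
    }

    private CopyQueryResultsExample() {}
}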

Valid Usage
  • {@code dstOffset} must be less than the size of {@code dstBuffer}
  • *
  • {@code firstQuery} must be less than the number of queries in {@code queryPool}
  • *
  • The sum of {@code firstQuery} and {@code queryCount} must be less than or equal to the number of queries in {@code queryPool}
  • *
  • If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is not set in {@code flags} then {@code dstOffset} and {@code stride} must be multiples of 4
  • *
  • If {@link #VK_QUERY_RESULT_64_BIT QUERY_RESULT_64_BIT} is set in {@code flags} then {@code dstOffset} and {@code stride} must be multiples of 8
  • *
  • {@code dstBuffer} must have enough storage, from {@code dstOffset}, to contain the result of each query, as described here
  • *
  • {@code dstBuffer} must have been created with {@link #VK_BUFFER_USAGE_TRANSFER_DST_BIT BUFFER_USAGE_TRANSFER_DST_BIT} usage flag
  • *
  • If {@code dstBuffer} is non-sparse then it must be bound completely and contiguously to a single {@code VkDeviceMemory} object
  • *
  • If the {@code queryType} used to create {@code queryPool} was {@link #VK_QUERY_TYPE_TIMESTAMP QUERY_TYPE_TIMESTAMP}, {@code flags} must not contain {@link #VK_QUERY_RESULT_PARTIAL_BIT QUERY_RESULT_PARTIAL_BIT}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code queryPool} must be a valid {@code VkQueryPool} handle
  • *
  • {@code dstBuffer} must be a valid {@code VkBuffer} handle
  • *
  • {@code flags} must be a valid combination of {@code VkQueryResultFlagBits} values
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • Each of {@code commandBuffer}, {@code dstBuffer}, and {@code queryPool} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Outside           | Graphics, Compute     | Transfer
@param commandBuffer the command buffer into which this command will be recorded.
@param queryPool     the query pool managing the queries containing the desired results.
@param firstQuery    the initial query index.
@param queryCount    the number of queries. {@code firstQuery} and {@code queryCount} together define a range of queries.
@param dstBuffer     a {@code VkBuffer} object that will receive the results of the copy command.
@param dstOffset     an offset into {@code dstBuffer}.
@param stride        the stride in bytes between results for individual queries within {@code dstBuffer}. The required size of the backing memory for {@code dstBuffer} is determined as described above for {@link #vkGetQueryPoolResults GetQueryPoolResults}.
@param flags         a bitmask of {@code VkQueryResultFlagBits} specifying how and when results are returned.

public static void vkCmdCopyQueryPoolResults(VkCommandBuffer commandBuffer, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int firstQuery, @NativeType("uint32_t") int queryCount, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("VkDeviceSize") long stride, @NativeType("VkQueryResultFlags") int flags) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdCopyQueryPoolResults;
    callPJJJJV(__functionAddress, commandBuffer.address(), queryPool, firstQuery, queryCount, dstBuffer, dstOffset, stride, flags);
}

// --- [ vkCmdPushConstants ] ---

Unsafe version of: {@link #vkCmdPushConstants CmdPushConstants}

@param size the size of the push constant range to update, in units of bytes.

public static void nvkCmdPushConstants(VkCommandBuffer commandBuffer, long layout, int stageFlags, int offset, int size, long pValues) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdPushConstants;
    callPJPV(__functionAddress, commandBuffer.address(), layout, stageFlags, offset, size, pValues);
}

Update the values of push constants.
C Specification
* *

To update push constants, call:

* *
     * void vkCmdPushConstants(
     *     VkCommandBuffer                             commandBuffer,
     *     VkPipelineLayout                            layout,
     *     VkShaderStageFlags                          stageFlags,
     *     uint32_t                                    offset,
     *     uint32_t                                    size,
     *     const void*                                 pValues);
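The Java overloads of this command derive {@code size} from the number of bytes remaining in the supplied buffer. As a minimal sketch (not part of the generated binding), pushing a 4x4 float matrix for the vertex stage could look like this, assuming the pipeline layout declares a matching push constant range:

import java.nio.FloatBuffer;

import org.lwjgl.system.MemoryStack;
import org.lwjgl.vulkan.VkCommandBuffer;

import static org.lwjgl.system.MemoryStack.*;
import static org.lwjgl.vulkan.VK10.*;

public final class PushConstantsExample {

    /** Pushes a 4x4 float matrix (64 bytes) at offset 0 for the vertex stage. */
    public static void pushMatrix(VkCommandBuffer cmd, long pipelineLayout, float[] matrix4x4) {
        try (MemoryStack stack = stackPush()) {
            FloatBuffer values = stack.floats(matrix4x4); // 16 floats -> size = 64 bytes
            vkCmdPushConstants(cmd, pipelineLayout, VK_SHADER_STAGE_VERTEX_BIT, 0, values);
        }
    }

    private PushConstantsExample() {}
}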
Valid Usage
  • {@code stageFlags} must match exactly the shader stages used in {@code layout} for the range specified by {@code offset} and {@code size}
  • *
  • {@code offset} must be a multiple of 4
  • *
  • {@code size} must be a multiple of 4
  • *
  • {@code offset} must be less than {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize}
  • *
  • {@code size} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize} minus {@code offset}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code layout} must be a valid {@code VkPipelineLayout} handle
  • *
  • {@code stageFlags} must be a valid combination of {@code VkShaderStageFlagBits} values
  • *
  • {@code stageFlags} must not be 0
  • *
  • {@code pValues} must be a valid pointer to an array of {@code size} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • {@code size} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code layout} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Both              | Graphics, Compute     | -
@param commandBuffer the command buffer in which the push constant update will be recorded.
@param layout        the pipeline layout used to program the push constant updates.
@param stageFlags    a bitmask of {@code VkShaderStageFlagBits} specifying the shader stages that will use the push constants in the updated range.
@param offset        the start offset of the push constant range to update, in units of bytes.
@param pValues       an array of {@code size} bytes containing the new push constant values.

public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") ByteBuffer pValues) {
    nvkCmdPushConstants(commandBuffer, layout, stageFlags, offset, pValues.remaining(), memAddress(pValues));
}

Update the values of push constants.
C Specification
* *

To update push constants, call:

* *
     * void vkCmdPushConstants(
     *     VkCommandBuffer                             commandBuffer,
     *     VkPipelineLayout                            layout,
     *     VkShaderStageFlags                          stageFlags,
     *     uint32_t                                    offset,
     *     uint32_t                                    size,
     *     const void*                                 pValues);
* *
Valid Usage
* *
    *
  • {@code stageFlags} must match exactly the shader stages used in {@code layout} for the range specified by {@code offset} and {@code size}
  • *
  • {@code offset} must be a multiple of 4
  • *
  • {@code size} must be a multiple of 4
  • *
  • {@code offset} must be less than {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize}
  • *
  • {@code size} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize} minus {@code offset}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code layout} must be a valid {@code VkPipelineLayout} handle
  • *
  • {@code stageFlags} must be a valid combination of {@code VkShaderStageFlagBits} values
  • *
  • {@code stageFlags} must not be 0
  • *
  • {@code pValues} must be a valid pointer to an array of {@code size} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • {@code size} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code layout} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Both              | Graphics, Compute     | -
@param commandBuffer the command buffer in which the push constant update will be recorded.
@param layout        the pipeline layout used to program the push constant updates.
@param stageFlags    a bitmask of {@code VkShaderStageFlagBits} specifying the shader stages that will use the push constants in the updated range.
@param offset        the start offset of the push constant range to update, in units of bytes.
@param pValues       an array of {@code size} bytes containing the new push constant values.

public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") ShortBuffer pValues) {
    nvkCmdPushConstants(commandBuffer, layout, stageFlags, offset, pValues.remaining() << 1, memAddress(pValues));
}

Update the values of push constants.
C Specification
* *

To update push constants, call:

* *
     * void vkCmdPushConstants(
     *     VkCommandBuffer                             commandBuffer,
     *     VkPipelineLayout                            layout,
     *     VkShaderStageFlags                          stageFlags,
     *     uint32_t                                    offset,
     *     uint32_t                                    size,
     *     const void*                                 pValues);
* *
Valid Usage
* *
    *
  • {@code stageFlags} must match exactly the shader stages used in {@code layout} for the range specified by {@code offset} and {@code size}
  • *
  • {@code offset} must be a multiple of 4
  • *
  • {@code size} must be a multiple of 4
  • *
  • {@code offset} must be less than {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize}
  • *
  • {@code size} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize} minus {@code offset}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code layout} must be a valid {@code VkPipelineLayout} handle
  • *
  • {@code stageFlags} must be a valid combination of {@code VkShaderStageFlagBits} values
  • *
  • {@code stageFlags} must not be 0
  • *
  • {@code pValues} must be a valid pointer to an array of {@code size} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • {@code size} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code layout} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Both              | Graphics, Compute     | -
@param commandBuffer the command buffer in which the push constant update will be recorded.
@param layout        the pipeline layout used to program the push constant updates.
@param stageFlags    a bitmask of {@code VkShaderStageFlagBits} specifying the shader stages that will use the push constants in the updated range.
@param offset        the start offset of the push constant range to update, in units of bytes.
@param pValues       an array of {@code size} bytes containing the new push constant values.

public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") IntBuffer pValues) {
    nvkCmdPushConstants(commandBuffer, layout, stageFlags, offset, pValues.remaining() << 2, memAddress(pValues));
}

Update the values of push constants.
C Specification
* *

To update push constants, call:

* *
     * void vkCmdPushConstants(
     *     VkCommandBuffer                             commandBuffer,
     *     VkPipelineLayout                            layout,
     *     VkShaderStageFlags                          stageFlags,
     *     uint32_t                                    offset,
     *     uint32_t                                    size,
     *     const void*                                 pValues);
* *
Valid Usage
* *
    *
  • {@code stageFlags} must match exactly the shader stages used in {@code layout} for the range specified by {@code offset} and {@code size}
  • *
  • {@code offset} must be a multiple of 4
  • *
  • {@code size} must be a multiple of 4
  • *
  • {@code offset} must be less than {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize}
  • *
  • {@code size} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize} minus {@code offset}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code layout} must be a valid {@code VkPipelineLayout} handle
  • *
  • {@code stageFlags} must be a valid combination of {@code VkShaderStageFlagBits} values
  • *
  • {@code stageFlags} must not be 0
  • *
  • {@code pValues} must be a valid pointer to an array of {@code size} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • {@code size} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code layout} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Both              | Graphics, Compute     | -
@param commandBuffer the command buffer in which the push constant update will be recorded.
@param layout        the pipeline layout used to program the push constant updates.
@param stageFlags    a bitmask of {@code VkShaderStageFlagBits} specifying the shader stages that will use the push constants in the updated range.
@param offset        the start offset of the push constant range to update, in units of bytes.
@param pValues       an array of {@code size} bytes containing the new push constant values.

public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") LongBuffer pValues) {
    nvkCmdPushConstants(commandBuffer, layout, stageFlags, offset, pValues.remaining() << 3, memAddress(pValues));
}

Update the values of push constants.
C Specification
* *

To update push constants, call:

* *
     * void vkCmdPushConstants(
     *     VkCommandBuffer                             commandBuffer,
     *     VkPipelineLayout                            layout,
     *     VkShaderStageFlags                          stageFlags,
     *     uint32_t                                    offset,
     *     uint32_t                                    size,
     *     const void*                                 pValues);
* *
Valid Usage
* *
    *
  • {@code stageFlags} must match exactly the shader stages used in {@code layout} for the range specified by {@code offset} and {@code size}
  • *
  • {@code offset} must be a multiple of 4
  • *
  • {@code size} must be a multiple of 4
  • *
  • {@code offset} must be less than {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize}
  • *
  • {@code size} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize} minus {@code offset}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code layout} must be a valid {@code VkPipelineLayout} handle
  • *
  • {@code stageFlags} must be a valid combination of {@code VkShaderStageFlagBits} values
  • *
  • {@code stageFlags} must not be 0
  • *
  • {@code pValues} must be a valid pointer to an array of {@code size} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • {@code size} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code layout} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Both              | Graphics, Compute     | -
@param commandBuffer the command buffer in which the push constant update will be recorded.
@param layout        the pipeline layout used to program the push constant updates.
@param stageFlags    a bitmask of {@code VkShaderStageFlagBits} specifying the shader stages that will use the push constants in the updated range.
@param offset        the start offset of the push constant range to update, in units of bytes.
@param pValues       an array of {@code size} bytes containing the new push constant values.

public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") FloatBuffer pValues) {
    nvkCmdPushConstants(commandBuffer, layout, stageFlags, offset, pValues.remaining() << 2, memAddress(pValues));
}

Update the values of push constants.
C Specification
* *

To update push constants, call:

* *
     * void vkCmdPushConstants(
     *     VkCommandBuffer                             commandBuffer,
     *     VkPipelineLayout                            layout,
     *     VkShaderStageFlags                          stageFlags,
     *     uint32_t                                    offset,
     *     uint32_t                                    size,
     *     const void*                                 pValues);
* *
Valid Usage
* *
    *
  • {@code stageFlags} must match exactly the shader stages used in {@code layout} for the range specified by {@code offset} and {@code size}
  • *
  • {@code offset} must be a multiple of 4
  • *
  • {@code size} must be a multiple of 4
  • *
  • {@code offset} must be less than {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize}
  • *
  • {@code size} must be less than or equal to {@link VkPhysicalDeviceLimits}{@code ::maxPushConstantsSize} minus {@code offset}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code layout} must be a valid {@code VkPipelineLayout} handle
  • *
  • {@code stageFlags} must be a valid combination of {@code VkShaderStageFlagBits} values
  • *
  • {@code stageFlags} must not be 0
  • *
  • {@code pValues} must be a valid pointer to an array of {@code size} bytes
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics, or compute operations
  • *
  • {@code size} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and {@code layout} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary, Secondary    | Both              | Graphics, Compute     | -
@param commandBuffer the command buffer in which the push constant update will be recorded.
@param layout        the pipeline layout used to program the push constant updates.
@param stageFlags    a bitmask of {@code VkShaderStageFlagBits} specifying the shader stages that will use the push constants in the updated range.
@param offset        the start offset of the push constant range to update, in units of bytes.
@param pValues       an array of {@code size} bytes containing the new push constant values.

public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") DoubleBuffer pValues) {
    nvkCmdPushConstants(commandBuffer, layout, stageFlags, offset, pValues.remaining() << 3, memAddress(pValues));
}

// --- [ vkCmdBeginRenderPass ] ---

Unsafe version of: {@link #vkCmdBeginRenderPass CmdBeginRenderPass}

public static void nvkCmdBeginRenderPass(VkCommandBuffer commandBuffer, long pRenderPassBegin, int contents) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdBeginRenderPass;
    if (CHECKS) {
        VkRenderPassBeginInfo.validate(pRenderPassBegin);
    }
    callPPV(__functionAddress, commandBuffer.address(), pRenderPassBegin, contents);
}

Begin a new render pass.
C Specification
* *

To begin a render pass instance, call:

* *
     * void vkCmdBeginRenderPass(
     *     VkCommandBuffer                             commandBuffer,
     *     const VkRenderPassBeginInfo*                pRenderPassBegin,
     *     VkSubpassContents                           contents);
* *
Description
* *

After beginning a render pass instance, the command buffer is ready to record the commands for the first subpass of that render pass.
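As a rough illustration (a sketch, not part of the generated binding), beginning a render pass instance over the full framebuffer with the first subpass recorded inline could look as follows. The render pass and framebuffer handles are assumed to be created elsewhere, and no clear values are supplied, so none of the attachments may use {@link #VK_ATTACHMENT_LOAD_OP_CLEAR ATTACHMENT_LOAD_OP_CLEAR}.

import org.lwjgl.vulkan.VkCommandBuffer;
import org.lwjgl.vulkan.VkRenderPassBeginInfo;

import static org.lwjgl.vulkan.VK10.*;

public final class BeginRenderPassExample {

    /** Begins a render pass instance covering the full framebuffer area. */
    public static void beginRenderPass(VkCommandBuffer cmd, long renderPass, long framebuffer, int width, int height) {
        VkRenderPassBeginInfo beginInfo = VkRenderPassBeginInfo.calloc()
            .sType(VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO)
            .renderPass(renderPass)
            .framebuffer(framebuffer);
        beginInfo.renderArea().offset().set(0, 0);
        beginInfo.renderArea().extent().set(width, height);

        vkCmdBeginRenderPass(cmd, beginInfo, VK_SUBPASS_CONTENTS_INLINE);

        beginInfo.free(); // the struct is consumed at record time and can be freed immediately
    }

    private BeginRenderPassExample() {}
}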

Valid Usage
  • If any of the {@code initialLayout} or {@code finalLayout} member of the {@link VkAttachmentDescription} structures or the {@code layout} member of the {@link VkAttachmentReference} structures specified when creating the render pass specified in the {@code renderPass} member of {@code pRenderPassBegin} is {@link #VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL} then the corresponding attachment image subresource of the framebuffer specified in the {@code framebuffer} member of {@code pRenderPassBegin} must have been created with {@link #VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT IMAGE_USAGE_COLOR_ATTACHMENT_BIT} set
  • *
  • If any of the {@code initialLayout} or {@code finalLayout} member of the {@link VkAttachmentDescription} structures or the {@code layout} member of the {@link VkAttachmentReference} structures specified when creating the render pass specified in the {@code renderPass} member of {@code pRenderPassBegin} is {@link KHRMaintenance2#VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR}, {@link KHRMaintenance2#VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR}, {@link #VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}, or {@link #VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL} then the corresponding attachment image subresource of the framebuffer specified in the {@code framebuffer} member of {@code pRenderPassBegin} must have been created with {@link #VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT} set
  • *
  • If any of the {@code initialLayout} or {@code finalLayout} member of the {@link VkAttachmentDescription} structures or the {@code layout} member of the {@link VkAttachmentReference} structures specified when creating the render pass specified in the {@code renderPass} member of {@code pRenderPassBegin} is {@link #VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL} then the corresponding attachment image subresource of the framebuffer specified in the {@code framebuffer} member of {@code pRenderPassBegin} must have been created with {@link #VK_IMAGE_USAGE_SAMPLED_BIT IMAGE_USAGE_SAMPLED_BIT} or {@link #VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT IMAGE_USAGE_INPUT_ATTACHMENT_BIT} set
  • *
  • If any of the {@code initialLayout} or {@code finalLayout} member of the {@link VkAttachmentDescription} structures or the {@code layout} member of the {@link VkAttachmentReference} structures specified when creating the render pass specified in the {@code renderPass} member of {@code pRenderPassBegin} is {@link #VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL} then the corresponding attachment image subresource of the framebuffer specified in the {@code framebuffer} member of {@code pRenderPassBegin} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_SRC_BIT IMAGE_USAGE_TRANSFER_SRC_BIT} set
  • *
  • If any of the {@code initialLayout} or {@code finalLayout} member of the {@link VkAttachmentDescription} structures or the {@code layout} member of the {@link VkAttachmentReference} structures specified when creating the render pass specified in the {@code renderPass} member of {@code pRenderPassBegin} is {@link #VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL} then the corresponding attachment image subresource of the framebuffer specified in the {@code framebuffer} member of {@code pRenderPassBegin} must have been created with {@link #VK_IMAGE_USAGE_TRANSFER_DST_BIT IMAGE_USAGE_TRANSFER_DST_BIT} set
  • *
  • If any of the {@code initialLayout} members of the {@link VkAttachmentDescription} structures specified when creating the render pass specified in the {@code renderPass} member of {@code pRenderPassBegin} is not {@link #VK_IMAGE_LAYOUT_UNDEFINED IMAGE_LAYOUT_UNDEFINED}, then each such {@code initialLayout} must be equal to the current layout of the corresponding attachment image subresource of the framebuffer specified in the {@code framebuffer} member of {@code pRenderPassBegin}
  • *
  • The {@code srcStageMask} and {@code dstStageMask} members of any element of the {@code pDependencies} member of {@link VkRenderPassCreateInfo} used to create {@code renderPass} must be supported by the capabilities of the queue family identified by the {@code queueFamilyIndex} member of the {@link VkCommandPoolCreateInfo} used to create the command pool which {@code commandBuffer} was allocated from.
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code pRenderPassBegin} must be a valid pointer to a valid {@link VkRenderPassBeginInfo} structure
  • *
  • {@code contents} must be a valid {@code VkSubpassContents} value
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called outside of a render pass instance
  • *
  • {@code commandBuffer} must be a primary {@code VkCommandBuffer}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary               | Outside           | Graphics              | Graphics
* *
See Also
* *

{@link VkRenderPassBeginInfo}

@param commandBuffer    the command buffer in which to record the command.
@param pRenderPassBegin a pointer to a {@link VkRenderPassBeginInfo} structure (defined below) which indicates the render pass to begin an instance of, and the framebuffer the instance uses.
@param contents         a {@code VkSubpassContents} value specifying how the commands in the first subpass will be provided.

public static void vkCmdBeginRenderPass(VkCommandBuffer commandBuffer, @NativeType("const VkRenderPassBeginInfo *") VkRenderPassBeginInfo pRenderPassBegin, @NativeType("VkSubpassContents") int contents) {
    nvkCmdBeginRenderPass(commandBuffer, pRenderPassBegin.address(), contents);
}

// --- [ vkCmdNextSubpass ] ---

Transition to the next subpass of a render pass.
C Specification
* *

To transition to the next subpass in the render pass instance after recording the commands for a subpass, call:

* *
     * void vkCmdNextSubpass(
     *     VkCommandBuffer                             commandBuffer,
     *     VkSubpassContents                           contents);
* *
Description
* *

The subpass index for a render pass begins at zero when {@link #vkCmdBeginRenderPass CmdBeginRenderPass} is recorded, and increments each time {@link #vkCmdNextSubpass CmdNextSubpass} is recorded.

* *

Moving to the next subpass automatically performs any multisample resolve operations in the subpass being ended. End-of-subpass multisample resolves are treated as color attachment writes for the purposes of synchronization. That is, they are considered to execute in the {@link #VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT} pipeline stage and their writes are synchronized with {@link #VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT ACCESS_COLOR_ATTACHMENT_WRITE_BIT}. Synchronization between rendering within a subpass and any resolve operations at the end of the subpass occurs automatically, without need for explicit dependencies or pipeline barriers. However, if the resolve attachment is also used in a different subpass, an explicit dependency is needed.

* *

After transitioning to the next subpass, the application can record the commands for that subpass.
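A compact sketch of the overall subpass sequencing (not part of the generated binding), assuming a render pass created with exactly two subpasses:

import org.lwjgl.vulkan.VkCommandBuffer;
import org.lwjgl.vulkan.VkRenderPassBeginInfo;

import static org.lwjgl.vulkan.VK10.*;

public final class SubpassSequenceExample {

    /**
     * Records a render pass with two subpasses. The subpass index starts at 0
     * when the render pass begins and is advanced exactly once here, so the
     * render pass used must have been created with two subpasses.
     */
    public static void recordTwoSubpasses(VkCommandBuffer cmd, VkRenderPassBeginInfo beginInfo) {
        vkCmdBeginRenderPass(cmd, beginInfo, VK_SUBPASS_CONTENTS_INLINE);

        // ... record commands for subpass 0 ...

        vkCmdNextSubpass(cmd, VK_SUBPASS_CONTENTS_INLINE);

        // ... record commands for subpass 1 ...

        vkCmdEndRenderPass(cmd);
    }

    private SubpassSequenceExample() {}
}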

Valid Usage
  • The current subpass index must be less than the number of subpasses in the render pass minus one
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code contents} must be a valid {@code VkSubpassContents} value
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called inside of a render pass instance
  • *
  • {@code commandBuffer} must be a primary {@code VkCommandBuffer}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary               | Inside            | Graphics              | Graphics
@param commandBuffer the command buffer in which to record the command.
@param contents      specifies how the commands in the next subpass will be provided, in the same fashion as the corresponding parameter of {@link #vkCmdBeginRenderPass CmdBeginRenderPass}.

public static void vkCmdNextSubpass(VkCommandBuffer commandBuffer, @NativeType("VkSubpassContents") int contents) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdNextSubpass;
    callPV(__functionAddress, commandBuffer.address(), contents);
}

// --- [ vkCmdEndRenderPass ] ---

End the current render pass.
C Specification
* *

To record a command to end a render pass instance after recording the commands for the last subpass, call:

* *
     * void vkCmdEndRenderPass(
     *     VkCommandBuffer                             commandBuffer);
* *
Description
* *

Ending a render pass instance performs any multisample resolve operations on the final subpass.

* *
Valid Usage
* *
    *
  • The current subpass index must be equal to the number of subpasses in the render pass minus one
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support graphics operations
  • *
  • This command must only be called inside of a render pass instance
  • *
  • {@code commandBuffer} must be a primary {@code VkCommandBuffer}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types | Pipeline Type
Primary               | Inside            | Graphics              | Graphics
@param commandBuffer the command buffer in which to end the current render pass instance.

public static void vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdEndRenderPass;
    callPV(__functionAddress, commandBuffer.address());
}

// --- [ vkCmdExecuteCommands ] ---

Unsafe version of: {@link #vkCmdExecuteCommands CmdExecuteCommands}

@param commandBufferCount the length of the {@code pCommandBuffers} array.

public static void nvkCmdExecuteCommands(VkCommandBuffer commandBuffer, int commandBufferCount, long pCommandBuffers) {
    long __functionAddress = commandBuffer.getCapabilities().vkCmdExecuteCommands;
    callPPV(__functionAddress, commandBuffer.address(), commandBufferCount, pCommandBuffers);
}

Execute a secondary command buffer from a primary command buffer.
C Specification
* *

A secondary command buffer must not be directly submitted to a queue. Instead, secondary command buffers are recorded to execute as part of a primary command buffer with the command:

* *
     * void vkCmdExecuteCommands(
     *     VkCommandBuffer                             commandBuffer,
     *     uint32_t                                    commandBufferCount,
     *     const VkCommandBuffer*                      pCommandBuffers);
* *
Description
* *

If any element of {@code pCommandBuffers} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} flag, and it was recorded into any other primary command buffer which is currently in the executable or recording state, that primary command buffer becomes invalid.
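As a minimal sketch (not part of the generated binding), executing pre-recorded secondary command buffers from inside a render pass instance could look like this; it assumes the render pass was begun with {@link #VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS} and that each secondary was recorded with {@link #VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT}.

import org.lwjgl.PointerBuffer;
import org.lwjgl.system.MemoryStack;
import org.lwjgl.vulkan.VkCommandBuffer;

import static org.lwjgl.system.MemoryStack.*;
import static org.lwjgl.vulkan.VK10.*;

public final class ExecuteCommandsExample {

    /** Executes the given secondary command buffers from the primary command buffer. */
    public static void executeSecondaries(VkCommandBuffer primary, VkCommandBuffer... secondaries) {
        try (MemoryStack stack = stackPush()) {
            // Pack the dispatchable handles into a PointerBuffer for the pCommandBuffers array.
            PointerBuffer pCommandBuffers = stack.pointers(secondaries);
            vkCmdExecuteCommands(primary, pCommandBuffers);
        }
    }

    private ExecuteCommandsExample() {}
}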

Valid Usage
  • {@code commandBuffer} must have been allocated with a {@code level} of {@link #VK_COMMAND_BUFFER_LEVEL_PRIMARY COMMAND_BUFFER_LEVEL_PRIMARY}
  • *
  • Each element of {@code pCommandBuffers} must have been allocated with a {@code level} of {@link #VK_COMMAND_BUFFER_LEVEL_SECONDARY COMMAND_BUFFER_LEVEL_SECONDARY}
  • *
  • Each element of {@code pCommandBuffers} must be in the pending or executable state.
  • *
  • If any element of {@code pCommandBuffers} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} flag, and it was recorded into any other primary command buffer, that primary command buffer must not be in the pending state
  • *
  • If any element of {@code pCommandBuffers} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} flag, it must not be in the pending state.
  • *
  • If any element of {@code pCommandBuffers} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} flag, it must not have already been recorded to {@code commandBuffer}.
  • *
  • If any element of {@code pCommandBuffers} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} flag, it must not appear more than once in {@code pCommandBuffers}.
  • *
  • Each element of {@code pCommandBuffers} must have been allocated from a {@code VkCommandPool} that was created for the same queue family as the {@code VkCommandPool} from which {@code commandBuffer} was allocated
  • *
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is being called within a render pass instance, that render pass instance must have been begun with the {@code contents} parameter of {@link #vkCmdBeginRenderPass CmdBeginRenderPass} set to {@link #VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS}
  • *
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is being called within a render pass instance, each element of {@code pCommandBuffers} must have been recorded with the {@link #VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT}
  • *
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is being called within a render pass instance, each element of {@code pCommandBuffers} must have been recorded with {@link VkCommandBufferInheritanceInfo}{@code ::subpass} set to the index of the subpass which the given command buffer will be executed in
  • *
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is being called within a render pass instance, the render passes specified in the {@code pBeginInfo::pInheritanceInfo::renderPass} members of the {@link #vkBeginCommandBuffer BeginCommandBuffer} commands used to begin recording each element of {@code pCommandBuffers} must be compatible with the current render pass.
  • *
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is being called within a render pass instance, and any element of {@code pCommandBuffers} was recorded with {@link VkCommandBufferInheritanceInfo}{@code ::framebuffer} not equal to {@link #VK_NULL_HANDLE NULL_HANDLE}, that {@code VkFramebuffer} must match the {@code VkFramebuffer} used in the current render pass instance
  • *
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is not being called within a render pass instance, each element of {@code pCommandBuffers} must not have been recorded with the {@link #VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT}
  • *
  • If the inherited queries feature is not enabled, {@code commandBuffer} must not have any queries active
  • *
  • If {@code commandBuffer} has a {@link #VK_QUERY_TYPE_OCCLUSION QUERY_TYPE_OCCLUSION} query active, then each element of {@code pCommandBuffers} must have been recorded with {@link VkCommandBufferInheritanceInfo}{@code ::occlusionQueryEnable} set to {@link #VK_TRUE TRUE}
  • *
  • If {@code commandBuffer} has a {@link #VK_QUERY_TYPE_OCCLUSION QUERY_TYPE_OCCLUSION} query active, then each element of {@code pCommandBuffers} must have been recorded with {@link VkCommandBufferInheritanceInfo}{@code ::queryFlags} having all bits set that are set for the query
  • *
  • If {@code commandBuffer} has a {@link #VK_QUERY_TYPE_PIPELINE_STATISTICS QUERY_TYPE_PIPELINE_STATISTICS} query active, then each element of {@code pCommandBuffers} must have been recorded with {@link VkCommandBufferInheritanceInfo}{@code ::pipelineStatistics} having all bits set that are set in the {@code VkQueryPool} the query uses
  • *
  • Each element of {@code pCommandBuffers} must not begin any query types that are active in {@code commandBuffer}
  • *
* *
Valid Usage (Implicit)
* *
    *
  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • *
  • {@code pCommandBuffers} must be a valid pointer to an array of {@code commandBufferCount} valid {@code VkCommandBuffer} handles
  • *
  • {@code commandBuffer} must be in the recording state
  • *
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • *
  • {@code commandBuffer} must be a primary {@code VkCommandBuffer}
  • *
  • {@code commandBufferCount} must be greater than 0
  • *
  • Both of {@code commandBuffer}, and the elements of {@code pCommandBuffers} must have been created, allocated, or retrieved from the same {@code VkDevice}
  • *
* *
Host Synchronization
* *
    *
  • Host access to {@code commandBuffer} must be externally synchronized
  • *
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized
  • *
* *
Command Properties
Command Buffer Levels | Render Pass Scope | Supported Queue Types       | Pipeline Type
Primary               | Both              | Transfer, Graphics, Compute | -
@param commandBuffer   a handle to a primary command buffer that the secondary command buffers are executed in.
@param pCommandBuffers an array of secondary command buffer handles, which are recorded to execute in the primary command buffer in the order they are listed in the array.

public static void vkCmdExecuteCommands(VkCommandBuffer commandBuffer, @NativeType("const VkCommandBuffer *") PointerBuffer pCommandBuffers) {
    nvkCmdExecuteCommands(commandBuffer, pCommandBuffers.remaining(), memAddress(pCommandBuffers));
}

Execute a secondary command buffer from a primary command buffer.
C Specification

A secondary command buffer must not be directly submitted to a queue. Instead, secondary command buffers are recorded to execute as part of a primary command buffer with the command:

    void vkCmdExecuteCommands(
        VkCommandBuffer                             commandBuffer,
        uint32_t                                    commandBufferCount,
        const VkCommandBuffer*                      pCommandBuffers);

Description
If any element of {@code pCommandBuffers} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} flag, and it was recorded into any other primary command buffer which is currently in the executable or recording state, that primary command buffer becomes invalid.

Valid Usage

  • {@code commandBuffer} must have been allocated with a {@code level} of {@link #VK_COMMAND_BUFFER_LEVEL_PRIMARY COMMAND_BUFFER_LEVEL_PRIMARY}
  • Each element of {@code pCommandBuffers} must have been allocated with a {@code level} of {@link #VK_COMMAND_BUFFER_LEVEL_SECONDARY COMMAND_BUFFER_LEVEL_SECONDARY}
  • Each element of {@code pCommandBuffers} must be in the pending or executable state
  • If any element of {@code pCommandBuffers} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} flag, and it was recorded into any other primary command buffer, that primary command buffer must not be in the pending state
  • If any element of {@code pCommandBuffers} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} flag, it must not be in the pending state
  • If any element of {@code pCommandBuffers} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} flag, it must not have already been recorded to {@code commandBuffer}
  • If any element of {@code pCommandBuffers} was not recorded with the {@link #VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT} flag, it must not appear more than once in {@code pCommandBuffers}
  • Each element of {@code pCommandBuffers} must have been allocated from a {@code VkCommandPool} that was created for the same queue family as the {@code VkCommandPool} from which {@code commandBuffer} was allocated
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is being called within a render pass instance, that render pass instance must have been begun with the {@code contents} parameter of {@link #vkCmdBeginRenderPass CmdBeginRenderPass} set to {@link #VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS}
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is being called within a render pass instance, each element of {@code pCommandBuffers} must have been recorded with the {@link #VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT}
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is being called within a render pass instance, each element of {@code pCommandBuffers} must have been recorded with {@link VkCommandBufferInheritanceInfo}{@code ::subpass} set to the index of the subpass which the given command buffer will be executed in
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is being called within a render pass instance, the render passes specified in the {@code pBeginInfo}{@code ::pInheritanceInfo}{@code ::renderPass} members of the {@link #vkBeginCommandBuffer BeginCommandBuffer} commands used to begin recording each element of {@code pCommandBuffers} must be compatible with the current render pass
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is being called within a render pass instance, and any element of {@code pCommandBuffers} was recorded with {@link VkCommandBufferInheritanceInfo}{@code ::framebuffer} not equal to {@link #VK_NULL_HANDLE NULL_HANDLE}, that {@code VkFramebuffer} must match the {@code VkFramebuffer} used in the current render pass instance
  • If {@link #vkCmdExecuteCommands CmdExecuteCommands} is not being called within a render pass instance, each element of {@code pCommandBuffers} must not have been recorded with the {@link #VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT}
  • If the inherited queries feature is not enabled, {@code commandBuffer} must not have any queries active
  • If {@code commandBuffer} has a {@link #VK_QUERY_TYPE_OCCLUSION QUERY_TYPE_OCCLUSION} query active, then each element of {@code pCommandBuffers} must have been recorded with {@link VkCommandBufferInheritanceInfo}{@code ::occlusionQueryEnable} set to {@link #VK_TRUE TRUE}
  • If {@code commandBuffer} has a {@link #VK_QUERY_TYPE_OCCLUSION QUERY_TYPE_OCCLUSION} query active, then each element of {@code pCommandBuffers} must have been recorded with {@link VkCommandBufferInheritanceInfo}{@code ::queryFlags} having all bits set that are set for the query
  • If {@code commandBuffer} has a {@link #VK_QUERY_TYPE_PIPELINE_STATISTICS QUERY_TYPE_PIPELINE_STATISTICS} query active, then each element of {@code pCommandBuffers} must have been recorded with {@link VkCommandBufferInheritanceInfo}{@code ::pipelineStatistics} having all bits set that are set in the {@code VkQueryPool} the query uses
  • Each element of {@code pCommandBuffers} must not begin any query types that are active in {@code commandBuffer}
Valid Usage (Implicit)

  • {@code commandBuffer} must be a valid {@code VkCommandBuffer} handle
  • {@code pCommandBuffers} must be a valid pointer to an array of {@code commandBufferCount} valid {@code VkCommandBuffer} handles
  • {@code commandBuffer} must be in the recording state
  • The {@code VkCommandPool} that {@code commandBuffer} was allocated from must support transfer, graphics, or compute operations
  • {@code commandBuffer} must be a primary {@code VkCommandBuffer}
  • {@code commandBufferCount} must be greater than 0
  • Both of {@code commandBuffer}, and the elements of {@code pCommandBuffers} must have been created, allocated, or retrieved from the same {@code VkDevice}

Host Synchronization

  • Host access to {@code commandBuffer} must be externally synchronized
  • Host access to the {@code VkCommandPool} that {@code commandBuffer} was allocated from must be externally synchronized

Command Properties

Command Buffer Levels | Render Pass Scope | Supported Queue Types       | Pipeline Type
Primary               | Both              | Transfer, Graphics, Compute |
@param commandBuffer a handle to a primary command buffer that the secondary command buffers are executed in.

    public static void vkCmdExecuteCommands(VkCommandBuffer commandBuffer, @NativeType("const VkCommandBuffer *") VkCommandBuffer pCommandBuffer) {
        MemoryStack stack = stackGet(); int stackPointer = stack.getPointer();
        try {
            PointerBuffer pCommandBuffers = stack.pointers(pCommandBuffer);
            nvkCmdExecuteCommands(commandBuffer, 1, memAddress(pCommandBuffers));
        } finally {
            stack.setPointer(stackPointer);
        }
    }

// --- [ VK_MAKE_VERSION ] ---

Constructs an API version number.

This macro can be used when constructing the {@link VkApplicationInfo}{@code ::apiVersion} parameter passed to {@link #vkCreateInstance CreateInstance}.
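As a quick illustration (not part of the generated bindings): packing and unpacking round-trip through the macros below, e.g. {@code VK_MAKE_VERSION(1, 0, 2)} packs major 1, minor 0, patch 2 into {@code 0x400002}. The {@code appInfo} struct mentioned in the comment is hypothetical.

    int apiVersion = VK_MAKE_VERSION(1, 0, 2); // (1 << 22) | (0 << 12) | 2 == 0x400002

    assert VK_VERSION_MAJOR(apiVersion) == 1;
    assert VK_VERSION_MINOR(apiVersion) == 0;
    assert VK_VERSION_PATCH(apiVersion) == 2;

    // Typical use, assuming a VkApplicationInfo struct named `appInfo` exists:
    // appInfo.apiVersion(VK_MAKE_VERSION(1, 0, 0));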

* * @param major the major version number * @param minor the minor version number * @param patch the patch version number */ @NativeType("uint32_t") public static int VK_MAKE_VERSION(@NativeType("uint32_t") int major, @NativeType("uint32_t") int minor, @NativeType("uint32_t") int patch) { return (major << 22) | (minor << 12) | patch; } // --- [ VK_VERSION_MAJOR ] --- /** * Extracts the API major version number from a packed version number. * * @param version the Vulkan API version */ @NativeType("uint32_t") public static int VK_VERSION_MAJOR(@NativeType("uint32_t") int version) { return version >> 22; } // --- [ VK_VERSION_MINOR ] --- /** * Extracts the API minor version number from a packed version number. * * @param version the Vulkan API version */ @NativeType("uint32_t") public static int VK_VERSION_MINOR(@NativeType("uint32_t") int version) { return (version >> 12) & 0x3FF; } // --- [ VK_VERSION_PATCH ] --- /** * Extracts the API patch version number from a packed version number. * * @param version the Vulkan API version */ @NativeType("uint32_t") public static int VK_VERSION_PATCH(@NativeType("uint32_t") int version) { return version & 0xFFF; } /** Array version of: {@link #vkEnumeratePhysicalDevices EnumeratePhysicalDevices} */ @NativeType("VkResult") public static int vkEnumeratePhysicalDevices(VkInstance instance, @NativeType("uint32_t *") int[] pPhysicalDeviceCount, @Nullable @NativeType("VkPhysicalDevice *") PointerBuffer pPhysicalDevices) { long __functionAddress = instance.getCapabilities().vkEnumeratePhysicalDevices; if (CHECKS) { check(pPhysicalDeviceCount, 1); checkSafe(pPhysicalDevices, pPhysicalDeviceCount[0]); } return callPPPI(__functionAddress, instance.address(), pPhysicalDeviceCount, memAddressSafe(pPhysicalDevices)); } /** Array version of: {@link #vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties} */ public static void vkGetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice, @NativeType("uint32_t *") int[] pQueueFamilyPropertyCount, @Nullable @NativeType("VkQueueFamilyProperties *") VkQueueFamilyProperties.Buffer pQueueFamilyProperties) { long __functionAddress = physicalDevice.getCapabilities().vkGetPhysicalDeviceQueueFamilyProperties; if (CHECKS) { check(pQueueFamilyPropertyCount, 1); checkSafe(pQueueFamilyProperties, pQueueFamilyPropertyCount[0]); } callPPPV(__functionAddress, physicalDevice.address(), pQueueFamilyPropertyCount, memAddressSafe(pQueueFamilyProperties)); } /** Array version of: {@link #vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties} */ @NativeType("VkResult") public static int vkEnumerateInstanceExtensionProperties(@Nullable @NativeType("const char *") ByteBuffer pLayerName, @NativeType("uint32_t *") int[] pPropertyCount, @Nullable @NativeType("VkExtensionProperties *") VkExtensionProperties.Buffer pProperties) { long __functionAddress = VK.getGlobalCommands().vkEnumerateInstanceExtensionProperties; if (CHECKS) { checkNT1Safe(pLayerName); check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount[0]); } return callPPPI(__functionAddress, memAddressSafe(pLayerName), pPropertyCount, memAddressSafe(pProperties)); } /** Array version of: {@link #vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties} */ @NativeType("VkResult") public static int vkEnumerateInstanceExtensionProperties(@Nullable @NativeType("const char *") CharSequence pLayerName, @NativeType("uint32_t *") int[] pPropertyCount, @Nullable @NativeType("VkExtensionProperties *") 
VkExtensionProperties.Buffer pProperties) { long __functionAddress = VK.getGlobalCommands().vkEnumerateInstanceExtensionProperties; if (CHECKS) { check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount[0]); } MemoryStack stack = stackGet(); int stackPointer = stack.getPointer(); try { ByteBuffer pLayerNameEncoded = stack.UTF8Safe(pLayerName); return callPPPI(__functionAddress, memAddressSafe(pLayerNameEncoded), pPropertyCount, memAddressSafe(pProperties)); } finally { stack.setPointer(stackPointer); } } /** Array version of: {@link #vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties} */ @NativeType("VkResult") public static int vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, @Nullable @NativeType("const char *") ByteBuffer pLayerName, @NativeType("uint32_t *") int[] pPropertyCount, @Nullable @NativeType("VkExtensionProperties *") VkExtensionProperties.Buffer pProperties) { long __functionAddress = physicalDevice.getCapabilities().vkEnumerateDeviceExtensionProperties; if (CHECKS) { checkNT1Safe(pLayerName); check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount[0]); } return callPPPPI(__functionAddress, physicalDevice.address(), memAddressSafe(pLayerName), pPropertyCount, memAddressSafe(pProperties)); } /** Array version of: {@link #vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties} */ @NativeType("VkResult") public static int vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, @Nullable @NativeType("const char *") CharSequence pLayerName, @NativeType("uint32_t *") int[] pPropertyCount, @Nullable @NativeType("VkExtensionProperties *") VkExtensionProperties.Buffer pProperties) { long __functionAddress = physicalDevice.getCapabilities().vkEnumerateDeviceExtensionProperties; if (CHECKS) { check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount[0]); } MemoryStack stack = stackGet(); int stackPointer = stack.getPointer(); try { ByteBuffer pLayerNameEncoded = stack.UTF8Safe(pLayerName); return callPPPPI(__functionAddress, physicalDevice.address(), memAddressSafe(pLayerNameEncoded), pPropertyCount, memAddressSafe(pProperties)); } finally { stack.setPointer(stackPointer); } } /** Array version of: {@link #vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties} */ @NativeType("VkResult") public static int vkEnumerateInstanceLayerProperties(@NativeType("uint32_t *") int[] pPropertyCount, @Nullable @NativeType("VkLayerProperties *") VkLayerProperties.Buffer pProperties) { long __functionAddress = VK.getGlobalCommands().vkEnumerateInstanceLayerProperties; if (CHECKS) { check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount[0]); } return callPPI(__functionAddress, pPropertyCount, memAddressSafe(pProperties)); } /** Array version of: {@link #vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties} */ @NativeType("VkResult") public static int vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, @NativeType("uint32_t *") int[] pPropertyCount, @Nullable @NativeType("VkLayerProperties *") VkLayerProperties.Buffer pProperties) { long __functionAddress = physicalDevice.getCapabilities().vkEnumerateDeviceLayerProperties; if (CHECKS) { check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount[0]); } return callPPPI(__functionAddress, physicalDevice.address(), pPropertyCount, memAddressSafe(pProperties)); } /** Array version of: {@link #vkAllocateMemory AllocateMemory} */ @NativeType("VkResult") public static int vkAllocateMemory(VkDevice device, 
@NativeType("const VkMemoryAllocateInfo *") VkMemoryAllocateInfo pAllocateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkDeviceMemory *") long[] pMemory) { long __functionAddress = device.getCapabilities().vkAllocateMemory; if (CHECKS) { check(pMemory, 1); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pAllocateInfo.address(), memAddressSafe(pAllocator), pMemory); } /** Array version of: {@link #vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment} */ public static void vkGetDeviceMemoryCommitment(VkDevice device, @NativeType("VkDeviceMemory") long memory, @NativeType("VkDeviceSize *") long[] pCommittedMemoryInBytes) { long __functionAddress = device.getCapabilities().vkGetDeviceMemoryCommitment; if (CHECKS) { check(pCommittedMemoryInBytes, 1); } callPJPV(__functionAddress, device.address(), memory, pCommittedMemoryInBytes); } /** Array version of: {@link #vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements} */ public static void vkGetImageSparseMemoryRequirements(VkDevice device, @NativeType("VkImage") long image, @NativeType("uint32_t *") int[] pSparseMemoryRequirementCount, @Nullable @NativeType("VkSparseImageMemoryRequirements *") VkSparseImageMemoryRequirements.Buffer pSparseMemoryRequirements) { long __functionAddress = device.getCapabilities().vkGetImageSparseMemoryRequirements; if (CHECKS) { check(pSparseMemoryRequirementCount, 1); checkSafe(pSparseMemoryRequirements, pSparseMemoryRequirementCount[0]); } callPJPPV(__functionAddress, device.address(), image, pSparseMemoryRequirementCount, memAddressSafe(pSparseMemoryRequirements)); } /** Array version of: {@link #vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties} */ public static void vkGetPhysicalDeviceSparseImageFormatProperties(VkPhysicalDevice physicalDevice, @NativeType("VkFormat") int format, @NativeType("VkImageType") int type, @NativeType("VkSampleCountFlagBits") int samples, @NativeType("VkImageUsageFlags") int usage, @NativeType("VkImageTiling") int tiling, @NativeType("uint32_t *") int[] pPropertyCount, @Nullable @NativeType("VkSparseImageFormatProperties *") VkSparseImageFormatProperties.Buffer pProperties) { long __functionAddress = physicalDevice.getCapabilities().vkGetPhysicalDeviceSparseImageFormatProperties; if (CHECKS) { check(pPropertyCount, 1); checkSafe(pProperties, pPropertyCount[0]); } callPPPV(__functionAddress, physicalDevice.address(), format, type, samples, usage, tiling, pPropertyCount, memAddressSafe(pProperties)); } /** Array version of: {@link #vkCreateFence CreateFence} */ @NativeType("VkResult") public static int vkCreateFence(VkDevice device, @NativeType("const VkFenceCreateInfo *") VkFenceCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkFence *") long[] pFence) { long __functionAddress = device.getCapabilities().vkCreateFence; if (CHECKS) { check(pFence, 1); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pFence); } /** Array version of: {@link #vkResetFences ResetFences} */ @NativeType("VkResult") public static int vkResetFences(VkDevice device, @NativeType("const VkFence *") long[] pFences) { long __functionAddress = device.getCapabilities().vkResetFences; return 
callPPI(__functionAddress, device.address(), pFences.length, pFences); } /** Array version of: {@link #vkWaitForFences WaitForFences} */ @NativeType("VkResult") public static int vkWaitForFences(VkDevice device, @NativeType("const VkFence *") long[] pFences, @NativeType("VkBool32") boolean waitAll, @NativeType("uint64_t") long timeout) { long __functionAddress = device.getCapabilities().vkWaitForFences; return callPPJI(__functionAddress, device.address(), pFences.length, pFences, waitAll ? 1 : 0, timeout); } /** Array version of: {@link #vkCreateSemaphore CreateSemaphore} */ @NativeType("VkResult") public static int vkCreateSemaphore(VkDevice device, @NativeType("const VkSemaphoreCreateInfo *") VkSemaphoreCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkSemaphore *") long[] pSemaphore) { long __functionAddress = device.getCapabilities().vkCreateSemaphore; if (CHECKS) { check(pSemaphore, 1); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pSemaphore); } /** Array version of: {@link #vkCreateEvent CreateEvent} */ @NativeType("VkResult") public static int vkCreateEvent(VkDevice device, @NativeType("const VkEventCreateInfo *") VkEventCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkEvent *") long[] pEvent) { long __functionAddress = device.getCapabilities().vkCreateEvent; if (CHECKS) { check(pEvent, 1); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pEvent); } /** Array version of: {@link #vkCreateQueryPool CreateQueryPool} */ @NativeType("VkResult") public static int vkCreateQueryPool(VkDevice device, @NativeType("const VkQueryPoolCreateInfo *") VkQueryPoolCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkQueryPool *") long[] pQueryPool) { long __functionAddress = device.getCapabilities().vkCreateQueryPool; if (CHECKS) { check(pQueryPool, 1); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pQueryPool); } /** Array version of: {@link #vkGetQueryPoolResults GetQueryPoolResults} */ @NativeType("VkResult") public static int vkGetQueryPoolResults(VkDevice device, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int firstQuery, @NativeType("uint32_t") int queryCount, @NativeType("void *") int[] pData, @NativeType("VkDeviceSize") long stride, @NativeType("VkQueryResultFlags") int flags) { long __functionAddress = device.getCapabilities().vkGetQueryPoolResults; return callPJPPJI(__functionAddress, device.address(), queryPool, firstQuery, queryCount, (long)(pData.length << 2), pData, stride, flags); } /** Array version of: {@link #vkGetQueryPoolResults GetQueryPoolResults} */ @NativeType("VkResult") public static int vkGetQueryPoolResults(VkDevice device, @NativeType("VkQueryPool") long queryPool, @NativeType("uint32_t") int firstQuery, @NativeType("uint32_t") int queryCount, @NativeType("void *") long[] pData, @NativeType("VkDeviceSize") long stride, @NativeType("VkQueryResultFlags") int flags) { long __functionAddress = 
device.getCapabilities().vkGetQueryPoolResults; return callPJPPJI(__functionAddress, device.address(), queryPool, firstQuery, queryCount, (long)(pData.length << 3), pData, stride, flags); } /** Array version of: {@link #vkCreateBuffer CreateBuffer} */ @NativeType("VkResult") public static int vkCreateBuffer(VkDevice device, @NativeType("const VkBufferCreateInfo *") VkBufferCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkBuffer *") long[] pBuffer) { long __functionAddress = device.getCapabilities().vkCreateBuffer; if (CHECKS) { check(pBuffer, 1); VkBufferCreateInfo.validate(pCreateInfo.address()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pBuffer); } /** Array version of: {@link #vkCreateBufferView CreateBufferView} */ @NativeType("VkResult") public static int vkCreateBufferView(VkDevice device, @NativeType("const VkBufferViewCreateInfo *") VkBufferViewCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkBufferView *") long[] pView) { long __functionAddress = device.getCapabilities().vkCreateBufferView; if (CHECKS) { check(pView, 1); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pView); } /** Array version of: {@link #vkCreateImage CreateImage} */ @NativeType("VkResult") public static int vkCreateImage(VkDevice device, @NativeType("const VkImageCreateInfo *") VkImageCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkImage *") long[] pImage) { long __functionAddress = device.getCapabilities().vkCreateImage; if (CHECKS) { check(pImage, 1); VkImageCreateInfo.validate(pCreateInfo.address()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pImage); } /** Array version of: {@link #vkCreateImageView CreateImageView} */ @NativeType("VkResult") public static int vkCreateImageView(VkDevice device, @NativeType("const VkImageViewCreateInfo *") VkImageViewCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkImageView *") long[] pView) { long __functionAddress = device.getCapabilities().vkCreateImageView; if (CHECKS) { check(pView, 1); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pView); } /** Array version of: {@link #vkCreateShaderModule CreateShaderModule} */ @NativeType("VkResult") public static int vkCreateShaderModule(VkDevice device, @NativeType("const VkShaderModuleCreateInfo *") VkShaderModuleCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkShaderModule *") long[] pShaderModule) { long __functionAddress = device.getCapabilities().vkCreateShaderModule; if (CHECKS) { check(pShaderModule, 1); VkShaderModuleCreateInfo.validate(pCreateInfo.address()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return 
callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pShaderModule); } /** Array version of: {@link #vkCreatePipelineCache CreatePipelineCache} */ @NativeType("VkResult") public static int vkCreatePipelineCache(VkDevice device, @NativeType("const VkPipelineCacheCreateInfo *") VkPipelineCacheCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkPipelineCache *") long[] pPipelineCache) { long __functionAddress = device.getCapabilities().vkCreatePipelineCache; if (CHECKS) { check(pPipelineCache, 1); VkPipelineCacheCreateInfo.validate(pCreateInfo.address()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pPipelineCache); } /** Array version of: {@link #vkMergePipelineCaches MergePipelineCaches} */ @NativeType("VkResult") public static int vkMergePipelineCaches(VkDevice device, @NativeType("VkPipelineCache") long dstCache, @NativeType("const VkPipelineCache *") long[] pSrcCaches) { long __functionAddress = device.getCapabilities().vkMergePipelineCaches; return callPJPI(__functionAddress, device.address(), dstCache, pSrcCaches.length, pSrcCaches); } /** Array version of: {@link #vkCreateGraphicsPipelines CreateGraphicsPipelines} */ @NativeType("VkResult") public static int vkCreateGraphicsPipelines(VkDevice device, @NativeType("VkPipelineCache") long pipelineCache, @NativeType("const VkGraphicsPipelineCreateInfo *") VkGraphicsPipelineCreateInfo.Buffer pCreateInfos, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkPipeline *") long[] pPipelines) { long __functionAddress = device.getCapabilities().vkCreateGraphicsPipelines; if (CHECKS) { check(pPipelines, pCreateInfos.remaining()); VkGraphicsPipelineCreateInfo.validate(pCreateInfos.address(), pCreateInfos.remaining()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPJPPPI(__functionAddress, device.address(), pipelineCache, pCreateInfos.remaining(), pCreateInfos.address(), memAddressSafe(pAllocator), pPipelines); } /** Array version of: {@link #vkCreateComputePipelines CreateComputePipelines} */ @NativeType("VkResult") public static int vkCreateComputePipelines(VkDevice device, @NativeType("VkPipelineCache") long pipelineCache, @NativeType("const VkComputePipelineCreateInfo *") VkComputePipelineCreateInfo.Buffer pCreateInfos, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkPipeline *") long[] pPipelines) { long __functionAddress = device.getCapabilities().vkCreateComputePipelines; if (CHECKS) { check(pPipelines, pCreateInfos.remaining()); VkComputePipelineCreateInfo.validate(pCreateInfos.address(), pCreateInfos.remaining()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPJPPPI(__functionAddress, device.address(), pipelineCache, pCreateInfos.remaining(), pCreateInfos.address(), memAddressSafe(pAllocator), pPipelines); } /** Array version of: {@link #vkCreatePipelineLayout CreatePipelineLayout} */ @NativeType("VkResult") public static int vkCreatePipelineLayout(VkDevice device, @NativeType("const VkPipelineLayoutCreateInfo *") VkPipelineLayoutCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkPipelineLayout *") 
long[] pPipelineLayout) { long __functionAddress = device.getCapabilities().vkCreatePipelineLayout; if (CHECKS) { check(pPipelineLayout, 1); VkPipelineLayoutCreateInfo.validate(pCreateInfo.address()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pPipelineLayout); } /** Array version of: {@link #vkCreateSampler CreateSampler} */ @NativeType("VkResult") public static int vkCreateSampler(VkDevice device, @NativeType("const VkSamplerCreateInfo *") VkSamplerCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkSampler *") long[] pSampler) { long __functionAddress = device.getCapabilities().vkCreateSampler; if (CHECKS) { check(pSampler, 1); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pSampler); } /** Array version of: {@link #vkCreateDescriptorSetLayout CreateDescriptorSetLayout} */ @NativeType("VkResult") public static int vkCreateDescriptorSetLayout(VkDevice device, @NativeType("const VkDescriptorSetLayoutCreateInfo *") VkDescriptorSetLayoutCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkDescriptorSetLayout *") long[] pSetLayout) { long __functionAddress = device.getCapabilities().vkCreateDescriptorSetLayout; if (CHECKS) { check(pSetLayout, 1); VkDescriptorSetLayoutCreateInfo.validate(pCreateInfo.address()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pSetLayout); } /** Array version of: {@link #vkCreateDescriptorPool CreateDescriptorPool} */ @NativeType("VkResult") public static int vkCreateDescriptorPool(VkDevice device, @NativeType("const VkDescriptorPoolCreateInfo *") VkDescriptorPoolCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkDescriptorPool *") long[] pDescriptorPool) { long __functionAddress = device.getCapabilities().vkCreateDescriptorPool; if (CHECKS) { check(pDescriptorPool, 1); VkDescriptorPoolCreateInfo.validate(pCreateInfo.address()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pDescriptorPool); } /** Array version of: {@link #vkAllocateDescriptorSets AllocateDescriptorSets} */ @NativeType("VkResult") public static int vkAllocateDescriptorSets(VkDevice device, @NativeType("const VkDescriptorSetAllocateInfo *") VkDescriptorSetAllocateInfo pAllocateInfo, @NativeType("VkDescriptorSet *") long[] pDescriptorSets) { long __functionAddress = device.getCapabilities().vkAllocateDescriptorSets; if (CHECKS) { check(pDescriptorSets, pAllocateInfo.descriptorSetCount()); VkDescriptorSetAllocateInfo.validate(pAllocateInfo.address()); } return callPPPI(__functionAddress, device.address(), pAllocateInfo.address(), pDescriptorSets); } /** Array version of: {@link #vkFreeDescriptorSets FreeDescriptorSets} */ @NativeType("VkResult") public static int vkFreeDescriptorSets(VkDevice device, @NativeType("VkDescriptorPool") long descriptorPool, @NativeType("const VkDescriptorSet *") long[] 
pDescriptorSets) { long __functionAddress = device.getCapabilities().vkFreeDescriptorSets; return callPJPI(__functionAddress, device.address(), descriptorPool, pDescriptorSets.length, pDescriptorSets); } /** Array version of: {@link #vkCreateFramebuffer CreateFramebuffer} */ @NativeType("VkResult") public static int vkCreateFramebuffer(VkDevice device, @NativeType("const VkFramebufferCreateInfo *") VkFramebufferCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkFramebuffer *") long[] pFramebuffer) { long __functionAddress = device.getCapabilities().vkCreateFramebuffer; if (CHECKS) { check(pFramebuffer, 1); VkFramebufferCreateInfo.validate(pCreateInfo.address()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pFramebuffer); } /** Array version of: {@link #vkCreateRenderPass CreateRenderPass} */ @NativeType("VkResult") public static int vkCreateRenderPass(VkDevice device, @NativeType("const VkRenderPassCreateInfo *") VkRenderPassCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkRenderPass *") long[] pRenderPass) { long __functionAddress = device.getCapabilities().vkCreateRenderPass; if (CHECKS) { check(pRenderPass, 1); VkRenderPassCreateInfo.validate(pCreateInfo.address()); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pRenderPass); } /** Array version of: {@link #vkCreateCommandPool CreateCommandPool} */ @NativeType("VkResult") public static int vkCreateCommandPool(VkDevice device, @NativeType("const VkCommandPoolCreateInfo *") VkCommandPoolCreateInfo pCreateInfo, @Nullable @NativeType("const VkAllocationCallbacks *") VkAllocationCallbacks pAllocator, @NativeType("VkCommandPool *") long[] pCommandPool) { long __functionAddress = device.getCapabilities().vkCreateCommandPool; if (CHECKS) { check(pCommandPool, 1); if (pAllocator != null) { VkAllocationCallbacks.validate(pAllocator.address()); } } return callPPPPI(__functionAddress, device.address(), pCreateInfo.address(), memAddressSafe(pAllocator), pCommandPool); } /** Array version of: {@link #vkCmdSetBlendConstants CmdSetBlendConstants} */ public static void vkCmdSetBlendConstants(VkCommandBuffer commandBuffer, @NativeType("const float *") float[] blendConstants) { long __functionAddress = commandBuffer.getCapabilities().vkCmdSetBlendConstants; if (CHECKS) { check(blendConstants, 4); } callPPV(__functionAddress, commandBuffer.address(), blendConstants); } /** Array version of: {@link #vkCmdBindDescriptorSets CmdBindDescriptorSets} */ public static void vkCmdBindDescriptorSets(VkCommandBuffer commandBuffer, @NativeType("VkPipelineBindPoint") int pipelineBindPoint, @NativeType("VkPipelineLayout") long layout, @NativeType("uint32_t") int firstSet, @NativeType("const VkDescriptorSet *") long[] pDescriptorSets, @Nullable @NativeType("const uint32_t *") int[] pDynamicOffsets) { long __functionAddress = commandBuffer.getCapabilities().vkCmdBindDescriptorSets; callPJPPV(__functionAddress, commandBuffer.address(), pipelineBindPoint, layout, firstSet, pDescriptorSets.length, pDescriptorSets, lengthSafe(pDynamicOffsets), pDynamicOffsets); } /** Array version of: {@link #vkCmdBindVertexBuffers CmdBindVertexBuffers} */ 
public static void vkCmdBindVertexBuffers(VkCommandBuffer commandBuffer, @NativeType("uint32_t") int firstBinding, @NativeType("const VkBuffer *") long[] pBuffers, @NativeType("const VkDeviceSize *") long[] pOffsets) { long __functionAddress = commandBuffer.getCapabilities().vkCmdBindVertexBuffers; if (CHECKS) { check(pOffsets, pBuffers.length); } callPPPV(__functionAddress, commandBuffer.address(), firstBinding, pBuffers.length, pBuffers, pOffsets); } /** Array version of: {@link #vkCmdUpdateBuffer CmdUpdateBuffer} */ public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") short[] pData) { long __functionAddress = commandBuffer.getCapabilities().vkCmdUpdateBuffer; callPJJJPV(__functionAddress, commandBuffer.address(), dstBuffer, dstOffset, (long)(pData.length << 1), pData); } /** Array version of: {@link #vkCmdUpdateBuffer CmdUpdateBuffer} */ public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") int[] pData) { long __functionAddress = commandBuffer.getCapabilities().vkCmdUpdateBuffer; callPJJJPV(__functionAddress, commandBuffer.address(), dstBuffer, dstOffset, (long)(pData.length << 2), pData); } /** Array version of: {@link #vkCmdUpdateBuffer CmdUpdateBuffer} */ public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") long[] pData) { long __functionAddress = commandBuffer.getCapabilities().vkCmdUpdateBuffer; callPJJJPV(__functionAddress, commandBuffer.address(), dstBuffer, dstOffset, (long)(pData.length << 3), pData); } /** Array version of: {@link #vkCmdUpdateBuffer CmdUpdateBuffer} */ public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") float[] pData) { long __functionAddress = commandBuffer.getCapabilities().vkCmdUpdateBuffer; callPJJJPV(__functionAddress, commandBuffer.address(), dstBuffer, dstOffset, (long)(pData.length << 2), pData); } /** Array version of: {@link #vkCmdUpdateBuffer CmdUpdateBuffer} */ public static void vkCmdUpdateBuffer(VkCommandBuffer commandBuffer, @NativeType("VkBuffer") long dstBuffer, @NativeType("VkDeviceSize") long dstOffset, @NativeType("const void *") double[] pData) { long __functionAddress = commandBuffer.getCapabilities().vkCmdUpdateBuffer; callPJJJPV(__functionAddress, commandBuffer.address(), dstBuffer, dstOffset, (long)(pData.length << 3), pData); } /** Array version of: {@link #vkCmdWaitEvents CmdWaitEvents} */ public static void vkCmdWaitEvents(VkCommandBuffer commandBuffer, @NativeType("const VkEvent *") long[] pEvents, @NativeType("VkPipelineStageFlags") int srcStageMask, @NativeType("VkPipelineStageFlags") int dstStageMask, @Nullable @NativeType("const VkMemoryBarrier *") VkMemoryBarrier.Buffer pMemoryBarriers, @Nullable @NativeType("const VkBufferMemoryBarrier *") VkBufferMemoryBarrier.Buffer pBufferMemoryBarriers, @Nullable @NativeType("const VkImageMemoryBarrier *") VkImageMemoryBarrier.Buffer pImageMemoryBarriers) { long __functionAddress = commandBuffer.getCapabilities().vkCmdWaitEvents; callPPPPPV(__functionAddress, commandBuffer.address(), pEvents.length, pEvents, srcStageMask, dstStageMask, remainingSafe(pMemoryBarriers), 
memAddressSafe(pMemoryBarriers), remainingSafe(pBufferMemoryBarriers), memAddressSafe(pBufferMemoryBarriers), remainingSafe(pImageMemoryBarriers), memAddressSafe(pImageMemoryBarriers)); } /** Array version of: {@link #vkCmdPushConstants CmdPushConstants} */ public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") short[] pValues) { long __functionAddress = commandBuffer.getCapabilities().vkCmdPushConstants; callPJPV(__functionAddress, commandBuffer.address(), layout, stageFlags, offset, pValues.length << 1, pValues); } /** Array version of: {@link #vkCmdPushConstants CmdPushConstants} */ public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") int[] pValues) { long __functionAddress = commandBuffer.getCapabilities().vkCmdPushConstants; callPJPV(__functionAddress, commandBuffer.address(), layout, stageFlags, offset, pValues.length << 2, pValues); } /** Array version of: {@link #vkCmdPushConstants CmdPushConstants} */ public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") long[] pValues) { long __functionAddress = commandBuffer.getCapabilities().vkCmdPushConstants; callPJPV(__functionAddress, commandBuffer.address(), layout, stageFlags, offset, pValues.length << 3, pValues); } /** Array version of: {@link #vkCmdPushConstants CmdPushConstants} */ public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") float[] pValues) { long __functionAddress = commandBuffer.getCapabilities().vkCmdPushConstants; callPJPV(__functionAddress, commandBuffer.address(), layout, stageFlags, offset, pValues.length << 2, pValues); } /** Array version of: {@link #vkCmdPushConstants CmdPushConstants} */ public static void vkCmdPushConstants(VkCommandBuffer commandBuffer, @NativeType("VkPipelineLayout") long layout, @NativeType("VkShaderStageFlags") int stageFlags, @NativeType("uint32_t") int offset, @NativeType("const void *") double[] pValues) { long __functionAddress = commandBuffer.getCapabilities().vkCmdPushConstants; callPJPV(__functionAddress, commandBuffer.address(), layout, stageFlags, offset, pValues.length << 3, pValues); } }
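The array overloads above follow the standard Vulkan two-call enumeration pattern: call once with a {@code null} output buffer to obtain the count, then call again with a buffer of that size. A minimal sketch, assuming {@code instance} is a valid {@code VkInstance}, the usual static imports of this class, and ignoring {@code VkResult} error handling:

    int[] count = new int[1];
    vkEnumeratePhysicalDevices(instance, count, null);      // first call: query the number of devices

    PointerBuffer pDevices = BufferUtils.createPointerBuffer(count[0]);
    vkEnumeratePhysicalDevices(instance, count, pDevices);  // second call: fill in the device handles

    for (int i = 0; i < count[0]; i++) {
        VkPhysicalDevice physicalDevice = new VkPhysicalDevice(pDevices.get(i), instance);
        // ... inspect the device with vkGetPhysicalDeviceProperties, etc.
    }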