From 7070ff726b9d2f8de962413f9c89184b8de29ec7 Mon Sep 17 00:00:00 2001
From: hodasemi
Date: Sat, 14 Jan 2023 13:03:01 +0100
Subject: [PATCH] Initial commit

---
 .gitignore | 2 + .vscode/settings.json | 7 + Cargo.toml | 8 + assetpath/.vscode/settings.json | 7 + assetpath/Cargo.toml | 10 + assetpath/src/lib.rs | 133 + library_loader/.vscode/settings.json | 7 + library_loader/Cargo.toml | 8 + library_loader/src/lib.rs | 1 + library_loader/src/macros.rs | 115 + vma-rs/Cargo.toml | 15 + vma-rs/build.rs | 76 + vma-rs/src/allocation.rs | 470 + vma-rs/src/allocator.rs | 171 + vma-rs/src/allocator_pool.rs | 15 + vma-rs/src/lib.rs | 7 + vma-rs/src/prelude.rs | 6 + vma-rs/src/vma_bindings.rs | 2202 ++ vma-rs/vma_lib/vma_lib.cpp | 2 + vma-rs/vma_source/vk_mem_alloc.h | 19305 ++++++++++++++++ vma-rs/vulkan/vk_icd.h | 183 + vma-rs/vulkan/vk_layer.h | 202 + vma-rs/vulkan/vk_platform.h | 92 + vma-rs/vulkan/vk_sdk_platform.h | 69 + vma-rs/vulkan/vulkan.h | 86 + vma-rs/vulkan/vulkan_android.h | 122 + vma-rs/vulkan/vulkan_core.h | 10722 +++++++++ vma-rs/vulkan/vulkan_fuchsia.h | 57 + vma-rs/vulkan/vulkan_ggp.h | 68 + vma-rs/vulkan/vulkan_ios.h | 57 + vma-rs/vulkan/vulkan_macos.h | 57 + vma-rs/vulkan/vulkan_metal.h | 64 + vma-rs/vulkan/vulkan_vi.h | 57 + vma-rs/vulkan/vulkan_wayland.h | 64 + vma-rs/vulkan/vulkan_win32.h | 328 + vma-rs/vulkan/vulkan_xcb.h | 65 + vma-rs/vulkan/vulkan_xlib.h | 65 + vma-rs/vulkan/vulkan_xlib_xrandr.h | 55 + vulkan-rs/.vscode/settings.json | 7 + vulkan-rs/Cargo.toml | 13 + vulkan-rs/src/acceleration_structure.rs | 451 + vulkan-rs/src/address.rs | 157 + vulkan-rs/src/buffer.rs | 288 + vulkan-rs/src/commandbuffer.rs | 1090 + vulkan-rs/src/commandpool.rs | 88 + vulkan-rs/src/deferred_operation.rs | 62 + vulkan-rs/src/descriptorpool.rs | 160 + vulkan-rs/src/descriptorset.rs | 319 + vulkan-rs/src/descriptorsetlayout.rs | 130 + vulkan-rs/src/device.rs | 2943 +++ vulkan-rs/src/fence.rs | 88 + vulkan-rs/src/ffi.rs | 70 + vulkan-rs/src/framebuffer.rs | 145 + vulkan-rs/src/image.rs | 1163 + vulkan-rs/src/instance.rs | 1128 + vulkan-rs/src/lib.rs | 59 + vulkan-rs/src/macros.rs | 165 + vulkan-rs/src/memory.rs | 179 + vulkan-rs/src/physicaldevice.rs | 337 + vulkan-rs/src/pipeline.rs | 85 + vulkan-rs/src/pipelinecache.rs | 52 + vulkan-rs/src/pipelinelayout.rs | 73 + vulkan-rs/src/pipelines/compute_pipeline.rs | 84 + vulkan-rs/src/pipelines/graphics_pipeline.rs | 463 + vulkan-rs/src/pipelines/mod.rs | 4 + .../src/pipelines/ray_tracing_pipeline.rs | 270 + .../src/pipelines/shader_binding_table.rs | 289 + vulkan-rs/src/prelude.rs | 49 + vulkan-rs/src/querypool.rs | 66 + vulkan-rs/src/queue.rs | 242 + vulkan-rs/src/render_target/mod.rs | 390 + vulkan-rs/src/render_target/sub_pass.rs | 439 + vulkan-rs/src/renderpass.rs | 48 + vulkan-rs/src/sampler_manager.rs | 182 + vulkan-rs/src/semaphore.rs | 49 + vulkan-rs/src/shadermodule.rs | 178 + vulkan-rs/src/surface.rs | 80 + vulkan-rs/src/swapchain.rs | 319 + vulkan-sys/Cargo.toml | 11 + vulkan-sys/src/custom/mappedmemory.rs | 67 + vulkan-sys/src/custom/mod.rs | 5 + vulkan-sys/src/custom/names.rs | 32 + vulkan-sys/src/custom/prelude.rs | 3 + vulkan-sys/src/custom/string.rs | 70 + vulkan-sys/src/enums/accessflags.rs | 39 + vulkan-sys/src/enums/amd/mod.rs | 3 + vulkan-sys/src/enums/amd/prelude.rs | 1 + .../src/enums/amd/rasterizationorderamd.rs | 9 + .../src/enums/androidsurfacecreateflagskhr.rs | 15 + .../src/enums/attachmentdescriptionflags.rs | 15 + vulkan-sys/src/enums/attachmentloadop.rs | 9 + vulkan-sys/src/enums/attachmentstoreop.rs | 8 +
vulkan-sys/src/enums/blendfactor.rs | 25 + vulkan-sys/src/enums/blendop.rs | 11 + vulkan-sys/src/enums/bool32.rs | 33 + vulkan-sys/src/enums/bordercolor.rs | 12 + vulkan-sys/src/enums/buffercreateflags.rs | 17 + vulkan-sys/src/enums/bufferusageflags.rs | 30 + vulkan-sys/src/enums/bufferviewcreateflags.rs | 12 + vulkan-sys/src/enums/colorcomponentflags.rs | 15 + vulkan-sys/src/enums/colorspacekhr.rs | 25 + vulkan-sys/src/enums/commandbufferlevel.rs | 8 + .../src/enums/commandbufferresetflags.rs | 12 + .../src/enums/commandbufferusageflags.rs | 14 + .../src/enums/commandpoolcreateflags.rs | 13 + vulkan-sys/src/enums/commandpoolresetflags.rs | 12 + vulkan-sys/src/enums/commandpooltrimflags.rs | 12 + vulkan-sys/src/enums/compareop.rs | 14 + vulkan-sys/src/enums/componentswizzle.rs | 13 + .../src/enums/compositealphaflagskhr.rs | 21 + vulkan-sys/src/enums/cullmodeflags.rs | 15 + vulkan-sys/src/enums/debugreporterrorext.rs | 8 + vulkan-sys/src/enums/debugreportflagsext.rs | 16 + .../src/enums/debugreportobjecttypeext.rs | 43 + .../debugutilsmessageseverityflagsext.rs | 18 + .../enums/debugutilsmessagetypeflagsext.rs | 17 + ...debugutilsmessengercallbackdataflagsext.rs | 15 + .../enums/debugutilsmessengercreateflags.rs | 15 + vulkan-sys/src/enums/dependencyflags.rs | 15 + .../src/enums/descriptorpoolcreateflags.rs | 13 + .../src/enums/descriptorpoolresetflags.rs | 12 + .../enums/descriptorsetlayoutcreateflags.rs | 16 + vulkan-sys/src/enums/descriptortype.rs | 31 + vulkan-sys/src/enums/devicecreateflags.rs | 12 + .../src/enums/devicequeuecreateflags.rs | 12 + .../src/enums/displaymodecreateflagskhr.rs | 15 + .../src/enums/displayplanealphaflagskhr.rs | 12 + .../src/enums/displaysurfacecreateflagskhr.rs | 15 + vulkan-sys/src/enums/dynamicstate.rs | 22 + vulkan-sys/src/enums/eventcreateflags.rs | 12 + .../enums/ext/descriptorbindingflagsext.rs | 16 + vulkan-sys/src/enums/ext/mod.rs | 3 + vulkan-sys/src/enums/ext/prelude.rs | 1 + .../enums/externalmemoryhandletypeflags.rs | 25 + vulkan-sys/src/enums/fencecreateflags.rs | 12 + vulkan-sys/src/enums/filter.rs | 8 + vulkan-sys/src/enums/format.rs | 384 + vulkan-sys/src/enums/formatfeatureflags.rs | 30 + .../src/enums/framebuffercreateflags.rs | 12 + vulkan-sys/src/enums/frontface.rs | 8 + vulkan-sys/src/enums/imageaspectflags.rs | 15 + vulkan-sys/src/enums/imagecreateflags.rs | 26 + vulkan-sys/src/enums/imagelayout.rs | 22 + vulkan-sys/src/enums/imagetiling.rs | 8 + vulkan-sys/src/enums/imagetype.rs | 9 + vulkan-sys/src/enums/imageusageflags.rs | 19 + vulkan-sys/src/enums/imageviewcreateflags.rs | 12 + vulkan-sys/src/enums/imageviewtype.rs | 13 + vulkan-sys/src/enums/indextype.rs | 10 + vulkan-sys/src/enums/instancecreateflags.rs | 12 + .../src/enums/internalallocationtype.rs | 7 + .../src/enums/iossurfacecreateflagsmvk.rs | 12 + .../khr/acceleration_structure_build_type.rs | 9 + .../acceleration_structure_compatibility.rs | 8 + .../acceleration_structure_create_flags.rs | 15 + ...tion_structure_memory_requirements_type.rs | 9 + .../enums/khr/acceleration_structure_type.rs | 9 + .../khr/build_acceleration_structure_flags.rs | 19 + .../khr/build_acceleration_structure_mode.rs | 8 + .../khr/copy_acceleration_structure_mode.rs | 10 + vulkan-sys/src/enums/khr/geometry_flags.rs | 13 + .../src/enums/khr/geometry_instance_flags.rs | 15 + vulkan-sys/src/enums/khr/geometry_type.rs | 9 + vulkan-sys/src/enums/khr/mod.rs | 15 + vulkan-sys/src/enums/khr/prelude.rs | 13 + .../khr/ray_tracing_shader_group_type.rs | 9 + .../src/enums/khr/shader_group_shader.rs | 10 + 
vulkan-sys/src/enums/logicop.rs | 22 + .../src/enums/macossurfacecreateflagsmvk.rs | 15 + vulkan-sys/src/enums/macros.rs | 255 + vulkan-sys/src/enums/memoryheapflags.rs | 12 + vulkan-sys/src/enums/memorymapflags.rs | 12 + vulkan-sys/src/enums/memorypropertyflags.rs | 19 + .../src/enums/mirsurfacecreateflagskhr.rs | 12 + vulkan-sys/src/enums/mod.rs | 129 + vulkan-sys/src/enums/objecttype.rs | 45 + vulkan-sys/src/enums/physicaldevicetype.rs | 11 + vulkan-sys/src/enums/pipelinebindpoint.rs | 9 + .../src/enums/pipelinecachecreateflags.rs | 12 + .../src/enums/pipelinecacheheaderversion.rs | 7 + .../pipelinecolorblendstatecreateflags.rs | 15 + vulkan-sys/src/enums/pipelinecreateflags.rs | 30 + .../pipelinedepthstencilstatecreateflags.rs | 15 + .../enums/pipelinedynamicstatecreateflags.rs | 15 + .../pipelineinputassemblystatecreateflags.rs | 15 + .../src/enums/pipelinelayoutcreateflags.rs | 12 + .../pipelinemultisamplestatecreateflags.rs | 15 + .../pipelinerasterizationstatecreateflags.rs | 15 + .../enums/pipelineshaderstagecreateflags.rs | 15 + vulkan-sys/src/enums/pipelinestageflags.rs | 38 + .../pipelinetesselationstatecreateflags.rs | 15 + .../pipelinevertexinputstatecreateflags.rs | 15 + .../enums/pipelineviewportstatecreateflags.rs | 15 + vulkan-sys/src/enums/polygonmode.rs | 9 + vulkan-sys/src/enums/prelude.rs | 124 + vulkan-sys/src/enums/presentmodekhr.rs | 10 + vulkan-sys/src/enums/primitivetopology.rs | 17 + vulkan-sys/src/enums/querycontrolflags.rs | 12 + .../src/enums/querypipelinestatisticsflags.rs | 25 + vulkan-sys/src/enums/querypoolcreateflags.rs | 12 + vulkan-sys/src/enums/queryresultflags.rs | 15 + vulkan-sys/src/enums/querytype.rs | 12 + vulkan-sys/src/enums/queueflags.rs | 15 + vulkan-sys/src/enums/renderpasscreateflags.rs | 12 + vulkan-sys/src/enums/result.rs | 53 + vulkan-sys/src/enums/samplecountflags.rs | 46 + vulkan-sys/src/enums/sampleraddressmode.rs | 11 + vulkan-sys/src/enums/samplercreateflags.rs | 13 + vulkan-sys/src/enums/samplermipmapmode.rs | 8 + vulkan-sys/src/enums/semaphorecreateflags.rs | 12 + .../src/enums/shadermodulecreateflags.rs | 12 + vulkan-sys/src/enums/shaderstageflags.rs | 27 + vulkan-sys/src/enums/sharingmode.rs | 14 + .../src/enums/sparseimageformatflags.rs | 14 + vulkan-sys/src/enums/sparsememorybindflags.rs | 12 + vulkan-sys/src/enums/stencilfaceflags.rs | 14 + vulkan-sys/src/enums/stencilop.rs | 14 + vulkan-sys/src/enums/structuretype.rs | 903 + vulkan-sys/src/enums/subpasscontents.rs | 8 + .../src/enums/subpassdescriptionflags.rs | 13 + .../src/enums/surfacetransformflagskhr.rs | 26 + .../src/enums/swapchaincreateflagskhr.rs | 14 + vulkan-sys/src/enums/systemallocationscope.rs | 11 + vulkan-sys/src/enums/vertexinputrate.rs | 8 + .../src/enums/waylandsurfacecreateflagskhr.rs | 15 + .../src/enums/win32surfacecreateflagskhr.rs | 15 + .../src/enums/xcbsurfacecreateflagskhr.rs | 12 + .../src/enums/xlibsurfacecreateflagskhr.rs | 12 + vulkan-sys/src/functions/core/device.rs | 831 + vulkan-sys/src/functions/core/entry.rs | 32 + vulkan-sys/src/functions/core/instance.rs | 94 + vulkan-sys/src/functions/core/maintenance3.rs | 18 + vulkan-sys/src/functions/core/mod.rs | 7 + vulkan-sys/src/functions/core/prelude.rs | 5 + vulkan-sys/src/functions/core/statics.rs | 17 + .../functions/ext/debug_report_callback.rs | 43 + .../functions/ext/debug_utils_messenger.rs | 38 + vulkan-sys/src/functions/ext/mod.rs | 4 + vulkan-sys/src/functions/ext/prelude.rs | 2 + .../functions/khr/acceleration_structure.rs | 152 + .../src/functions/khr/deferred_operations.rs | 39 + 
vulkan-sys/src/functions/khr/device_wsi.rs | 43 + vulkan-sys/src/functions/khr/instance_wsi.rs | 176 + vulkan-sys/src/functions/khr/mod.rs | 8 + .../khr/physical_device_properties2.rs | 55 + vulkan-sys/src/functions/khr/prelude.rs | 6 + .../src/functions/khr/ray_tracing_pipeline.rs | 74 + vulkan-sys/src/functions/mod.rs | 5 + vulkan-sys/src/functions/prelude.rs | 3 + vulkan-sys/src/lib.rs | 30 + vulkan-sys/src/prelude.rs | 7 + vulkan-sys/src/structs/amd/mod.rs | 3 + ...rasterizationstaterasterizationorderamd.rs | 21 + vulkan-sys/src/structs/amd/prelude.rs | 1 + .../src/structs/core/allocationcallback.rs | 39 + .../src/structs/core/applicationinfo.rs | 62 + .../src/structs/core/attachmentdescription.rs | 45 + .../src/structs/core/attachmentreference.rs | 8 + .../src/structs/core/base_in_structure.rs | 27 + .../src/structs/core/base_out_structure.rs | 27 + .../structs/core/bind_buffer_memory_info.rs | 26 + .../structs/core/bind_image_memory_info.rs | 26 + vulkan-sys/src/structs/core/bindsparseinfo.rs | 20 + .../core/buffer_device_address_info.rs | 24 + .../core/buffer_memory_requirements_info_2.rs | 22 + vulkan-sys/src/structs/core/buffercopy.rs | 9 + .../src/structs/core/buffercreateinfo.rs | 45 + .../core/bufferdeviceaddresscreateinfoext.rs | 22 + .../src/structs/core/bufferimagecopy.rs | 12 + .../src/structs/core/buffermemorybarrier.rs | 46 + .../src/structs/core/bufferviewcreateinfo.rs | 39 + .../src/structs/core/clearattachment.rs | 9 + .../src/structs/core/clearcolorvalue.rs | 35 + .../structs/core/cleardepthstencilvalue.rs | 6 + vulkan-sys/src/structs/core/clearrect.rs | 9 + vulkan-sys/src/structs/core/clearvalue.rs | 28 + .../structs/core/commandbufferallocateinfo.rs | 30 + .../structs/core/commandbufferbegininfo.rs | 34 + .../core/commandbufferinheritanceinfo.rs | 46 + .../src/structs/core/commandpoolcreateinfo.rs | 27 + .../src/structs/core/componentmapping.rs | 21 + .../structs/core/computepipelinecreateinfo.rs | 42 + .../src/structs/core/copydescriptorset.rs | 17 + .../core/debugreportcallbackcreateinfoext.rs | 31 + .../core/debugutilmessengercallbackdataext.rs | 61 + .../src/structs/core/debugutilslabelext.rs | 12 + .../core/debugutilsmessengercreateinfoext.rs | 43 + .../core/debugutilsobjectnameinfoext.rs | 34 + .../src/structs/core/descriptorbufferinfo.rs | 9 + .../src/structs/core/descriptorimageinfo.rs | 9 + .../structs/core/descriptorpoolcreateinfo.rs | 35 + .../src/structs/core/descriptorpoolsize.rs | 8 + .../structs/core/descriptorsetallocateinfo.rs | 37 + .../core/descriptorsetlayoutbinding.rs | 54 + .../core/descriptorsetlayoutcreateinfo.rs | 34 + .../core/descriptorsetlayoutsupport.rs | 31 + .../src/structs/core/devicecreateinfo.rs | 84 + .../src/structs/core/devicequeuecreateinfo.rs | 31 + .../structs/core/dispatchindirectcommand.rs | 7 + .../structs/core/displayplanecapabilities.rs | 15 + .../structs/core/displayplaneproperties.rs | 8 + .../src/structs/core/displayproperties.rs | 21 + .../core/drawindexedindirectcommand.rs | 9 + .../src/structs/core/drawindirectcommand.rs | 8 + .../src/structs/core/eventcreateinfo.rs | 25 + .../src/structs/core/extensionproperties.rs | 25 + vulkan-sys/src/structs/core/extent2d.rs | 6 + vulkan-sys/src/structs/core/extent3d.rs | 7 + .../core/externalmemorybuffercreateinfo.rs | 25 + .../src/structs/core/fencecreateinfo.rs | 25 + .../src/structs/core/formatproperties.rs | 9 + .../src/structs/core/framebuffercreateinfo.rs | 44 + .../core/graphicspipelinecreateinfo.rs | 101 + .../core/image_memory_requirements_info_2.rs | 22 + 
vulkan-sys/src/structs/core/imageblit.rs | 10 + vulkan-sys/src/structs/core/imagecopy.rs | 11 + .../src/structs/core/imagecreateinfo.rs | 64 + .../src/structs/core/imageformatproperties.rs | 11 + .../src/structs/core/imagememorybarrier.rs | 49 + vulkan-sys/src/structs/core/imageresolve.rs | 11 + .../src/structs/core/imagesubresource.rs | 9 + .../structs/core/imagesubresourcelayers.rs | 10 + .../src/structs/core/imagesubresourcerange.rs | 11 + .../src/structs/core/imageviewcreateinfo.rs | 42 + .../src/structs/core/instancecreateinfo.rs | 93 + .../structs/core/iossurfacecreateinfomvk.rs | 27 + .../src/structs/core/layerproperties.rs | 22 + .../structs/core/macossurfacecreateinfomvk.rs | 28 + .../src/structs/core/mappedmemoryrange.rs | 26 + .../src/structs/core/memoryallocateinfo.rs | 24 + vulkan-sys/src/structs/core/memorybarrier.rs | 28 + vulkan-sys/src/structs/core/memoryheap.rs | 8 + .../src/structs/core/memoryrequirements.rs | 23 + vulkan-sys/src/structs/core/memorytype.rs | 8 + vulkan-sys/src/structs/core/mod.rs | 133 + .../structs/core/mvkdisplayconfiguration.rs | 13 + .../core/mvkphysicaldevicemetalfeatures.rs | 13 + .../structs/core/mvkswapchainperformance.rs | 9 + vulkan-sys/src/structs/core/offset2d.rs | 6 + vulkan-sys/src/structs/core/offset3d.rs | 7 + .../structs/core/physicaldevicefeatures.rs | 131 + .../src/structs/core/physicaldevicelimits.rs | 112 + .../physicaldevicemaintanence3properties.rs | 22 + .../core/physicaldevicememoryproperties.rs | 33 + .../structs/core/physicaldeviceproperties.rs | 52 + .../core/physicaldevicesparseproperties.rs | 11 + .../structs/core/pipelinecachecreateinfo.rs | 35 + .../core/pipelinecolorblendattachmentstate.rs | 32 + .../core/pipelinecolorblendstatecreateinfo.rs | 46 + .../pipelinedepthstencilstatecreateinfo.rs | 54 + .../core/pipelinedynamicstatecreateinfo.rs | 29 + .../pipelineinputassemblystatecreateinfo.rs | 33 + .../structs/core/pipelinelayoutcreateinfo.rs | 37 + .../pipelinemultisamplestatecreateinfo.rs | 55 + .../pipelinerasterizationstatecreateinfo.rs | 62 + .../core/pipelineshaderstagecreateinfo.rs | 153 + .../pipelinetesselationstatecreateinfo.rs | 27 + .../pipelinevertexinputstatecreateinfo.rs | 37 + .../core/pipelineviewportstatecreateinfo.rs | 37 + vulkan-sys/src/structs/core/prelude.rs | 137 + .../src/structs/core/pushconstantrange.rs | 22 + .../src/structs/core/querypoolcreateinfo.rs | 37 + .../src/structs/core/queuefamilyproperties.rs | 10 + vulkan-sys/src/structs/core/rect2d.rs | 8 + .../src/structs/core/renderpassbegininfo.rs | 35 + .../src/structs/core/renderpasscreateinfo.rs | 42 + .../src/structs/core/samplercreateinfo.rs | 100 + .../src/structs/core/semaphorecreateinfo.rs | 25 + .../structs/core/shadermodulecreateinfo.rs | 29 + .../core/sparsebuffermemorybindinfo.rs | 9 + .../core/sparseimageformatproperties.rs | 9 + .../src/structs/core/sparseimagememorybind.rs | 12 + .../structs/core/sparseimagememorybindinfo.rs | 9 + .../core/sparseimagememoryrequirements.rs | 11 + .../core/sparseimageopaquememorybindinfo.rs | 9 + .../src/structs/core/sparsememorybind.rs | 11 + .../src/structs/core/specializationinfo.rs | 43 + .../structs/core/specializationmapentry.rs | 7 + vulkan-sys/src/structs/core/stencilopstate.rs | 13 + vulkan-sys/src/structs/core/submitinfo.rs | 39 + .../src/structs/core/subpassdependency.rs | 42 + .../src/structs/core/subpassdescription.rs | 57 + .../src/structs/core/subresourcelayout.rs | 11 + .../core/vertexinputattributedescription.rs | 10 + .../core/vertexinputbindingdescription.rs | 9 + 
vulkan-sys/src/structs/core/viewport.rs | 10 + .../src/structs/core/writedescriptorset.rs | 64 + ...iptorsetlayoutbindingflagscreateinfoext.rs | 24 + ...ariabledescriptorcountallocationinfoext.rs | 29 + ...variabledescriptorcountlayoutsupportext.rs | 28 + vulkan-sys/src/structs/ext/mod.rs | 8 + ...icaldevicedescriptorindexingfeaturesext.rs | 65 + ...aldevicedescriptorindexingpropertiesext.rs | 69 + ...physicaldevicememorybudgetpropertiesext.rs | 44 + vulkan-sys/src/structs/ext/prelude.rs | 6 + .../khr/androidsurfacecreateinfokhr.rs | 30 + .../khr/descriptorupdatetemplateentrykhr.rs | 12 + .../structs/khr/displaymodecreateinfokhr.rs | 12 + .../structs/khr/displaymodeparameterkhr.rs | 8 + .../structs/khr/displaymodepropertieskhr.rs | 8 + .../src/structs/khr/displaypresentinfokhr.rs | 13 + .../khr/displaysurfacecreateinfokhr.rs | 18 + .../src/structs/khr/formatproperties2khr.rs | 26 + .../structs/khr/imageformatproperties2khr.rs | 26 + .../src/structs/khr/memoryrequirements2khr.rs | 13 + vulkan-sys/src/structs/khr/mod.rs | 32 + ...l_device_buffer_device_address_features.rs | 29 + .../structs/khr/physicaldevicefeatures2khr.rs | 52 + .../khr/physicaldeviceimageformatinfo2khr.rs | 40 + .../khr/physicaldevicememoryproperties2khr.rs | 35 + .../khr/physicaldeviceproperties2khr.rs | 45 + ...ysicaldevicepushdescriptorpropertieskhr.rs | 22 + ...physicaldevicesparseimageformatinfo2khr.rs | 40 + .../khr/pipeline_library_create_info.rs | 28 + vulkan-sys/src/structs/khr/prelude.rs | 30 + vulkan-sys/src/structs/khr/presentinfokhr.rs | 44 + .../structs/khr/queuefamilyproperties2khr.rs | 22 + .../structs/khr/ray_tracing/aabb_positions.rs | 36 + ...eleration_structure_build_geometry_info.rs | 85 + ...acceleration_structure_build_range_info.rs | 24 + ...acceleration_structure_build_sizes_info.rs | 30 + .../acceleration_structure_create_info.rs | 38 + ...eleration_structure_device_address_info.rs | 21 + .../acceleration_structure_geometry.rs | 32 + ...eleration_structure_geometry_aabbs_data.rs | 24 + .../acceleration_structure_geometry_data.rs | 30 + ...ation_structure_geometry_instances_data.rs | 47 + ...ation_structure_geometry_triangles_data.rs | 42 + .../acceleration_structure_instance.rs | 86 + .../acceleration_structure_version_info.rs | 26 + .../copy_acceleration_structure_info.rs | 29 + ...y_acceleration_structure_to_memory_info.rs | 29 + ...y_memory_to_acceleration_structure_info.rs | 29 + .../khr/ray_tracing/device_or_host_address.rs | 51 + .../device_or_host_address_const.rs | 59 + vulkan-sys/src/structs/khr/ray_tracing/mod.rs | 32 + ..._device_acceleration_structure_features.rs | 33 + ...evice_acceleration_structure_properties.rs | 39 + .../physical_device_ray_tracing_features.rs | 33 + .../physical_device_ray_tracing_properties.rs | 39 + .../src/structs/khr/ray_tracing/prelude.rs | 30 + .../ray_tracing_pipeline_create_info.rs | 57 + ..._tracing_pipeline_interface_create_info.rs | 26 + .../ray_tracing_shader_group_create_info.rs | 41 + .../khr/ray_tracing/strided_buffer_region.rs | 9 + .../strided_device_address_region.rs | 8 + .../trace_rays_indirect_command.rs | 6 + .../khr/ray_tracing/transform_matrix.rs | 13 + ...e_descriptor_set_acceleration_structure.rs | 44 + .../khr/sparseimageformatproperties2khr.rs | 22 + .../src/structs/khr/surfacecapabilitieskhr.rs | 16 + .../src/structs/khr/surfaceformatkhr.rs | 8 + .../src/structs/khr/swapchaincreateinfokhr.rs | 81 + .../khr/waylandsurfacecreateinfokhr.rs | 29 + .../structs/khr/win32surfacecreateinfokhr.rs | 29 + 
.../structs/khr/xcbsurfacecreateinfokhr.rs | 29 + .../structs/khr/xlibsurfacecreateinfokhr.rs | 29 + vulkan-sys/src/structs/macros.rs | 37 + vulkan-sys/src/structs/mod.rs | 23 + vulkan-sys/src/structs/prelude.rs | 7 + vulkan-sys/src/types/constants.rs | 14 + vulkan-sys/src/types/core.rs | 124 + vulkan-sys/src/types/ext.rs | 9 + vulkan-sys/src/types/khr.rs | 34 + vulkan-sys/src/types/macros.rs | 35 + vulkan-sys/src/types/mod.rs | 12 + vulkan-sys/src/types/nv.rs | 3 + vulkan-sys/src/types/prelude.rs | 7 + vulkan-sys/src/types/types.rs | 3 + vulkan-sys/src/types/voidfunction.rs | 4 + 470 files changed, 59921 insertions(+) create mode 100644 .gitignore create mode 100644 .vscode/settings.json create mode 100644 Cargo.toml create mode 100644 assetpath/.vscode/settings.json create mode 100644 assetpath/Cargo.toml create mode 100644 assetpath/src/lib.rs create mode 100644 library_loader/.vscode/settings.json create mode 100644 library_loader/Cargo.toml create mode 100644 library_loader/src/lib.rs create mode 100644 library_loader/src/macros.rs create mode 100644 vma-rs/Cargo.toml create mode 100644 vma-rs/build.rs create mode 100644 vma-rs/src/allocation.rs create mode 100644 vma-rs/src/allocator.rs create mode 100644 vma-rs/src/allocator_pool.rs create mode 100644 vma-rs/src/lib.rs create mode 100644 vma-rs/src/prelude.rs create mode 100644 vma-rs/src/vma_bindings.rs create mode 100644 vma-rs/vma_lib/vma_lib.cpp create mode 100644 vma-rs/vma_source/vk_mem_alloc.h create mode 100644 vma-rs/vulkan/vk_icd.h create mode 100644 vma-rs/vulkan/vk_layer.h create mode 100644 vma-rs/vulkan/vk_platform.h create mode 100644 vma-rs/vulkan/vk_sdk_platform.h create mode 100644 vma-rs/vulkan/vulkan.h create mode 100644 vma-rs/vulkan/vulkan_android.h create mode 100644 vma-rs/vulkan/vulkan_core.h create mode 100644 vma-rs/vulkan/vulkan_fuchsia.h create mode 100644 vma-rs/vulkan/vulkan_ggp.h create mode 100644 vma-rs/vulkan/vulkan_ios.h create mode 100644 vma-rs/vulkan/vulkan_macos.h create mode 100644 vma-rs/vulkan/vulkan_metal.h create mode 100644 vma-rs/vulkan/vulkan_vi.h create mode 100644 vma-rs/vulkan/vulkan_wayland.h create mode 100644 vma-rs/vulkan/vulkan_win32.h create mode 100644 vma-rs/vulkan/vulkan_xcb.h create mode 100644 vma-rs/vulkan/vulkan_xlib.h create mode 100644 vma-rs/vulkan/vulkan_xlib_xrandr.h create mode 100644 vulkan-rs/.vscode/settings.json create mode 100644 vulkan-rs/Cargo.toml create mode 100644 vulkan-rs/src/acceleration_structure.rs create mode 100644 vulkan-rs/src/address.rs create mode 100644 vulkan-rs/src/buffer.rs create mode 100644 vulkan-rs/src/commandbuffer.rs create mode 100644 vulkan-rs/src/commandpool.rs create mode 100644 vulkan-rs/src/deferred_operation.rs create mode 100644 vulkan-rs/src/descriptorpool.rs create mode 100644 vulkan-rs/src/descriptorset.rs create mode 100644 vulkan-rs/src/descriptorsetlayout.rs create mode 100644 vulkan-rs/src/device.rs create mode 100644 vulkan-rs/src/fence.rs create mode 100644 vulkan-rs/src/ffi.rs create mode 100644 vulkan-rs/src/framebuffer.rs create mode 100644 vulkan-rs/src/image.rs create mode 100644 vulkan-rs/src/instance.rs create mode 100644 vulkan-rs/src/lib.rs create mode 100644 vulkan-rs/src/macros.rs create mode 100644 vulkan-rs/src/memory.rs create mode 100644 vulkan-rs/src/physicaldevice.rs create mode 100644 vulkan-rs/src/pipeline.rs create mode 100644 vulkan-rs/src/pipelinecache.rs create mode 100644 vulkan-rs/src/pipelinelayout.rs create mode 100644 vulkan-rs/src/pipelines/compute_pipeline.rs create mode 100644 
vulkan-rs/src/pipelines/graphics_pipeline.rs create mode 100644 vulkan-rs/src/pipelines/mod.rs create mode 100644 vulkan-rs/src/pipelines/ray_tracing_pipeline.rs create mode 100644 vulkan-rs/src/pipelines/shader_binding_table.rs create mode 100644 vulkan-rs/src/prelude.rs create mode 100644 vulkan-rs/src/querypool.rs create mode 100644 vulkan-rs/src/queue.rs create mode 100644 vulkan-rs/src/render_target/mod.rs create mode 100644 vulkan-rs/src/render_target/sub_pass.rs create mode 100644 vulkan-rs/src/renderpass.rs create mode 100644 vulkan-rs/src/sampler_manager.rs create mode 100644 vulkan-rs/src/semaphore.rs create mode 100644 vulkan-rs/src/shadermodule.rs create mode 100644 vulkan-rs/src/surface.rs create mode 100644 vulkan-rs/src/swapchain.rs create mode 100644 vulkan-sys/Cargo.toml create mode 100644 vulkan-sys/src/custom/mappedmemory.rs create mode 100644 vulkan-sys/src/custom/mod.rs create mode 100644 vulkan-sys/src/custom/names.rs create mode 100644 vulkan-sys/src/custom/prelude.rs create mode 100644 vulkan-sys/src/custom/string.rs create mode 100644 vulkan-sys/src/enums/accessflags.rs create mode 100644 vulkan-sys/src/enums/amd/mod.rs create mode 100644 vulkan-sys/src/enums/amd/prelude.rs create mode 100644 vulkan-sys/src/enums/amd/rasterizationorderamd.rs create mode 100644 vulkan-sys/src/enums/androidsurfacecreateflagskhr.rs create mode 100644 vulkan-sys/src/enums/attachmentdescriptionflags.rs create mode 100644 vulkan-sys/src/enums/attachmentloadop.rs create mode 100644 vulkan-sys/src/enums/attachmentstoreop.rs create mode 100644 vulkan-sys/src/enums/blendfactor.rs create mode 100644 vulkan-sys/src/enums/blendop.rs create mode 100644 vulkan-sys/src/enums/bool32.rs create mode 100644 vulkan-sys/src/enums/bordercolor.rs create mode 100644 vulkan-sys/src/enums/buffercreateflags.rs create mode 100644 vulkan-sys/src/enums/bufferusageflags.rs create mode 100644 vulkan-sys/src/enums/bufferviewcreateflags.rs create mode 100644 vulkan-sys/src/enums/colorcomponentflags.rs create mode 100644 vulkan-sys/src/enums/colorspacekhr.rs create mode 100644 vulkan-sys/src/enums/commandbufferlevel.rs create mode 100644 vulkan-sys/src/enums/commandbufferresetflags.rs create mode 100644 vulkan-sys/src/enums/commandbufferusageflags.rs create mode 100644 vulkan-sys/src/enums/commandpoolcreateflags.rs create mode 100644 vulkan-sys/src/enums/commandpoolresetflags.rs create mode 100644 vulkan-sys/src/enums/commandpooltrimflags.rs create mode 100644 vulkan-sys/src/enums/compareop.rs create mode 100644 vulkan-sys/src/enums/componentswizzle.rs create mode 100644 vulkan-sys/src/enums/compositealphaflagskhr.rs create mode 100644 vulkan-sys/src/enums/cullmodeflags.rs create mode 100644 vulkan-sys/src/enums/debugreporterrorext.rs create mode 100644 vulkan-sys/src/enums/debugreportflagsext.rs create mode 100644 vulkan-sys/src/enums/debugreportobjecttypeext.rs create mode 100644 vulkan-sys/src/enums/debugutilsmessageseverityflagsext.rs create mode 100644 vulkan-sys/src/enums/debugutilsmessagetypeflagsext.rs create mode 100644 vulkan-sys/src/enums/debugutilsmessengercallbackdataflagsext.rs create mode 100644 vulkan-sys/src/enums/debugutilsmessengercreateflags.rs create mode 100644 vulkan-sys/src/enums/dependencyflags.rs create mode 100644 vulkan-sys/src/enums/descriptorpoolcreateflags.rs create mode 100644 vulkan-sys/src/enums/descriptorpoolresetflags.rs create mode 100644 vulkan-sys/src/enums/descriptorsetlayoutcreateflags.rs create mode 100644 vulkan-sys/src/enums/descriptortype.rs create mode 100644 
vulkan-sys/src/enums/devicecreateflags.rs create mode 100644 vulkan-sys/src/enums/devicequeuecreateflags.rs create mode 100644 vulkan-sys/src/enums/displaymodecreateflagskhr.rs create mode 100644 vulkan-sys/src/enums/displayplanealphaflagskhr.rs create mode 100644 vulkan-sys/src/enums/displaysurfacecreateflagskhr.rs create mode 100644 vulkan-sys/src/enums/dynamicstate.rs create mode 100644 vulkan-sys/src/enums/eventcreateflags.rs create mode 100644 vulkan-sys/src/enums/ext/descriptorbindingflagsext.rs create mode 100644 vulkan-sys/src/enums/ext/mod.rs create mode 100644 vulkan-sys/src/enums/ext/prelude.rs create mode 100644 vulkan-sys/src/enums/externalmemoryhandletypeflags.rs create mode 100644 vulkan-sys/src/enums/fencecreateflags.rs create mode 100644 vulkan-sys/src/enums/filter.rs create mode 100644 vulkan-sys/src/enums/format.rs create mode 100644 vulkan-sys/src/enums/formatfeatureflags.rs create mode 100644 vulkan-sys/src/enums/framebuffercreateflags.rs create mode 100644 vulkan-sys/src/enums/frontface.rs create mode 100644 vulkan-sys/src/enums/imageaspectflags.rs create mode 100644 vulkan-sys/src/enums/imagecreateflags.rs create mode 100644 vulkan-sys/src/enums/imagelayout.rs create mode 100644 vulkan-sys/src/enums/imagetiling.rs create mode 100644 vulkan-sys/src/enums/imagetype.rs create mode 100644 vulkan-sys/src/enums/imageusageflags.rs create mode 100644 vulkan-sys/src/enums/imageviewcreateflags.rs create mode 100644 vulkan-sys/src/enums/imageviewtype.rs create mode 100644 vulkan-sys/src/enums/indextype.rs create mode 100644 vulkan-sys/src/enums/instancecreateflags.rs create mode 100644 vulkan-sys/src/enums/internalallocationtype.rs create mode 100644 vulkan-sys/src/enums/iossurfacecreateflagsmvk.rs create mode 100644 vulkan-sys/src/enums/khr/acceleration_structure_build_type.rs create mode 100644 vulkan-sys/src/enums/khr/acceleration_structure_compatibility.rs create mode 100644 vulkan-sys/src/enums/khr/acceleration_structure_create_flags.rs create mode 100644 vulkan-sys/src/enums/khr/acceleration_structure_memory_requirements_type.rs create mode 100644 vulkan-sys/src/enums/khr/acceleration_structure_type.rs create mode 100644 vulkan-sys/src/enums/khr/build_acceleration_structure_flags.rs create mode 100644 vulkan-sys/src/enums/khr/build_acceleration_structure_mode.rs create mode 100644 vulkan-sys/src/enums/khr/copy_acceleration_structure_mode.rs create mode 100644 vulkan-sys/src/enums/khr/geometry_flags.rs create mode 100644 vulkan-sys/src/enums/khr/geometry_instance_flags.rs create mode 100644 vulkan-sys/src/enums/khr/geometry_type.rs create mode 100644 vulkan-sys/src/enums/khr/mod.rs create mode 100644 vulkan-sys/src/enums/khr/prelude.rs create mode 100644 vulkan-sys/src/enums/khr/ray_tracing_shader_group_type.rs create mode 100644 vulkan-sys/src/enums/khr/shader_group_shader.rs create mode 100644 vulkan-sys/src/enums/logicop.rs create mode 100644 vulkan-sys/src/enums/macossurfacecreateflagsmvk.rs create mode 100644 vulkan-sys/src/enums/macros.rs create mode 100644 vulkan-sys/src/enums/memoryheapflags.rs create mode 100644 vulkan-sys/src/enums/memorymapflags.rs create mode 100644 vulkan-sys/src/enums/memorypropertyflags.rs create mode 100644 vulkan-sys/src/enums/mirsurfacecreateflagskhr.rs create mode 100644 vulkan-sys/src/enums/mod.rs create mode 100644 vulkan-sys/src/enums/objecttype.rs create mode 100644 vulkan-sys/src/enums/physicaldevicetype.rs create mode 100644 vulkan-sys/src/enums/pipelinebindpoint.rs create mode 100644 
vulkan-sys/src/enums/pipelinecachecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelinecacheheaderversion.rs create mode 100644 vulkan-sys/src/enums/pipelinecolorblendstatecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelinecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelinedepthstencilstatecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelinedynamicstatecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelineinputassemblystatecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelinelayoutcreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelinemultisamplestatecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelinerasterizationstatecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelineshaderstagecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelinestageflags.rs create mode 100644 vulkan-sys/src/enums/pipelinetesselationstatecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelinevertexinputstatecreateflags.rs create mode 100644 vulkan-sys/src/enums/pipelineviewportstatecreateflags.rs create mode 100644 vulkan-sys/src/enums/polygonmode.rs create mode 100644 vulkan-sys/src/enums/prelude.rs create mode 100644 vulkan-sys/src/enums/presentmodekhr.rs create mode 100644 vulkan-sys/src/enums/primitivetopology.rs create mode 100644 vulkan-sys/src/enums/querycontrolflags.rs create mode 100644 vulkan-sys/src/enums/querypipelinestatisticsflags.rs create mode 100644 vulkan-sys/src/enums/querypoolcreateflags.rs create mode 100644 vulkan-sys/src/enums/queryresultflags.rs create mode 100644 vulkan-sys/src/enums/querytype.rs create mode 100644 vulkan-sys/src/enums/queueflags.rs create mode 100644 vulkan-sys/src/enums/renderpasscreateflags.rs create mode 100644 vulkan-sys/src/enums/result.rs create mode 100644 vulkan-sys/src/enums/samplecountflags.rs create mode 100644 vulkan-sys/src/enums/sampleraddressmode.rs create mode 100644 vulkan-sys/src/enums/samplercreateflags.rs create mode 100644 vulkan-sys/src/enums/samplermipmapmode.rs create mode 100644 vulkan-sys/src/enums/semaphorecreateflags.rs create mode 100644 vulkan-sys/src/enums/shadermodulecreateflags.rs create mode 100644 vulkan-sys/src/enums/shaderstageflags.rs create mode 100644 vulkan-sys/src/enums/sharingmode.rs create mode 100644 vulkan-sys/src/enums/sparseimageformatflags.rs create mode 100644 vulkan-sys/src/enums/sparsememorybindflags.rs create mode 100644 vulkan-sys/src/enums/stencilfaceflags.rs create mode 100644 vulkan-sys/src/enums/stencilop.rs create mode 100644 vulkan-sys/src/enums/structuretype.rs create mode 100644 vulkan-sys/src/enums/subpasscontents.rs create mode 100644 vulkan-sys/src/enums/subpassdescriptionflags.rs create mode 100644 vulkan-sys/src/enums/surfacetransformflagskhr.rs create mode 100644 vulkan-sys/src/enums/swapchaincreateflagskhr.rs create mode 100644 vulkan-sys/src/enums/systemallocationscope.rs create mode 100644 vulkan-sys/src/enums/vertexinputrate.rs create mode 100644 vulkan-sys/src/enums/waylandsurfacecreateflagskhr.rs create mode 100644 vulkan-sys/src/enums/win32surfacecreateflagskhr.rs create mode 100644 vulkan-sys/src/enums/xcbsurfacecreateflagskhr.rs create mode 100644 vulkan-sys/src/enums/xlibsurfacecreateflagskhr.rs create mode 100644 vulkan-sys/src/functions/core/device.rs create mode 100644 vulkan-sys/src/functions/core/entry.rs create mode 100644 vulkan-sys/src/functions/core/instance.rs create mode 100644 vulkan-sys/src/functions/core/maintenance3.rs create mode 100644 vulkan-sys/src/functions/core/mod.rs 
create mode 100644 vulkan-sys/src/functions/core/prelude.rs create mode 100644 vulkan-sys/src/functions/core/statics.rs create mode 100644 vulkan-sys/src/functions/ext/debug_report_callback.rs create mode 100644 vulkan-sys/src/functions/ext/debug_utils_messenger.rs create mode 100644 vulkan-sys/src/functions/ext/mod.rs create mode 100644 vulkan-sys/src/functions/ext/prelude.rs create mode 100644 vulkan-sys/src/functions/khr/acceleration_structure.rs create mode 100644 vulkan-sys/src/functions/khr/deferred_operations.rs create mode 100644 vulkan-sys/src/functions/khr/device_wsi.rs create mode 100644 vulkan-sys/src/functions/khr/instance_wsi.rs create mode 100644 vulkan-sys/src/functions/khr/mod.rs create mode 100644 vulkan-sys/src/functions/khr/physical_device_properties2.rs create mode 100644 vulkan-sys/src/functions/khr/prelude.rs create mode 100644 vulkan-sys/src/functions/khr/ray_tracing_pipeline.rs create mode 100644 vulkan-sys/src/functions/mod.rs create mode 100644 vulkan-sys/src/functions/prelude.rs create mode 100644 vulkan-sys/src/lib.rs create mode 100644 vulkan-sys/src/prelude.rs create mode 100644 vulkan-sys/src/structs/amd/mod.rs create mode 100644 vulkan-sys/src/structs/amd/pipelinerasterizationstaterasterizationorderamd.rs create mode 100644 vulkan-sys/src/structs/amd/prelude.rs create mode 100644 vulkan-sys/src/structs/core/allocationcallback.rs create mode 100644 vulkan-sys/src/structs/core/applicationinfo.rs create mode 100644 vulkan-sys/src/structs/core/attachmentdescription.rs create mode 100644 vulkan-sys/src/structs/core/attachmentreference.rs create mode 100644 vulkan-sys/src/structs/core/base_in_structure.rs create mode 100644 vulkan-sys/src/structs/core/base_out_structure.rs create mode 100644 vulkan-sys/src/structs/core/bind_buffer_memory_info.rs create mode 100644 vulkan-sys/src/structs/core/bind_image_memory_info.rs create mode 100644 vulkan-sys/src/structs/core/bindsparseinfo.rs create mode 100644 vulkan-sys/src/structs/core/buffer_device_address_info.rs create mode 100644 vulkan-sys/src/structs/core/buffer_memory_requirements_info_2.rs create mode 100644 vulkan-sys/src/structs/core/buffercopy.rs create mode 100644 vulkan-sys/src/structs/core/buffercreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/bufferdeviceaddresscreateinfoext.rs create mode 100644 vulkan-sys/src/structs/core/bufferimagecopy.rs create mode 100644 vulkan-sys/src/structs/core/buffermemorybarrier.rs create mode 100644 vulkan-sys/src/structs/core/bufferviewcreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/clearattachment.rs create mode 100644 vulkan-sys/src/structs/core/clearcolorvalue.rs create mode 100644 vulkan-sys/src/structs/core/cleardepthstencilvalue.rs create mode 100644 vulkan-sys/src/structs/core/clearrect.rs create mode 100644 vulkan-sys/src/structs/core/clearvalue.rs create mode 100644 vulkan-sys/src/structs/core/commandbufferallocateinfo.rs create mode 100644 vulkan-sys/src/structs/core/commandbufferbegininfo.rs create mode 100644 vulkan-sys/src/structs/core/commandbufferinheritanceinfo.rs create mode 100644 vulkan-sys/src/structs/core/commandpoolcreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/componentmapping.rs create mode 100644 vulkan-sys/src/structs/core/computepipelinecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/copydescriptorset.rs create mode 100644 vulkan-sys/src/structs/core/debugreportcallbackcreateinfoext.rs create mode 100644 vulkan-sys/src/structs/core/debugutilmessengercallbackdataext.rs create mode 100644 
vulkan-sys/src/structs/core/debugutilslabelext.rs create mode 100644 vulkan-sys/src/structs/core/debugutilsmessengercreateinfoext.rs create mode 100644 vulkan-sys/src/structs/core/debugutilsobjectnameinfoext.rs create mode 100644 vulkan-sys/src/structs/core/descriptorbufferinfo.rs create mode 100644 vulkan-sys/src/structs/core/descriptorimageinfo.rs create mode 100644 vulkan-sys/src/structs/core/descriptorpoolcreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/descriptorpoolsize.rs create mode 100644 vulkan-sys/src/structs/core/descriptorsetallocateinfo.rs create mode 100644 vulkan-sys/src/structs/core/descriptorsetlayoutbinding.rs create mode 100644 vulkan-sys/src/structs/core/descriptorsetlayoutcreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/descriptorsetlayoutsupport.rs create mode 100644 vulkan-sys/src/structs/core/devicecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/devicequeuecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/dispatchindirectcommand.rs create mode 100644 vulkan-sys/src/structs/core/displayplanecapabilities.rs create mode 100644 vulkan-sys/src/structs/core/displayplaneproperties.rs create mode 100644 vulkan-sys/src/structs/core/displayproperties.rs create mode 100644 vulkan-sys/src/structs/core/drawindexedindirectcommand.rs create mode 100644 vulkan-sys/src/structs/core/drawindirectcommand.rs create mode 100644 vulkan-sys/src/structs/core/eventcreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/extensionproperties.rs create mode 100644 vulkan-sys/src/structs/core/extent2d.rs create mode 100644 vulkan-sys/src/structs/core/extent3d.rs create mode 100644 vulkan-sys/src/structs/core/externalmemorybuffercreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/fencecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/formatproperties.rs create mode 100644 vulkan-sys/src/structs/core/framebuffercreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/graphicspipelinecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/image_memory_requirements_info_2.rs create mode 100644 vulkan-sys/src/structs/core/imageblit.rs create mode 100644 vulkan-sys/src/structs/core/imagecopy.rs create mode 100644 vulkan-sys/src/structs/core/imagecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/imageformatproperties.rs create mode 100644 vulkan-sys/src/structs/core/imagememorybarrier.rs create mode 100644 vulkan-sys/src/structs/core/imageresolve.rs create mode 100644 vulkan-sys/src/structs/core/imagesubresource.rs create mode 100644 vulkan-sys/src/structs/core/imagesubresourcelayers.rs create mode 100644 vulkan-sys/src/structs/core/imagesubresourcerange.rs create mode 100644 vulkan-sys/src/structs/core/imageviewcreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/instancecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/iossurfacecreateinfomvk.rs create mode 100644 vulkan-sys/src/structs/core/layerproperties.rs create mode 100644 vulkan-sys/src/structs/core/macossurfacecreateinfomvk.rs create mode 100644 vulkan-sys/src/structs/core/mappedmemoryrange.rs create mode 100644 vulkan-sys/src/structs/core/memoryallocateinfo.rs create mode 100644 vulkan-sys/src/structs/core/memorybarrier.rs create mode 100644 vulkan-sys/src/structs/core/memoryheap.rs create mode 100644 vulkan-sys/src/structs/core/memoryrequirements.rs create mode 100644 vulkan-sys/src/structs/core/memorytype.rs create mode 100644 vulkan-sys/src/structs/core/mod.rs create mode 100644 
vulkan-sys/src/structs/core/mvkdisplayconfiguration.rs create mode 100644 vulkan-sys/src/structs/core/mvkphysicaldevicemetalfeatures.rs create mode 100644 vulkan-sys/src/structs/core/mvkswapchainperformance.rs create mode 100644 vulkan-sys/src/structs/core/offset2d.rs create mode 100644 vulkan-sys/src/structs/core/offset3d.rs create mode 100644 vulkan-sys/src/structs/core/physicaldevicefeatures.rs create mode 100644 vulkan-sys/src/structs/core/physicaldevicelimits.rs create mode 100644 vulkan-sys/src/structs/core/physicaldevicemaintanence3properties.rs create mode 100644 vulkan-sys/src/structs/core/physicaldevicememoryproperties.rs create mode 100644 vulkan-sys/src/structs/core/physicaldeviceproperties.rs create mode 100644 vulkan-sys/src/structs/core/physicaldevicesparseproperties.rs create mode 100644 vulkan-sys/src/structs/core/pipelinecachecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelinecolorblendattachmentstate.rs create mode 100644 vulkan-sys/src/structs/core/pipelinecolorblendstatecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelinedepthstencilstatecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelinedynamicstatecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelineinputassemblystatecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelinelayoutcreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelinemultisamplestatecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelinerasterizationstatecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelineshaderstagecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelinetesselationstatecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelinevertexinputstatecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/pipelineviewportstatecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/prelude.rs create mode 100644 vulkan-sys/src/structs/core/pushconstantrange.rs create mode 100644 vulkan-sys/src/structs/core/querypoolcreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/queuefamilyproperties.rs create mode 100644 vulkan-sys/src/structs/core/rect2d.rs create mode 100644 vulkan-sys/src/structs/core/renderpassbegininfo.rs create mode 100644 vulkan-sys/src/structs/core/renderpasscreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/samplercreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/semaphorecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/shadermodulecreateinfo.rs create mode 100644 vulkan-sys/src/structs/core/sparsebuffermemorybindinfo.rs create mode 100644 vulkan-sys/src/structs/core/sparseimageformatproperties.rs create mode 100644 vulkan-sys/src/structs/core/sparseimagememorybind.rs create mode 100644 vulkan-sys/src/structs/core/sparseimagememorybindinfo.rs create mode 100644 vulkan-sys/src/structs/core/sparseimagememoryrequirements.rs create mode 100644 vulkan-sys/src/structs/core/sparseimageopaquememorybindinfo.rs create mode 100644 vulkan-sys/src/structs/core/sparsememorybind.rs create mode 100644 vulkan-sys/src/structs/core/specializationinfo.rs create mode 100644 vulkan-sys/src/structs/core/specializationmapentry.rs create mode 100644 vulkan-sys/src/structs/core/stencilopstate.rs create mode 100644 vulkan-sys/src/structs/core/submitinfo.rs create mode 100644 vulkan-sys/src/structs/core/subpassdependency.rs create mode 100644 vulkan-sys/src/structs/core/subpassdescription.rs create mode 100644 vulkan-sys/src/structs/core/subresourcelayout.rs 
create mode 100644 vulkan-sys/src/structs/core/vertexinputattributedescription.rs create mode 100644 vulkan-sys/src/structs/core/vertexinputbindingdescription.rs create mode 100644 vulkan-sys/src/structs/core/viewport.rs create mode 100644 vulkan-sys/src/structs/core/writedescriptorset.rs create mode 100644 vulkan-sys/src/structs/ext/descriptorsetlayoutbindingflagscreateinfoext.rs create mode 100644 vulkan-sys/src/structs/ext/descriptorsetvariabledescriptorcountallocationinfoext.rs create mode 100644 vulkan-sys/src/structs/ext/descriptorsetvariabledescriptorcountlayoutsupportext.rs create mode 100644 vulkan-sys/src/structs/ext/mod.rs create mode 100644 vulkan-sys/src/structs/ext/physicaldevicedescriptorindexingfeaturesext.rs create mode 100644 vulkan-sys/src/structs/ext/physicaldevicedescriptorindexingpropertiesext.rs create mode 100644 vulkan-sys/src/structs/ext/physicaldevicememorybudgetpropertiesext.rs create mode 100644 vulkan-sys/src/structs/ext/prelude.rs create mode 100644 vulkan-sys/src/structs/khr/androidsurfacecreateinfokhr.rs create mode 100644 vulkan-sys/src/structs/khr/descriptorupdatetemplateentrykhr.rs create mode 100644 vulkan-sys/src/structs/khr/displaymodecreateinfokhr.rs create mode 100644 vulkan-sys/src/structs/khr/displaymodeparameterkhr.rs create mode 100644 vulkan-sys/src/structs/khr/displaymodepropertieskhr.rs create mode 100644 vulkan-sys/src/structs/khr/displaypresentinfokhr.rs create mode 100644 vulkan-sys/src/structs/khr/displaysurfacecreateinfokhr.rs create mode 100644 vulkan-sys/src/structs/khr/formatproperties2khr.rs create mode 100644 vulkan-sys/src/structs/khr/imageformatproperties2khr.rs create mode 100644 vulkan-sys/src/structs/khr/memoryrequirements2khr.rs create mode 100644 vulkan-sys/src/structs/khr/mod.rs create mode 100644 vulkan-sys/src/structs/khr/physical_device_buffer_device_address_features.rs create mode 100644 vulkan-sys/src/structs/khr/physicaldevicefeatures2khr.rs create mode 100644 vulkan-sys/src/structs/khr/physicaldeviceimageformatinfo2khr.rs create mode 100644 vulkan-sys/src/structs/khr/physicaldevicememoryproperties2khr.rs create mode 100644 vulkan-sys/src/structs/khr/physicaldeviceproperties2khr.rs create mode 100644 vulkan-sys/src/structs/khr/physicaldevicepushdescriptorpropertieskhr.rs create mode 100644 vulkan-sys/src/structs/khr/physicaldevicesparseimageformatinfo2khr.rs create mode 100644 vulkan-sys/src/structs/khr/pipeline_library_create_info.rs create mode 100644 vulkan-sys/src/structs/khr/prelude.rs create mode 100644 vulkan-sys/src/structs/khr/presentinfokhr.rs create mode 100644 vulkan-sys/src/structs/khr/queuefamilyproperties2khr.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/aabb_positions.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_geometry_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_range_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_sizes_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_create_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_device_address_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_aabbs_data.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_data.rs create mode 100644 
vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_instances_data.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_triangles_data.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_instance.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_version_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/copy_acceleration_structure_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/copy_acceleration_structure_to_memory_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/copy_memory_to_acceleration_structure_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/device_or_host_address.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/device_or_host_address_const.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/mod.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/physical_device_acceleration_structure_features.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/physical_device_acceleration_structure_properties.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/physical_device_ray_tracing_features.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/physical_device_ray_tracing_properties.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/prelude.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_pipeline_create_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_pipeline_interface_create_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_shader_group_create_info.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/strided_buffer_region.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/strided_device_address_region.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/trace_rays_indirect_command.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/transform_matrix.rs create mode 100644 vulkan-sys/src/structs/khr/ray_tracing/write_descriptor_set_acceleration_structure.rs create mode 100644 vulkan-sys/src/structs/khr/sparseimageformatproperties2khr.rs create mode 100644 vulkan-sys/src/structs/khr/surfacecapabilitieskhr.rs create mode 100644 vulkan-sys/src/structs/khr/surfaceformatkhr.rs create mode 100644 vulkan-sys/src/structs/khr/swapchaincreateinfokhr.rs create mode 100644 vulkan-sys/src/structs/khr/waylandsurfacecreateinfokhr.rs create mode 100644 vulkan-sys/src/structs/khr/win32surfacecreateinfokhr.rs create mode 100644 vulkan-sys/src/structs/khr/xcbsurfacecreateinfokhr.rs create mode 100644 vulkan-sys/src/structs/khr/xlibsurfacecreateinfokhr.rs create mode 100644 vulkan-sys/src/structs/macros.rs create mode 100644 vulkan-sys/src/structs/mod.rs create mode 100644 vulkan-sys/src/structs/prelude.rs create mode 100644 vulkan-sys/src/types/constants.rs create mode 100644 vulkan-sys/src/types/core.rs create mode 100644 vulkan-sys/src/types/ext.rs create mode 100644 vulkan-sys/src/types/khr.rs create mode 100644 vulkan-sys/src/types/macros.rs create mode 100644 vulkan-sys/src/types/mod.rs create mode 100644 vulkan-sys/src/types/nv.rs create mode 100644 vulkan-sys/src/types/prelude.rs create mode 100644 vulkan-sys/src/types/types.rs create mode 100644 vulkan-sys/src/types/voidfunction.rs

diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b354aec
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+Cargo.lock
+target/
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..7c16c0c
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,7 @@
+{
+    "workbench.colorCustomizations": {
+        "activityBar.background": "#521B23",
+        "titleBar.activeBackground": "#722631",
+        "titleBar.activeForeground": "#FEFBFB"
+    }
+}
\ No newline at end of file
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..79ca8f1
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,8 @@
+[workspace]
+members = [
+    "assetpath",
+    "vma-rs",
+    "vulkan-rs",
+    "vulkan-sys",
+    "library_loader",
+]
\ No newline at end of file
diff --git a/assetpath/.vscode/settings.json b/assetpath/.vscode/settings.json
new file mode 100644
index 0000000..db866f9
--- /dev/null
+++ b/assetpath/.vscode/settings.json
@@ -0,0 +1,7 @@
+{
+    "workbench.colorCustomizations": {
+        "activityBar.background": "#1B2F41",
+        "titleBar.activeBackground": "#26425B",
+        "titleBar.activeForeground": "#F7FAFC"
+    }
+}
\ No newline at end of file
diff --git a/assetpath/Cargo.toml b/assetpath/Cargo.toml
new file mode 100644
index 0000000..34c896a
--- /dev/null
+++ b/assetpath/Cargo.toml
@@ -0,0 +1,10 @@
+[package]
+name = "assetpath"
+version = "0.1.0"
+authors = ["hodasemi "]
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+serde = { version = "1.0.152", features = ["derive"] }
diff --git a/assetpath/src/lib.rs b/assetpath/src/lib.rs
new file mode 100644
index 0000000..9d04350
--- /dev/null
+++ b/assetpath/src/lib.rs
@@ -0,0 +1,133 @@
+use serde::{Deserialize, Serialize};
+use std::{fmt::Display, path::Path, str::FromStr};
+
+#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)]
+pub struct AssetPath {
+    #[serde(skip)]
+    prefix: Option<String>,
+
+    path: String,
+}
+
+impl AssetPath {
+    fn check_prefix(prefix: &str) -> String {
+        if prefix.ends_with('/') {
+            prefix.to_string()
+        } else {
+            format!("{}/", prefix)
+        }
+    }
+
+    pub fn assume_prefix_free(&mut self) {
+        assert!(self.prefix.is_none(), "Prefix already set!");
+
+        self.prefix = Some(String::new());
+    }
+
+    pub fn set_prefix(&mut self, prefix: &str) {
+        assert!(self.prefix.is_none(), "Prefix already set!");
+
+        self.prefix = Some(Self::check_prefix(prefix));
+    }
+
+    pub fn has_prefix(&self) -> bool {
+        match &self.prefix {
+            Some(prefix) => !prefix.is_empty(),
+            None => false,
+        }
+    }
+
+    pub fn prefix(&self) -> Option<&str> {
+        self.prefix.as_ref().map(|s| s.as_str())
+    }
+
+    pub fn set_path(&mut self, path: impl ToString) {
+        self.path = path.to_string();
+    }
+
+    pub fn full_path(&self) -> String {
+        assert!(self.prefix.is_some(), "Prefix must be set!");
+
+        format!("{}{}", self.prefix.clone().unwrap(), self.path)
+    }
+
+    pub fn path_without_prefix(&self) -> &str {
+        &self.path
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.path.is_empty()
+    }
+
+    pub fn exists(&self) -> bool {
+        let s = self.full_path();
+        let path = Path::new(&s);
+
+        path.exists()
+    }
+}
+
+impl Display for AssetPath {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{}", self.path)
+    }
+}
+
+impl FromStr for AssetPath {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(Self::from(s))
+    }
+}
+
+impl From<(&str, &str)> for AssetPath {
+    fn from((prefix, path): (&str, &str)) -> Self {
+        Self {
+            prefix: Some(Self::check_prefix(prefix)),
+            path: path.to_string(),
+        }
+    }
+}
+
+impl From<(String, &str)> for AssetPath {
+    fn from((prefix, path): (String, &str)) -> Self {
+        Self {
+            prefix: Some(Self::check_prefix(&prefix)),
+            path: path.to_string(),
+        }
+    }
+}
+
+impl From<(&str, String)> for AssetPath {
+    fn from((prefix, path): (&str, String)) -> Self {
+        Self {
+            prefix: Some(Self::check_prefix(prefix)),
+            path,
+        }
+    }
+}
+
+impl From<(String, String)> for AssetPath {
+    fn from((prefix, path): (String, String)) -> Self {
+        Self {
+            prefix: Some(Self::check_prefix(&prefix)),
+            path,
+        }
+    }
+}
+
+impl From<&str> for AssetPath {
+    fn from(path: &str) -> Self {
+        Self {
+            prefix: None,
+            path: path.to_string(),
+        }
+    }
+}
+
+impl From<String> for AssetPath {
+    fn from(path: String) -> Self {
+        Self { prefix: None, path }
+    }
+}
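A minimal sketch of how AssetPath is meant to be used, based only on the API above (the asset root and file names are made up; note that Display prints just the relative path, while full_path requires a prefix to be set):

use assetpath::AssetPath;

fn main() {
    // Deserialized values carry only the relative path (`prefix` is
    // #[serde(skip)]), so the asset root is attached once at runtime.
    let mut path = AssetPath::from("textures/stone.png");
    path.set_prefix("/opt/game/assets");

    assert!(path.has_prefix());
    // check_prefix guarantees a single '/' between prefix and path.
    assert_eq!(path.full_path(), "/opt/game/assets/textures/stone.png");

    // Prefix and path can also be supplied together via the tuple impls.
    let same = AssetPath::from(("/opt/game/assets/", "textures/stone.png"));
    assert_eq!(same.full_path(), path.full_path());
}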
diff --git a/library_loader/.vscode/settings.json b/library_loader/.vscode/settings.json
new file mode 100644
index 0000000..04bc8ea
--- /dev/null
+++ b/library_loader/.vscode/settings.json
@@ -0,0 +1,7 @@
+{
+    "workbench.colorCustomizations": {
+        "activityBar.background": "#382B16",
+        "titleBar.activeBackground": "#4F3D1F",
+        "titleBar.activeForeground": "#FDFBF9"
+    }
+}
\ No newline at end of file
diff --git a/library_loader/Cargo.toml b/library_loader/Cargo.toml
new file mode 100644
index 0000000..bab8f06
--- /dev/null
+++ b/library_loader/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "library_loader"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
diff --git a/library_loader/src/lib.rs b/library_loader/src/lib.rs
new file mode 100644
index 0000000..eda363d
--- /dev/null
+++ b/library_loader/src/lib.rs
@@ -0,0 +1 @@
+pub mod macros;
diff --git a/library_loader/src/macros.rs b/library_loader/src/macros.rs
new file mode 100644
index 0000000..f753e7a
--- /dev/null
+++ b/library_loader/src/macros.rs
@@ -0,0 +1,115 @@
+#[macro_export]
+macro_rules! load_function_ptrs {
+    ($struct_name: ident, { $($name: ident($($param_n: ident: $param_ty: ty),*) -> $ret: ty,)+ }) => (
+        paste::item! {
+            $(
+                #[allow(non_camel_case_types)]
+                pub type [<PFN_ $name>] = extern "system" fn($($param_ty),*) -> $ret;
+            )+
+
+            pub struct $struct_name {
+                $(
+                    pub $name: [<PFN_ $name>],
+                )+
+            }
+
+            impl $struct_name {
+                pub fn load<F>(mut f: F) -> $struct_name
+                    where F: FnMut(&std::ffi::CStr) -> *const std::os::raw::c_void
+                {
+                    $struct_name {
+                        $(
+                            $name: unsafe {
+                                let dummy: *const std::ffi::c_void = std::ptr::null();
+
+                                let name = std::ffi::CStr::from_bytes_with_nul_unchecked(concat!(stringify!($name), "\0").as_bytes());
+                                let val = f(name);
+
+                                if val.is_null() {
+                                    println!("failed loading {}", stringify!($name));
+                                    std::mem::transmute(dummy)
+                                } else {
+                                    std::mem::transmute(val)
+                                }
+                            },
+                        )+
+                    }
+                }
+
+                $(
+                    #[inline]
+                    pub unsafe fn $name(&self $(, $param_n: $param_ty)*) -> $ret {
+                        let ptr = self.$name;
+                        ptr($($param_n),*)
+                    }
+                )+
+            }
+        }
+    )
+}
+
+#[macro_export]
+macro_rules! load_function_ptrs_from_lib {
+    ($struct_name: ident, $library: expr, { $($name: ident($($param_n: ident: $param_ty: ty),*) -> $ret: ty,)+ }) => (
+        paste::item! {
+            $(
+                #[allow(non_camel_case_types)]
+                pub type [<PFN_ $name>] = extern "system" fn($($param_ty),*) -> $ret;
+            )+
+
+            pub struct $struct_name {
+                pub _lib: Option<shared_library::dynamic_library::DynamicLibrary>,
+
+                $(
+                    pub $name: [<PFN_ $name>],
+                )+
+            }
+
+            impl $struct_name {
+                pub fn load() -> anyhow::Result<$struct_name> {
+                    let lib = match shared_library::dynamic_library::DynamicLibrary::open(Some(std::path::Path::new($library))) {
+                        Ok(lib) => lib,
+                        Err(err) => {
+                            return Err(anyhow::Error::msg(format!(
+                                "Failed loading library ({}): {}",
+                                $library,
+                                err
+                            )))
+                        }
+                    };
+
+                    Ok($struct_name {
+                        $(
+                            $name: unsafe {
+                                extern "system" fn $name($(_: $param_ty),*) { panic!("function pointer `{}` not loaded", stringify!($name)) }
+
+                                let name = std::ffi::CStr::from_bytes_with_nul_unchecked(concat!(stringify!($name), "\0").as_bytes());
+
+                                let val: *const std::os::raw::c_void = {
+                                    let str_name = name.to_str().expect("can't convert CStr");
+
+                                    lib.symbol(str_name)
+                                        .unwrap_or_else(|_| panic!("failed getting {}", str_name))
+                                };
+
+                                if val.is_null() {
+                                    println!("failed loading {}", stringify!($name));
+                                    std::mem::transmute($name as *const ())
+                                } else {
+                                    std::mem::transmute(val)
+                                }
+                            },
+                        )+
+
+                        _lib: Some(lib),
+                    })
+                }
+
+                $(
+                    #[inline]
+                    pub unsafe fn $name(&self $(, $param_n: $param_ty)*) -> $ret {
+                        let ptr = self.$name;
+                        ptr($($param_n),*)
+                    }
+                )+
+            }
+        }
+    )
+}
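For orientation, a hypothetical use of both macros. The struct names, entry points, and the resolver are illustrative only; callers also need the paste crate, which the generated code expands through, plus shared_library and anyhow for the second macro:

use library_loader::{load_function_ptrs, load_function_ptrs_from_lib};
use vulkan_sys::prelude::*;

// Generates `PFN_*` aliases, a struct with one field per entry point,
// and a typed wrapper method per listed function.
load_function_ptrs!(DeviceFunctions, {
    vkDeviceWaitIdle(device: VkDevice) -> VkResult,
});

// Same shape, but `load()` also opens the shared library itself.
load_function_ptrs_from_lib!(StaticFunctions, "libvulkan.so.1", {
    vkEnumerateInstanceVersion(version: *mut u32) -> VkResult,
});

// Stand-in resolver (a real one would wrap e.g. vkGetDeviceProcAddr);
// returning null makes the macro substitute its dummy pointer.
fn resolve(_name: &std::ffi::CStr) -> *const std::os::raw::c_void {
    std::ptr::null()
}

fn example() -> anyhow::Result<()> {
    // Opens libvulkan.so.1 and resolves every listed symbol.
    let statics = StaticFunctions::load()?;

    let mut version = 0;
    unsafe { statics.vkEnumerateInstanceVersion(&mut version) };

    // The first macro instead accepts any name-to-pointer resolver.
    let _device_fns = DeviceFunctions::load(resolve);

    Ok(())
}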
{ + $( + #[allow(non_camel_case_types)] + pub type [<PFN_ $name>] = extern "system" fn($($param_ty),*) -> $ret; + )+ + + pub struct $struct_name { + pub _lib: Option<shared_library::dynamic_library::DynamicLibrary>, + + $( + pub $name: [<PFN_ $name>], + )+ + } + + impl $struct_name { + pub fn load() -> anyhow::Result<$struct_name> { + let lib = match shared_library::dynamic_library::DynamicLibrary::open(Some(std::path::Path::new($library))) { + Ok(lib) => lib, + Err(err) => { + return Err(anyhow::Error::msg(format!( + "Failed loading library ({}): {}", + $library, + err + ))) + } + }; + + Ok($struct_name { + $( + $name: unsafe { + // fallback stub so a missing symbol panics with a clear message when called + extern "system" fn $name($(_: $param_ty),*) { panic!("function pointer `{}` not loaded", stringify!($name)) } + let name = std::ffi::CStr::from_bytes_with_nul_unchecked(concat!(stringify!($name), "\0").as_bytes()); + let val: *const std::os::raw::c_void = { + let str_name = name.to_str().expect("can't convert CStr"); + lib.symbol(str_name) + .unwrap_or_else(|_| panic!("failed getting {}", str_name)) + }; + + if val.is_null() { + println!("failed loading {}", stringify!($name)); + std::mem::transmute($name as *const ()) + } else { + std::mem::transmute(val) + } + }, + )+ + + _lib: Some(lib), + }) + } + + $( + #[inline] + pub unsafe fn $name(&self $(, $param_n: $param_ty)*) -> $ret { + let ptr = self.$name; + ptr($($param_n),*) + } + )+ + } + } + ) +} diff --git a/vma-rs/Cargo.toml b/vma-rs/Cargo.toml new file mode 100644 index 0000000..2bc5e46 --- /dev/null +++ b/vma-rs/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "vma-rs" +version = "0.1.0" +authors = ["hodasemi "] +edition = "2021" +build = "build.rs" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +vulkan-sys = { path = "../vulkan-sys" } +anyhow = { version = "1.0.68", features = ["backtrace"] } + +[build-dependencies] +cc = "1.0.78" \ No newline at end of file diff --git a/vma-rs/build.rs b/vma-rs/build.rs new file mode 100644 index 0000000..95b5a6d --- /dev/null +++ b/vma-rs/build.rs @@ -0,0 +1,76 @@ +use cc; + +use std::env; + +fn main() { + let mut build = cc::Build::new(); + + build.include("vma_source"); + + // We want to use our own loader, instead of requiring us to link + // in vulkan.dll/.dylib in addition. This is especially important + // for MoltenVK, where there is no default installation path, unlike + // Linux (pkg-config) and Windows (VULKAN_SDK environment variable). 
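+ // + // Note: together with VMA_DYNAMIC_VULKAN_FUNCTIONS=0 below, this means the + // compiled VMA neither links against nor loads Vulkan itself; the application + // is expected to hand a fully populated VmaVulkanFunctions table to + // vmaCreateAllocator (wired up via AllocatorBuilder::set_vulkan_functions + // in src/allocator.rs). 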
+ build.define("VMA_STATIC_VULKAN_FUNCTIONS", "0"); + + build.define("VMA_VULKAN_VERSION", "1002000"); + build.define("VMA_DYNAMIC_VULKAN_FUNCTIONS", "0"); + + // TODO: Add some configuration options under crate features + //#define VMA_HEAVY_ASSERT(expr) assert(expr) + //#define VMA_USE_STL_CONTAINERS 1 + //#define VMA_DEDICATED_ALLOCATION 0 + //#define VMA_DEBUG_MARGIN 16 + //#define VMA_DEBUG_DETECT_CORRUPTION 1 + //#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1 + //#define VMA_RECORDING_ENABLED 0 + //#define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY 256 + + let source_files = ["vma_lib/vma_lib.cpp"]; + + for source_file in &source_files { + build.file(&source_file); + } + + build.cpp(true); + + let target = env::var("TARGET").unwrap(); + + // don't assume vulkan headers to be installed, thus import them + build.include("."); + + if target.contains("darwin") { + build + .flag("-std=c++11") + .flag("-Wno-missing-field-initializers") + .flag("-Wno-unused-variable") + .flag("-Wno-unused-parameter") + .flag("-Wno-unused-private-field") + .flag("-Wno-reorder") + .flag("-Wno-type-limits") + .cpp_link_stdlib("c++") + .cpp_set_stdlib("c++"); + } else if target.contains("linux") { + build + .flag("-std=c++11") + .flag("-Wno-missing-field-initializers") + .flag("-Wno-unused-variable") + .flag("-Wno-unused-parameter") + .flag("-Wno-unused-private-field") + .flag("-Wno-reorder") + .flag("-Wno-type-limits") + .cpp_link_stdlib("stdc++"); + } else if target.contains("windows") && target.contains("gnu") { + build + .flag("-std=c++11") + .flag("-Wno-missing-field-initializers") + .flag("-Wno-unused-variable") + .flag("-Wno-unused-parameter") + .flag("-Wno-unused-private-field") + .flag("-Wno-reorder") + .flag("-Wno-type-limits") + .cpp_link_stdlib("stdc++"); + } + + build.compile("vma_cpp"); +} diff --git a/vma-rs/src/allocation.rs b/vma-rs/src/allocation.rs new file mode 100644 index 0000000..3e32728 --- /dev/null +++ b/vma-rs/src/allocation.rs @@ -0,0 +1,470 @@ +use crate::vma_bindings::*; +use anyhow::Result; +use vulkan_sys::prelude::*; + +use crate::allocator_pool::AllocatorPool; + +use std::mem::MaybeUninit; +use std::ptr; +use std::slice; + +#[derive(Debug, Clone)] +pub struct AllocationBuilder { + allocator: VmaAllocator, + + flags: VmaAllocationCreateFlags, + usage: VmaMemoryUsage, + required_flags: VkMemoryPropertyFlagBits, + preferred_flags: VkMemoryPropertyFlagBits, + memory_type_bits: u32, + priority: f32, + + pool: Option, +} + +pub trait Build { + fn build(self, argument: T) -> Result; +} + +impl AllocationBuilder { + pub fn set_flags(mut self, flags: impl Into) -> Self { + self.flags = flags.into(); + + self + } + + pub fn set_usage(mut self, usage: VmaMemoryUsage) -> Self { + self.usage = usage; + + self + } + + pub fn priority(mut self, priority: f32) -> Self { + self.priority = priority; + + self + } + + pub fn set_required_flags( + mut self, + required_flags: impl Into, + ) -> Self { + self.required_flags = required_flags.into(); + + self + } + + pub fn set_preferred_flags( + mut self, + preferred_flags: impl Into, + ) -> Self { + self.preferred_flags = preferred_flags.into(); + + self + } + + pub fn set_memory_type_bits(mut self, memory_type_bits: u32) -> Self { + self.memory_type_bits = memory_type_bits; + + self + } + + pub fn set_pool(mut self, pool: AllocatorPool) -> Self { + self.pool = Some(pool); + + self + } + + fn vma_allocation_create_info(&self) -> VmaAllocationCreateInfo { + let mut create_info = VmaAllocationCreateInfo::new( + self.flags, + self.usage, + self.required_flags, + 
self.preferred_flags, + self.memory_type_bits, + self.priority, + ); + + if let Some(pool) = &self.pool { + create_info.set_pool(pool.pool()); + } + + create_info + } +} + +impl Build<&VkMemoryRequirements> for AllocationBuilder { + fn build(self, memory_requirements: &VkMemoryRequirements) -> Result<Allocation> { + let create_info = self.vma_allocation_create_info(); + + let mut allocation = MaybeUninit::uninit(); + let mut allocation_info = MaybeUninit::uninit(); + + let result = unsafe { + vmaAllocateMemory( + self.allocator, + memory_requirements, + &create_info, + allocation.as_mut_ptr(), + allocation_info.as_mut_ptr(), + ) + }; + + if result == VK_SUCCESS { + Ok(Allocation::new( + self.allocator, + unsafe { allocation.assume_init() }, + AllocationType::MemoryOnly, + unsafe { allocation_info.assume_init() }, + )) + } else { + Err(anyhow::Error::msg(format!( + "Failed allocating memory: {:?}", + result + ))) + } + } +} + +impl Build<VkBuffer> for AllocationBuilder { + fn build(self, buffer: VkBuffer) -> Result<Allocation> { + let create_info = self.vma_allocation_create_info(); + + let mut allocation = MaybeUninit::uninit(); + let mut allocation_info = MaybeUninit::uninit(); + + let result = unsafe { + vmaAllocateMemoryForBuffer( + self.allocator, + buffer, + &create_info, + allocation.as_mut_ptr(), + allocation_info.as_mut_ptr(), + ) + }; + + if result == VK_SUCCESS { + let mut allocation = Allocation::new( + self.allocator, + unsafe { allocation.assume_init() }, + AllocationType::MemoryOnly, + unsafe { allocation_info.assume_init() }, + ); + + allocation.bind_buffer_memory(buffer)?; + + Ok(allocation) + } else { + Err(anyhow::Error::msg(format!( + "Failed allocating memory for Buffer: {:?}", + result + ))) + } + } +} + +impl Build<VkImage> for AllocationBuilder { + fn build(self, image: VkImage) -> Result<Allocation> { + let create_info = self.vma_allocation_create_info(); + + let mut allocation = MaybeUninit::uninit(); + let mut allocation_info = MaybeUninit::uninit(); + + let result = unsafe { + vmaAllocateMemoryForImage( + self.allocator, + image, + &create_info, + allocation.as_mut_ptr(), + allocation_info.as_mut_ptr(), + ) + }; + + if result == VK_SUCCESS { + let mut allocation = Allocation::new( + self.allocator, + unsafe { allocation.assume_init() }, + AllocationType::MemoryOnly, + unsafe { allocation_info.assume_init() }, + ); + + allocation.bind_image_memory(image)?; + + Ok(allocation) + } else { + Err(anyhow::Error::msg(format!( + "Failed allocating memory for Image: {:?}", + result + ))) + } + } +} + +impl Build<&VkImageCreateInfo> for AllocationBuilder { + fn build(self, image_create_info: &VkImageCreateInfo) -> Result<Allocation> { + let create_info = self.vma_allocation_create_info(); + + let mut image = MaybeUninit::uninit(); + let mut allocation = MaybeUninit::uninit(); + let mut allocation_info = MaybeUninit::uninit(); + + let result = unsafe { + vmaCreateImage( + self.allocator, + image_create_info, + &create_info, + image.as_mut_ptr(), + allocation.as_mut_ptr(), + allocation_info.as_mut_ptr(), + ) + }; + + if result == VK_SUCCESS { + Ok(Allocation { + allocator: self.allocator, + + allocation: unsafe { allocation.assume_init() }, + allocation_type: AllocationType::Image(unsafe { image.assume_init() }), + allocation_info: unsafe { allocation_info.assume_init() }, + }) + } else { + Err(anyhow::Error::msg(format!( + "Failed creating Image and allocating memory for Image: {:?}", + result + ))) + } + } +} + +impl Build<&VkBufferCreateInfo> for AllocationBuilder { + fn build(self, buffer_create_info: &VkBufferCreateInfo) -> 
Result<Allocation> { + let create_info = self.vma_allocation_create_info(); + + let mut buffer = MaybeUninit::uninit(); + let mut allocation = MaybeUninit::uninit(); + let mut allocation_info = MaybeUninit::uninit(); + + let result = unsafe { + vmaCreateBuffer( + self.allocator, + buffer_create_info, + &create_info, + buffer.as_mut_ptr(), + allocation.as_mut_ptr(), + allocation_info.as_mut_ptr(), + ) + }; + + if result == VK_SUCCESS { + Ok(Allocation { + allocator: self.allocator, + + allocation: unsafe { allocation.assume_init() }, + allocation_type: AllocationType::Buffer(unsafe { buffer.assume_init() }), + allocation_info: unsafe { allocation_info.assume_init() }, + }) + } else { + Err(anyhow::Error::msg(format!( + "Failed creating Buffer and allocating memory for Buffer: {:?}", + result + ))) + } + } +} + +impl AllocationBuilder { + pub(crate) fn new(allocator: VmaAllocator) -> Self { + AllocationBuilder { + allocator, + + flags: VmaAllocationCreateFlags::default(), + usage: VmaMemoryUsage::VMA_MEMORY_USAGE_UNKNOWN, + required_flags: VkMemoryPropertyFlagBits::default(), + preferred_flags: VkMemoryPropertyFlagBits::default(), + memory_type_bits: 0, + priority: 0.0, + + pool: None, + } + } +} + +#[derive(Debug, Clone)] +enum AllocationType { + Buffer(VkBuffer), + Image(VkImage), + MemoryOnly, +} + +#[derive(Debug, Clone)] +pub struct Allocation { + allocator: VmaAllocator, + + allocation: VmaAllocation, + allocation_type: AllocationType, + allocation_info: VmaAllocationInfo, +} + +unsafe impl Send for Allocation {} +unsafe impl Sync for Allocation {} + +impl Allocation { + fn new( + allocator: VmaAllocator, + allocation: VmaAllocation, + allocation_type: AllocationType, + allocation_info: VmaAllocationInfo, + ) -> Self { + Self { + allocator, + allocation, + allocation_type, + allocation_info, + } + } + + pub fn memory_type_index(&self) -> u32 { + self.allocation_info.memoryType + } + + pub fn device_memory(&self) -> VkDeviceMemory { + self.allocation_info.deviceMemory + } + + pub fn offset(&self) -> VkDeviceSize { + self.allocation_info.offset + } + + pub fn size(&self) -> VkDeviceSize { + self.allocation_info.size + } + + pub fn map<T>(&self, length: VkDeviceSize) -> Result<VkMappedMemory<T>> { + let mut data = MaybeUninit::uninit(); + + let result = unsafe { vmaMapMemory(self.allocator, self.allocation, data.as_mut_ptr()) }; + + if result == VK_SUCCESS { + // only read the mapped pointer after vmaMapMemory reported success + let slice = + unsafe { slice::from_raw_parts_mut(data.assume_init() as *mut T, length as usize) }; + + let mut mapped_memory = VkMappedMemory::new(slice); + + let allocation = self.allocation; + let allocator = self.allocator; + + mapped_memory.set_unmap(move || unsafe { vmaUnmapMemory(allocator, allocation) }); + + Ok(mapped_memory) + } else { + Err(anyhow::Error::msg(format!( + "Mapping VkMemory failed: {:?}", + result + ))) + } + } + + pub fn buffer(&self) -> VkBuffer { + match self.allocation_type { + AllocationType::Buffer(buffer) => buffer, + AllocationType::Image(_) => panic!("Allocation is a VkImage"), + AllocationType::MemoryOnly => panic!("Allocation is memory only"), + } + } + + pub fn image(&self) -> VkImage { + match self.allocation_type { + AllocationType::Buffer(_) => panic!("Allocation is a VkBuffer"), + AllocationType::Image(image) => image, + AllocationType::MemoryOnly => panic!("Allocation is memory only"), + } + } + + pub fn bind_buffer_memory(&mut self, buffer: VkBuffer) -> Result<()> { + #[cfg(debug_assertions)] + { + match self.allocation_type { + AllocationType::Buffer(_) => panic!("allocation already bound to buffer"), + 
AllocationType::Image(_) => panic!("allocation already bound to image"), + AllocationType::MemoryOnly => (), + } + } + + let result = unsafe { vmaBindBufferMemory(self.allocator, self.allocation, buffer) }; + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::msg(format!( + "Failed binding Buffer to memory: {:?}", + result + ))) + } + } + + pub fn bind_image_memory(&mut self, image: VkImage) -> Result<()> { + #[cfg(debug_assertions)] + { + match self.allocation_type { + AllocationType::Buffer(_) => panic!("allocation already bound to buffer"), + AllocationType::Image(_) => panic!("allocation already bound to image"), + AllocationType::MemoryOnly => (), + } + } + + let result = unsafe { vmaBindImageMemory(self.allocator, self.allocation, image) }; + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::msg(format!( + "Failed binding Image to memory: {:?}", + result + ))) + } + } +} + +impl Drop for Allocation { + fn drop(&mut self) { + match self.allocation_type { + AllocationType::Buffer(buffer) => unsafe { + vmaDestroyBuffer(self.allocator, buffer, self.allocation) + }, + AllocationType::Image(image) => unsafe { + vmaDestroyImage(self.allocator, image, self.allocation) + }, + AllocationType::MemoryOnly => unsafe { vmaFreeMemory(self.allocator, self.allocation) }, + } + } +} + +impl VmaAllocationCreateInfo { + pub fn new( + flags: impl Into<VmaAllocationCreateFlags>, + usage: VmaMemoryUsage, + required_flags: VkMemoryPropertyFlagBits, + preferred_flags: VkMemoryPropertyFlagBits, + memory_type_bits: u32, + priority: f32, + ) -> Self { + VmaAllocationCreateInfo { + flags: flags.into(), + usage, + requiredFlags: required_flags, + preferredFlags: preferred_flags, + memoryTypeBits: memory_type_bits, + pool: ptr::null_mut(), + pUserData: ptr::null_mut(), + priority, + } + } + + pub fn set_pool(&mut self, pool: VmaPool) { + self.pool = pool; + } + + // pub fn set_user_data<T>(&mut self, data: &mut T) { + // self.pUserData = data as *mut T as *mut _ + // } +} diff --git a/vma-rs/src/allocator.rs b/vma-rs/src/allocator.rs new file mode 100644 index 0000000..97766ef --- /dev/null +++ b/vma-rs/src/allocator.rs @@ -0,0 +1,171 @@ +use crate::vma_bindings::*; +use anyhow::Result; +use vulkan_sys::prelude::*; + +use crate::allocation::AllocationBuilder; + +use std::mem::MaybeUninit; +use std::ptr; + +pub struct AllocatorBuilder { + flags: VmaAllocatorCreateFlags, + preferred_large_heap_block_size: VkDeviceSize, + allocation_callbacks: Option<VkAllocationCallbacks>, + device_memory_callbacks: Option<VmaDeviceMemoryCallbacks>, + frame_in_use_count: u32, + heap_size_limits: Vec<VkDeviceSize>, + vulkan_functions: Option<VmaVulkanFunctions>, + record_settings: Option<VmaRecordSettings>, +} + +impl AllocatorBuilder { + pub fn set_flags(mut self, flags: VmaAllocatorCreateFlags) -> Self { + self.flags = flags.into(); + + self + } + + pub fn set_preferred_large_heap_block_size(mut self, size: VkDeviceSize) -> Self { + self.preferred_large_heap_block_size = size; + + self + } + + pub fn set_allocation_callbacks(mut self, allocation_callbacks: VkAllocationCallbacks) -> Self { + self.allocation_callbacks = Some(allocation_callbacks); + + self + } + + pub fn set_device_memory_callbacks( + mut self, + device_memory_callbacks: VmaDeviceMemoryCallbacks, + ) -> Self { + self.device_memory_callbacks = Some(device_memory_callbacks); + + self + } + + pub fn set_frame_in_use_count(mut self, use_count: u32) -> Self { + self.frame_in_use_count = use_count; + + self + } + + pub fn set_heap_size_limits(mut self, heap_size_limits: Vec<VkDeviceSize>) -> Self { + self.heap_size_limits = heap_size_limits; + + self + } + + pub fn 
set_vulkan_functions(mut self, vulkan_functions: VmaVulkanFunctions) -> Self { + self.vulkan_functions = Some(vulkan_functions); + + self + } + + pub fn set_record_settings(mut self, record_settings: VmaRecordSettings) -> Self { + self.record_settings = Some(record_settings); + + self + } + + pub fn build( + self, + instance: VkInstance, + device: VkDevice, + physical_device: VkPhysicalDevice, + vulkan_api_version: u32, + ) -> Result<Allocator> { + let allocator_create_info = VmaAllocatorCreateInfo { + flags: self.flags, + physicalDevice: physical_device, + instance, + device, + vulkanApiVersion: vulkan_api_version, + preferredLargeHeapBlockSize: self.preferred_large_heap_block_size, + pAllocationCallbacks: match &self.allocation_callbacks { + Some(callbacks) => callbacks as *const _, + None => ptr::null(), + }, + pDeviceMemoryCallbacks: match &self.device_memory_callbacks { + Some(callbacks) => callbacks as *const _, + None => ptr::null(), + }, + frameInUseCount: self.frame_in_use_count, + pHeapSizeLimit: if self.heap_size_limits.is_empty() { + ptr::null() + } else { + self.heap_size_limits.as_ptr() + }, + pVulkanFunctions: match &self.vulkan_functions { + Some(functions) => functions as *const _, + None => ptr::null(), + }, + pRecordSettings: match &self.record_settings { + Some(settings) => settings as *const _, + None => ptr::null(), + }, + }; + + let mut allocator = MaybeUninit::uninit(); + + let result = unsafe { vmaCreateAllocator(&allocator_create_info, allocator.as_mut_ptr()) }; + + if result == VK_SUCCESS { + Ok(Allocator { + allocator: unsafe { allocator.assume_init() }, + }) + } else { + Err(anyhow::Error::msg(format!( + "Failed creating memory allocator: {:?}", + result + ))) + } + } +} + +impl Default for AllocatorBuilder { + fn default() -> Self { + AllocatorBuilder { + flags: 0, + preferred_large_heap_block_size: 0, + allocation_callbacks: None, + device_memory_callbacks: None, + frame_in_use_count: 0, + heap_size_limits: Vec::new(), + vulkan_functions: None, + record_settings: None, + } + } +} + +#[derive(Debug, Clone)] +pub struct Allocator { + allocator: VmaAllocator, +} + +impl Allocator { + pub fn allocate(&self) -> AllocationBuilder { + AllocationBuilder::new(self.allocator) + } + + pub fn statistics(&self) -> VmaStats { + let mut stats = MaybeUninit::uninit(); + + unsafe { + vmaCalculateStats(self.allocator, stats.as_mut_ptr()); + + stats.assume_init() + } + } +} + +unsafe impl Send for Allocator {} +unsafe impl Sync for Allocator {} + +impl Allocator { + pub fn builder() -> AllocatorBuilder { + AllocatorBuilder::default() + } +} diff --git a/vma-rs/src/allocator_pool.rs b/vma-rs/src/allocator_pool.rs new file mode 100644 index 0000000..f00d812 --- /dev/null +++ b/vma-rs/src/allocator_pool.rs @@ -0,0 +1,15 @@ +use crate::vma_bindings::*; + +#[derive(Debug, Clone)] +pub struct AllocatorPool { + pool: VmaPool, +} + +unsafe impl Send for AllocatorPool {} +unsafe impl Sync for AllocatorPool {} + +impl AllocatorPool { + pub(crate) fn pool(&self) -> VmaPool { + self.pool + } +} diff --git a/vma-rs/src/lib.rs b/vma-rs/src/lib.rs new file mode 100644 index 0000000..82044b4 --- /dev/null +++ b/vma-rs/src/lib.rs @@ -0,0 +1,7 @@ +mod vma_bindings; + +pub mod allocation; +pub mod allocator; +pub mod allocator_pool; + +pub mod prelude; diff --git a/vma-rs/src/prelude.rs b/vma-rs/src/prelude.rs new file mode 100644 index 0000000..5911cd4 --- /dev/null +++ b/vma-rs/src/prelude.rs @@ -0,0 +1,6 @@ +pub use crate::allocation::*; +pub use crate::allocator::*; + +pub use crate::vma_bindings::{ 
VmaAllocatorCreateFlagBits::*, VmaMemoryUsage, VmaMemoryUsage::*, VmaStats, VmaVulkanFunctions, +}; diff --git a/vma-rs/src/vma_bindings.rs b/vma-rs/src/vma_bindings.rs new file mode 100644 index 0000000..403692c --- /dev/null +++ b/vma-rs/src/vma_bindings.rs @@ -0,0 +1,2202 @@ +#![allow(non_camel_case_types)] +#![allow(non_upper_case_globals)] +#![allow(non_snake_case)] +#![allow(unused)] +#![allow(deref_nullptr)] + +use vulkan_sys::prelude::*; + +pub type PFN_vkGetPhysicalDeviceProperties = extern "system" fn( + physicalDevice: VkPhysicalDevice, + pProperties: *mut VkPhysicalDeviceProperties, +); +pub type PFN_vkGetPhysicalDeviceMemoryProperties = extern "system" fn( + physicalDevice: VkPhysicalDevice, + pMemoryProperties: *mut VkPhysicalDeviceMemoryProperties, +); +pub type PFN_vkAllocateMemory = extern "system" fn( + device: VkDevice, + pAllocateInfo: *const VkMemoryAllocateInfo, + pAllocator: *const VkAllocationCallbacks, + pMemory: *mut VkDeviceMemory, +) -> VkResult; +pub type PFN_vkFreeMemory = extern "system" fn( + device: VkDevice, + memory: VkDeviceMemory, + pAllocator: *const VkAllocationCallbacks, +); +pub type PFN_vkMapMemory = extern "system" fn( + device: VkDevice, + memory: VkDeviceMemory, + offset: VkDeviceSize, + size: VkDeviceSize, + flags: VkMemoryMapFlags, + ppData: *mut *mut ::std::os::raw::c_void, +) -> VkResult; +pub type PFN_vkUnmapMemory = extern "system" fn(device: VkDevice, memory: VkDeviceMemory); +pub type PFN_vkFlushMappedMemoryRanges = extern "system" fn( + device: VkDevice, + memoryRangeCount: u32, + pMemoryRanges: *const VkMappedMemoryRange, +) -> VkResult; +pub type PFN_vkInvalidateMappedMemoryRanges = extern "system" fn( + device: VkDevice, + memoryRangeCount: u32, + pMemoryRanges: *const VkMappedMemoryRange, +) -> VkResult; +pub type PFN_vkBindBufferMemory = extern "system" fn( + device: VkDevice, + buffer: VkBuffer, + memory: VkDeviceMemory, + memoryOffset: VkDeviceSize, +) -> VkResult; +pub type PFN_vkBindImageMemory = extern "system" fn( + device: VkDevice, + image: VkImage, + memory: VkDeviceMemory, + memoryOffset: VkDeviceSize, +) -> VkResult; +pub type PFN_vkGetBufferMemoryRequirements = extern "system" fn( + device: VkDevice, + buffer: VkBuffer, + pMemoryRequirements: *mut VkMemoryRequirements, +); +pub type PFN_vkGetImageMemoryRequirements = extern "system" fn( + device: VkDevice, + image: VkImage, + pMemoryRequirements: *mut VkMemoryRequirements, +); +pub type PFN_vkCreateBuffer = extern "system" fn( + device: VkDevice, + pCreateInfo: *const VkBufferCreateInfo, + pAllocator: *const VkAllocationCallbacks, + pBuffer: *mut VkBuffer, +) -> VkResult; +pub type PFN_vkDestroyBuffer = extern "system" fn( + device: VkDevice, + buffer: VkBuffer, + pAllocator: *const VkAllocationCallbacks, +); +pub type PFN_vkCreateImage = extern "system" fn( + device: VkDevice, + pCreateInfo: *const VkImageCreateInfo, + pAllocator: *const VkAllocationCallbacks, + pImage: *mut VkImage, +) -> VkResult; +pub type PFN_vkDestroyImage = + extern "system" fn(device: VkDevice, image: VkImage, pAllocator: *const VkAllocationCallbacks); +pub type PFN_vkCmdCopyBuffer = extern "system" fn( + commandBuffer: VkCommandBuffer, + srcBuffer: VkBuffer, + dstBuffer: VkBuffer, + regionCount: u32, + pRegions: *const VkBufferCopy, +); +pub type PFN_vkGetImageMemoryRequirements2KHR = extern "system" fn( + device: VkDevice, + pInfo: *const VkImageMemoryRequirementsInfo2, + pMemoryRequirements: *mut VkMemoryRequirements2, +); +pub type PFN_vkGetBufferMemoryRequirements2KHR = extern "system" fn( 
+ device: VkDevice, + pInfo: *const VkBufferMemoryRequirementsInfo2, + pMemoryRequirements: *mut VkMemoryRequirements2, +); +pub type PFN_vkBindBufferMemory2KHR = extern "system" fn( + device: VkDevice, + bindInfoCount: u32, + pBindInfos: *const VkBindBufferMemoryInfo, +) -> VkResult; +pub type PFN_vkBindImageMemory2KHR = extern "system" fn( + device: VkDevice, + bindInfoCount: u32, + pBindInfos: *const VkBindImageMemoryInfo, +) -> VkResult; + +pub type PFN_vkGetPhysicalDeviceMemoryProperties2KHR = extern "system" fn( + physicalDevice: VkPhysicalDevice, + pMemoryProperties: *mut VkPhysicalDeviceMemoryProperties2KHR, +) -> (); + +pub type VkFlags = u32; + +/* automatically generated by rust-bindgen */ + +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaAllocator_T { + _unused: [u8; 0], +} +pub type VmaAllocator = *mut VmaAllocator_T; +pub type PFN_vmaAllocateDeviceMemoryFunction = ::std::option::Option< + unsafe extern "C" fn( + allocator: VmaAllocator, + memoryType: u32, + memory: VkDeviceMemory, + size: VkDeviceSize, + pUserData: *mut ::std::os::raw::c_void, + ), +>; +pub type PFN_vmaFreeDeviceMemoryFunction = ::std::option::Option< + unsafe extern "C" fn( + allocator: VmaAllocator, + memoryType: u32, + memory: VkDeviceMemory, + size: VkDeviceSize, + pUserData: *mut ::std::os::raw::c_void, + ), +>; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaDeviceMemoryCallbacks { + pub pfnAllocate: PFN_vmaAllocateDeviceMemoryFunction, + pub pfnFree: PFN_vmaFreeDeviceMemoryFunction, + pub pUserData: *mut ::std::os::raw::c_void, +} +#[test] +fn bindgen_test_layout_VmaDeviceMemoryCallbacks() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(VmaDeviceMemoryCallbacks)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaDeviceMemoryCallbacks)) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).pfnAllocate as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaDeviceMemoryCallbacks), + "::", + stringify!(pfnAllocate) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).pfnFree as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaDeviceMemoryCallbacks), + "::", + stringify!(pfnFree) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).pUserData as *const _ as usize + }, + 16usize, + concat!( + "Offset of field: ", + stringify!(VmaDeviceMemoryCallbacks), + "::", + stringify!(pUserData) + ) + ); +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum VmaAllocatorCreateFlagBits { + VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 1, + VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 2, + VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 4, + VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 8, + VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 16, + VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 32, + VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 64, + VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 2147483647, +} +pub type VmaAllocatorCreateFlags = VkFlags; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct VmaVulkanFunctions { + pub vkGetPhysicalDeviceProperties: PFN_vkGetPhysicalDeviceProperties, + pub vkGetPhysicalDeviceMemoryProperties: PFN_vkGetPhysicalDeviceMemoryProperties, + pub vkAllocateMemory: PFN_vkAllocateMemory, + pub vkFreeMemory: PFN_vkFreeMemory, + pub vkMapMemory: PFN_vkMapMemory, + pub vkUnmapMemory: PFN_vkUnmapMemory, + pub vkFlushMappedMemoryRanges: PFN_vkFlushMappedMemoryRanges, + 
pub vkInvalidateMappedMemoryRanges: PFN_vkInvalidateMappedMemoryRanges, + pub vkBindBufferMemory: PFN_vkBindBufferMemory, + pub vkBindImageMemory: PFN_vkBindImageMemory, + pub vkGetBufferMemoryRequirements: PFN_vkGetBufferMemoryRequirements, + pub vkGetImageMemoryRequirements: PFN_vkGetImageMemoryRequirements, + pub vkCreateBuffer: PFN_vkCreateBuffer, + pub vkDestroyBuffer: PFN_vkDestroyBuffer, + pub vkCreateImage: PFN_vkCreateImage, + pub vkDestroyImage: PFN_vkDestroyImage, + pub vkCmdCopyBuffer: PFN_vkCmdCopyBuffer, + pub vkGetBufferMemoryRequirements2KHR: PFN_vkGetBufferMemoryRequirements2KHR, + pub vkGetImageMemoryRequirements2KHR: PFN_vkGetImageMemoryRequirements2KHR, + pub vkBindBufferMemory2KHR: PFN_vkBindBufferMemory2KHR, + pub vkBindImageMemory2KHR: PFN_vkBindImageMemory2KHR, + pub vkGetPhysicalDeviceMemoryProperties2KHR: PFN_vkGetPhysicalDeviceMemoryProperties2KHR, +} +#[test] +fn bindgen_test_layout_VmaVulkanFunctions() { + assert_eq!( + ::std::mem::size_of::(), + 176usize, + concat!("Size of: ", stringify!(VmaVulkanFunctions)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaVulkanFunctions)) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkGetPhysicalDeviceProperties as *const _ + as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkGetPhysicalDeviceProperties) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkGetPhysicalDeviceMemoryProperties + as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkGetPhysicalDeviceMemoryProperties) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkAllocateMemory as *const _ as usize + }, + 16usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkAllocateMemory) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).vkFreeMemory as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkFreeMemory) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).vkMapMemory as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkMapMemory) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkUnmapMemory as *const _ as usize + }, + 40usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkUnmapMemory) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkFlushMappedMemoryRanges as *const _ + as usize + }, + 48usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkFlushMappedMemoryRanges) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkInvalidateMappedMemoryRanges + as *const _ as usize + }, + 56usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkInvalidateMappedMemoryRanges) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkBindBufferMemory as *const _ as usize + }, + 64usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkBindBufferMemory) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkBindImageMemory as *const _ as usize + }, + 72usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkBindImageMemory) + ) + ); + assert_eq!( + unsafe { + 
&(*(::std::ptr::null::())).vkGetBufferMemoryRequirements as *const _ + as usize + }, + 80usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkGetBufferMemoryRequirements) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkGetImageMemoryRequirements as *const _ + as usize + }, + 88usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkGetImageMemoryRequirements) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkCreateBuffer as *const _ as usize + }, + 96usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkCreateBuffer) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkDestroyBuffer as *const _ as usize + }, + 104usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkDestroyBuffer) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkCreateImage as *const _ as usize + }, + 112usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkCreateImage) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkDestroyImage as *const _ as usize + }, + 120usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkDestroyImage) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkCmdCopyBuffer as *const _ as usize + }, + 128usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkCmdCopyBuffer) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkGetBufferMemoryRequirements2KHR + as *const _ as usize + }, + 136usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkGetBufferMemoryRequirements2KHR) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkGetImageMemoryRequirements2KHR + as *const _ as usize + }, + 144usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkGetImageMemoryRequirements2KHR) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkBindBufferMemory2KHR as *const _ + as usize + }, + 152usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkBindBufferMemory2KHR) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkBindImageMemory2KHR as *const _ + as usize + }, + 160usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkBindImageMemory2KHR) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vkGetPhysicalDeviceMemoryProperties2KHR + as *const _ as usize + }, + 168usize, + concat!( + "Offset of field: ", + stringify!(VmaVulkanFunctions), + "::", + stringify!(vkGetPhysicalDeviceMemoryProperties2KHR) + ) + ); +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum VmaRecordFlagBits { + VMA_RECORD_FLUSH_AFTER_CALL_BIT = 1, + VMA_RECORD_FLAG_BITS_MAX_ENUM = 2147483647, +} +pub type VmaRecordFlags = VkFlags; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaRecordSettings { + pub flags: VmaRecordFlags, + pub pFilePath: *const ::std::os::raw::c_char, +} +#[test] +fn bindgen_test_layout_VmaRecordSettings() { + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(VmaRecordSettings)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaRecordSettings)) + ); + assert_eq!( + unsafe { 
&(*(::std::ptr::null::())).flags as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaRecordSettings), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).pFilePath as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaRecordSettings), + "::", + stringify!(pFilePath) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaAllocatorCreateInfo { + pub flags: VmaAllocatorCreateFlags, + pub physicalDevice: VkPhysicalDevice, + pub device: VkDevice, + pub preferredLargeHeapBlockSize: VkDeviceSize, + pub pAllocationCallbacks: *const VkAllocationCallbacks, + pub pDeviceMemoryCallbacks: *const VmaDeviceMemoryCallbacks, + pub frameInUseCount: u32, + pub pHeapSizeLimit: *const VkDeviceSize, + pub pVulkanFunctions: *const VmaVulkanFunctions, + pub pRecordSettings: *const VmaRecordSettings, + pub instance: VkInstance, + pub vulkanApiVersion: u32, +} +#[test] +fn bindgen_test_layout_VmaAllocatorCreateInfo() { + assert_eq!( + ::std::mem::size_of::(), + 96usize, + concat!("Size of: ", stringify!(VmaAllocatorCreateInfo)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaAllocatorCreateInfo)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).physicalDevice as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(physicalDevice) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).device as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(device) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).preferredLargeHeapBlockSize + as *const _ as usize + }, + 24usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(preferredLargeHeapBlockSize) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).pAllocationCallbacks as *const _ + as usize + }, + 32usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(pAllocationCallbacks) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).pDeviceMemoryCallbacks as *const _ + as usize + }, + 40usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(pDeviceMemoryCallbacks) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).frameInUseCount as *const _ as usize + }, + 48usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(frameInUseCount) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).pHeapSizeLimit as *const _ as usize + }, + 56usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(pHeapSizeLimit) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).pVulkanFunctions as *const _ as usize + }, + 64usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(pVulkanFunctions) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).pRecordSettings as *const _ as usize + }, + 72usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(pRecordSettings) + ) + ); + assert_eq!( + unsafe { 
&(*(::std::ptr::null::())).instance as *const _ as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(instance) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).vulkanApiVersion as *const _ as usize + }, + 88usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorCreateInfo), + "::", + stringify!(vulkanApiVersion) + ) + ); +} +extern "C" { + pub fn vmaCreateAllocator( + pCreateInfo: *const VmaAllocatorCreateInfo, + pAllocator: *mut VmaAllocator, + ) -> VkResult; +} +extern "C" { + pub fn vmaDestroyAllocator(allocator: VmaAllocator); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaAllocatorInfo { + pub instance: VkInstance, + pub physicalDevice: VkPhysicalDevice, + pub device: VkDevice, +} +#[test] +fn bindgen_test_layout_VmaAllocatorInfo() { + assert_eq!( + ::std::mem::size_of::(), + 24usize, + concat!("Size of: ", stringify!(VmaAllocatorInfo)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaAllocatorInfo)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).instance as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorInfo), + "::", + stringify!(instance) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).physicalDevice as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorInfo), + "::", + stringify!(physicalDevice) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).device as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocatorInfo), + "::", + stringify!(device) + ) + ); +} +extern "C" { + pub fn vmaGetAllocatorInfo(allocator: VmaAllocator, pAllocatorInfo: *mut VmaAllocatorInfo); +} +extern "C" { + pub fn vmaGetPhysicalDeviceProperties( + allocator: VmaAllocator, + ppPhysicalDeviceProperties: *mut *const VkPhysicalDeviceProperties, + ); +} +extern "C" { + pub fn vmaGetMemoryProperties( + allocator: VmaAllocator, + ppPhysicalDeviceMemoryProperties: *mut *const VkPhysicalDeviceMemoryProperties, + ); +} +extern "C" { + pub fn vmaGetMemoryTypeProperties( + allocator: VmaAllocator, + memoryTypeIndex: u32, + pFlags: *mut VkMemoryPropertyFlags, + ); +} +extern "C" { + pub fn vmaSetCurrentFrameIndex(allocator: VmaAllocator, frameIndex: u32); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaStatInfo { + pub blockCount: u32, + pub allocationCount: u32, + pub unusedRangeCount: u32, + pub usedBytes: VkDeviceSize, + pub unusedBytes: VkDeviceSize, + pub allocationSizeMin: VkDeviceSize, + pub allocationSizeAvg: VkDeviceSize, + pub allocationSizeMax: VkDeviceSize, + pub unusedRangeSizeMin: VkDeviceSize, + pub unusedRangeSizeAvg: VkDeviceSize, + pub unusedRangeSizeMax: VkDeviceSize, +} +#[test] +fn bindgen_test_layout_VmaStatInfo() { + assert_eq!( + ::std::mem::size_of::(), + 80usize, + concat!("Size of: ", stringify!(VmaStatInfo)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaStatInfo)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).blockCount as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(blockCount) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).allocationCount as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(allocationCount) + ) + ); + assert_eq!( + unsafe { 
&(*(::std::ptr::null::())).unusedRangeCount as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(unusedRangeCount) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).usedBytes as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(usedBytes) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).unusedBytes as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(unusedBytes) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).allocationSizeMin as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(allocationSizeMin) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).allocationSizeAvg as *const _ as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(allocationSizeAvg) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).allocationSizeMax as *const _ as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(allocationSizeMax) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).unusedRangeSizeMin as *const _ as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(unusedRangeSizeMin) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).unusedRangeSizeAvg as *const _ as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(unusedRangeSizeAvg) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).unusedRangeSizeMax as *const _ as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(VmaStatInfo), + "::", + stringify!(unusedRangeSizeMax) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaStats { + pub memoryType: [VmaStatInfo; 32usize], + pub memoryHeap: [VmaStatInfo; 16usize], + pub total: VmaStatInfo, +} +#[test] +fn bindgen_test_layout_VmaStats() { + assert_eq!( + ::std::mem::size_of::(), + 3920usize, + concat!("Size of: ", stringify!(VmaStats)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaStats)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).memoryType as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaStats), + "::", + stringify!(memoryType) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).memoryHeap as *const _ as usize }, + 2560usize, + concat!( + "Offset of field: ", + stringify!(VmaStats), + "::", + stringify!(memoryHeap) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).total as *const _ as usize }, + 3840usize, + concat!( + "Offset of field: ", + stringify!(VmaStats), + "::", + stringify!(total) + ) + ); +} +extern "C" { + pub fn vmaCalculateStats(allocator: VmaAllocator, pStats: *mut VmaStats); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaBudget { + pub blockBytes: VkDeviceSize, + pub allocationBytes: VkDeviceSize, + pub usage: VkDeviceSize, + pub budget: VkDeviceSize, +} +#[test] +fn bindgen_test_layout_VmaBudget() { + assert_eq!( + ::std::mem::size_of::(), + 32usize, + concat!("Size of: ", stringify!(VmaBudget)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaBudget)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).blockBytes as *const _ as usize }, + 0usize, 
+ concat!( + "Offset of field: ", + stringify!(VmaBudget), + "::", + stringify!(blockBytes) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).allocationBytes as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaBudget), + "::", + stringify!(allocationBytes) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).usage as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(VmaBudget), + "::", + stringify!(usage) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).budget as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(VmaBudget), + "::", + stringify!(budget) + ) + ); +} +extern "C" { + pub fn vmaGetBudget(allocator: VmaAllocator, pBudget: *mut VmaBudget); +} +extern "C" { + pub fn vmaBuildStatsString( + allocator: VmaAllocator, + ppStatsString: *mut *mut ::std::os::raw::c_char, + detailedMap: VkBool32, + ); +} +extern "C" { + pub fn vmaFreeStatsString(allocator: VmaAllocator, pStatsString: *mut ::std::os::raw::c_char); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaPool_T { + _unused: [u8; 0], +} +pub type VmaPool = *mut VmaPool_T; +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum VmaMemoryUsage { + VMA_MEMORY_USAGE_UNKNOWN = 0, + VMA_MEMORY_USAGE_GPU_ONLY = 1, + VMA_MEMORY_USAGE_CPU_ONLY = 2, + VMA_MEMORY_USAGE_CPU_TO_GPU = 3, + VMA_MEMORY_USAGE_GPU_TO_CPU = 4, + VMA_MEMORY_USAGE_CPU_COPY = 5, + VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, + VMA_MEMORY_USAGE_MAX_ENUM = 2147483647, +} +impl VmaAllocationCreateFlagBits { + pub const VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT: VmaAllocationCreateFlagBits = + VmaAllocationCreateFlagBits::VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT; +} +impl VmaAllocationCreateFlagBits { + pub const VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT: VmaAllocationCreateFlagBits = + VmaAllocationCreateFlagBits::VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT; +} +impl VmaAllocationCreateFlagBits { + pub const VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT: VmaAllocationCreateFlagBits = + VmaAllocationCreateFlagBits::VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT; +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum VmaAllocationCreateFlagBits { + VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 1, + VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 2, + VMA_ALLOCATION_CREATE_MAPPED_BIT = 4, + VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 8, + VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 16, + VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 32, + VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 64, + VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 128, + VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 256, + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 65536, + VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 131072, + VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 262144, + VMA_ALLOCATION_CREATE_STRATEGY_MASK = 458752, + VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 2147483647, +} +pub type VmaAllocationCreateFlags = VkFlags; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaAllocationCreateInfo { + pub flags: VmaAllocationCreateFlags, + pub usage: VmaMemoryUsage, + pub requiredFlags: VkMemoryPropertyFlagBits, + pub preferredFlags: VkMemoryPropertyFlagBits, + pub memoryTypeBits: u32, + pub pool: VmaPool, + pub pUserData: *mut ::std::os::raw::c_void, + pub priority: f32, +} +#[test] +fn bindgen_test_layout_VmaAllocationCreateInfo() { + assert_eq!( + ::std::mem::size_of::(), + 48usize, + concat!("Size of: ", 
stringify!(VmaAllocationCreateInfo)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaAllocationCreateInfo)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationCreateInfo), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).usage as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationCreateInfo), + "::", + stringify!(usage) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).requiredFlags as *const _ as usize + }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationCreateInfo), + "::", + stringify!(requiredFlags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).preferredFlags as *const _ as usize + }, + 12usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationCreateInfo), + "::", + stringify!(preferredFlags) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).memoryTypeBits as *const _ as usize + }, + 16usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationCreateInfo), + "::", + stringify!(memoryTypeBits) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).pool as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationCreateInfo), + "::", + stringify!(pool) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).pUserData as *const _ as usize + }, + 32usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationCreateInfo), + "::", + stringify!(pUserData) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).priority as *const _ as usize + }, + 40usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationCreateInfo), + "::", + stringify!(priority) + ) + ); +} +extern "C" { + pub fn vmaFindMemoryTypeIndex( + allocator: VmaAllocator, + memoryTypeBits: u32, + pAllocationCreateInfo: *const VmaAllocationCreateInfo, + pMemoryTypeIndex: *mut u32, + ) -> VkResult; +} +extern "C" { + pub fn vmaFindMemoryTypeIndexForBufferInfo( + allocator: VmaAllocator, + pBufferCreateInfo: *const VkBufferCreateInfo, + pAllocationCreateInfo: *const VmaAllocationCreateInfo, + pMemoryTypeIndex: *mut u32, + ) -> VkResult; +} +extern "C" { + pub fn vmaFindMemoryTypeIndexForImageInfo( + allocator: VmaAllocator, + pImageCreateInfo: *const VkImageCreateInfo, + pAllocationCreateInfo: *const VmaAllocationCreateInfo, + pMemoryTypeIndex: *mut u32, + ) -> VkResult; +} +#[repr(u32)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub enum VmaPoolCreateFlagBits { + VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 2, + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 4, + VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 8, + VMA_POOL_CREATE_ALGORITHM_MASK = 12, + VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 2147483647, +} +pub type VmaPoolCreateFlags = VkFlags; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaPoolCreateInfo { + pub memoryTypeIndex: u32, + pub flags: VmaPoolCreateFlags, + pub blockSize: VkDeviceSize, + pub minBlockCount: isize, + pub maxBlockCount: isize, + pub frameInUseCount: u32, + pub priority: f32, +} +#[test] +fn bindgen_test_layout_VmaPoolCreateInfo() { + assert_eq!( + ::std::mem::size_of::(), + 40usize, + concat!("Size of: ", stringify!(VmaPoolCreateInfo)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaPoolCreateInfo)) + ); + assert_eq!( + unsafe { + 
&(*(::std::ptr::null::())).memoryTypeIndex as *const _ as usize + }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolCreateInfo), + "::", + stringify!(memoryTypeIndex) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).flags as *const _ as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolCreateInfo), + "::", + stringify!(flags) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).blockSize as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolCreateInfo), + "::", + stringify!(blockSize) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).minBlockCount as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolCreateInfo), + "::", + stringify!(minBlockCount) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).maxBlockCount as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolCreateInfo), + "::", + stringify!(maxBlockCount) + ) + ); + assert_eq!( + unsafe { + &(*(::std::ptr::null::())).frameInUseCount as *const _ as usize + }, + 32usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolCreateInfo), + "::", + stringify!(frameInUseCount) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).priority as *const _ as usize }, + 36usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolCreateInfo), + "::", + stringify!(priority) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaPoolStats { + pub size: VkDeviceSize, + pub unusedSize: VkDeviceSize, + pub allocationCount: isize, + pub unusedRangeCount: isize, + pub unusedRangeSizeMax: VkDeviceSize, + pub blockCount: isize, +} +#[test] +fn bindgen_test_layout_VmaPoolStats() { + assert_eq!( + ::std::mem::size_of::(), + 48usize, + concat!("Size of: ", stringify!(VmaPoolStats)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaPoolStats)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).size as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolStats), + "::", + stringify!(size) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).unusedSize as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolStats), + "::", + stringify!(unusedSize) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).allocationCount as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolStats), + "::", + stringify!(allocationCount) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).unusedRangeCount as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolStats), + "::", + stringify!(unusedRangeCount) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).unusedRangeSizeMax as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolStats), + "::", + stringify!(unusedRangeSizeMax) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).blockCount as *const _ as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(VmaPoolStats), + "::", + stringify!(blockCount) + ) + ); +} +extern "C" { + pub fn vmaCreatePool( + allocator: VmaAllocator, + pCreateInfo: *const VmaPoolCreateInfo, + pPool: *mut VmaPool, + ) -> VkResult; +} +extern "C" { + pub fn vmaDestroyPool(allocator: VmaAllocator, pool: VmaPool); +} +extern "C" { + pub fn vmaGetPoolStats(allocator: VmaAllocator, pool: VmaPool, pPoolStats: *mut VmaPoolStats); +} 
+extern "C" { + pub fn vmaMakePoolAllocationsLost( + allocator: VmaAllocator, + pool: VmaPool, + pLostAllocationCount: *mut isize, + ); +} +extern "C" { + pub fn vmaCheckPoolCorruption(allocator: VmaAllocator, pool: VmaPool) -> VkResult; +} +extern "C" { + pub fn vmaGetPoolName( + allocator: VmaAllocator, + pool: VmaPool, + ppName: *mut *const ::std::os::raw::c_char, + ); +} +extern "C" { + pub fn vmaSetPoolName( + allocator: VmaAllocator, + pool: VmaPool, + pName: *const ::std::os::raw::c_char, + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaAllocation_T { + _unused: [u8; 0], +} +pub type VmaAllocation = *mut VmaAllocation_T; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VmaAllocationInfo { + pub memoryType: u32, + pub deviceMemory: VkDeviceMemory, + pub offset: VkDeviceSize, + pub size: VkDeviceSize, + pub pMappedData: *mut ::std::os::raw::c_void, + pub pUserData: *mut ::std::os::raw::c_void, +} +#[test] +fn bindgen_test_layout_VmaAllocationInfo() { + assert_eq!( + ::std::mem::size_of::(), + 48usize, + concat!("Size of: ", stringify!(VmaAllocationInfo)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(VmaAllocationInfo)) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).memoryType as *const _ as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationInfo), + "::", + stringify!(memoryType) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).deviceMemory as *const _ as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationInfo), + "::", + stringify!(deviceMemory) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).offset as *const _ as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationInfo), + "::", + stringify!(offset) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).size as *const _ as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationInfo), + "::", + stringify!(size) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).pMappedData as *const _ as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationInfo), + "::", + stringify!(pMappedData) + ) + ); + assert_eq!( + unsafe { &(*(::std::ptr::null::())).pUserData as *const _ as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(VmaAllocationInfo), + "::", + stringify!(pUserData) + ) + ); +} +extern "C" { + pub fn vmaAllocateMemory( + allocator: VmaAllocator, + pVkMemoryRequirements: *const VkMemoryRequirements, + pCreateInfo: *const VmaAllocationCreateInfo, + pAllocation: *mut VmaAllocation, + pAllocationInfo: *mut VmaAllocationInfo, + ) -> VkResult; +} +extern "C" { + pub fn vmaAllocateMemoryPages( + allocator: VmaAllocator, + pVkMemoryRequirements: *const VkMemoryRequirements, + pCreateInfo: *const VmaAllocationCreateInfo, + allocationCount: isize, + pAllocations: *mut VmaAllocation, + pAllocationInfo: *mut VmaAllocationInfo, + ) -> VkResult; +} +extern "C" { + pub fn vmaAllocateMemoryForBuffer( + allocator: VmaAllocator, + buffer: VkBuffer, + pCreateInfo: *const VmaAllocationCreateInfo, + pAllocation: *mut VmaAllocation, + pAllocationInfo: *mut VmaAllocationInfo, + ) -> VkResult; +} +extern "C" { + pub fn vmaAllocateMemoryForImage( + allocator: VmaAllocator, + image: VkImage, + pCreateInfo: *const VmaAllocationCreateInfo, + pAllocation: *mut VmaAllocation, + pAllocationInfo: *mut VmaAllocationInfo, + ) -> VkResult; +} +extern "C" { + pub fn vmaFreeMemory(allocator: 
+extern "C" {
+    pub fn vmaFreeMemoryPages(
+        allocator: VmaAllocator,
+        allocationCount: isize,
+        pAllocations: *mut VmaAllocation,
+    );
+}
+extern "C" {
+    pub fn vmaResizeAllocation(
+        allocator: VmaAllocator,
+        allocation: VmaAllocation,
+        newSize: VkDeviceSize,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaGetAllocationInfo(
+        allocator: VmaAllocator,
+        allocation: VmaAllocation,
+        pAllocationInfo: *mut VmaAllocationInfo,
+    );
+}
+extern "C" {
+    pub fn vmaTouchAllocation(allocator: VmaAllocator, allocation: VmaAllocation) -> VkBool32;
+}
+extern "C" {
+    pub fn vmaSetAllocationUserData(
+        allocator: VmaAllocator,
+        allocation: VmaAllocation,
+        pUserData: *mut ::std::os::raw::c_void,
+    );
+}
+extern "C" {
+    pub fn vmaCreateLostAllocation(allocator: VmaAllocator, pAllocation: *mut VmaAllocation);
+}
+extern "C" {
+    pub fn vmaMapMemory(
+        allocator: VmaAllocator,
+        allocation: VmaAllocation,
+        ppData: *mut *mut ::std::os::raw::c_void,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaUnmapMemory(allocator: VmaAllocator, allocation: VmaAllocation);
+}
+extern "C" {
+    pub fn vmaFlushAllocation(
+        allocator: VmaAllocator,
+        allocation: VmaAllocation,
+        offset: VkDeviceSize,
+        size: VkDeviceSize,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaInvalidateAllocation(
+        allocator: VmaAllocator,
+        allocation: VmaAllocation,
+        offset: VkDeviceSize,
+        size: VkDeviceSize,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaFlushAllocations(
+        allocator: VmaAllocator,
+        allocationCount: u32,
+        allocations: *mut VmaAllocation,
+        offsets: *const VkDeviceSize,
+        sizes: *const VkDeviceSize,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaInvalidateAllocations(
+        allocator: VmaAllocator,
+        allocationCount: u32,
+        allocations: *mut VmaAllocation,
+        offsets: *const VkDeviceSize,
+        sizes: *const VkDeviceSize,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaCheckCorruption(allocator: VmaAllocator, memoryTypeBits: u32) -> VkResult;
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct VmaDefragmentationContext_T {
+    _unused: [u8; 0],
+}
+pub type VmaDefragmentationContext = *mut VmaDefragmentationContext_T;
+#[repr(u32)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+pub enum VmaDefragmentationFlagBits {
+    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 1,
+    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 2147483647,
+}
+pub type VmaDefragmentationFlags = VkFlags;
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct VmaDefragmentationInfo2 {
+    pub flags: VmaDefragmentationFlags,
+    pub allocationCount: u32,
+    pub pAllocations: *mut VmaAllocation,
+    pub pAllocationsChanged: *mut VkBool32,
+    pub poolCount: u32,
+    pub pPools: *mut VmaPool,
+    pub maxCpuBytesToMove: VkDeviceSize,
+    pub maxCpuAllocationsToMove: u32,
+    pub maxGpuBytesToMove: VkDeviceSize,
+    pub maxGpuAllocationsToMove: u32,
+    pub commandBuffer: VkCommandBuffer,
+}
+#[test]
+fn bindgen_test_layout_VmaDefragmentationInfo2() {
+    assert_eq!(
+        ::std::mem::size_of::<VmaDefragmentationInfo2>(),
+        80usize,
+        concat!("Size of: ", stringify!(VmaDefragmentationInfo2))
+    );
+    assert_eq!(
+        ::std::mem::align_of::<VmaDefragmentationInfo2>(),
+        8usize,
+        concat!("Alignment of ", stringify!(VmaDefragmentationInfo2))
+    );
+    assert_eq!(
+        unsafe { &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).flags as *const _ as usize },
+        0usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(flags)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).allocationCount as *const _ as usize
+        },
+        4usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(allocationCount)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).pAllocations as *const _ as usize
+        },
+        8usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(pAllocations)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).pAllocationsChanged as *const _
+                as usize
+        },
+        16usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(pAllocationsChanged)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).poolCount as *const _ as usize
+        },
+        24usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(poolCount)
+        )
+    );
+    assert_eq!(
+        unsafe { &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).pPools as *const _ as usize },
+        32usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(pPools)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).maxCpuBytesToMove as *const _
+                as usize
+        },
+        40usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(maxCpuBytesToMove)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).maxCpuAllocationsToMove as *const _
+                as usize
+        },
+        48usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(maxCpuAllocationsToMove)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).maxGpuBytesToMove as *const _
+                as usize
+        },
+        56usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(maxGpuBytesToMove)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).maxGpuAllocationsToMove as *const _
+                as usize
+        },
+        64usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(maxGpuAllocationsToMove)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo2>())).commandBuffer as *const _ as usize
+        },
+        72usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo2),
+            "::",
+            stringify!(commandBuffer)
+        )
+    );
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct VmaDefragmentationPassMoveInfo {
+    pub allocation: VmaAllocation,
+    pub memory: VkDeviceMemory,
+    pub offset: VkDeviceSize,
+}
+#[test]
+fn bindgen_test_layout_VmaDefragmentationPassMoveInfo() {
+    assert_eq!(
+        ::std::mem::size_of::<VmaDefragmentationPassMoveInfo>(),
+        24usize,
+        concat!("Size of: ", stringify!(VmaDefragmentationPassMoveInfo))
+    );
+    assert_eq!(
+        ::std::mem::align_of::<VmaDefragmentationPassMoveInfo>(),
+        8usize,
+        concat!("Alignment of ", stringify!(VmaDefragmentationPassMoveInfo))
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationPassMoveInfo>())).allocation as *const _
+                as usize
+        },
+        0usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationPassMoveInfo),
+            "::",
+            stringify!(allocation)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationPassMoveInfo>())).memory as *const _ as usize
+        },
+        8usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationPassMoveInfo),
+            "::",
+            stringify!(memory)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationPassMoveInfo>())).offset as *const _ as usize
+        },
+        16usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationPassMoveInfo),
+            "::",
+            stringify!(offset)
+        )
+    );
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct VmaDefragmentationPassInfo {
+    pub moveCount: u32,
+    pub pMoves: *mut VmaDefragmentationPassMoveInfo,
+}
+#[test]
+fn bindgen_test_layout_VmaDefragmentationPassInfo() {
+    assert_eq!(
+        ::std::mem::size_of::<VmaDefragmentationPassInfo>(),
+        16usize,
+        concat!("Size of: ", stringify!(VmaDefragmentationPassInfo))
+    );
+    assert_eq!(
+        ::std::mem::align_of::<VmaDefragmentationPassInfo>(),
+        8usize,
+        concat!("Alignment of ", stringify!(VmaDefragmentationPassInfo))
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationPassInfo>())).moveCount as *const _ as usize
+        },
+        0usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationPassInfo),
+            "::",
+            stringify!(moveCount)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationPassInfo>())).pMoves as *const _ as usize
+        },
+        8usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationPassInfo),
+            "::",
+            stringify!(pMoves)
+        )
+    );
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct VmaDefragmentationInfo {
+    pub maxBytesToMove: VkDeviceSize,
+    pub maxAllocationsToMove: u32,
+}
+#[test]
+fn bindgen_test_layout_VmaDefragmentationInfo() {
+    assert_eq!(
+        ::std::mem::size_of::<VmaDefragmentationInfo>(),
+        16usize,
+        concat!("Size of: ", stringify!(VmaDefragmentationInfo))
+    );
+    assert_eq!(
+        ::std::mem::align_of::<VmaDefragmentationInfo>(),
+        8usize,
+        concat!("Alignment of ", stringify!(VmaDefragmentationInfo))
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo>())).maxBytesToMove as *const _ as usize
+        },
+        0usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo),
+            "::",
+            stringify!(maxBytesToMove)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationInfo>())).maxAllocationsToMove as *const _
+                as usize
+        },
+        8usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationInfo),
+            "::",
+            stringify!(maxAllocationsToMove)
+        )
+    );
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct VmaDefragmentationStats {
+    pub bytesMoved: VkDeviceSize,
+    pub bytesFreed: VkDeviceSize,
+    pub allocationsMoved: u32,
+    pub deviceMemoryBlocksFreed: u32,
+}
+#[test]
+fn bindgen_test_layout_VmaDefragmentationStats() {
+    assert_eq!(
+        ::std::mem::size_of::<VmaDefragmentationStats>(),
+        24usize,
+        concat!("Size of: ", stringify!(VmaDefragmentationStats))
+    );
+    assert_eq!(
+        ::std::mem::align_of::<VmaDefragmentationStats>(),
+        8usize,
+        concat!("Alignment of ", stringify!(VmaDefragmentationStats))
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationStats>())).bytesMoved as *const _ as usize
+        },
+        0usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationStats),
+            "::",
+            stringify!(bytesMoved)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationStats>())).bytesFreed as *const _ as usize
+        },
+        8usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationStats),
+            "::",
+            stringify!(bytesFreed)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationStats>())).allocationsMoved as *const _
+                as usize
+        },
+        16usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationStats),
+            "::",
+            stringify!(allocationsMoved)
+        )
+    );
+    assert_eq!(
+        unsafe {
+            &(*(::std::ptr::null::<VmaDefragmentationStats>())).deviceMemoryBlocksFreed as *const _
+                as usize
+        },
+        20usize,
+        concat!(
+            "Offset of field: ",
+            stringify!(VmaDefragmentationStats),
+            "::",
+            stringify!(deviceMemoryBlocksFreed)
+        )
+    );
+}
+extern "C" {
+    pub fn vmaDefragmentationBegin(
+        allocator: VmaAllocator,
+        pInfo: *const VmaDefragmentationInfo2,
+        pStats: *mut VmaDefragmentationStats,
+        pContext: *mut VmaDefragmentationContext,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaDefragmentationEnd(
+        allocator: VmaAllocator,
+        context: VmaDefragmentationContext,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaBeginDefragmentationPass(
+        allocator: VmaAllocator,
+        context: VmaDefragmentationContext,
+        pInfo: *mut VmaDefragmentationPassInfo,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaEndDefragmentationPass(
+        allocator: VmaAllocator,
+        context: VmaDefragmentationContext,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaDefragment(
+        allocator: VmaAllocator,
+        pAllocations: *mut VmaAllocation,
+        allocationCount: isize,
+        pAllocationsChanged: *mut VkBool32,
+        pDefragmentationInfo: *const VmaDefragmentationInfo,
+        pDefragmentationStats: *mut VmaDefragmentationStats,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaBindBufferMemory(
+        allocator: VmaAllocator,
+        allocation: VmaAllocation,
+        buffer: VkBuffer,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaBindBufferMemory2(
+        allocator: VmaAllocator,
+        allocation: VmaAllocation,
+        allocationLocalOffset: VkDeviceSize,
+        buffer: VkBuffer,
+        pNext: *const ::std::os::raw::c_void,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaBindImageMemory(
+        allocator: VmaAllocator,
+        allocation: VmaAllocation,
+        image: VkImage,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaBindImageMemory2(
+        allocator: VmaAllocator,
+        allocation: VmaAllocation,
+        allocationLocalOffset: VkDeviceSize,
+        image: VkImage,
+        pNext: *const ::std::os::raw::c_void,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaCreateBuffer(
+        allocator: VmaAllocator,
+        pBufferCreateInfo: *const VkBufferCreateInfo,
+        pAllocationCreateInfo: *const VmaAllocationCreateInfo,
+        pBuffer: *mut VkBuffer,
+        pAllocation: *mut VmaAllocation,
+        pAllocationInfo: *mut VmaAllocationInfo,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaDestroyBuffer(allocator: VmaAllocator, buffer: VkBuffer, allocation: VmaAllocation);
+}
+extern "C" {
+    pub fn vmaCreateImage(
+        allocator: VmaAllocator,
+        pImageCreateInfo: *const VkImageCreateInfo,
+        pAllocationCreateInfo: *const VmaAllocationCreateInfo,
+        pImage: *mut VkImage,
+        pAllocation: *mut VmaAllocation,
+        pAllocationInfo: *mut VmaAllocationInfo,
+    ) -> VkResult;
+}
+extern "C" {
+    pub fn vmaDestroyImage(allocator: VmaAllocator, image: VkImage, allocation: VmaAllocation);
+}
diff --git a/vma-rs/vma_lib/vma_lib.cpp b/vma-rs/vma_lib/vma_lib.cpp
new file mode 100644
index 0000000..00317e0
--- /dev/null
+++ b/vma-rs/vma_lib/vma_lib.cpp
@@ -0,0 +1,2 @@
+#define VMA_IMPLEMENTATION
+#include "vk_mem_alloc.h"
\ No newline at end of file
diff --git a/vma-rs/vma_source/vk_mem_alloc.h b/vma-rs/vma_source/vk_mem_alloc.h
new file mode 100644
index 0000000..bf77902
--- /dev/null
+++ b/vma-rs/vma_source/vk_mem_alloc.h
@@ -0,0 +1,19305 @@
+//
+// Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
+#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
+#define AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+/** \mainpage Vulkan Memory Allocator
+
+Version 3.0.0-development (2020-11-03)
+
+Copyright (c) 2017-2020 Advanced Micro Devices, Inc. All rights reserved. \n
+License: MIT
+
+Documentation of all members: vk_mem_alloc.h
+
+\section main_table_of_contents Table of contents
+
+- User guide
+  - \subpage quick_start
+    - [Project setup](@ref quick_start_project_setup)
+    - [Initialization](@ref quick_start_initialization)
+    - [Resource allocation](@ref quick_start_resource_allocation)
+  - \subpage choosing_memory_type
+    - [Usage](@ref choosing_memory_type_usage)
+    - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
+    - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
+    - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
+    - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
+  - \subpage memory_mapping
+    - [Mapping functions](@ref memory_mapping_mapping_functions)
+    - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
+    - [Cache flush and invalidate](@ref memory_mapping_cache_control)
+    - [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable)
+  - \subpage staying_within_budget
+    - [Querying for budget](@ref staying_within_budget_querying_for_budget)
+    - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
+  - \subpage resource_aliasing
+  - \subpage custom_memory_pools
+    - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
+    - [Linear allocation algorithm](@ref linear_algorithm)
+      - [Free-at-once](@ref linear_algorithm_free_at_once)
+      - [Stack](@ref linear_algorithm_stack)
+      - [Double stack](@ref linear_algorithm_double_stack)
+      - [Ring buffer](@ref linear_algorithm_ring_buffer)
+    - [Buddy allocation algorithm](@ref buddy_algorithm)
+  - \subpage defragmentation
+    - [Defragmenting CPU memory](@ref defragmentation_cpu)
+    - [Defragmenting GPU memory](@ref defragmentation_gpu)
+    - [Additional notes](@ref defragmentation_additional_notes)
+    - [Writing custom allocation algorithm](@ref defragmentation_custom_algorithm)
+  - \subpage lost_allocations
+  - \subpage statistics
+    - [Numeric statistics](@ref statistics_numeric_statistics)
+    - [JSON dump](@ref statistics_json_dump)
+  - \subpage allocation_annotation
+    - [Allocation user data](@ref allocation_user_data)
+    - [Allocation names](@ref allocation_names)
+  - \subpage debugging_memory_usage
+    - [Memory initialization](@ref debugging_memory_usage_initialization)
+    - [Margins](@ref debugging_memory_usage_margins)
+    - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
+  - \subpage record_and_replay
+- \subpage usage_patterns
+  - [Common mistakes](@ref usage_patterns_common_mistakes)
+  - [Simple patterns](@ref usage_patterns_simple)
+  - [Advanced patterns](@ref usage_patterns_advanced)
+- \subpage configuration
+  - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
+  - [Custom host memory allocator](@ref custom_memory_allocator)
+  - [Device memory allocation callbacks](@ref allocation_callbacks)
+  - [Device heap memory limit](@ref heap_memory_limit)
+  - \subpage vk_khr_dedicated_allocation
+  - \subpage enabling_buffer_device_address
+  - \subpage vk_amd_device_coherent_memory
+- \subpage general_considerations
+  - [Thread safety](@ref general_considerations_thread_safety)
+  - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
+  - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
+  - [Features not supported](@ref general_considerations_features_not_supported)
+
+\section main_see_also See also
+
+- [Product page on GPUOpen](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
+- [Source repository on GitHub](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
+
+
+
+
+\page quick_start Quick start
+
+\section quick_start_project_setup Project setup
+
+Vulkan Memory Allocator comes in the form of a "stb-style" single header file.
+You don't need to build it as a separate library project.
+You can add this file directly to your project and submit it to your code repository next to your other source files.
+
+"Single header" doesn't mean that everything is contained in C/C++ declarations,
+like it tends to be in case of inline functions or C++ templates.
+It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro.
+If you don't do it properly, you will get linker errors.
+
+To do it properly:
+
+-# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
+   This includes declarations of all members of the library.
+-# In exactly one CPP file define the following macro before this include.
+   It also enables internal definitions.
+
+\code
+#define VMA_IMPLEMENTATION
+#include "vk_mem_alloc.h"
+\endcode
+
+It may be a good idea to create a dedicated CPP file just for this purpose.
+
+Note on language: This library is written in C++, but has a C-compatible interface.
+Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full
+implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
+
+Please note that this library includes header `<vulkan/vulkan.h>`, which in turn
+includes `<windows.h>` on Windows. If you need some specific macros defined
+before including these headers (like `WIN32_LEAN_AND_MEAN` or
+`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
+them before every `#include` of this library.
+
+You may need to configure the way you import Vulkan functions.
+
+- By default, VMA assumes you link statically with the Vulkan API. If this is not the case,
+  `#define VMA_STATIC_VULKAN_FUNCTIONS 0` before the `#include` of the VMA implementation and use another way.
+- You can `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1` and make sure `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` globals are defined.
+  All the remaining Vulkan functions will be fetched automatically.
+- Finally, you can provide your own pointers to all Vulkan functions needed by VMA using the structure member
+  VmaAllocatorCreateInfo::pVulkanFunctions, if you fetched them in some custom way, e.g. using a loader like [Volk](https://github.com/zeux/volk).
+
+
+\section quick_start_initialization Initialization
+
+At program startup:
+
+-# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` object.
+-# Fill VmaAllocatorCreateInfo structure and create #VmaAllocator object by
+   calling vmaCreateAllocator().
+
+\code
+VmaAllocatorCreateInfo allocatorInfo = {};
+allocatorInfo.vulkanApiVersion = VK_API_VERSION_1_2;
+allocatorInfo.physicalDevice = physicalDevice;
+allocatorInfo.device = device;
+allocatorInfo.instance = instance;
+
+VmaAllocator allocator;
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+Only members `physicalDevice`, `device`, `instance` are required.
+However, you should inform the library which Vulkan version you use by setting
+VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled
+by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
+Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions.
+
+
+\section quick_start_resource_allocation Resource allocation
+
+When you want to create a buffer or image:
+
+-# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
+-# Fill VmaAllocationCreateInfo structure.
+-# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory
+   already allocated and bound to it.
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+Don't forget to destroy your objects when no longer needed:
+
+\code
+vmaDestroyBuffer(allocator, buffer, allocation);
+vmaDestroyAllocator(allocator);
+\endcode
+
+
+\page choosing_memory_type Choosing memory type
+
+Physical devices in Vulkan support various combinations of memory heaps and
+types. Help with choosing the correct and optimal memory type for your specific
+resource is one of the key features of this library. You can use it by filling
+appropriate members of the VmaAllocationCreateInfo structure, as described below.
+You can also combine multiple methods.
+
+-# If you just want to find a memory type index that meets your requirements, you
+   can use one of the functions: vmaFindMemoryTypeIndex(), vmaFindMemoryTypeIndexForBufferInfo(),
+   vmaFindMemoryTypeIndexForImageInfo().
+-# If you want to allocate a region of device memory without association with any
+   specific image or buffer, you can use function vmaAllocateMemory(). Usage of
+   this function is not recommended and usually not needed.
+   vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
+   which may be useful for sparse binding.
+-# If you already have a buffer or an image created, want to allocate memory
+   for it, and will bind it yourself, you can use functions
+   vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage().
+   For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory()
+   or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2().
+-# If you want to create a buffer or an image, allocate memory for it and bind
+   them together, all in one call, you can use functions vmaCreateBuffer(),
+   vmaCreateImage(). This is the easiest and recommended way to use this library.
+
+When using 3. or 4., the library internally queries Vulkan for memory types
+supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
+and uses only one of these types.
+
+If no memory type can be found that meets all the requirements, these functions
+return `VK_ERROR_FEATURE_NOT_PRESENT`.
+
+You can leave the VmaAllocationCreateInfo structure completely filled with zeros.
+It means no requirements are specified for the memory type.
+It is valid, although not very useful.
+
+\section choosing_memory_type_usage Usage
+
+The easiest way to specify memory requirements is to fill member
+VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
+It defines high level, common usage types.
+For more details, see description of this enum.
+
+For example, if you want to create a uniform buffer that will be filled using
+transfer only once or infrequently and used for rendering every frame, you can
+do it using the following code:
+
+\code
+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufferInfo.size = 65536;
+bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+\section choosing_memory_type_required_preferred_flags Required and preferred flags
+
+You can specify more detailed requirements by filling members
+VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
+with a combination of bits from enum `VkMemoryPropertyFlags`. For example,
+if you want to create a buffer that will be persistently mapped on host (so it
+must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
+use the following code:
+
+\code
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+allocInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+A memory type is chosen that has all the required flags and as many preferred
+flags set as possible.
+
+If you use VmaAllocationCreateInfo::usage, it is just internally converted to
+a set of required and preferred flags.
+
+\section choosing_memory_type_explicit_memory_types Explicit memory types
+
+If you inspected memory types available on the physical device and you have
+a preference for memory types that you want to use, you can fill member
+VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
+means that a memory type with that index is allowed to be used for the
+allocation. Special value 0, just like `UINT32_MAX`, means there are no
+restrictions to memory type index.
+
+Please note that this member is NOT just a memory type index.
+Still you can use it to choose just one, specific memory type.
+For example, if you already determined that your buffer should be created in
+memory type 2, use the following code:
+
+\code
+uint32_t memoryTypeIndex = 2;
+
+VmaAllocationCreateInfo allocInfo = {};
+allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
+\endcode
+
+
+\section choosing_memory_type_custom_memory_pools Custom memory pools
+
+If you allocate from a custom memory pool, all the ways of specifying memory
+requirements described above are not applicable and the aforementioned members
+of the VmaAllocationCreateInfo structure are ignored. Memory type is selected
+explicitly when creating the pool and then used to make all the allocations from
+that pool. For further details, see \ref custom_memory_pools.
+
+\section choosing_memory_type_dedicated_allocations Dedicated allocations
+
+Memory for allocations is reserved out of a larger block of `VkDeviceMemory`
+allocated from Vulkan internally. That's the main feature of this whole library.
+You can still request a separate memory block to be created for an allocation,
+just like you would do in a trivial solution without using any allocator.
+In that case, a buffer or image is always bound to that memory at offset 0.
+This is called a "dedicated allocation".
+You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+The library can also internally decide to use dedicated allocation in some cases, e.g.:
+
+- When the size of the allocation is large.
+- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
+  and it reports that a dedicated allocation is required or recommended for the resource.
+- When allocation of the next big memory block fails due to not enough device memory,
+  but allocation with the exact requested size succeeds.
+
+
+\page memory_mapping Memory mapping
+
+To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
+to be able to read from it or write to it in CPU code.
+Mapping is possible only of memory allocated from a memory type that has
+the `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
+Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
+You can use them directly with memory allocated by this library,
+but it is not recommended because of the following issue:
+Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
+This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
+Because of this, Vulkan Memory Allocator provides the following facilities:
+
+\section memory_mapping_mapping_functions Mapping functions
+
+The library provides the following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
+They are safer and more convenient to use than standard Vulkan functions.
+You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
+You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
+The way it's implemented is that the library always maps the entire memory block, not just the region of the allocation.
+For further details, see description of vmaMapMemory() function.
+Example:
+
+\code
+// Having these objects initialized:
+
+struct ConstantBuffer
+{
+    ...
+};
+ConstantBuffer constantBufferData;
+
+VmaAllocator allocator;
+VkBuffer constantBuffer;
+VmaAllocation constantBufferAllocation;
+
+// You can map and fill your buffer using following code:
+
+void* mappedData;
+vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
+memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+vmaUnmapMemory(allocator, constantBufferAllocation);
+\endcode
+
+When mapping, you may see a warning from the Vulkan validation layer similar to this one:
+
+Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.
+
+It happens because the library maps the entire `VkDeviceMemory` block, where different
+types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
+You can safely ignore it if you are sure you access only memory of the intended
+object that you wanted to map.
+
+
+\section memory_mapping_persistently_mapped_memory Persistently mapped memory
+
+Keeping your memory persistently mapped is generally OK in Vulkan.
+You don't need to unmap it before using its data on the GPU.
+The library provides a special feature designed for that:
+Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
+VmaAllocationCreateInfo::flags stay mapped all the time,
+so you can just access the CPU pointer to it any time
+without a need to call any "map" or "unmap" function.
+Example:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+// Buffer is already mapped. You can access its memory.
+memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+\endcode
+
+There are some exceptions though, when you should consider mapping memory only for a short period of time:
+
+- When the operating system is Windows 7 or 8.x (Windows 10 is not affected because it uses WDDM2),
+  the device is a discrete AMD GPU,
+  and the memory type is the special 256 MiB pool of `DEVICE_LOCAL + HOST_VISIBLE` memory
+  (selected when you use #VMA_MEMORY_USAGE_CPU_TO_GPU),
+  then whenever a memory block allocated from this memory type stays mapped
+  for the time of any call to `vkQueueSubmit()` or `vkQueuePresentKHR()`, this
+  block is migrated by WDDM to system RAM, which degrades performance. It doesn't
+  matter if that particular memory block is actually used by the command buffer
+  being submitted.
+- On Mac/MoltenVK there is a known bug - [Issue #175](https://github.com/KhronosGroup/MoltenVK/issues/175)
+  which requires unmapping before the GPU can see an updated texture.
+- Keeping many large memory blocks mapped may impact performance or stability of some debugging tools.
+
+\section memory_mapping_cache_control Cache flush and invalidate
+
+Memory in Vulkan doesn't need to be unmapped before using it on the GPU,
+but unless a memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
+you need to manually **invalidate** the cache before reading a mapped pointer
+and **flush** the cache after writing to a mapped pointer.
+Map/unmap operations don't do that automatically.
+Vulkan provides the following functions for this purpose: `vkFlushMappedMemoryRanges()`,
+`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
+functions that refer to a given allocation object: vmaFlushAllocation(),
+vmaInvalidateAllocation(),
+or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
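+
+For illustration, here is a minimal sketch of the flush side (assuming `allocator`,
+`constantBufferData` as in the earlier examples, and an allocation `alloc` that lives
+in a `HOST_VISIBLE` but non-`HOST_COHERENT` memory type):
+
+\code
+// Write through a mapped pointer, then flush so the device sees the data.
+void* mappedData;
+vmaMapMemory(allocator, alloc, &mappedData);
+memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+vmaUnmapMemory(allocator, alloc);
+
+// Offset 0 with size VK_WHOLE_SIZE covers the entire allocation.
+vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE);
+\endcode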
+
+Regions of memory specified for flush/invalidate must be aligned to
+`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
+In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
+within blocks are aligned to this value, so their offsets are always a multiple of
+`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
+
+Please note that memory allocated with #VMA_MEMORY_USAGE_CPU_ONLY is guaranteed to be `HOST_COHERENT`.
+
+Also, Windows drivers from all 3 **PC** GPU vendors (AMD, Intel, NVIDIA)
+currently provide the `HOST_COHERENT` flag on all memory types that are
+`HOST_VISIBLE`, so on this platform you may not need to bother.
+
+\section memory_mapping_finding_if_memory_mappable Finding out if memory is mappable
+
+It may happen that your allocation ends up in memory that is `HOST_VISIBLE` (available for mapping)
+despite it not being explicitly requested.
+For example, the application may work on integrated graphics with unified memory (like Intel), or
+allocation from video memory might have failed, so the library chose system memory as fallback.
+
+You can detect this case and map such an allocation to access its memory on the CPU directly,
+instead of launching a transfer operation.
+In order to do that: inspect `allocInfo.memoryType`, call vmaGetMemoryTypeProperties(),
+and look for `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag in properties of that memory type.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+VkMemoryPropertyFlags memFlags;
+vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
+if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+{
+    // Allocation ended up in mappable memory. You can map it and access it directly.
+    void* mappedData;
+    vmaMapMemory(allocator, alloc, &mappedData);
+    memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
+    vmaUnmapMemory(allocator, alloc);
+}
+else
+{
+    // Allocation ended up in non-mappable memory.
+    // You need to create a CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
+}
+\endcode
+
+You can even use #VMA_ALLOCATION_CREATE_MAPPED_BIT flag while creating allocations
+that are not necessarily `HOST_VISIBLE` (e.g. using #VMA_MEMORY_USAGE_GPU_ONLY).
+If the allocation ends up in a memory type that is `HOST_VISIBLE`, it will be persistently mapped and you can use it directly.
+If not, the flag is just ignored.
+Example:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = sizeof(ConstantBuffer);
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+if(allocInfo.pMappedData != nullptr)
+{
+    // Allocation ended up in mappable memory.
+    // It's persistently mapped. You can access it directly.
+    memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
+}
+else
+{
+    // Allocation ended up in non-mappable memory.
+    // You need to create a CPU-side buffer in VMA_MEMORY_USAGE_CPU_ONLY and make a transfer.
+}
+\endcode
+
+
+\page staying_within_budget Staying within budget
+
+When developing a graphics-intensive game or program, it is important to avoid allocating
+more GPU memory than is physically available. When memory is over-committed,
+various bad things can happen, depending on the specific GPU, graphics driver, and
+operating system:
+
+- It may just work without any problems.
+- The application may slow down because some memory blocks are moved to system RAM
+  and the GPU has to access them through the PCI Express bus.
+- A new allocation may take a very long time to complete, even a few seconds, and possibly
+  freeze the entire system.
+- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+- It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
+  returned somewhere later.
+
+\section staying_within_budget_querying_for_budget Querying for budget
+
+To query for current memory usage and available budget, use function vmaGetBudget(),
+as sketched at the end of this section.
+The returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
+
+Please note that this function returns different information and works faster than
+vmaCalculateStats(). vmaGetBudget() can be called every frame or even before every
+allocation, while vmaCalculateStats() is intended to be used rarely,
+only to obtain statistical information, e.g. for debugging purposes.
+
+It is recommended to use the VK_EXT_memory_budget device extension to obtain information
+about the budget from the Vulkan device. VMA is able to use this extension automatically.
+When not enabled, the allocator behaves the same way, but then it estimates current usage
+and available budget based on its internal information and Vulkan memory heap sizes,
+which may be less precise. In order to use this extension:
+
+1. Make sure extension VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
+   required by it are available and enable them. Please note that the first is a device
+   extension and the second is an instance extension!
+2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.
+3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from
+   Vulkan inside of it to avoid overhead of querying it with every allocation.
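+
+For illustration, a minimal sketch of polling the budget (assuming a valid `allocator`;
+vmaGetBudget() fills one #VmaBudget entry per memory heap):
+
+\code
+VmaBudget budgets[VK_MAX_MEMORY_HEAPS] = {};
+vmaGetBudget(allocator, budgets);
+
+const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
+vmaGetMemoryProperties(allocator, &memProps);
+for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
+{
+    // `usage` is the current number of bytes in use; `budget` is the estimated
+    // amount this process can use without degrading performance.
+    printf("Heap %u: %llu / %llu B\n", heapIndex,
+        (unsigned long long)budgets[heapIndex].usage,
+        (unsigned long long)budgets[heapIndex].budget);
+}
+\endcode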
+
+\section staying_within_budget_controlling_memory_usage Controlling memory usage
+
+There are many ways in which you can try to stay within the budget.
+
+First, when making a new allocation requires allocating a new memory block, the library
+tries not to exceed the budget automatically. If a block with the default recommended size
+(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
+dedicated memory for just this resource.
+
+If the size of the requested resource plus current memory usage is more than the
+budget, by default the library still tries to create it, leaving it to the Vulkan
+implementation whether the allocation succeeds or fails. You can change this behavior
+by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
+not made if it would exceed the budget or if the budget is already exceeded.
+Some other allocations become lost instead to make room for it, if the mechanism of
+[lost allocations](@ref lost_allocations) is used.
+If that is not possible, the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+An example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
+when creating resources that are not essential for the application (e.g. the texture
+of a specific object) and not to pass it when creating critically important resources
+(e.g. render targets).
+
+Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
+a new allocation is created only when it fits inside one of the existing memory blocks.
+If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+This also ensures that the function call is very fast because it never goes to Vulkan
+to obtain a new block.
+
+Please note that creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
+set to more than 0 will try to allocate memory blocks without checking whether they
+fit within budget.
+
+
+\page resource_aliasing Resource aliasing (overlap)
+
+New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
+management, give an opportunity to alias (overlap) multiple resources in the
+same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
+It can be useful to save video memory, but it must be used with caution.
+
+For example, if you know the flow of your whole render frame in advance, you
+are going to use some intermediate textures or buffers only during a small range of render passes,
+and you know these ranges don't overlap in time, you can bind these resources to
+the same place in memory, even if they have completely different parameters (width, height, format etc.).
+
+![Resource aliasing (overlap)](../gfx/Aliasing.png)
+
+Such a scenario is possible using VMA, but you need to create your images manually.
+Then you need to calculate parameters of an allocation to be made using formula:
+
+- allocation size = max(size of each image)
+- allocation alignment = max(alignment of each image)
+- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
+
+The following example shows two different images bound to the same place in memory,
+allocated to fit the largest of them.
+
+\code
+// A 512x512 texture to be sampled.
+VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img1CreateInfo.extent.width = 512;
+img1CreateInfo.extent.height = 512;
+img1CreateInfo.extent.depth = 1;
+img1CreateInfo.mipLevels = 10;
+img1CreateInfo.arrayLayers = 1;
+img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
+img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+// A full screen texture to be used as color attachment.
+VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img2CreateInfo.extent.width = 1920;
+img2CreateInfo.extent.height = 1080;
+img2CreateInfo.extent.depth = 1;
+img2CreateInfo.mipLevels = 1;
+img2CreateInfo.arrayLayers = 1;
+img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VkImage img1;
+res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
+VkImage img2;
+res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
+
+VkMemoryRequirements img1MemReq;
+vkGetImageMemoryRequirements(device, img1, &img1MemReq);
+VkMemoryRequirements img2MemReq;
+vkGetImageMemoryRequirements(device, img2, &img2MemReq);
+
+VkMemoryRequirements finalMemReq = {};
+finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
+finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
+finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
+// Validate if(finalMemReq.memoryTypeBits != 0)
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VmaAllocation alloc;
+res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
+
+res = vmaBindImageMemory(allocator, alloc, img1);
+res = vmaBindImageMemory(allocator, alloc, img2);
+
+// You can use img1, img2 here, but not at the same time!
+
+vmaFreeMemory(allocator, alloc);
+vkDestroyImage(device, img2, nullptr);
+vkDestroyImage(device, img1, nullptr);
+\endcode
+
+Remember that using resources that alias in memory requires proper synchronization.
+You need to issue a memory barrier to make sure commands that use `img1` and `img2`
+don't overlap on the GPU timeline.
+You also need to treat a resource after aliasing as uninitialized - containing garbage data.
+For example, if you use `img1` and then want to use `img2`, you need to issue
+an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
+
+Additional considerations:
+
+- Vulkan also allows interpreting the contents of memory between aliasing resources consistently in some cases.
+  See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag.
+- You can create a more complex layout where different images and buffers are bound
+  at different offsets inside one large allocation. For example, one can imagine
+  a big texture used in some render passes, aliasing with a set of many small buffers
+  used in some further passes in between. To bind a resource at a non-zero offset of an allocation,
+  use vmaBindBufferMemory2() / vmaBindImageMemory2(), as sketched after this list.
+- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
+  returned in memory requirements of each resource to make sure the bits overlap.
+  Some GPUs may expose multiple memory types suitable e.g. only for buffers or
+  images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
+  resources may be disjoint. Aliasing them is not possible in that case.
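+
+As promised above, a minimal sketch of binding at a non-zero offset (names and the
+offset value are hypothetical, not taken from the example above): the offset must
+satisfy the buffer's alignment requirement, and the allocation's memory type must be
+allowed by the buffer's `memoryTypeBits`.
+
+\code
+// Hypothetical offset; it must respect VkMemoryRequirements::alignment of `buf`.
+VkDeviceSize bufferOffset = 65536;
+res = vmaBindBufferMemory2(allocator, alloc, bufferOffset, buf, nullptr);
+\endcode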
+
+
+\page custom_memory_pools Custom memory pools
+
+A memory pool contains a number of `VkDeviceMemory` blocks.
+The library automatically creates and manages a default pool for each memory type available on the device.
+The default memory pool automatically grows in size.
+The size of allocated blocks is also variable and managed automatically.
+
+You can create custom pools and allocate memory out of them.
+It can be useful if you want to:
+
+- Keep certain kinds of allocations separate from others.
+- Enforce a particular, fixed size of Vulkan memory blocks.
+- Limit the maximum amount of Vulkan memory allocated for that pool.
+- Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.
+
+To use custom memory pools:
+
+-# Fill VmaPoolCreateInfo structure.
+-# Call vmaCreatePool() to obtain #VmaPool handle.
+-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
+   You don't need to specify any other parameters of this structure, like `usage`.
+
+Example:
+
+\code
+// Create a pool that can have at most 2 blocks, 128 MiB each.
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = ...
+poolCreateInfo.blockSize = 128ull * 1024 * 1024;
+poolCreateInfo.maxBlockCount = 2;
+
+VmaPool pool;
+vmaCreatePool(allocator, &poolCreateInfo, &pool);
+
+// Allocate a buffer out of it.
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 1024;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.pool = pool;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+\endcode
+
+You have to free all allocations made from this pool before destroying it.
+
+\code
+vmaDestroyBuffer(allocator, buf, alloc);
+vmaDestroyPool(allocator, pool);
+\endcode
+
+\section custom_memory_pools_MemTypeIndex Choosing memory type index
+
+When creating a pool, you must explicitly specify the memory type index.
+To find the one suitable for your buffers or images, you can use helper functions
+vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
+You need to provide structures with example parameters of buffers or images
+that you are going to create in that pool.
+
+\code
+VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+exampleBufCreateInfo.size = 1024; // Whatever.
+exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; // Change if needed.
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; // Change if needed.
+
+uint32_t memTypeIndex;
+vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
+
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+// ...
+\endcode
+
+When creating buffers/images allocated in that pool, provide the following parameters:
+
+- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
+  Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
+  Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
+  or the other way around.
+- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
+  Other members are ignored anyway.
+
+\section linear_algorithm Linear allocation algorithm
+
+Each Vulkan memory block managed by this library has accompanying metadata that
+keeps track of used and unused regions. By default, the metadata structure and
+algorithm tries to find the best place for new allocations among free regions to
+optimize memory usage. This way you can allocate and free objects in any order.
+
+![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
+
+Sometimes there is a need for a simpler, linear allocation algorithm. You can
+create a custom pool that uses such an algorithm by adding flag
+#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
+#VmaPool object. Then an alternative metadata management is used. It always
+creates new allocations after the last one and doesn't reuse free regions after
+allocations freed in the middle. It results in better allocation performance and
+less memory consumed by metadata.
+
+![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
+
+With this one flag, you can create a custom pool that can be used in many ways:
+free-at-once, stack, double stack, and ring buffer. See below for details.
+
+\subsection linear_algorithm_free_at_once Free-at-once
+
+In a pool that uses linear algorithm, you still need to free all the allocations
+individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
+them in any order. New allocations are always made after the last one - free space
+in the middle is not reused. However, when you release all the allocations and
+the pool becomes empty, allocation starts from the beginning again. This way you
+can use linear algorithm to speed up creation of allocations that you are going
+to release all at once.
+
+![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
+
+This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_stack Stack
+
+When you free an allocation that was created last, its space can be reused.
+Thanks to this, if you always release allocations in the order opposite to their
+creation (LIFO - Last In First Out), you can achieve behavior of a stack.
+
+![Stack](../gfx/Linear_allocator_4_stack.png)
+
+This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_double_stack Double stack
+
+The space reserved by a custom pool with linear algorithm may be used by two
+stacks:
+
+- First, default one, growing up from offset 0.
+- Second, "upper" one, growing down from the end towards lower offsets.
+
+To make an allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
+to VmaAllocationCreateInfo::flags, as in the sketch below.
+
+![Double stack](../gfx/Linear_allocator_7_double_stack.png)
+
+Double stack is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
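+
+A minimal sketch of an upper-stack allocation (assuming `pool` was created with
+#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT and maxBlockCount = 1, and `bufCreateInfo` is a
+filled `VkBufferCreateInfo` as in the earlier examples):
+
+\code
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.pool = pool;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+\endcode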
+
+When the two stacks' ends meet so there is not enough space between them for a
+new allocation, such an allocation fails with the usual
+`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+
+\subsection linear_algorithm_ring_buffer Ring buffer
+
+When you free some allocations from the beginning and there is not enough free space
+for a new one at the end of a pool, the allocator's "cursor" wraps around to the
+beginning and starts allocating there. Thanks to this, if you always release
+allocations in the same order as you created them (FIFO - First In First Out),
+you can achieve behavior of a ring buffer / queue.
+
+![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
+
+Pools with linear algorithm support [lost allocations](@ref lost_allocations) when used as ring buffer.
+If there is not enough free space for a new allocation, but existing allocations
+from the front of the queue can become lost, they become lost and the allocation
+succeeds.
+
+![Ring buffer with lost allocations](../gfx/Linear_allocator_6_ring_buffer_lost.png)
+
+Ring buffer is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
+
+\section buddy_algorithm Buddy allocation algorithm
+
+There is another allocation algorithm that can be used with custom pools, called
+"buddy". Its internal data structure is based on a tree of blocks, each having
+size that is a power of two and a half of its parent's size. When you want to
+allocate memory of a certain size, a free node in the tree is located. If it's too
+large, it is recursively split into two halves (called "buddies"). However, if the
+requested allocation size is not a power of two, the size of a tree node is
+aligned up to the nearest power of two and the remaining space is wasted. When
+two buddy nodes become free, they are merged back into one larger node.
+
+![Buddy allocator](../gfx/Buddy_allocator.png)
+
+The advantage of buddy allocation algorithm over default algorithm is faster
+allocation and deallocation, as well as smaller external fragmentation. The
+disadvantage is more wasted space (internal fragmentation).
+
+For more information, please read ["Buddy memory allocation" on Wikipedia](https://en.wikipedia.org/wiki/Buddy_memory_allocation)
+or other sources that describe this concept in general.
+
+To use buddy allocation algorithm with a custom pool, add flag
+#VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
+#VmaPool object, as shown in the sketch after the list below.
+
+Several limitations apply to pools that use buddy algorithm:
+
+- It is recommended to use VmaPoolCreateInfo::blockSize that is a power of two.
+  Otherwise, only the largest power of two smaller than the size is used for
+  allocations. The remaining space always stays unused.
+- [Margins](@ref debugging_memory_usage_margins) and
+  [corruption detection](@ref debugging_memory_usage_corruption_detection)
+  don't work in such pools.
+- [Lost allocations](@ref lost_allocations) don't work in such pools. You can
+  use them, but they never become lost. Support may be added in the future.
+- [Defragmentation](@ref defragmentation) doesn't work with allocations made from
+  such pool.
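+
+A minimal sketch of creating such a pool (assuming `memTypeIndex` was found with
+vmaFindMemoryTypeIndexForBufferInfo() as in \ref custom_memory_pools_MemTypeIndex):
+
+\code
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.blockSize = 64ull * 1024 * 1024; // A power of two, as recommended above.
+poolCreateInfo.flags = VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT;
+
+VmaPool pool;
+vmaCreatePool(allocator, &poolCreateInfo, &pool);
+\endcode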
+
+\page defragmentation Defragmentation
+
+Interleaved allocations and deallocations of many objects of varying size can
+cause fragmentation over time, which can lead to a situation where the library is unable
+to find a continuous range of free memory for a new allocation even though there is
+enough free space, just scattered across many small free ranges between existing
+allocations.
+
+To mitigate this problem, you can use the defragmentation feature:
+structure #VmaDefragmentationInfo2 and functions vmaDefragmentationBegin(), vmaDefragmentationEnd().
+Given a set of allocations,
+these functions can move them to compact used memory, ensure more continuous free
+space and possibly also free some `VkDeviceMemory` blocks.
+
+What the defragmentation does is:
+
+- Updates #VmaAllocation objects to point to new `VkDeviceMemory` and offset.
+  After an allocation has been moved, its VmaAllocationInfo::deviceMemory and/or
+  VmaAllocationInfo::offset changes. You must query them again using
+  vmaGetAllocationInfo() if you need them.
+- Moves actual data in memory.
+
+What it doesn't do, so you need to do it yourself:
+
+- Recreate buffers and images that were bound to allocations that were defragmented and
+  bind them to their new places in memory.
+  You must use `vkDestroyBuffer()`, `vkDestroyImage()`,
+  `vkCreateBuffer()`, `vkCreateImage()`, vmaBindBufferMemory(), vmaBindImageMemory()
+  for that purpose and NOT vmaDestroyBuffer(),
+  vmaDestroyImage(), vmaCreateBuffer(), vmaCreateImage(), because you don't need to
+  destroy or create allocation objects!
+- Recreate views and update descriptors that point to these buffers and images.
+
+\section defragmentation_cpu Defragmenting CPU memory
+
+The following example demonstrates how you can run defragmentation on CPU.
+Only allocations created in memory types that are `HOST_VISIBLE` can be defragmented.
+Others are ignored.
+
+The way it works is:
+
+- It temporarily maps entire memory blocks when necessary.
+- It moves data using the `memmove()` function.
+
+\code
+// Given the following variables already initialized:
+VkDevice device;
+VmaAllocator allocator;
+std::vector<VkBuffer> buffers;
+std::vector<VmaAllocation> allocations;
+
+const uint32_t allocCount = (uint32_t)allocations.size();
+std::vector<VkBool32> allocationsChanged(allocCount);
+
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocCount;
+defragInfo.pAllocations = allocations.data();
+defragInfo.pAllocationsChanged = allocationsChanged.data();
+defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE; // No limit.
+defragInfo.maxCpuAllocationsToMove = UINT32_MAX; // No limit.
+
+VmaDefragmentationContext defragCtx;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+vmaDefragmentationEnd(allocator, defragCtx);
+
+for(uint32_t i = 0; i < allocCount; ++i)
+{
+    if(allocationsChanged[i])
+    {
+        // Destroy buffer that is immutably bound to memory region which is no longer valid.
+        vkDestroyBuffer(device, buffers[i], nullptr);
+
+        // Create new buffer with same parameters.
+        VkBufferCreateInfo bufferInfo = ...;
+        vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
+
+        // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
+
+        // Bind new buffer to new memory region. Data contained in it is already moved.
+        VmaAllocationInfo allocInfo;
+        vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
+        vmaBindBufferMemory(allocator, allocations[i], buffers[i]);
+    }
+}
+\endcode
+
+Setting VmaDefragmentationInfo2::pAllocationsChanged is optional.
+This output array tells whether a particular allocation in VmaDefragmentationInfo2::pAllocations at the same index
+has been modified during defragmentation.
+You can pass null, but you then need to query every allocation passed to defragmentation
+for new parameters using vmaGetAllocationInfo() if you might need to recreate and rebind a buffer or image associated with it.
+
+If you use [Custom memory pools](@ref choosing_memory_type_custom_memory_pools),
+you can fill VmaDefragmentationInfo2::poolCount and VmaDefragmentationInfo2::pPools
+instead of VmaDefragmentationInfo2::allocationCount and VmaDefragmentationInfo2::pAllocations
+to defragment all allocations in the given pools.
+You cannot use VmaDefragmentationInfo2::pAllocationsChanged in that case.
+You can also combine both methods.
+
+\section defragmentation_gpu Defragmenting GPU memory
+
+It is also possible to defragment allocations created in memory types that are not `HOST_VISIBLE`.
+To do that, you need to pass a command buffer that meets the requirements as described in
+VmaDefragmentationInfo2::commandBuffer. The way it works is:
+
+- It creates temporary buffers and binds them to entire memory blocks when necessary.
+- It issues `vkCmdCopyBuffer()` to the passed command buffer.
+
+Example:
+
+\code
+// Given the following variables already initialized:
+VkDevice device;
+VmaAllocator allocator;
+VkCommandBuffer commandBuffer;
+std::vector<VkBuffer> buffers;
+std::vector<VmaAllocation> allocations;
+
+const uint32_t allocCount = (uint32_t)allocations.size();
+std::vector<VkBool32> allocationsChanged(allocCount);
+
+VkCommandBufferBeginInfo cmdBufBeginInfo = ...;
+vkBeginCommandBuffer(commandBuffer, &cmdBufBeginInfo);
+
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.allocationCount = allocCount;
+defragInfo.pAllocations = allocations.data();
+defragInfo.pAllocationsChanged = allocationsChanged.data();
+defragInfo.maxGpuBytesToMove = VK_WHOLE_SIZE; // Notice it's "GPU" this time.
+defragInfo.maxGpuAllocationsToMove = UINT32_MAX; // Notice it's "GPU" this time.
+defragInfo.commandBuffer = commandBuffer;
+
+VmaDefragmentationContext defragCtx;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+
+vkEndCommandBuffer(commandBuffer);
+
+// Submit commandBuffer.
+// Wait for a fence that ensures commandBuffer execution finished.
+
+vmaDefragmentationEnd(allocator, defragCtx);
+
+for(uint32_t i = 0; i < allocCount; ++i)
+{
+    if(allocationsChanged[i])
+    {
+        // Destroy buffer that is immutably bound to memory region which is no longer valid.
+        vkDestroyBuffer(device, buffers[i], nullptr);
+
+        // Create new buffer with same parameters.
+        VkBufferCreateInfo bufferInfo = ...;
+        vkCreateBuffer(device, &bufferInfo, nullptr, &buffers[i]);
+
+        // You can make dummy call to vkGetBufferMemoryRequirements here to silence validation layer warning.
+
+        // Bind new buffer to new memory region. Data contained in it is already moved.
+        VmaAllocationInfo allocInfo;
+        vmaGetAllocationInfo(allocator, allocations[i], &allocInfo);
+        vmaBindBufferMemory(allocator, allocations[i], buffers[i]);
+    }
+}
+\endcode
+
+You can combine these two methods by specifying non-zero `maxGpu*` as well as `maxCpu*` parameters.
+The library automatically chooses the best method to defragment each memory pool.
+
+You may try not to block your entire program to wait until defragmentation finishes,
+but do it in the background, as long as you carefully fulfill the requirements described
+in function vmaDefragmentationBegin().
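+
+As mentioned above, instead of listing individual allocations you can also defragment
+whole custom pools. A minimal sketch, assuming `pool` is an existing #VmaPool:
+
+\code
+VmaPool pools[] = { pool };
+
+VmaDefragmentationInfo2 defragInfo = {};
+defragInfo.poolCount = 1;
+defragInfo.pPools = pools;
+defragInfo.maxCpuBytesToMove = VK_WHOLE_SIZE;
+defragInfo.maxCpuAllocationsToMove = UINT32_MAX;
+
+VmaDefragmentationContext defragCtx;
+vmaDefragmentationBegin(allocator, &defragInfo, nullptr, &defragCtx);
+vmaDefragmentationEnd(allocator, defragCtx);
+
+// pAllocationsChanged cannot be used with pPools - query each allocation with
+// vmaGetAllocationInfo() for its new VkDeviceMemory and offset before rebinding.
+\endcode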
+
+\section defragmentation_additional_notes Additional notes
+
+It is only legal to defragment allocations bound to:
+
+- buffers
+- images created with `VK_IMAGE_CREATE_ALIAS_BIT`, `VK_IMAGE_TILING_LINEAR`, and
+  being currently in `VK_IMAGE_LAYOUT_GENERAL` or `VK_IMAGE_LAYOUT_PREINITIALIZED`.
+
+Defragmentation of images created with `VK_IMAGE_TILING_OPTIMAL` or in any other
+layout may give undefined results.
+
+If you defragment allocations bound to images, new images to be bound to the new
+memory region after defragmentation should be created with `VK_IMAGE_LAYOUT_PREINITIALIZED`
+and then transitioned to their original layout from before defragmentation if
+needed using an image memory barrier.
+
+While using defragmentation, you may experience validation layer warnings, which you just need to ignore.
+See [Validation layer warnings](@ref general_considerations_validation_layer_warnings).
+
+Please don't expect memory to be fully compacted after defragmentation.
+The algorithms inside are based on heuristics that try to maximize the number of Vulkan
+memory blocks that can be made totally empty and released, as well as to maximize the continuous
+empty space inside remaining blocks, while minimizing the number and size of allocations that
+need to be moved. Some fragmentation may still remain - this is normal.
+
+\section defragmentation_custom_algorithm Writing custom defragmentation algorithm
+
+If you want to implement your own, custom defragmentation algorithm,
+there is infrastructure prepared for that,
+but it is not exposed through the library API - you need to hack its source code.
+Here are the steps needed to do this:
+
+-# The main thing you need to do is to define your own class derived from the base abstract
+   class `VmaDefragmentationAlgorithm` and implement your version of its pure virtual methods.
+   See definition and comments of this class for details.
+-# Your code needs to interact with device memory block metadata.
+   If you need more access to its data than is provided by its public interface,
+   declare your new class as a friend class e.g. in class `VmaBlockMetadata_Generic`.
+-# If you want to create a flag that would enable your algorithm or pass some additional
+   flags to configure it, add them to `VmaDefragmentationFlagBits` and use them in
+   VmaDefragmentationInfo2::flags.
+-# Modify function `VmaBlockVectorDefragmentationContext::Begin` to create an object
+   of your new class whenever needed.
+
+
+\page lost_allocations Lost allocations
+
+If your game oversubscribes video memory, it may work OK in previous-generation
+graphics APIs (DirectX 9, 10, 11, OpenGL) because resources are automatically
+paged to system RAM. In Vulkan you can't rely on that, because when you run out of
+memory, an allocation just fails. If you have more data (e.g. textures) than can
+fit into VRAM and you don't need it all at once, you may want to upload them to
+GPU on demand and "push out" the ones that are not used for a long time to make room
+for the new ones, effectively using VRAM (or a certain memory pool) as a form of
+cache. Vulkan Memory Allocator can help you with that by supporting a concept of
+"lost allocations".
+
+To create an allocation that can become lost, include #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT
+flag in VmaAllocationCreateInfo::flags. Before using a buffer or image bound to
+such allocation in every new frame, you need to check whether it's not lost.
+To check it, call vmaTouchAllocation().
+If the allocation is lost, you should not use it or the buffer/image bound to it.
+Don't forget that you still need to destroy this allocation and this buffer/image.
+vmaGetAllocationInfo() can also be used for checking status of the allocation.
+An allocation is lost when the returned VmaAllocationInfo::deviceMemory == `VK_NULL_HANDLE`.
+
+To create an allocation that can make some other allocations lost to make room
+for it, use #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag. You will
+usually use both flags #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT and
+#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT at the same time.
+
+Warning! The current implementation uses a quite naive, brute-force algorithm,
+which can make allocation calls that use #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT
+flag quite slow. A new, more optimal algorithm and data structure to speed this
+up is planned for the future.
+
+Q: When interleaving creation of new allocations with usage of existing ones,
+how do you make sure that an allocation won't become lost while it's used in the
+current frame?
+
+It is ensured because vmaTouchAllocation() / vmaGetAllocationInfo() not only return allocation
+status/parameters and check whether the allocation is not lost, but when it's not, they also
+atomically mark it as used in the current frame, which makes it impossible to
+become lost in that frame. They use a lockless algorithm, so this works fast and
+doesn't involve locking any internal mutex.
+
+Q: What if my allocation may still be in use by the GPU when it's rendering a
+previous frame while I already submit new frame on the CPU?
+
+You can make sure that allocations "touched" by vmaTouchAllocation() / vmaGetAllocationInfo() will not
+become lost for a number of additional frames back from the current one by
+specifying this number as VmaAllocatorCreateInfo::frameInUseCount (for default
+memory pool) and VmaPoolCreateInfo::frameInUseCount (for custom pool).
+
+Q: How do you inform the library when a new frame starts?
+
+You need to call function vmaSetCurrentFrameIndex().
+
+Example code:
+
+\code
+struct MyBuffer
+{
+    VkBuffer m_Buf = nullptr;
+    VmaAllocation m_Alloc = nullptr;
+
+    // Called when the buffer is really needed in the current frame.
+    void EnsureBuffer();
+};
+
+void MyBuffer::EnsureBuffer()
+{
+    // Buffer has been created.
+    if(m_Buf != VK_NULL_HANDLE)
+    {
+        // Check if its allocation is not lost + mark it as used in current frame.
+        if(vmaTouchAllocation(allocator, m_Alloc))
+        {
+            // It's all OK - safe to use m_Buf.
+            return;
+        }
+    }
+
+    // Buffer doesn't yet exist or is lost - destroy and recreate it.
+
+    vmaDestroyBuffer(allocator, m_Buf, m_Alloc);
+
+    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    bufCreateInfo.size = 1024;
+    bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT |
+        VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT;
+
+    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &m_Buf, &m_Alloc, nullptr);
+}
+\endcode
+
+When using lost allocations, you may see some Vulkan validation layer warnings
+about overlapping regions of memory bound to different kinds of buffers and
+images. This is still valid as long as you implement proper handling of lost
+allocations (like in the example above) and don't use them.
+
+You can create an allocation that is already in lost state from the beginning using function
+vmaCreateLostAllocation(). It may be useful if you need a "dummy" allocation that is not null.
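+
+A minimal sketch of creating and releasing such a dummy allocation:
+
+\code
+VmaAllocation dummyAlloc;
+vmaCreateLostAllocation(allocator, &dummyAlloc);
+
+// ... use it where a non-null, always-lost allocation is needed ...
+
+vmaFreeMemory(allocator, dummyAlloc); // Destroy it like any other allocation.
+\endcode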
+
+You can call function vmaMakePoolAllocationsLost() to set all eligible allocations
+in a specified custom pool to lost state.
+Allocations that have been "touched" in the current frame or VmaPoolCreateInfo::frameInUseCount frames back
+cannot become lost.
+
+Q: Can I touch an allocation that cannot become lost?
+
+Yes, although it has no visible effect.
+Calls to vmaGetAllocationInfo() and vmaTouchAllocation() update the last use frame index
+also for allocations that cannot become lost, but the only way to observe it is to dump the
+internal allocator state using vmaBuildStatsString().
+You can use this feature for debugging purposes to explicitly mark allocations that you use
+in the current frame and then analyze the JSON dump to see for how long each allocation stays unused.
+
+
+\page statistics Statistics
+
+This library contains functions that return information about its internal state,
+especially the amount of memory allocated from Vulkan.
+Please keep in mind that these functions need to traverse all internal data structures
+to gather this information, so they may be quite time-consuming.
+Don't call them too often.
+
+\section statistics_numeric_statistics Numeric statistics
+
+You can query for overall statistics of the allocator using function vmaCalculateStats().
+Information is returned in structure #VmaStats.
+It contains #VmaStatInfo - number of allocated blocks, number of allocations
+(occupied ranges in these blocks), number of unused (free) ranges in these blocks,
+number of bytes used and unused (but still allocated from Vulkan) and other information.
+These statistics are summed across memory heaps and memory types, and totaled for the whole allocator.
+
+You can query for statistics of a custom pool using function vmaGetPoolStats().
+Information is returned in structure #VmaPoolStats.
+
+You can query for information about a specific allocation using function vmaGetAllocationInfo().
+It fills structure #VmaAllocationInfo.
+
+\section statistics_json_dump JSON dump
+
+You can dump the internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
+The result is guaranteed to be correct JSON.
+It uses ANSI encoding.
+Any strings provided by user (see [Allocation names](@ref allocation_names))
+are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
+this JSON string can be treated as using this encoding.
+It must be freed using function vmaFreeStatsString().
+
+The format of this JSON string is not part of official documentation of the library,
+but it will not change in a backward-incompatible way without increasing the library major version number
+and an appropriate mention in the changelog.
+
+The JSON string contains all the data that can be obtained using vmaCalculateStats().
+It can also contain detailed map of allocated memory blocks and their regions -
+free and occupied by allocations.
+This allows e.g. to visualize the memory or assess fragmentation.
+
+
+\page allocation_annotation Allocation names and user data
+
+\section allocation_user_data Allocation user data
+
+You can annotate allocations with your own information, e.g. for debugging purposes.
+To do that, fill VmaAllocationCreateInfo::pUserData field when creating
+an allocation. It's an opaque `void*` pointer. You can use it e.g. as a pointer,
+some handle, index, key, ordinal number or any other value that would associate
+the allocation with your custom metadata.
+ +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +// Fill bufferInfo... + +MyBufferMetadata* pMetadata = CreateBufferMetadata(); + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; +allocCreateInfo.pUserData = pMetadata; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr); +\endcode + +The pointer may be later retrieved as VmaAllocationInfo::pUserData: + +\code +VmaAllocationInfo allocInfo; +vmaGetAllocationInfo(allocator, allocation, &allocInfo); +MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData; +\endcode + +It can also be changed using function vmaSetAllocationUserData(). + +Values of (non-zero) allocations' `pUserData` are printed in JSON report created by +vmaBuildStatsString(), in hexadecimal form. + +\section allocation_names Allocation names + +There is alternative mode available where `pUserData` pointer is used to point to +a null-terminated string, giving a name to the allocation. To use this mode, +set #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT flag in VmaAllocationCreateInfo::flags. +Then `pUserData` passed as VmaAllocationCreateInfo::pUserData or argument to +vmaSetAllocationUserData() must be either null or pointer to a null-terminated string. +The library creates internal copy of the string, so the pointer you pass doesn't need +to be valid for whole lifetime of the allocation. You can free it after the call. + +\code +VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO }; +// Fill imageInfo... + +std::string imageName = "Texture: "; +imageName += fileName; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT; +allocCreateInfo.pUserData = imageName.c_str(); + +VkImage image; +VmaAllocation allocation; +vmaCreateImage(allocator, &imageInfo, &allocCreateInfo, &image, &allocation, nullptr); +\endcode + +The value of `pUserData` pointer of the allocation will be different than the one +you passed when setting allocation's name - pointing to a buffer managed +internally that holds copy of the string. + +\code +VmaAllocationInfo allocInfo; +vmaGetAllocationInfo(allocator, allocation, &allocInfo); +const char* imageName = (const char*)allocInfo.pUserData; +printf("Image name: %s\n", imageName); +\endcode + +That string is also printed in JSON report created by vmaBuildStatsString(). + +\note Passing string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it. +You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library. + + +\page debugging_memory_usage Debugging incorrect memory usage + +If you suspect a bug with memory usage, like usage of uninitialized memory or +memory being overwritten out of bounds of an allocation, +you can use debug features of this library to verify this. + +\section debugging_memory_usage_initialization Memory initialization + +If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used, +you can enable automatic memory initialization to verify this. +To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1. 
+
+\code
+#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
+#include "vk_mem_alloc.h"
+\endcode
+
+It makes the memory of all new allocations initialized to bit pattern `0xDCDCDCDC`.
+Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
+Memory is automatically mapped and unmapped if necessary.
+
+If you find these values while debugging your program, chances are good that you incorrectly
+read Vulkan memory that is allocated but not initialized, or already freed, respectively.
+
+Memory initialization works only with memory types that are `HOST_VISIBLE`.
+It works also with dedicated allocations.
+It doesn't work with allocations created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+as they cannot be mapped.
+
+\section debugging_memory_usage_margins Margins
+
+By default, allocations are laid out in memory blocks next to each other if possible
+(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
+
+![Allocations without margin](../gfx/Margins_1.png)
+
+Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
+number of bytes as a margin before and after every allocation.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#include "vk_mem_alloc.h"
+\endcode
+
+![Allocations with margin](../gfx/Margins_2.png)
+
+If your bug goes away after enabling margins, it means it may be caused by memory
+being overwritten outside of allocation boundaries. It is not 100% certain though.
+A change in application behavior may also be caused by different order and distribution
+of allocations across memory blocks after margins are applied.
+
+The margin is applied also before the first and after the last allocation in a block.
+It may occur only once between two adjacent allocations.
+
+Margins work with all types of memory.
+
+The margin is applied only to allocations made out of memory blocks and not to dedicated
+allocations, which have their own memory block of specific size.
+It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
+or those automatically decided to put into dedicated allocations, e.g. due to their
+large size or when recommended by VK_KHR_dedicated_allocation extension.
+Margins are also not active in custom pools created with #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag.
+
+Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
+
+Note that enabling margins increases memory usage and fragmentation.
+
+\section debugging_memory_usage_corruption_detection Corruption detection
+
+You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
+of contents of the margins.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#define VMA_DEBUG_DETECT_CORRUPTION 1
+#include "vk_mem_alloc.h"
+\endcode
+
+When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
+(it must be a multiple of 4) before and after every allocation is filled with a magic number.
+This idea is also known as a "canary".
+Memory is automatically mapped and unmapped if necessary.
+
+This number is validated automatically when the allocation is destroyed.
+If it's not equal to the expected value, `VMA_ASSERT()` is executed.
+It clearly means that either the CPU or the GPU overwrote the memory outside of the boundaries of the allocation,
+which indicates a serious bug.
+ +You can also explicitly request checking margins of all allocations in all memory blocks +that belong to specified memory types by using function vmaCheckCorruption(), +or in memory blocks that belong to specified custom pool, by using function +vmaCheckPoolCorruption(). + +Margin validation (corruption detection) works only for memory types that are +`HOST_VISIBLE` and `HOST_COHERENT`. + + +\page record_and_replay Record and replay + +\section record_and_replay_introduction Introduction + +While using the library, sequence of calls to its functions together with their +parameters can be recorded to a file and later replayed using standalone player +application. It can be useful to: + +- Test correctness - check if same sequence of calls will not cause crash or + failures on a target platform. +- Gather statistics - see number of allocations, peak memory usage, number of + calls etc. +- Benchmark performance - see how much time it takes to replay the whole + sequence. + +\section record_and_replay_usage Usage + +Recording functionality is disabled by default. +To enable it, define following macro before every include of this library: + +\code +#define VMA_RECORDING_ENABLED 1 +\endcode + +To record sequence of calls to a file: Fill in +VmaAllocatorCreateInfo::pRecordSettings member while creating #VmaAllocator +object. File is opened and written during whole lifetime of the allocator. + +To replay file: Use VmaReplay - standalone command-line program. +Precompiled binary can be found in "bin" directory. +Its source can be found in "src/VmaReplay" directory. +Its project is generated by Premake. +Command line syntax is printed when the program is launched without parameters. +Basic usage: + + VmaReplay.exe MyRecording.csv + +Documentation of file format can be found in file: "docs/Recording file format.md". +It's a human-readable, text file in CSV format (Comma Separated Values). + +\section record_and_replay_additional_considerations Additional considerations + +- Replaying file that was recorded on a different GPU (with different parameters + like `bufferImageGranularity`, `nonCoherentAtomSize`, and especially different + set of memory heaps and types) may give different performance and memory usage + results, as well as issue some warnings and errors. +- Current implementation of recording in VMA, as well as VmaReplay application, is + coded and tested only on Windows. Inclusion of recording code is driven by + `VMA_RECORDING_ENABLED` macro. Support for other platforms should be easy to + add. Contributions are welcomed. + + +\page usage_patterns Recommended usage patterns + +See also slides from talk: +[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New) + + +\section usage_patterns_common_mistakes Common mistakes + +Use of CPU_TO_GPU instead of CPU_ONLY memory + +#VMA_MEMORY_USAGE_CPU_TO_GPU is recommended only for resources that will be +mapped and written by the CPU, as well as read directly by the GPU - like some +buffers or textures updated every frame (dynamic). If you create a staging copy +of a resource to be written by CPU and then used as a source of transfer to +another resource placed in the GPU memory, that staging resource should be +created with #VMA_MEMORY_USAGE_CPU_ONLY. Please read the descriptions of these +enums carefully for details. 
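+
+For example, such a staging buffer could be created like this (a minimal sketch -
+`dataSize` is assumed to be defined by your application; the transfer submission
+itself is omitted):
+
+\code
+VkBufferCreateInfo stagingBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+stagingBufInfo.size = dataSize;
+stagingBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo stagingAllocInfo = {};
+stagingAllocInfo.usage = VMA_MEMORY_USAGE_CPU_ONLY; // Not CPU_TO_GPU.
+
+VkBuffer stagingBuf;
+VmaAllocation stagingAlloc;
+vmaCreateBuffer(allocator, &stagingBufInfo, &stagingAllocInfo,
+    &stagingBuf, &stagingAlloc, nullptr);
+\endcode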
+
+Unnecessary use of custom pools
+
+\ref custom_memory_pools may be useful for special purposes - when you want to
+keep certain types of resources separate, e.g. to reserve a minimum amount of memory
+for them, limit the maximum amount of memory they can occupy, or make some of them
+push out the others through the mechanism of \ref lost_allocations. For most
+resources this is not needed and so it is not recommended to create #VmaPool
+objects and allocations out of them. Allocating from the default pool is sufficient.
+
+\section usage_patterns_simple Simple patterns
+
+\subsection usage_patterns_simple_render_targets Render targets
+
+When:
+Any resources that you frequently write and read on GPU,
+e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
+images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
+
+What to do:
+Create them in video memory that is fastest to access from GPU using
+#VMA_MEMORY_USAGE_GPU_ONLY.
+
+Consider using [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension
+and/or manually creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
+especially if they are large or if you plan to destroy and recreate them e.g. when
+display resolution changes.
+Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
+
+\subsection usage_patterns_simple_immutable_resources Immutable resources
+
+When:
+Any resources that you fill on CPU only once (aka "immutable") or infrequently
+and then read frequently on GPU,
+e.g. textures, vertex and index buffers, constant buffers that don't change often.
+
+What to do:
+Create them in video memory that is fastest to access from GPU using
+#VMA_MEMORY_USAGE_GPU_ONLY.
+
+To initialize the content of such a resource, create a CPU-side (aka "staging") copy of it
+in system memory - #VMA_MEMORY_USAGE_CPU_ONLY, map it, fill it,
+and submit a transfer from it to the GPU resource.
+You can keep the staging copy if you need it for another upload transfer in the future.
+If you don't, you can destroy it or reuse this buffer for uploading a different resource
+after the transfer finishes.
+
+Prefer to create just buffers in system memory rather than images, even for uploading textures.
+Use `vkCmdCopyBufferToImage()`.
+Don't use images with `VK_IMAGE_TILING_LINEAR`.
+
+\subsection usage_patterns_dynamic_resources Dynamic resources
+
+When:
+Any resources that change frequently (aka "dynamic"), e.g. every frame or every draw call,
+written on CPU, read on GPU.
+
+What to do:
+Create them using #VMA_MEMORY_USAGE_CPU_TO_GPU.
+You can map them and write to them directly on CPU, as well as read from them on GPU.
+
+This is a more complex situation. Different solutions are possible,
+and the best one depends on the specific GPU type, but you can use this simple approach as a start.
+Prefer to write to such resources sequentially (e.g. using `memcpy`).
+Don't perform random access or any reads from them on CPU, as it may be very slow.
+Also note that textures written directly from the host through a mapped pointer need to be in LINEAR, not OPTIMAL, layout.
+
+\subsection usage_patterns_readback Readback
+
+When:
+Resources that contain data written by GPU that you want to read back on CPU,
+e.g. results of some computations.
+
+What to do:
+Create them using #VMA_MEMORY_USAGE_GPU_TO_CPU.
+You can write to them directly on GPU, as well as map and read them on CPU.
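+
+A minimal sketch of such a readback buffer, assuming `resultSize` is the size of
+the data to read back:
+
+\code
+VkBufferCreateInfo readbackBufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+readbackBufInfo.size = resultSize;
+readbackBufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo readbackAllocInfo = {};
+readbackAllocInfo.usage = VMA_MEMORY_USAGE_GPU_TO_CPU;
+
+VkBuffer readbackBuf;
+VmaAllocation readbackAlloc;
+vmaCreateBuffer(allocator, &readbackBufInfo, &readbackAllocInfo,
+    &readbackBuf, &readbackAlloc, nullptr);
+\endcode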
+
+\section usage_patterns_advanced Advanced patterns
+
+\subsection usage_patterns_integrated_graphics Detecting integrated graphics
+
+You can support integrated graphics (like Intel HD Graphics, AMD APU) better
+by detecting it in Vulkan.
+To do it, call `vkGetPhysicalDeviceProperties()`, inspect
+`VkPhysicalDeviceProperties::deviceType` and look for `VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU`.
+When you find it, you can assume that memory is unified and all memory types are comparably fast
+to access from GPU, regardless of `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+
+You can then sum up sizes of all available memory heaps and treat them as useful for
+your GPU resources, instead of only `DEVICE_LOCAL` ones.
+You can also prefer to create your resources in memory types that are `HOST_VISIBLE` to map them
+directly instead of submitting explicit transfer (see below).
+
+\subsection usage_patterns_direct_vs_transfer Direct access versus transfer
+
+For resources that you frequently write on CPU and read on GPU, many solutions are possible:
+
+-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
+   second copy in system memory using #VMA_MEMORY_USAGE_CPU_ONLY and submit explicit transfer each time.
+-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_TO_GPU, map it and fill it on CPU,
+   read it directly on GPU.
+-# Create just a single copy using #VMA_MEMORY_USAGE_CPU_ONLY, map it and fill it on CPU,
+   read it directly on GPU.
+
+Which solution is the most efficient depends on your resource and especially on the GPU.
+It is best to measure it and then make the decision.
+Some general recommendations:
+
+- On integrated graphics use (2) or (3) to avoid unnecessary time and memory overhead
+  related to using a second copy and making a transfer.
+- For small resources (e.g. constant buffers) use (2).
+  Discrete AMD cards have a special 256 MiB pool of video memory that is directly mappable.
+  Even if the resource ends up in system memory, its data may be cached on GPU after the first
+  fetch over the PCIe bus.
+- For larger resources (e.g. textures), decide between (1) and (2).
+  You may want to differentiate NVIDIA and AMD, e.g. by looking for a memory type that is
+  both `DEVICE_LOCAL` and `HOST_VISIBLE`. When you find it, use (2), otherwise use (1).
+
+Similarly, for resources that you frequently write on GPU and read on CPU, multiple
+solutions are possible:
+
+-# Create one copy in video memory using #VMA_MEMORY_USAGE_GPU_ONLY,
+   second copy in system memory using #VMA_MEMORY_USAGE_GPU_TO_CPU and submit explicit transfer each time.
+-# Create just a single copy using #VMA_MEMORY_USAGE_GPU_TO_CPU, write to it directly on GPU,
+   map it and read it on CPU.
+
+You should take some measurements to decide which option is faster in case of your specific
+resource.
+
+Note that textures accessed directly from the host through a mapped pointer need to be in LINEAR layout,
+which may slow down their usage on the device.
+Textures accessed only by the device and transfer operations can use OPTIMAL layout.
+
+If you don't want to specialize your code for specific types of GPUs, you can still make
+a simple optimization for cases when your resource ends up in mappable memory, to use it
+directly in this case instead of creating a CPU-side staging copy.
+For details see [Finding out if memory is mappable](@ref memory_mapping_finding_if_memory_mappable).
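+
+A minimal sketch of that check, assuming `alloc` is the #VmaAllocation of the
+resource in question:
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, alloc, &allocInfo);
+
+VkMemoryPropertyFlags memFlags;
+vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
+
+if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+{
+    // Memory is mappable - map it and write directly, no staging copy needed.
+}
+\endcode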
+ + +\page configuration Configuration + +Please check "CONFIGURATION SECTION" in the code to find macros that you can define +before each include of this file or change directly in this file to provide +your own implementation of basic facilities like assert, `min()` and `max()` functions, +mutex, atomic etc. +The library uses its own implementation of containers by default, but you can switch to using +STL containers instead. + +For example, define `VMA_ASSERT(expr)` before including the library to provide +custom implementation of the assertion, compatible with your project. +By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration +and empty otherwise. + +\section config_Vulkan_functions Pointers to Vulkan functions + +There are multiple ways to import pointers to Vulkan functions in the library. +In the simplest case you don't need to do anything. +If the compilation or linking of your program or the initialization of the #VmaAllocator +doesn't work for you, you can try to reconfigure it. + +First, the allocator tries to fetch pointers to Vulkan functions linked statically, +like this: + +\code +m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; +\endcode + +If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`. + +Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions. +You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or +by using a helper library like [volk](https://github.com/zeux/volk). + +Third, VMA tries to fetch remaining pointers that are still null by calling +`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own. +If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`. + +Finally, all the function pointers required by the library (considering selected +Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null. + + +\section custom_memory_allocator Custom host memory allocator + +If you use custom allocator for CPU memory rather than default operator `new` +and `delete` from C++, you can make this library using your allocator as well +by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These +functions will be passed to Vulkan, as well as used by the library itself to +make any CPU-side allocations. + +\section allocation_callbacks Device memory allocation callbacks + +The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally. +You can setup callbacks to be informed about these calls, e.g. for the purpose +of gathering some statistics. To do it, fill optional member +VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. + +\section heap_memory_limit Device heap memory limit + +When device memory of certain heap runs out of free space, new allocations may +fail (returning error code) or they may succeed, silently pushing some existing +memory blocks from GPU VRAM to system RAM (which degrades performance). This +behavior is implementation-dependent - it depends on GPU vendor and graphics +driver. + +On AMD cards it can be controlled while creating Vulkan device object by using +VK_AMD_memory_overallocation_behavior extension, if available. 
+
+Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
+memory available without switching your graphics card to one that really has
+smaller VRAM, you can use a feature of this library intended for this purpose.
+To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
+
+
+
+\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
+
+VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
+performance on some GPUs. It augments the Vulkan API with the possibility to query the
+driver whether it prefers a particular buffer or image to have its own, dedicated
+allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
+to do some internal optimizations.
+
+The extension is supported by this library. It will be used automatically when
+enabled. To enable it:
+
+1 . When creating Vulkan device, check if the following 2 device extensions are
+supported (call `vkEnumerateDeviceExtensionProperties()`).
+If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
+
+- VK_KHR_get_memory_requirements2
+- VK_KHR_dedicated_allocation
+
+If you enabled these extensions:
+
+2 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
+your #VmaAllocator to inform the library that you enabled required extensions
+and you want the library to use them.
+
+\code
+allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+That's all. The extension will be automatically used whenever you create a
+buffer using vmaCreateBuffer() or image using vmaCreateImage().
+
+When using the extension together with the Vulkan Validation Layer, you will receive
+warnings like this:
+
+    vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer.
+
+It is OK, you should just ignore it. It happens because you use function
+`vkGetBufferMemoryRequirements2KHR()` instead of standard
+`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
+unaware of it.
+
+To learn more about this extension, see:
+
+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap44.html#VK_KHR_dedicated_allocation)
+- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+
+
+
+\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
+
+VK_AMD_device_coherent_memory is a device extension that enables access to
+additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
+`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for
+allocation of buffers intended for writing "breadcrumb markers" in between passes
+or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
+
+When the extension is available but has not been enabled, the Vulkan physical device
+still exposes those memory types, but their usage is forbidden. VMA automatically
+takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
+to allocate memory of such type is made.
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_amd_device_coherent_memory_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if the returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
+
+3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
+`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
+
+5) While creating #VmaAllocator with vmaCreateAllocator(), inform VMA that you
+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section vk_amd_device_coherent_memory_usage Usage
+
+After following the steps described above, you can create VMA allocations and custom pools
+out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
+devices. There are multiple ways to do it, for example:
+
+- You can request or prefer to allocate out of such memory types by adding
+  `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
+  or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
+  other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
+- If you manually found a memory type index to use for this purpose, force allocation
+  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
+
+\section vk_amd_device_coherent_memory_more_information More information
+
+To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap44.html#VK_AMD_device_coherent_memory)
+
+Example use of this extension can be found in the code of the sample and test suite
+accompanying this library.
+
+
+\page enabling_buffer_device_address Enabling buffer device address
+
+Device extension VK_KHR_buffer_device_address
+allows fetching a raw GPU pointer to a buffer and passing it for usage in shader code.
+It has been promoted to core Vulkan 1.2.
+
+If you want to use this feature in connection with VMA, follow these steps:
+
+\section enabling_buffer_device_address_initialization Initialization
+
+1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if the returned array of `VkExtensionProperties` contains
+"VK_KHR_buffer_device_address".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures*::bufferDeviceAddress` is true.
+ +3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add +"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`. + +4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`. +Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`. +Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to +`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`. + +5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you +have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT +to VmaAllocatorCreateInfo::flags. + +\section enabling_buffer_device_address_usage Usage + +After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA. +The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to +allocated memory blocks wherever it might be needed. + +Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`. +The second part of this functionality related to "capture and replay" is not supported, +as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage. + +\section enabling_buffer_device_address_more_information More information + +To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address) + +Example use of this extension can be found in the code of the sample and test suite +accompanying this library. + +\page general_considerations General considerations + +\section general_considerations_thread_safety Thread safety + +- The library has no global state, so separate #VmaAllocator objects can be used + independently. + There should be no need to create multiple such objects though - one per `VkDevice` is enough. +- By default, all calls to functions that take #VmaAllocator as first parameter + are safe to call from multiple threads simultaneously because they are + synchronized internally when needed. +- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT + flag, calls to functions that take such #VmaAllocator object must be + synchronized externally. +- Access to a #VmaAllocation object must be externally synchronized. For example, + you must not call vmaGetAllocationInfo() and vmaMapMemory() from different + threads at the same time if you pass the same #VmaAllocation object to these + functions. + +\section general_considerations_validation_layer_warnings Validation layer warnings + +When using this library, you can meet following types of warnings issued by +Vulkan validation layer. They don't necessarily indicate a bug, so you may need +to just ignore them. + +- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.* + - It happens when VK_KHR_dedicated_allocation extension is enabled. + `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it. +- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. 
Only GENERAL or PREINITIALIZED should be used.*
+  - It happens when you map a buffer or image, because the library maps the entire
+    `VkDeviceMemory` block, where different types of images and buffers may end
+    up together, especially on GPUs with unified memory like Intel.
+- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
+  - It happens when you use lost allocations, and a new image or buffer is
+    created in place of an existing object that became lost.
+  - It may happen also when you use [defragmentation](@ref defragmentation).
+
+\section general_considerations_allocation_algorithm Allocation algorithm
+
+The library uses the following algorithm for allocation, in order:
+
+-# Try to find a free range of memory in existing blocks.
+-# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
+-# If failed, try to create such block with size/2, size/4, size/8.
+-# If failed and #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag was
+   specified, try to find space in existing blocks, possibly making some other
+   allocations lost.
+-# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
+   just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+-# If failed, choose another memory type that meets the requirements specified in
+   VmaAllocationCreateInfo and go to point 1.
+-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+
+\section general_considerations_features_not_supported Features not supported
+
+Features deliberately excluded from the scope of this library:
+
+- Data transfer. Uploading (streaming) and downloading data of buffers and images
+  between CPU and GPU memory and related synchronization is the responsibility of the user.
+  Defining some "texture" object that would automatically stream its data from a
+  staging copy in CPU memory to GPU memory would rather be a feature of another,
+  higher-level library implemented on top of VMA.
+- Allocations for imported/exported external memory. They tend to require
+  explicit memory type index and dedicated allocation anyway, so they don't
+  interact with main features of this library. Such special purpose allocations
+  should be made manually, using `vkCreateBuffer()` and `vkAllocateMemory()`.
+- Sub-allocation of parts of one large buffer. Although recommended as a good practice,
+  it is the user's responsibility to implement such logic on top of VMA.
+- Recreation of buffers and images. Although the library has functions for
+  buffer and image creation (vmaCreateBuffer(), vmaCreateImage()), you need to
+  recreate these objects yourself after defragmentation. That's because the big
+  structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in
+  #VmaAllocation object.
+- Handling CPU memory allocation failures. When dynamically creating small C++
+  objects in CPU memory (not Vulkan memory), allocation failures are not checked
+  and handled gracefully, because that would complicate code significantly and
+  is usually not needed in desktop PC applications anyway.
+  Success of an allocation is just checked with an assert.
+- Code free of any compiler warnings. Maintaining the library to compile and
+  work correctly on so many different platforms is hard enough. Being free of
+  any warnings, on any version of any compiler, is simply not feasible.
+- This is a C++ library with C interface.
+  Bindings or ports to any other programming languages are welcomed as external projects and
+  are not going to be included into this repository.
+
+*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+Define this macro to 0/1 to disable/enable support for recording functionality,
+available through VmaAllocatorCreateInfo::pRecordSettings.
+*/
+#ifndef VMA_RECORDING_ENABLED
+    #define VMA_RECORDING_ENABLED 0
+#endif
+
+#if !defined(NOMINMAX) && defined(VMA_IMPLEMENTATION)
+    #define NOMINMAX // For windows.h
+#endif
+
+#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
+    extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
+    extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
+    extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
+    extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
+    extern PFN_vkAllocateMemory vkAllocateMemory;
+    extern PFN_vkFreeMemory vkFreeMemory;
+    extern PFN_vkMapMemory vkMapMemory;
+    extern PFN_vkUnmapMemory vkUnmapMemory;
+    extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
+    extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
+    extern PFN_vkBindBufferMemory vkBindBufferMemory;
+    extern PFN_vkBindImageMemory vkBindImageMemory;
+    extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
+    extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
+    extern PFN_vkCreateBuffer vkCreateBuffer;
+    extern PFN_vkDestroyBuffer vkDestroyBuffer;
+    extern PFN_vkCreateImage vkCreateImage;
+    extern PFN_vkDestroyImage vkDestroyImage;
+    extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
+    #if VMA_VULKAN_VERSION >= 1001000
+        extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
+        extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
+        extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
+        extern PFN_vkBindImageMemory2 vkBindImageMemory2;
+        extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
+    #endif // #if VMA_VULKAN_VERSION >= 1001000
+#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES
+
+#ifndef VULKAN_H_
+    #include <vulkan/vulkan.h>
+#endif
+
+// Define this macro to declare maximum supported Vulkan version in format AAABBBCCC,
+// where AAA = major, BBB = minor, CCC = patch.
+// If you want to use version > 1.0, it still needs to be enabled via VmaAllocatorCreateInfo::vulkanApiVersion.
+#if !defined(VMA_VULKAN_VERSION)
+    #if defined(VK_VERSION_1_2)
+        #define VMA_VULKAN_VERSION 1002000
+    #elif defined(VK_VERSION_1_1)
+        #define VMA_VULKAN_VERSION 1001000
+    #else
+        #define VMA_VULKAN_VERSION 1000000
+    #endif
+#endif
+
+#if !defined(VMA_DEDICATED_ALLOCATION)
+    #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
+        #define VMA_DEDICATED_ALLOCATION 1
+    #else
+        #define VMA_DEDICATED_ALLOCATION 0
+    #endif
+#endif
+
+#if !defined(VMA_BIND_MEMORY2)
+    #if VK_KHR_bind_memory2
+        #define VMA_BIND_MEMORY2 1
+    #else
+        #define VMA_BIND_MEMORY2 0
+    #endif
+#endif
+
+#if !defined(VMA_MEMORY_BUDGET)
+    #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
+        #define VMA_MEMORY_BUDGET 1
+    #else
+        #define VMA_MEMORY_BUDGET 0
+    #endif
+#endif
+
+// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
+#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
+    #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
+        #define VMA_BUFFER_DEVICE_ADDRESS 1
+    #else
+        #define VMA_BUFFER_DEVICE_ADDRESS 0
+    #endif
+#endif
+
+// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
+#if !defined(VMA_MEMORY_PRIORITY)
+    #if VK_EXT_memory_priority
+        #define VMA_MEMORY_PRIORITY 1
+    #else
+        #define VMA_MEMORY_PRIORITY 0
+    #endif
+#endif
+
+// Define these macros to decorate all public functions with additional code,
+// before and after returned type, appropriately. This may be useful for
+// exporting the functions when compiling VMA as a separate library. Example:
+// #define VMA_CALL_PRE __declspec(dllexport)
+// #define VMA_CALL_POST __cdecl
+#ifndef VMA_CALL_PRE
+    #define VMA_CALL_PRE
+#endif
+#ifndef VMA_CALL_POST
+    #define VMA_CALL_POST
+#endif
+
+// Define this macro to decorate pointers with an attribute specifying the
+// length of the array they point to if they are not null.
+//
+// The length may be one of
+// - The name of another parameter in the argument list where the pointer is declared
+// - The name of another member in the struct where the pointer is declared
+// - The name of a member of a struct type, meaning the value of that member in
+//   the context of the call. For example
+//   VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
+//   this means the number of memory heaps available in the device associated
+//   with the VmaAllocator being dealt with.
+#ifndef VMA_LEN_IF_NOT_NULL
+    #define VMA_LEN_IF_NOT_NULL(len)
+#endif
+
+// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
+// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
+#ifndef VMA_NULLABLE
+    #ifdef __clang__
+        #define VMA_NULLABLE _Nullable
+    #else
+        #define VMA_NULLABLE
+    #endif
+#endif
+
+// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
+// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
+#ifndef VMA_NOT_NULL
+    #ifdef __clang__
+        #define VMA_NOT_NULL _Nonnull
+    #else
+        #define VMA_NOT_NULL
+    #endif
+#endif
+
+// If non-dispatchable handles are represented as pointers then we can give
+// them nullability annotations.
+#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
+    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
+    #else
+        #define VMA_NOT_NULL_NON_DISPATCHABLE
+    #endif
+#endif
+
+#ifndef VMA_NULLABLE_NON_DISPATCHABLE
+    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
+    #else
+        #define VMA_NULLABLE_NON_DISPATCHABLE
+    #endif
+#endif
+
+/** \struct VmaAllocator
+\brief Represents main object of this library initialized.
+
+Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
+Call function vmaDestroyAllocator() to destroy it.
+
+It is recommended to create just one object of this type per `VkDevice` object,
+right after Vulkan is initialized, and keep it alive until before the Vulkan device is destroyed.
+*/
+VK_DEFINE_HANDLE(VmaAllocator)
+
+/// Callback function called after successful vkAllocateMemory.
+typedef void (VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t memoryType,
+    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
+    VkDeviceSize size,
+    void* VMA_NULLABLE pUserData);
+/// Callback function called before vkFreeMemory.
+typedef void (VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t memoryType,
+    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
+    VkDeviceSize size,
+    void* VMA_NULLABLE pUserData);
+
+/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
+
+Provided for informative purpose, e.g. to gather statistics about number of
+allocations or total amount of memory allocated in Vulkan.
+
+Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+*/
+typedef struct VmaDeviceMemoryCallbacks {
+    /// Optional, can be null.
+    PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
+    /// Optional, can be null.
+    PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
+    /// Optional, can be null.
+    void* VMA_NULLABLE pUserData;
+} VmaDeviceMemoryCallbacks;
+
+/// Flags for created #VmaAllocator.
+typedef enum VmaAllocatorCreateFlagBits {
+    /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
+
+    Using this flag may increase performance because internal mutexes are not used.
+    */
+    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
+    /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
+
+    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+    When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+
+    Using this extension will automatically allocate dedicated blocks of memory for
+    some buffers and images instead of suballocating place for them out of bigger
+    memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
+    flag) when it is recommended by the driver. It may improve performance on some
+    GPUs.
+
+    You may set this flag only if you found out that following device extensions are
+    supported, you enabled them while creating Vulkan device passed as
+    VmaAllocatorCreateInfo::device, and you want them to be used internally by this
+    library:
+
+    - VK_KHR_get_memory_requirements2 (device extension)
+    - VK_KHR_dedicated_allocation (device extension)
+
+    When this flag is set, you can experience following warnings reported by Vulkan
+    validation layer. You can ignore them.
+
+    > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
+    */
+    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
+    /**
+    Enables usage of VK_KHR_bind_memory2 extension.
+
+    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+    When it's `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+
+    You may set this flag only if you found out that this device extension is supported,
+    you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
+    and you want it to be used internally by this library.
+
+    The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
+    which allow passing a chain of `pNext` structures while binding.
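+
+    A minimal sketch of enabling it (the extension is assumed to have been found and
+    enabled on the `VkDevice`):
+
+    \code
+    VmaAllocatorCreateInfo allocatorInfo = {};
+    allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT;
+    \endcode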
+ This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). + */ + VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, + /** + Enables usage of VK_EXT_memory_budget extension. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library, along with another instance extension + VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). + + The extension provides query for current memory usage and budget, which will probably + be more accurate than an estimation used by the library otherwise. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, + /** + Enables usage of VK_AMD_device_coherent_memory extension. + + You may set this flag only if you: + + - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, + - want it to be used internally by this library. + + The extension and accompanying device feature provide access to memory types with + `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. + They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. + + When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. + To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, + returning `VK_ERROR_FEATURE_NOT_PRESENT`. + */ + VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, + /** + Enables usage of "buffer device address" feature, which allows you to use function + `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. + + You may set this flag only if you: + + 1. (For Vulkan version < 1.2) Found as available and enabled device extension + VK_KHR_buffer_device_address. + This extension is promoted to core Vulkan 1.2. + 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. + + When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. + The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to + allocated memory blocks wherever it might be needed. + + For more information, see documentation chapter \ref enabling_buffer_device_address. + */ + VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, + /** + Enables usage of VK_EXT_memory_priority extension in the library. + + You may set this flag only if you found available and enabled this device extension, + along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + + When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority + are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. + + A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. 
+    Larger values are higher priority. The granularity of the priorities is implementation-dependent.
+    It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
+    The value to be used for default priority is 0.5.
+    For more details, see the documentation of the VK_EXT_memory_priority extension.
+    */
+    VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,
+
+    VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaAllocatorCreateFlagBits;
+typedef VkFlags VmaAllocatorCreateFlags;
+
+/** \brief Pointers to some Vulkan functions - a subset used by the library.
+
+Used in VmaAllocatorCreateInfo::pVulkanFunctions.
+*/
+typedef struct VmaVulkanFunctions {
+    PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
+    PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
+    PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
+    PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
+    PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
+    PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
+    PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
+    PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
+    PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
+    PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
+    PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
+    PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
+    PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
+    PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
+    PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
+    PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
+    PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+    PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
+    PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
+#endif
+#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
+    PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
+    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
+#endif
+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
+#endif
+} VmaVulkanFunctions;
+
+/// Flags to be used in VmaRecordSettings::flags.
+typedef enum VmaRecordFlagBits {
+    /** \brief Enables flush after recording every function call.
+
+    Enable it if you expect your application to crash, which may leave the recording file truncated.
+    It may degrade performance though.
+    */
+    VMA_RECORD_FLUSH_AFTER_CALL_BIT = 0x00000001,
+
+    VMA_RECORD_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaRecordFlagBits;
+typedef VkFlags VmaRecordFlags;
+
+/// Parameters for recording calls to VMA functions. To be used in VmaAllocatorCreateInfo::pRecordSettings.
+typedef struct VmaRecordSettings
+{
+    /// Flags for recording. Use #VmaRecordFlagBits enum.
+    VmaRecordFlags flags;
+    /** \brief Path to the file that should be written by the recording.
+
+    Suggested extension: "csv".
+    If the file already exists, it will be overwritten.
+    It will be opened for the whole time #VmaAllocator object is alive.
+    If opening this file fails, creation of the whole allocator object fails.
+    */
+    const char* VMA_NOT_NULL pFilePath;
+} VmaRecordSettings;
+
+/// Description of an Allocator to be created.
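+///
+/// A minimal sketch of filling it (the Vulkan handles are assumed to be created by the
+/// application beforehand; see vmaCreateAllocator() below):
+/// \code
+/// VmaAllocatorCreateInfo createInfo = {};
+/// createInfo.physicalDevice = physicalDevice;
+/// createInfo.device = device;
+/// createInfo.instance = instance;
+///
+/// VmaAllocator allocator;
+/// VkResult res = vmaCreateAllocator(&createInfo, &allocator);
+/// \endcode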
+typedef struct VmaAllocatorCreateInfo
+{
+    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
+    VmaAllocatorCreateFlags flags;
+    /// Vulkan physical device.
+    /** It must be valid throughout whole lifetime of created allocator. */
+    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+    /// Vulkan device.
+    /** It must be valid throughout whole lifetime of created allocator. */
+    VkDevice VMA_NOT_NULL device;
+    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
+    /** Set to 0 to use default, which is currently 256 MiB. */
+    VkDeviceSize preferredLargeHeapBlockSize;
+    /// Custom CPU memory allocation callbacks. Optional.
+    /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
+    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
+    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
+    /** Optional, can be null. */
+    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
+    /** \brief Maximum number of additional frames that are in use at the same time as current frame.
+
+    This value is used only when you make allocations with
+    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become
+    lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount.
+
+    For example, if you double-buffer your command buffers, so resources used for
+    rendering in previous frame may still be in use by the GPU at the moment you
+    allocate resources needed for the current frame, set this value to 1.
+
+    If you want to allow any allocations other than used in the current frame to
+    become lost, set this value to 0.
+    */
+    uint32_t frameInUseCount;
+    /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
+
+    If not NULL, it must be a pointer to an array of
+    `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
+    maximum number of bytes that can be allocated out of particular Vulkan memory
+    heap.
+
+    Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
+    heap. This is also the default in case of `pHeapSizeLimit` = NULL.
+
+    If there is a limit defined for a heap:
+
+    - If user tries to allocate more memory from that heap using this allocator,
+      the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+    - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
+      value of this limit will be reported instead when using vmaGetMemoryProperties().
+
+    Warning! Using this feature may not be equivalent to installing a GPU with
+    smaller amount of memory, because graphics driver doesn't necessarily fail new
+    allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
+    exceeded. It may return success and just silently migrate some device memory
+    blocks to system RAM. This driver behavior can also be controlled using
+    VK_AMD_memory_overallocation_behavior extension.
+    */
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
+
+    /** \brief Pointers to Vulkan functions. Can be null.
+
+    For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
+    */
+    const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
+    /** \brief Parameters for recording of VMA calls. Can be null.
+
+    If not null, it enables recording of calls to VMA functions to a file.
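+
+    A hedged sketch (the file path is hypothetical; `VMA_RECORDING_ENABLED` must be 1):
+
+    \code
+    VmaRecordSettings recordSettings = {};
+    recordSettings.flags = VMA_RECORD_FLUSH_AFTER_CALL_BIT;
+    recordSettings.pFilePath = "vma_calls.csv";
+    createInfo.pRecordSettings = &recordSettings;
+    \endcode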
+    If support for recording is not enabled using `VMA_RECORDING_ENABLED` macro,
+    creation of the allocator object fails with `VK_ERROR_FEATURE_NOT_PRESENT`.
+    */
+    const VmaRecordSettings* VMA_NULLABLE pRecordSettings;
+    /** \brief Handle to Vulkan instance object.
+
+    Starting from version 3.0.0 this member is no longer optional, it must be set!
+    */
+    VkInstance VMA_NOT_NULL instance;
+    /** \brief Optional. The highest version of Vulkan that the application is designed to use.
+
+    It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
+    The patch version number specified is ignored. Only the major and minor versions are considered.
+    It must be less than or equal (preferably equal) to the value passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
+    Only versions 1.0, 1.1, 1.2 are supported by the current implementation.
+    Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
+    */
+    uint32_t vulkanApiVersion;
+} VmaAllocatorCreateInfo;
+
+/// Creates Allocator object.
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
+    const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocator VMA_NULLABLE * VMA_NOT_NULL pAllocator);
+
+/// Destroys allocator object.
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
+    VmaAllocator VMA_NULLABLE allocator);
+
+/** \brief Information about existing #VmaAllocator object.
+*/
+typedef struct VmaAllocatorInfo
+{
+    /** \brief Handle to Vulkan instance object.
+
+    This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
+    */
+    VkInstance VMA_NOT_NULL instance;
+    /** \brief Handle to Vulkan physical device object.
+
+    This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
+    */
+    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+    /** \brief Handle to Vulkan device object.
+
+    This is the same value as has been passed through VmaAllocatorCreateInfo::device.
+    */
+    VkDevice VMA_NOT_NULL device;
+} VmaAllocatorInfo;
+
+/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
+
+It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to
+`VkPhysicalDevice`, `VkDevice` etc. every time using this function.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator VMA_NOT_NULL allocator, VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
+
+/**
+PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
+You can access them here, without fetching them again on your own.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkPhysicalDeviceProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceProperties);
+
+/**
+PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
+You can access them here, without fetching them again on your own.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE * VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
+
+/**
+\brief Given Memory Type Index, returns Property Flags of this memory type.
+
+This is just a convenience function. Same information can be obtained using
+vmaGetMemoryProperties().
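+
+A hedged usage sketch (`allocInfo` as returned by vmaGetAllocationInfo()):
+
+\code
+VkMemoryPropertyFlags memFlags;
+vmaGetMemoryTypeProperties(allocator, allocInfo.memoryType, &memFlags);
+if((memFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
+{
+    // Memory of this type is mappable.
+}
+\endcode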
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t memoryTypeIndex,
+    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
+
+/** \brief Sets index of the current frame.
+
+This function must be used if you make allocations with
+#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT and
+#VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flags to inform the allocator
+when a new frame begins. Allocations queried using vmaGetAllocationInfo() cannot
+become lost in the current frame.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t frameIndex);
+
+/** \brief Calculated statistics of memory usage in entire allocator.
+*/
+typedef struct VmaStatInfo
+{
+    /// Number of `VkDeviceMemory` Vulkan memory blocks allocated.
+    uint32_t blockCount;
+    /// Number of #VmaAllocation allocation objects allocated.
+    uint32_t allocationCount;
+    /// Number of free ranges of memory between allocations.
+    uint32_t unusedRangeCount;
+    /// Total number of bytes occupied by all allocations.
+    VkDeviceSize usedBytes;
+    /// Total number of bytes occupied by unused ranges.
+    VkDeviceSize unusedBytes;
+    VkDeviceSize allocationSizeMin, allocationSizeAvg, allocationSizeMax;
+    VkDeviceSize unusedRangeSizeMin, unusedRangeSizeAvg, unusedRangeSizeMax;
+} VmaStatInfo;
+
+/// General statistics from current state of Allocator.
+typedef struct VmaStats
+{
+    VmaStatInfo memoryType[VK_MAX_MEMORY_TYPES];
+    VmaStatInfo memoryHeap[VK_MAX_MEMORY_HEAPS];
+    VmaStatInfo total;
+} VmaStats;
+
+/** \brief Retrieves statistics from current state of the Allocator.
+
+This function is called "calculate" not "get" because it has to traverse all
+internal data structures, so it may be quite slow. For faster but more brief statistics
+suitable to be called every frame or every allocation, use vmaGetBudget().
+
+Note that when using allocator from multiple threads, returned information may immediately
+become outdated.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaStats* VMA_NOT_NULL pStats);
+
+/** \brief Statistics of current memory usage and available budget, in bytes, for specific memory heap.
+*/
+typedef struct VmaBudget
+{
+    /** \brief Sum size of all `VkDeviceMemory` blocks allocated from particular heap, in bytes.
+    */
+    VkDeviceSize blockBytes;
+
+    /** \brief Sum size of all allocations created in particular heap, in bytes.
+
+    Usually less than or equal to `blockBytes`.
+    Difference `blockBytes - allocationBytes` is the amount of memory allocated but unused -
+    available for new allocations or wasted due to fragmentation.
+
+    It might be greater than `blockBytes` if there are some allocations in lost state, as they count
+    toward this value as well.
+    */
+    VkDeviceSize allocationBytes;
+
+    /** \brief Estimated current memory usage of the program, in bytes.
+
+    Fetched from system using `VK_EXT_memory_budget` extension if enabled.
+
+    It might be different than `blockBytes` (usually higher) due to additional implicit objects
+    also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
+    `VkDeviceMemory` blocks allocated outside of this library, if any.
+    */
+    VkDeviceSize usage;
+
+    /** \brief Estimated amount of memory available to the program, in bytes.
+
+    Fetched from system using `VK_EXT_memory_budget` extension if enabled.
+
+    It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors
+    external to the program, like other programs also consuming system resources.
+    Difference `budget - usage` is the amount of additional memory that can probably
+    be allocated without problems. Exceeding the budget may result in various problems.
+    */
+    VkDeviceSize budget;
+} VmaBudget;
+
+/** \brief Retrieves information about current memory budget for all memory heaps.
+
+\param[out] pBudget Must point to array with number of elements at least equal to number of memory heaps in physical device used.
+
+This function is called "get" not "calculate" because it is very fast, suitable to be called
+every frame or every allocation. For more detailed statistics use vmaCalculateStats().
+
+Note that when using allocator from multiple threads, returned information may immediately
+become outdated.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaBudget* VMA_NOT_NULL pBudget);
+
+#ifndef VMA_STATS_STRING_ENABLED
+#define VMA_STATS_STRING_ENABLED 1
+#endif
+
+#if VMA_STATS_STRING_ENABLED
+
+/// Builds and returns statistics as string in JSON format.
+/** @param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+    VmaAllocator VMA_NOT_NULL allocator,
+    char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString,
+    VkBool32 detailedMap);
+
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
+    VmaAllocator VMA_NOT_NULL allocator,
+    char* VMA_NULLABLE pStatsString);
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+/** \struct VmaPool
+\brief Represents custom memory pool
+
+Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
+Call function vmaDestroyPool() to destroy it.
+
+For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
+*/
+VK_DEFINE_HANDLE(VmaPool)
+
+typedef enum VmaMemoryUsage
+{
+    /** No intended memory usage specified.
+    Use other members of VmaAllocationCreateInfo to specify your requirements.
+    */
+    VMA_MEMORY_USAGE_UNKNOWN = 0,
+    /** Memory will be used on device only, so fast access from the device is preferred.
+    It usually means device-local GPU (video) memory.
+    No need to be mappable on host.
+    It is roughly equivalent to `D3D12_HEAP_TYPE_DEFAULT`.
+
+    Usage:
+
+    - Resources written and read by device, e.g. images used as attachments.
+    - Resources transferred from host once (immutable) or infrequently and read by
+      device multiple times, e.g. textures to be sampled, vertex buffers, uniform
+      (constant) buffers, and majority of other types of resources used on GPU.
+
+    Allocation may still end up in `HOST_VISIBLE` memory on some implementations.
+    In such case, you are free to map it.
+    You can use #VMA_ALLOCATION_CREATE_MAPPED_BIT with this usage type.
+    */
+    VMA_MEMORY_USAGE_GPU_ONLY = 1,
+    /** Memory will be mappable on host.
+    It usually means CPU (system) memory.
+    Guarantees to be `HOST_VISIBLE` and `HOST_COHERENT`.
+    CPU access is typically uncached. Writes may be write-combined.
+    Resources created in this pool may still be accessible to the device, but access to them can be slow.
+    It is roughly equivalent to `D3D12_HEAP_TYPE_UPLOAD`.
+
+    Usage: Staging copy of resources used as transfer source.
+    */
+    VMA_MEMORY_USAGE_CPU_ONLY = 2,
+    /**
+    Memory that is both mappable on host (guarantees to be `HOST_VISIBLE`) and preferably fast to access by GPU.
+    CPU access is typically uncached. Writes may be write-combined.
+
+    Usage: Resources written frequently by host (dynamic), read by device. E.g. textures (with LINEAR layout), vertex buffers, uniform buffers updated every frame or every draw call.
+    */
+    VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
+    /** Memory mappable on host (guarantees to be `HOST_VISIBLE`) and cached.
+    It is roughly equivalent to `D3D12_HEAP_TYPE_READBACK`.
+
+    Usage:
+
+    - Resources written by device, read by host - results of some computations, e.g. screen capture, average scene luminance for HDR tone mapping.
+    - Any resources read or accessed randomly on host, e.g. CPU-side copy of vertex buffer used as source of transfer, but also used for collision detection.
+    */
+    VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
+    /** CPU memory - memory that is preferably not `DEVICE_LOCAL`, but also not guaranteed to be `HOST_VISIBLE`.
+
+    Usage: Staging copy of resources moved from GPU memory to CPU memory as part
+    of custom paging/residency mechanism, to be moved back to GPU memory when needed.
+    */
+    VMA_MEMORY_USAGE_CPU_COPY = 5,
+    /** Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
+    Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.
+
+    Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
+
+    Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+    */
+    VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
+
+    VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
+} VmaMemoryUsage;
+
+/// Flags to be passed as VmaAllocationCreateInfo::flags.
+typedef enum VmaAllocationCreateFlagBits {
+    /** \brief Set this flag if the allocation should have its own memory block.
+
+    Use it for special, big resources, like fullscreen images used as attachments.
+
+    You should not use this flag if VmaAllocationCreateInfo::pool is not null.
+    */
+    VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
+
+    /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
+
+    If new allocation cannot be placed in any of the existing blocks, allocation
+    fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+
+    You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
+    #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
+
+    If VmaAllocationCreateInfo::pool is not null, this flag is implied and ignored. */
+    VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
+    /** \brief Set this flag to use memory that will be persistently mapped, and to retrieve a pointer to it.
+
+    Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
+
+    It is valid to use this flag for allocation made from memory type that is not
+    `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
+    useful if you need an allocation that is efficient to use on GPU
+    (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
+    support it (e.g. Intel GPU).
+
+    You should not use this flag together with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT.
+    */
+    VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
+    /** Allocation created with this flag can become lost as a result of another
+    allocation with #VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT flag, so you
+    must check it before use.
+
+    To check if allocation is not lost, call vmaGetAllocationInfo() and check if
+    VmaAllocationInfo::deviceMemory is not `VK_NULL_HANDLE`.
+
+    For details about supporting lost allocations, see Lost Allocations
+    chapter of User Guide on Main Page.
+
+    You should not use this flag together with #VMA_ALLOCATION_CREATE_MAPPED_BIT.
+    */
+    VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT = 0x00000008,
+    /** While creating allocation using this flag, other allocations that were
+    created with flag #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT can become lost.
+
+    For details about supporting lost allocations, see Lost Allocations
+    chapter of User Guide on Main Page.
+    */
+    VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT = 0x00000010,
+    /** Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
+    null-terminated string. Instead of copying pointer value, a local copy of the
+    string is made and stored in allocation's `pUserData`. The string is automatically
+    freed together with the allocation. It is also used in vmaBuildStatsString().
+    */
+    VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
+    /** Allocation will be created from upper stack in a double stack pool.
+
+    This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
+    */
+    VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
+    /** Create both buffer/image and allocation, but don't bind them together.
+    It is useful when you want to do the binding yourself in order to perform some more advanced binding, e.g. using some extensions.
+    The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
+    Otherwise it is ignored.
+    */
+    VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
+    /** Create allocation only if additional device memory required for it, if any, won't exceed
+    memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+    */
+    VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
+
+    /** Allocation strategy that chooses smallest possible free range for the
+    allocation.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = 0x00010000,
+    /** Allocation strategy that chooses biggest possible free range for the
+    allocation.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT = 0x00020000,
+    /** Allocation strategy that chooses first suitable free range for the
+    allocation.
+
+    "First" doesn't necessarily mean the one with smallest offset in memory,
+    but rather the one that is easiest and fastest to find.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = 0x00040000,
+
+    /** Allocation strategy that tries to minimize memory usage.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT,
+    /** Allocation strategy that tries to minimize allocation time.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
+    /** Allocation strategy that tries to minimize memory fragmentation.
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT = VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT,
+
+    /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
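+
+    A hedged sketch:
+
+    \code
+    VmaAllocationCreateFlags strategy = allocCreateInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
+    \endcode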
+    */
+    VMA_ALLOCATION_CREATE_STRATEGY_MASK =
+        VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT |
+        VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT |
+        VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT,
+
+    VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaAllocationCreateFlagBits;
+typedef VkFlags VmaAllocationCreateFlags;
+
+typedef struct VmaAllocationCreateInfo
+{
+    /// Use #VmaAllocationCreateFlagBits enum.
+    VmaAllocationCreateFlags flags;
+    /** \brief Intended usage of memory.
+
+    You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in another way. \n
+    If `pool` is not null, this member is ignored.
+    */
+    VmaMemoryUsage usage;
+    /** \brief Flags that must be set in a Memory Type chosen for an allocation.
+
+    Leave 0 if you specify memory requirements in another way. \n
+    If `pool` is not null, this member is ignored.*/
+    VkMemoryPropertyFlags requiredFlags;
+    /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
+
+    Set to 0 if no additional flags are preferred. \n
+    If `pool` is not null, this member is ignored. */
+    VkMemoryPropertyFlags preferredFlags;
+    /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
+
+    Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
+    it meets other requirements specified by this structure, with no further
+    restrictions on memory type index. \n
+    If `pool` is not null, this member is ignored.
+    */
+    uint32_t memoryTypeBits;
+    /** \brief Pool that this allocation should be created in.
+
+    Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
+    `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
+    */
+    VmaPool VMA_NULLABLE pool;
+    /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
+
+    If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
+    null or pointer to a null-terminated string. The string will then be copied to
+    an internal buffer, so it doesn't need to be valid after allocation call.
+    */
+    void* VMA_NULLABLE pUserData;
+    /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
+
+    It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
+    and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+    Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
+    */
+    float priority;
+} VmaAllocationCreateInfo;
+
+/**
+\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
+
+This algorithm tries to find a memory type that:
+
+- Is allowed by memoryTypeBits.
+- Contains all the flags from pAllocationCreateInfo->requiredFlags.
+- Matches intended usage.
+- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
+
+\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
+from this function or any other allocating function probably means that your
+device doesn't support any memory type with requested features for the specific
+type of resource you want to use it for. Please check parameters of your
+resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
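+
+A hedged usage sketch (`memReq` is assumed to come from e.g. `vkGetBufferMemoryRequirements`):
+
+\code
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+uint32_t memTypeIndex;
+VkResult res = vmaFindMemoryTypeIndex(allocator, memReq.memoryTypeBits, &allocCreateInfo, &memTypeIndex);
+\endcode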
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It internally creates a temporary, dummy buffer that never has memory bound. +It is just a convenience function, equivalent to calling: + +- `vkCreateBuffer` +- `vkGetBufferMemoryRequirements` +- `vmaFindMemoryTypeIndex` +- `vkDestroyBuffer` +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/** +\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. + +It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. +It internally creates a temporary, dummy image that never has memory bound. +It is just a convenience function, equivalent to calling: + +- `vkCreateImage` +- `vkGetImageMemoryRequirements` +- `vmaFindMemoryTypeIndex` +- `vkDestroyImage` +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, + uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + +/// Flags to be passed as VmaPoolCreateInfo::flags. +typedef enum VmaPoolCreateFlagBits { + /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. + + This is an optional optimization flag. + + If you always allocate using vmaCreateBuffer(), vmaCreateImage(), + vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator + knows exact type of your allocations so it can handle Buffer-Image Granularity + in the optimal way. + + If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), + exact type of such allocations is not known, so allocator must be conservative + in handling Buffer-Image Granularity, which can lead to suboptimal allocation + (wasted memory). In that case, if you can make sure you always allocate only + buffers and linear images or only optimal images out of this pool, use this flag + to make allocator disregard Buffer-Image Granularity and so make allocations + faster and more optimal. + */ + VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, + + /** \brief Enables alternative, linear allocation algorithm in this pool. + + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. + + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. For details, see documentation chapter + \ref linear_algorithm. + + When using this flag, you must specify VmaPoolCreateInfo::maxBlockCount == 1 (or 0 for default). 
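+
+    A hedged sketch of a single-block linear pool (`memTypeIndex` assumed found with
+    vmaFindMemoryTypeIndex()):
+
+    \code
+    VmaPoolCreateInfo poolCreateInfo = {};
+    poolCreateInfo.memoryTypeIndex = memTypeIndex;
+    poolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
+    poolCreateInfo.blockSize = 16 * 1024 * 1024; // one fixed 16 MiB block
+    poolCreateInfo.maxBlockCount = 1;
+    \endcode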
+
+    For more details, see [Linear allocation algorithm](@ref linear_algorithm).
+    */
+    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
+
+    /** \brief Enables alternative, buddy allocation algorithm in this pool.
+
+    It operates on a tree of blocks, each having size that is a power of two and
+    a half of its parent's size. Comparing to default algorithm, this one provides
+    faster allocation and deallocation and decreased external fragmentation,
+    at the expense of more memory wasted (internal fragmentation).
+
+    For more details, see [Buddy allocation algorithm](@ref buddy_algorithm).
+    */
+    VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT = 0x00000008,
+
+    /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
+    */
+    VMA_POOL_CREATE_ALGORITHM_MASK =
+        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT |
+        VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT,
+
+    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaPoolCreateFlagBits;
+typedef VkFlags VmaPoolCreateFlags;
+
+/** \brief Describes parameters of a #VmaPool to be created.
+*/
+typedef struct VmaPoolCreateInfo {
+    /** \brief Vulkan memory type index to allocate this pool from.
+    */
+    uint32_t memoryTypeIndex;
+    /** \brief Use combination of #VmaPoolCreateFlagBits.
+    */
+    VmaPoolCreateFlags flags;
+    /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
+
+    Specify nonzero to set explicit, constant size of memory blocks used by this
+    pool.
+
+    Leave 0 to use default and let the library manage block sizes automatically.
+    Sizes of particular blocks may vary.
+    */
+    VkDeviceSize blockSize;
+    /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
+
+    Set to 0 to have no preallocated blocks and allow the pool to be completely empty.
+    */
+    size_t minBlockCount;
+    /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
+
+    Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
+
+    Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated
+    throughout whole lifetime of this pool.
+    */
+    size_t maxBlockCount;
+    /** \brief Maximum number of additional frames that are in use at the same time as current frame.
+
+    This value is used only when you make allocations with
+    #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocation cannot become
+    lost if allocation.lastUseFrameIndex >= allocator.currentFrameIndex - frameInUseCount.
+
+    For example, if you double-buffer your command buffers, so resources used for
+    rendering in previous frame may still be in use by the GPU at the moment you
+    allocate resources needed for the current frame, set this value to 1.
+
+    If you want to allow any allocations other than used in the current frame to
+    become lost, set this value to 0.
+    */
+    uint32_t frameInUseCount;
+    /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
+
+    It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
+    Otherwise, this variable is ignored.
+    */
+    float priority;
+} VmaPoolCreateInfo;
+
+/** \brief Describes parameters of an existing #VmaPool.
+*/
+typedef struct VmaPoolStats {
+    /** \brief Total amount of `VkDeviceMemory` allocated from Vulkan for this pool, in bytes.
+    */
+    VkDeviceSize size;
+    /** \brief Total number of bytes in the pool not used by any #VmaAllocation.
+ */ + VkDeviceSize unusedSize; + /** \brief Number of #VmaAllocation objects created from this pool that were not destroyed or lost. + */ + size_t allocationCount; + /** \brief Number of continuous memory ranges in the pool not used by any #VmaAllocation. + */ + size_t unusedRangeCount; + /** \brief Size of the largest continuous free memory region available for new allocation. + + Making a new allocation of that size is not guaranteed to succeed because of + possible additional margin required to respect alignment and buffer/image + granularity. + */ + VkDeviceSize unusedRangeSizeMax; + /** \brief Number of `VkDeviceMemory` blocks allocated for this pool. + */ + size_t blockCount; +} VmaPoolStats; + +/** \brief Allocates Vulkan device memory and creates #VmaPool object. + +@param allocator Allocator object. +@param pCreateInfo Parameters of pool to create. +@param[out] pPool Handle to created pool. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator VMA_NOT_NULL allocator, + const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaPool VMA_NULLABLE * VMA_NOT_NULL pPool); + +/** \brief Destroys #VmaPool object and frees Vulkan device memory. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NULLABLE pool); + +/** \brief Retrieves statistics of existing #VmaPool object. + +@param allocator Allocator object. +@param pool Pool object. +@param[out] pPoolStats Statistics of specified pool. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaPoolStats* VMA_NOT_NULL pPoolStats); + +/** \brief Marks all allocations in given pool as lost if they are not used in current frame or VmaPoolCreateInfo::frameInUseCount back from now. + +@param allocator Allocator object. +@param pool Pool. +@param[out] pLostAllocationCount Number of allocations marked as lost. Optional - pass null if you don't need this information. +*/ +VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + size_t* VMA_NULLABLE pLostAllocationCount); + +/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. + +Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, +`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is +`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + +Possible return values: + +- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. +- `VK_SUCCESS` - corruption detection has been performed and succeeded. +- `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. +- Other value: Error returned by Vulkan, e.g. memory mapping failure. +*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator VMA_NOT_NULL allocator, VmaPool VMA_NOT_NULL pool); + +/** \brief Retrieves name of a custom pool. + +After the call `ppName` is either null or points to an internally-owned null-terminated string +containing name of the pool that was previously set. The pointer becomes invalid when the pool is +destroyed or its name is changed using vmaSetPoolName(). 
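+
+A hedged sketch:
+
+\code
+vmaSetPoolName(allocator, pool, "TexturePool"); // hypothetical name
+
+const char* poolName = NULL;
+vmaGetPoolName(allocator, pool, &poolName);
+\endcode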
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaPool VMA_NOT_NULL pool,
+    const char* VMA_NULLABLE * VMA_NOT_NULL ppName);
+
+/** \brief Sets name of a custom pool.
+
+`pName` can be either null or pointer to a null-terminated string with new name for the pool.
+Function makes internal copy of the string, so it can be changed or freed immediately after this call.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaPool VMA_NOT_NULL pool,
+    const char* VMA_NULLABLE pName);
+
+/** \struct VmaAllocation
+\brief Represents single memory allocation.
+
+It may be either a dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
+plus a unique offset.
+
+There are multiple ways to create such an object.
+You need to fill structure VmaAllocationCreateInfo.
+For more information see [Choosing memory type](@ref choosing_memory_type).
+
+Although the library provides convenience functions that create a Vulkan buffer or image,
+allocate memory for it and bind them together,
+binding of the allocation to a buffer or an image is out of scope of the allocation itself.
+Allocation object can exist without buffer/image bound,
+binding can be done manually by the user, and destruction of it can be done
+independently of destruction of the allocation.
+
+The object also remembers its size and some other information.
+To retrieve this information, use function vmaGetAllocationInfo() and inspect
+returned structure VmaAllocationInfo.
+
+Some kinds of allocations can be in lost state.
+For more information, see [Lost allocations](@ref lost_allocations).
+*/
+VK_DEFINE_HANDLE(VmaAllocation)
+
+/** \brief Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
+*/
+typedef struct VmaAllocationInfo {
+    /** \brief Memory type index that this allocation was allocated from.
+
+    It never changes.
+    */
+    uint32_t memoryType;
+    /** \brief Handle to Vulkan memory object.
+
+    Same memory object can be shared by multiple allocations.
+
+    It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
+
+    If the allocation is lost, it is equal to `VK_NULL_HANDLE`.
+    */
+    VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
+    /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
+
+    You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function
+    vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,
+    not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation
+    and apply this offset automatically.
+
+    It can change after call to vmaDefragment() if this allocation is passed to the function, or if allocation is lost.
+    */
+    VkDeviceSize offset;
+    /** \brief Size of this allocation, in bytes.
+
+    It never changes, unless allocation is lost.
+
+    \note Allocation size returned in this variable may be greater than the size
+    requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the
+    allocation is accessible for operations on memory e.g. using a pointer after
+    mapping with vmaMapMemory(), but operations on the resource e.g. using
+    `vkCmdCopyBuffer` must be limited to the size of the resource.
+    */
+    VkDeviceSize size;
+    /** \brief Pointer to the beginning of this allocation as mapped data.
+
+    If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
+    created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
+
+    It can change after call to vmaMapMemory(), vmaUnmapMemory().
+    It can also change after call to vmaDefragment() if this allocation is passed to the function.
+    */
+    void* VMA_NULLABLE pMappedData;
+    /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
+
+    It can change after call to vmaSetAllocationUserData() for this allocation.
+    */
+    void* VMA_NULLABLE pUserData;
+} VmaAllocationInfo;
+
+/** \brief General purpose memory allocation.
+
+@param[out] pAllocation Handle to allocated memory.
+@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
+vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief General purpose memory allocation for multiple allocation objects at once.
+
+@param allocator Allocator object.
+@param pVkMemoryRequirements Memory requirements for each allocation.
+@param pCreateInfo Creation parameters for each allocation.
+@param allocationCount Number of allocations to make.
+@param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
+@param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
+It is just a general purpose allocation function able to make multiple allocations at once.
+It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
+
+All allocations are made using same parameters. All of them are created out of the same memory pool and type.
+If any allocation fails, all allocations already made within this function call are also freed, so that when
+returned result is not `VK_SUCCESS`, `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
+    size_t allocationCount,
+    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
+    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
+
+/**
+@param[out] pAllocation Handle to allocated memory.
+@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+You should free the memory using vmaFreeMemory().
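+
+A hedged sketch (`buffer` assumed created beforehand with `vkCreateBuffer`; binding is done
+separately, e.g. using vmaBindBufferMemory()):
+
+\code
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VmaAllocation allocation;
+VkResult res = vmaAllocateMemoryForBuffer(allocator, buffer, &allocCreateInfo, &allocation, NULL);
+\endcode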
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/// Function similar to vmaAllocateMemoryForBuffer().
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
+
+Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VmaAllocation VMA_NULLABLE allocation);
+
+/** \brief Frees memory and destroys multiple allocations.
+
+Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
+It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
+vmaAllocateMemoryPages() and other functions.
+It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
+
+Allocations in `pAllocations` array can come from any memory pools and types.
+Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+    VmaAllocator VMA_NOT_NULL allocator,
+    size_t allocationCount,
+    const VmaAllocation VMA_NULLABLE * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
+
+/** \brief Deprecated.
+
+\deprecated
+In version 2.2.0 it used to try to change allocation's size without moving or reallocating it.
+In current version it returns `VK_SUCCESS` only if `newSize` equals current allocation's size.
+Otherwise returns `VK_ERROR_OUT_OF_POOL_MEMORY`, indicating that allocation's size could not be changed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize newSize);
+
+/** \brief Returns current information about specified allocation and atomically marks it as used in current frame.
+
+Current parameters of given allocation are returned in `pAllocationInfo`.
+
+This function also atomically "touches" allocation - marks it as used in current frame,
+just like vmaTouchAllocation().
+If the allocation is in lost state, `pAllocationInfo->deviceMemory == VK_NULL_HANDLE`.
+
+Although this function uses atomics and doesn't lock any mutex, so it should be quite efficient,
+you can still avoid calling it too often:
+
+- You can retrieve same VmaAllocationInfo structure while creating your resource, from function
+  vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
+  (e.g. due to defragmentation or allocation becoming lost).
+- If you just want to check if allocation is not lost, vmaTouchAllocation() will work faster.
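+
+A hedged sketch of the lost-state check described above:
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+if(allocInfo.deviceMemory == VK_NULL_HANDLE)
+{
+    // Allocation is lost; the bound buffer/image must still be destroyed.
+}
+\endcode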
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
+
+/** \brief Returns `VK_TRUE` if allocation is not lost and atomically marks it as used in current frame.
+
+If the allocation has been created with #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+this function returns `VK_TRUE` if it's not in lost state, so it can still be used.
+It then also atomically "touches" the allocation - marks it as used in current frame,
+so that you can be sure it won't become lost in current frame or next `frameInUseCount` frames.
+
+If the allocation is in lost state, the function returns `VK_FALSE`.
+Memory of such allocation, as well as buffer or image bound to it, should not be used.
+Lost allocation and the buffer/image still need to be destroyed.
+
+If the allocation has been created without #VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag,
+this function always returns `VK_TRUE`.
+*/
+VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation);
+
+/** \brief Sets pUserData in given allocation to new value.
+
+If the allocation was created with VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT,
+pUserData must be either null, or pointer to a null-terminated string. The function
+makes local copy of the string and sets it as allocation's `pUserData`. String
+passed as pUserData doesn't need to be valid for whole lifetime of the allocation -
+you can free it after this call. String previously pointed to by allocation's
+pUserData is freed from memory.
+
+If the flag was not used, the value of pointer `pUserData` is just copied to
+allocation's `pUserData`. It is opaque, so you can use it however you want - e.g.
+as a pointer, ordinal number or some handle to your own data.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    void* VMA_NULLABLE pUserData);
+
+/** \brief Creates new allocation that is in lost state from the beginning.
+
+It can be useful if you need a dummy, non-null allocation.
+
+You still need to destroy created object using vmaFreeMemory().
+
+Returned allocation is not tied to any specific memory pool or memory type and
+not bound to any image or buffer. It has size = 0. It cannot be turned into
+a real, non-empty allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation);
+
+/** \brief Maps memory represented by given allocation and returns pointer to it.
+
+Maps memory represented by given allocation to make it accessible to CPU code.
+If successful, `*ppData` contains pointer to first byte of this memory.
+If the allocation is part of bigger `VkDeviceMemory` block, the pointer is
+correctly offset to the beginning of region assigned to this particular
+allocation.
+
+Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
+function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory`
+multiple times simultaneously, it is safe to call this function on allocations
+assigned to the same memory block. Actual Vulkan memory will be mapped on first
+mapping and unmapped on last unmapping.
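+
+A hedged sketch (`srcData` and `srcSize` are hypothetical; the allocation is assumed to be `HOST_VISIBLE`):
+
+\code
+void* mappedData;
+if(vmaMapMemory(allocator, allocation, &mappedData) == VK_SUCCESS)
+{
+    memcpy(mappedData, srcData, srcSize);
+    vmaUnmapMemory(allocator, allocation);
+}
+\endcode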
+
+If the function succeeded, you must call vmaUnmapMemory() to unmap the
+allocation when the mapping is no longer needed or before freeing the allocation, at
+the latest.
+
+It is also safe to call this function multiple times on the same allocation. You
+must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().
+
+It is also safe to call this function on an allocation created with the
+#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
+You must still call vmaUnmapMemory() the same number of times as you called
+vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the
+"0-th" mapping made automatically due to the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
+
+This function fails when used on an allocation made in a memory type that is not
+`HOST_VISIBLE`.
+
+This function always fails when called for an allocation that was created with the
+#VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT flag. Such allocations cannot be
+mapped.
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    void* VMA_NULLABLE * VMA_NOT_NULL ppData);
+
+/** \brief Unmaps memory represented by the given allocation, mapped previously using vmaMapMemory().
+
+For details, see the description of vmaMapMemory().
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by the Vulkan specification.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation);
+
+/** \brief Flushes memory of the given allocation.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with the given range of the given allocation.
+It needs to be called after writing to mapped memory for memory types that are not `HOST_COHERENT`.
+The unmap operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of the allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of the given `allocation`.
+If you mean the whole allocation, pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass the allocation's offset as `offset`!
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize offset,
+    VkDeviceSize size);
+
+/** \brief Invalidates memory of the given allocation.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with the given range of the given allocation.
+It needs to be called before reading from mapped memory for memory types that are not `HOST_COHERENT`.
+The map operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of the allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of the given `allocation`.
+If you mean the whole allocation, pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass the allocation's offset as `offset`!
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
+it is called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize offset,
+    VkDeviceSize size);
+
+/** \brief Flushes memory of the given set of allocations.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with the given ranges of the given allocations.
+For more information, see the documentation of vmaFlushAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of the respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to flush in the respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t allocationCount,
+    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Invalidates memory of the given set of allocations.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with the given ranges of the given allocations.
+For more information, see the documentation of vmaInvalidateAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of the respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to invalidate in the respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t allocationCount,
+    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Checks magic number in margins around all allocations in the given memory types (in both default and custom pools) in search of corruption.
+
+@param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
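+For example, passing `UINT32_MAX` checks all memory types.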
+
+Corruption detection is enabled only when the `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+`VMA_DEBUG_MARGIN` is defined to nonzero, and only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+
+Possible return values:
+
+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of the specified memory types.
+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+- `VK_ERROR_VALIDATION_FAILED_EXT` - corruption detection has been performed and found memory corruption around one of the allocations.
+  `VMA_ASSERT` is also fired in that case.
+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator VMA_NOT_NULL allocator, uint32_t memoryTypeBits);
+
+/** \struct VmaDefragmentationContext
+\brief Opaque object that represents a started defragmentation process.
+
+Fill structure #VmaDefragmentationInfo2 and call function vmaDefragmentationBegin() to create it.
+Call function vmaDefragmentationEnd() to destroy it.
+*/
+VK_DEFINE_HANDLE(VmaDefragmentationContext)
+
+/// Flags to be used in vmaDefragmentationBegin().
+typedef enum VmaDefragmentationFlagBits {
+    VMA_DEFRAGMENTATION_FLAG_INCREMENTAL = 0x1,
+    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaDefragmentationFlagBits;
+typedef VkFlags VmaDefragmentationFlags;
+
+/** \brief Parameters for defragmentation.
+
+To be used with function vmaDefragmentationBegin().
+*/
+typedef struct VmaDefragmentationInfo2 {
+    /** \brief Flags for defragmentation. Use #VmaDefragmentationFlagBits enum.
+    */
+    VmaDefragmentationFlags flags;
+    /** \brief Number of allocations in `pAllocations` array.
+    */
+    uint32_t allocationCount;
+    /** \brief Pointer to array of allocations that can be defragmented.
+
+    The array should have `allocationCount` elements.
+    The array should not contain nulls.
+    Elements in the array should be unique - the same allocation cannot occur twice.
+    It is safe to pass allocations that are in the lost state - they are ignored.
+    All allocations not present in this array are considered non-moveable during this defragmentation.
+    */
+    const VmaAllocation VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations;
+    /** \brief Optional, output. Pointer to array that will be filled with information whether the allocation at a certain index has been changed during defragmentation.
+
+    The array should have `allocationCount` elements.
+    You can pass null if you are not interested in this information.
+    */
+    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged;
+    /** \brief Number of pools in `pPools` array.
+    */
+    uint32_t poolCount;
+    /** \brief Either null or pointer to array of pools to be defragmented.
+
+    All the allocations in the specified pools can be moved during defragmentation
+    and there is no way to check if they were really moved as in `pAllocationsChanged`,
+    so you must query all the allocations in all these pools for new `VkDeviceMemory`
+    and offset using vmaGetAllocationInfo() if you might need to recreate buffers
+    and images bound to them.
+
+    The array should have `poolCount` elements.
+    The array should not contain nulls.
+    Elements in the array should be unique - the same pool cannot occur twice.
+
+    Using this array is equivalent to specifying all allocations from the pools in `pAllocations`. It might be more efficient.
+    */
+    const VmaPool VMA_NOT_NULL * VMA_NULLABLE VMA_LEN_IF_NOT_NULL(poolCount) pPools;
+    /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on CPU side, like `memcpy()`, `memmove()`.
+
+    `VK_WHOLE_SIZE` means no limit.
+    */
+    VkDeviceSize maxCpuBytesToMove;
+    /** \brief Maximum number of allocations that can be moved to a different place using transfers on CPU side, like `memcpy()`, `memmove()`.
+
+    `UINT32_MAX` means no limit.
+    */
+    uint32_t maxCpuAllocationsToMove;
+    /** \brief Maximum total number of bytes that can be copied while moving allocations to different places using transfers on GPU side, posted to `commandBuffer`.
+
+    `VK_WHOLE_SIZE` means no limit.
+    */
+    VkDeviceSize maxGpuBytesToMove;
+    /** \brief Maximum number of allocations that can be moved to a different place using transfers on GPU side, posted to `commandBuffer`.
+
+    `UINT32_MAX` means no limit.
+    */
+    uint32_t maxGpuAllocationsToMove;
+    /** \brief Optional. Command buffer where GPU copy commands will be posted.
+
+    If not null, it must be a valid command buffer handle that supports the transfer queue type.
+    It must be in the recording state and outside of a render pass instance.
+    You need to submit it and make sure it finished execution before calling vmaDefragmentationEnd().
+
+    Passing null means that only CPU defragmentation will be performed.
+    */
+    VkCommandBuffer VMA_NULLABLE commandBuffer;
+} VmaDefragmentationInfo2;
+
+typedef struct VmaDefragmentationPassMoveInfo {
+    VmaAllocation VMA_NOT_NULL allocation;
+    VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory;
+    VkDeviceSize offset;
+} VmaDefragmentationPassMoveInfo;
+
+/** \brief Parameters for incremental defragmentation steps.
+
+To be used with function vmaBeginDefragmentationPass().
+*/
+typedef struct VmaDefragmentationPassInfo {
+    uint32_t moveCount;
+    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
+} VmaDefragmentationPassInfo;
+
+/** \brief Deprecated. Optional configuration parameters to be passed to function vmaDefragment().
+
+\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
+*/
+typedef struct VmaDefragmentationInfo {
+    /** \brief Maximum total number of bytes that can be copied while moving allocations to different places.
+
+    Default is `VK_WHOLE_SIZE`, which means no limit.
+    */
+    VkDeviceSize maxBytesToMove;
+    /** \brief Maximum number of allocations that can be moved to a different place.
+
+    Default is `UINT32_MAX`, which means no limit.
+    */
+    uint32_t maxAllocationsToMove;
+} VmaDefragmentationInfo;
+
+/** \brief Statistics returned by function vmaDefragment(). */
+typedef struct VmaDefragmentationStats {
+    /// Total number of bytes that have been copied while moving allocations to different places.
+    VkDeviceSize bytesMoved;
+    /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
+    VkDeviceSize bytesFreed;
+    /// Number of allocations that have been moved to different places.
+    uint32_t allocationsMoved;
+    /// Number of empty `VkDeviceMemory` objects that have been released to the system.
+    uint32_t deviceMemoryBlocksFreed;
+} VmaDefragmentationStats;
+
+/** \brief Begins defragmentation process.
+
+@param allocator Allocator object.
+@param pInfo Structure filled with parameters of defragmentation.
+@param[out] pStats Optional. Statistics of defragmentation. You can pass null if you are not interested in this information.
+@param[out] pContext Context object that must be passed to vmaDefragmentationEnd() to finish defragmentation.
+@return `VK_SUCCESS` and `*pContext == null` if defragmentation finished within this function call. `VK_NOT_READY` and `*pContext != null` if defragmentation has been started and you need to call vmaDefragmentationEnd() to finish it. Negative value in case of error.
+
+Use this function instead of the old, deprecated vmaDefragment().
+
+Warning! Between the calls to vmaDefragmentationBegin() and vmaDefragmentationEnd():
+
+- You should not use any of the allocations passed as `pInfo->pAllocations` or
+  any allocations that belong to pools passed as `pInfo->pPools`,
+  including calling vmaGetAllocationInfo() or vmaTouchAllocation() on them, or
+  accessing their data.
+- Some mutexes protecting internal data structures may be locked, so trying to
+  make or free any allocations, bind buffers or images, map memory, or launch
+  another simultaneous defragmentation in between may cause a stall (when done on
+  another thread) or a deadlock (when done on the same thread), unless you are
+  100% sure that the defragmented allocations are in different pools.
+- Information returned via `pStats` and `pInfo->pAllocationsChanged` is undefined.
+  It becomes valid after the call to vmaDefragmentationEnd().
+- If `pInfo->commandBuffer` is not null, you must submit that command buffer
+  and make sure it finished execution before calling vmaDefragmentationEnd().
+
+For more information and important limitations regarding defragmentation, see documentation chapter:
+[Defragmentation](@ref defragmentation).
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VmaDefragmentationInfo2* VMA_NOT_NULL pInfo,
+    VmaDefragmentationStats* VMA_NULLABLE pStats,
+    VmaDefragmentationContext VMA_NULLABLE * VMA_NOT_NULL pContext);
+
+/** \brief Ends defragmentation process.
+
+Use this function to finish defragmentation started by vmaDefragmentationBegin().
+It is safe to pass `context == null`. The function then does nothing.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaDefragmentationContext VMA_NULLABLE context);
+
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaDefragmentationContext VMA_NULLABLE context,
+    VmaDefragmentationPassInfo* VMA_NOT_NULL pInfo
+);
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaDefragmentationContext VMA_NULLABLE context
+);
+
+/** \brief Deprecated. Compacts memory by moving allocations.
+
+@param pAllocations Array of allocations that can be moved during this compaction.
+@param allocationCount Number of elements in pAllocations and pAllocationsChanged arrays.
+@param[out] pAllocationsChanged Array of boolean values that will indicate whether the matching allocation in the pAllocations array has been moved. This parameter is optional. Pass null if you don't need this information.
+@param pDefragmentationInfo Configuration parameters. Optional - pass null to use default values.
+@param[out] pDefragmentationStats Statistics returned by the function. Optional - pass null if you don't need this information.
+@return `VK_SUCCESS` if completed, negative error code in case of error.
+
+\deprecated This is a part of the old interface. It is recommended to use structure #VmaDefragmentationInfo2 and function vmaDefragmentationBegin() instead.
+
+This function works by moving allocations to different places (different
+`VkDeviceMemory` objects and/or different offsets) in order to optimize memory
+usage. Only allocations that are in the `pAllocations` array can be moved. All other
+allocations are considered nonmovable in this call. Basic rules:
+
+- Only allocations made in memory types that have
+  `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`
+  flags can be compacted. You may pass other allocations, but it makes no sense -
+  these will never be moved.
+- Custom pools created with the #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT or
+  #VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT flag are not defragmented. Allocations
+  passed to this function that come from such pools are ignored.
+- Allocations created with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT or
+  created as dedicated allocations for any other reason are also ignored.
+- Allocations made both with and without the #VMA_ALLOCATION_CREATE_MAPPED_BIT
+  flag can be compacted. If not persistently mapped, memory will be mapped
+  temporarily inside this function if needed.
+- You must not pass the same #VmaAllocation object multiple times in the `pAllocations` array.
+
+The function also frees empty `VkDeviceMemory` blocks.
+
+Warning: This function may be time-consuming, so you shouldn't call it too often
+(like after every resource creation/destruction).
+You can call it on special occasions (like when reloading a game level or
+when you just destroyed a lot of objects). Calling it every frame may be OK, but
+you should measure that on your platform.
+
+For more information, see the [Defragmentation](@ref defragmentation) chapter.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VmaAllocation VMA_NOT_NULL * VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
+    size_t allocationCount,
+    VkBool32* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationsChanged,
+    const VmaDefragmentationInfo* VMA_NULLABLE pDefragmentationInfo,
+    VmaDefragmentationStats* VMA_NULLABLE pDefragmentationStats);
+
+/** \brief Binds buffer to allocation.
+
+Binds the specified buffer to the region of memory represented by the specified allocation.
+Gets the `VkDeviceMemory` handle and offset from the allocation.
+If you want to create a buffer, allocate memory for it, and bind them together separately,
+you should use this function for binding instead of the standard `vkBindBufferMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use the function vmaCreateBuffer() instead of this one.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
+
+/** \brief Binds buffer to allocation with additional parameters.
+
+@param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+@param pNext A chain of structures to be attached to the `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
+
+This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
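+
+A minimal call (illustrative) with a zero local offset and no `pNext` chain behaves
+like vmaBindBufferMemory():
+
+\code
+vmaBindBufferMemory2(allocator, allocation, 0, buffer, nullptr);
+\endcode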
+
+If `pNext` is not null, the #VmaAllocator object must have been created with the #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize allocationLocalOffset,
+    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+    const void* VMA_NULLABLE pNext);
+
+/** \brief Binds image to allocation.
+
+Binds the specified image to the region of memory represented by the specified allocation.
+Gets the `VkDeviceMemory` handle and offset from the allocation.
+If you want to create an image, allocate memory for it, and bind them together separately,
+you should use this function for binding instead of the standard `vkBindImageMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use the function vmaCreateImage() instead of this one.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
+
+/** \brief Binds image to allocation with additional parameters.
+
+@param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+@param pNext A chain of structures to be attached to the `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
+
+This function is similar to vmaBindImageMemory(), but it provides additional parameters.
+
+If `pNext` is not null, the #VmaAllocator object must have been created with the #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize allocationLocalOffset,
+    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+    const void* VMA_NULLABLE pNext);
+
+/**
+@param[out] pBuffer Buffer that was created.
+@param[out] pAllocation Allocation that was created.
+@param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+This function automatically:
+
+-# Creates the buffer.
+-# Allocates appropriate memory for it.
+-# Binds the buffer with the memory.
+
+If any of these operations fail, the buffer and allocation are not created, the
+returned value is a negative error code, and `*pBuffer` and `*pAllocation` are null.
+
+If the function succeeded, you must destroy both buffer and allocation when you
+no longer need them, using either the convenience function vmaDestroyBuffer() or
+separately, using `vkDestroyBuffer()` and vmaFreeMemory().
+
+If the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
+the VK_KHR_dedicated_allocation extension is used internally to query the driver whether
+it requires or prefers the new buffer to have a dedicated allocation. If yes,
+and if dedicated allocation is possible (VmaAllocationCreateInfo::pool is null
+and #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates a dedicated
+allocation for this buffer, just like when using
+#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
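+
+A typical call looks like this (illustrative sketch; the buffer size and usage
+flags are arbitrary example values):
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+
+VkBuffer buf;
+VmaAllocation alloc;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+\endcode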
+
+\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
+although recommended as a good practice, is out of scope of this library and could be implemented
+by the user as a higher-level logic on top of VMA.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pBuffer,
+    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Destroys Vulkan buffer and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyBuffer(device, buffer, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as buffer and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
+    VmaAllocation VMA_NULLABLE allocation);
+
+/// Function similar to vmaCreateBuffer().
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE * VMA_NOT_NULL pImage,
+    VmaAllocation VMA_NULLABLE * VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Destroys Vulkan image and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyImage(device, image, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as image and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
+    VmaAllocation VMA_NULLABLE allocation);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+// For Visual Studio IntelliSense.
+#if defined(__cplusplus) && defined(__INTELLISENSE__)
+#define VMA_IMPLEMENTATION
+#endif
+
+#ifdef VMA_IMPLEMENTATION
+#undef VMA_IMPLEMENTATION
+
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <utility>
+
+#if VMA_RECORDING_ENABLED
+    #include <chrono>
+    #if defined(_WIN32)
+        #include <windows.h>
+    #else
+        #include <sstream>
+        #include <thread>
+    #endif
+#endif
+
+/*******************************************************************************
+CONFIGURATION SECTION
+
+Define some of these macros before each #include of this header or change them
+here if you need other than the default behavior depending on your environment.
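+
+For example (an illustrative configuration, using only macros documented below),
+to enable allocation margins with corruption detection in a debug build, define
+the following before including this file:
+
+    #define VMA_DEBUG_MARGIN 16
+    #define VMA_DEBUG_DETECT_CORRUPTION 1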
+*/
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+*/
+#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
+    #define VMA_STATIC_VULKAN_FUNCTIONS 1
+#endif
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(m_hDevice, "vkAllocateMemory");
+*/
+#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
+    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+    #if defined(VK_NO_PROTOTYPES)
+        extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
+        extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
+    #endif
+#endif
+
+// Define this macro to 1 to make the library use STL containers instead of its own implementation.
+//#define VMA_USE_STL_CONTAINERS 1
+
+/* Set this macro to 1 to make the library include and use STL containers:
+std::pair, std::vector, std::list, std::unordered_map.
+
+Set it to 0 or leave it undefined to make the library use its own implementation of
+the containers.
+*/
+#if VMA_USE_STL_CONTAINERS
+    #define VMA_USE_STL_VECTOR 1
+    #define VMA_USE_STL_UNORDERED_MAP 1
+    #define VMA_USE_STL_LIST 1
+#endif
+
+#ifndef VMA_USE_STL_SHARED_MUTEX
+    // Compiler conforms to C++17.
+    #if __cplusplus >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    // Visual Studio defines __cplusplus properly only when passed an additional parameter: /Zc:__cplusplus
+    // Otherwise it's always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
+    // See: https://blogs.msdn.microsoft.com/vcblog/2018/04/09/msvc-now-correctly-reports-__cplusplus/
+    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    #else
+        #define VMA_USE_STL_SHARED_MUTEX 0
+    #endif
+#endif
+
+/*
+THESE INCLUDES ARE NOT ENABLED BY DEFAULT.
+The library has its own container implementation.
+*/
+#if VMA_USE_STL_VECTOR
+    #include <vector>
+#endif
+
+#if VMA_USE_STL_UNORDERED_MAP
+    #include <unordered_map>
+#endif
+
+#if VMA_USE_STL_LIST
+    #include <list>
+#endif
+
+/*
+Following headers are used in this CONFIGURATION section only, so feel free to
+remove them if not needed.
+*/
+#include <cassert> // for assert
+#include <algorithm> // for min, max
+#include <mutex>
+#include <atomic> // for std::atomic
+
+#ifndef VMA_NULL
+    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+    #define VMA_NULL nullptr
+#endif
+
+#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
+#include <cstdlib>
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    // alignment must be >= sizeof(void*)
+    if(alignment < sizeof(void*))
+    {
+        alignment = sizeof(void*);
+    }
+
+    return memalign(alignment, size);
+}
+#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
+#include <cstdlib>
+
+#if defined(__APPLE__)
+#include <AvailabilityMacros.h>
+#endif
+
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
+#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
+    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc() only
+    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
+    // MAC_OS_X_VERSION_10_16), even though the function is marked
+    // available for 10.15. That's why the preprocessor checks for 10.16 but
+    // the __builtin_available checks for 10.15.
+    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
+    if (__builtin_available(macOS 10.15, iOS 13, *))
+        return aligned_alloc(alignment, size);
+#endif
+#endif
+    // alignment must be >= sizeof(void*)
+    if(alignment < sizeof(void*))
+    {
+        alignment = sizeof(void*);
+    }
+
+    void *pointer;
+    if(posix_memalign(&pointer, alignment, size) == 0)
+        return pointer;
+    return VMA_NULL;
+}
+#elif defined(_WIN32)
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    return _aligned_malloc(size, alignment);
+}
+#else
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    return aligned_alloc(alignment, size);
+}
+#endif
+
+#if defined(_WIN32)
+static void vma_aligned_free(void* ptr)
+{
+    _aligned_free(ptr);
+}
+#else
+static void vma_aligned_free(void* ptr)
+{
+    free(ptr);
+}
+#endif
+
+// If your compiler is not compatible with C++11 and the definition of the
+// aligned_alloc() function is missing, uncommenting the following line may help:
+
+//#include <malloc.h>
+
+// Normal assert to check for programmer's errors, especially in Debug configuration.
+#ifndef VMA_ASSERT
+    #ifdef NDEBUG
+        #define VMA_ASSERT(expr)
+    #else
+        #define VMA_ASSERT(expr) assert(expr)
+    #endif
+#endif
+
+// Assert that will be called very often, like inside data structures e.g. operator[].
+// Making it non-empty can make the program slow.
+#ifndef VMA_HEAVY_ASSERT
+    #ifdef NDEBUG
+        #define VMA_HEAVY_ASSERT(expr)
+    #else
+        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
+    #endif
+#endif
+
+#ifndef VMA_ALIGN_OF
+    #define VMA_ALIGN_OF(type) (__alignof(type))
+#endif
+
+#ifndef VMA_SYSTEM_ALIGNED_MALLOC
+    #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
+#endif
+
+#ifndef VMA_SYSTEM_ALIGNED_FREE
+    // VMA_SYSTEM_FREE is the old name, but it might have been defined by the user
+    #if defined(VMA_SYSTEM_FREE)
+        #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
+    #else
+        #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
+    #endif
+#endif
+
+#ifndef VMA_MIN
+    #define VMA_MIN(v1, v2) (std::min((v1), (v2)))
+#endif
+
+#ifndef VMA_MAX
+    #define VMA_MAX(v1, v2) (std::max((v1), (v2)))
+#endif
+
+#ifndef VMA_SWAP
+    #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
+#endif
+
+#ifndef VMA_SORT
+    #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
+#endif
+
+#ifndef VMA_DEBUG_LOG
+    #define VMA_DEBUG_LOG(format, ...)
+    /*
+    #define VMA_DEBUG_LOG(format, ...) do { \
+        printf(format, __VA_ARGS__); \
+        printf("\n"); \
+    } while(false)
+    */
+#endif
+
+// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
+#if VMA_STATS_STRING_ENABLED
+    static inline void VmaUint32ToStr(char* outStr, size_t strLen, uint32_t num)
+    {
+        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
+    }
+    static inline void VmaUint64ToStr(char* outStr, size_t strLen, uint64_t num)
+    {
+        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
+    }
+    static inline void VmaPtrToStr(char* outStr, size_t strLen, const void* ptr)
+    {
+        snprintf(outStr, strLen, "%p", ptr);
+    }
+#endif
+
+#ifndef VMA_MUTEX
+    class VmaMutex
+    {
+    public:
+        void Lock() { m_Mutex.lock(); }
+        void Unlock() { m_Mutex.unlock(); }
+        bool TryLock() { return m_Mutex.try_lock(); }
+    private:
+        std::mutex m_Mutex;
+    };
+    #define VMA_MUTEX VmaMutex
+#endif
+
+// Read-write mutex, where "read" is shared access, "write" is exclusive access.
+#ifndef VMA_RW_MUTEX
+    #if VMA_USE_STL_SHARED_MUTEX
+        // Use std::shared_mutex from C++17.
+        #include <shared_mutex>
+        class VmaRWMutex
+        {
+        public:
+            void LockRead() { m_Mutex.lock_shared(); }
+            void UnlockRead() { m_Mutex.unlock_shared(); }
+            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
+            void LockWrite() { m_Mutex.lock(); }
+            void UnlockWrite() { m_Mutex.unlock(); }
+            bool TryLockWrite() { return m_Mutex.try_lock(); }
+        private:
+            std::shared_mutex m_Mutex;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
+        // Use SRWLOCK from WinAPI.
+        // Minimum supported client = Windows Vista, server = Windows Server 2008.
+        class VmaRWMutex
+        {
+        public:
+            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
+            void LockRead() { AcquireSRWLockShared(&m_Lock); }
+            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
+            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
+            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
+            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
+            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
+        private:
+            SRWLOCK m_Lock;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #else
+        // Less efficient fallback: Use normal mutex.
+        class VmaRWMutex
+        {
+        public:
+            void LockRead() { m_Mutex.Lock(); }
+            void UnlockRead() { m_Mutex.Unlock(); }
+            bool TryLockRead() { return m_Mutex.TryLock(); }
+            void LockWrite() { m_Mutex.Lock(); }
+            void UnlockWrite() { m_Mutex.Unlock(); }
+            bool TryLockWrite() { return m_Mutex.TryLock(); }
+        private:
+            VMA_MUTEX m_Mutex;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #endif // #if VMA_USE_STL_SHARED_MUTEX
+#endif // #ifndef VMA_RW_MUTEX
+
+/*
+If providing your own implementation, you need to implement a subset of std::atomic.
+*/
+#ifndef VMA_ATOMIC_UINT32
+    #include <atomic>
+    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
+#endif
+
+#ifndef VMA_ATOMIC_UINT64
+    #include <atomic>
+    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
+#endif
+
+#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
+    /**
+    Every allocation will have its own memory block.
+    Define to 1 for debugging purposes only.
+    */
+    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
+#endif
+
+#ifndef VMA_DEBUG_ALIGNMENT
+    /**
+    Minimum alignment of all allocations, in bytes.
+    Set to more than 1 for debugging purposes only. Must be a power of two.
+    */
+    #define VMA_DEBUG_ALIGNMENT (1)
+#endif
+
+#ifndef VMA_DEBUG_MARGIN
+    /**
+    Minimum margin before and after every allocation, in bytes.
+    Set to a nonzero value for debugging purposes only.
+    */
+    #define VMA_DEBUG_MARGIN (0)
+#endif
+
+#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
+    /**
+    Define this macro to 1 to automatically fill new allocations and destroyed
+    allocations with some bit pattern.
+    */
+    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
+#endif
+
+#ifndef VMA_DEBUG_DETECT_CORRUPTION
+    /**
+    Define this macro to 1 together with a non-zero value of VMA_DEBUG_MARGIN to
+    enable writing a magic value to the margin before and after every allocation and
+    validating it, so that memory corruptions (out-of-bounds writes) are detected.
+    */
+    #define VMA_DEBUG_DETECT_CORRUPTION (0)
+#endif
+
+#ifndef VMA_DEBUG_GLOBAL_MUTEX
+    /**
+    Set this to 1 for debugging purposes only, to enable a single mutex protecting all
+    entry calls to the library. Can be useful for debugging multithreading issues.
+    */
+    #define VMA_DEBUG_GLOBAL_MUTEX (0)
+#endif
+
+#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
+    /**
+    Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
+    Set to more than 1 for debugging purposes only. Must be a power of two.
+    */
+    #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
+#endif
+
+#ifndef VMA_SMALL_HEAP_MAX_SIZE
+    /// Maximum size of a memory heap in Vulkan to consider it "small".
+    #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
+#endif
+
+#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
+    /// Default size of a block allocated as a single VkDeviceMemory from a "large" heap.
+    #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
+#endif
+
+#ifndef VMA_CLASS_NO_COPY
+    #define VMA_CLASS_NO_COPY(className) \
+        private: \
+            className(const className&) = delete; \
+            className& operator=(const className&) = delete;
+#endif
+
+static const uint32_t VMA_FRAME_INDEX_LOST = UINT32_MAX;
+
+// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
+static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
+
+static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
+static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
+
+/*******************************************************************************
+END OF CONFIGURATION
+*/
+
+// # Copy of some Vulkan definitions so we don't need to check their existence just to handle a few constants.
+
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
+static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
+
+static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
+
+static VkAllocationCallbacks VmaEmptyAllocationCallbacks = {
+    VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
+
+// Returns the number of bits set to 1 in (v).
+static inline uint32_t VmaCountBitsSet(uint32_t v)
+{
+    uint32_t c = v - ((v >> 1) & 0x55555555);
+    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
+    c = ((c >> 4) + c) & 0x0F0F0F0F;
+    c = ((c >> 8) + c) & 0x00FF00FF;
+    c = ((c >> 16) + c) & 0x0000FFFF;
+    return c;
+}
+
+/*
+Returns true if the given number is a power of two.
+T must be an unsigned integer, or a signed integer that is always nonnegative.
+Returns true for 0.
+*/
+template <typename T>
+inline bool VmaIsPow2(T x)
+{
+    return (x & (x-1)) == 0;
+}
+
+// Aligns the given value up to the nearest multiple of the align value. For example: VmaAlignUp(11, 8) = 16.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignUp(T val, T alignment)
+{
+    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+    return (val + alignment - 1) & ~(alignment - 1);
+}
+// Aligns the given value down to the nearest multiple of the align value. For example: VmaAlignDown(11, 8) = 8.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignDown(T val, T alignment)
+{
+    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+    return val & ~(alignment - 1);
+}
+
+// Division with mathematical rounding to the nearest number.
+template <typename T>
+static inline T VmaRoundDiv(T x, T y)
+{
+    return (x + (y / (T)2)) / y;
+}
+
+// Returns the smallest power of 2 greater than or equal to v.
+static inline uint32_t VmaNextPow2(uint32_t v)
+{
+    v--;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v++;
+    return v;
+}
+static inline uint64_t VmaNextPow2(uint64_t v)
+{
+    v--;
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v |= v >> 32;
+    v++;
+    return v;
+}
+
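+// Illustrative values for the helpers above (added commentary, not part of the
+// original header): VmaAlignUp(11u, 8u) == 16, VmaAlignDown(11u, 8u) == 8,
+// VmaRoundDiv(7u, 2u) == 4, VmaNextPow2(17u) == 32, VmaIsPow2(0) == true.
+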
+// Returns the largest power of 2 less than or equal to v.
+static inline uint32_t VmaPrevPow2(uint32_t v)
+{
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v = v ^ (v >> 1);
+    return v;
+}
+static inline uint64_t VmaPrevPow2(uint64_t v)
+{
+    v |= v >> 1;
+    v |= v >> 2;
+    v |= v >> 4;
+    v |= v >> 8;
+    v |= v >> 16;
+    v |= v >> 32;
+    v = v ^ (v >> 1);
+    return v;
+}
+
+static inline bool VmaStrIsEmpty(const char* pStr)
+{
+    return pStr == VMA_NULL || *pStr == '\0';
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+static const char* VmaAlgorithmToStr(uint32_t algorithm)
+{
+    switch(algorithm)
+    {
+    case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
+        return "Linear";
+    case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT:
+        return "Buddy";
+    case 0:
+        return "Default";
+    default:
+        VMA_ASSERT(0);
+        return "";
+    }
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+#ifndef VMA_SORT
+
+template<typename Iterator, typename Compare>
+Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp)
+{
+    Iterator centerValue = end; --centerValue;
+    Iterator insertIndex = beg;
+    for(Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex)
+    {
+        if(cmp(*memTypeIndex, *centerValue))
+        {
+            if(insertIndex != memTypeIndex)
+            {
+                VMA_SWAP(*memTypeIndex, *insertIndex);
+            }
+            ++insertIndex;
+        }
+    }
+    if(insertIndex != centerValue)
+    {
+        VMA_SWAP(*insertIndex, *centerValue);
+    }
+    return insertIndex;
+}
+
+template<typename Iterator, typename Compare>
+void VmaQuickSort(Iterator beg, Iterator end, Compare cmp)
+{
+    if(beg < end)
+    {
+        Iterator it = VmaQuickSortPartition(beg, end, cmp);
+        VmaQuickSort(beg, it, cmp);
+        VmaQuickSort(it + 1, end, cmp);
+    }
+}
+
+#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp)
+
+#endif // #ifndef VMA_SORT
+
+/*
+Returns true if two memory blocks occupy overlapping pages.
+ResourceA must be at a lower memory offset than ResourceB.
+
+Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
+chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
+*/
+static inline bool VmaBlocksOnSamePage(
+    VkDeviceSize resourceAOffset,
+    VkDeviceSize resourceASize,
+    VkDeviceSize resourceBOffset,
+    VkDeviceSize pageSize)
+{
+    VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
+    VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
+    VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
+    VkDeviceSize resourceBStart = resourceBOffset;
+    VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
+    return resourceAEndPage == resourceBStartPage;
+}
+
+enum VmaSuballocationType
+{
+    VMA_SUBALLOCATION_TYPE_FREE = 0,
+    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
+    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
+    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
+    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
+    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
+    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+};
+
+/*
+Returns true if the given suballocation types could conflict and must respect
+VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is a buffer
+or linear image and the other one is an optimal image. If the type is unknown, behave
+conservatively.
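+For example, a BUFFER next to an IMAGE_OPTIMAL conflicts and must be separated by
+bufferImageGranularity, while two BUFFERs or a BUFFER next to an IMAGE_LINEAR never conflict.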
+*/ +static inline bool VmaIsBufferImageGranularityConflict( + VmaSuballocationType suballocType1, + VmaSuballocationType suballocType2) +{ + if(suballocType1 > suballocType2) + { + VMA_SWAP(suballocType1, suballocType2); + } + + switch(suballocType1) + { + case VMA_SUBALLOCATION_TYPE_FREE: + return false; + case VMA_SUBALLOCATION_TYPE_UNKNOWN: + return true; + case VMA_SUBALLOCATION_TYPE_BUFFER: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: + return false; + default: + VMA_ASSERT(0); + return true; + } +} + +static void VmaWriteMagicValue(void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + uint32_t* pDst = (uint32_t*)((char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for(size_t i = 0; i < numberCount; ++i, ++pDst) + { + *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; + } +#else + // no-op +#endif +} + +static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for(size_t i = 0; i < numberCount; ++i, ++pSrc) + { + if(*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) + { + return false; + } + } +#endif + return true; +} + +/* +Fills structure with parameters of an example buffer to be used for transfers +during GPU memory defragmentation. +*/ +static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo) +{ + memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo)); + outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size. +} + +// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). +struct VmaMutexLock +{ + VMA_CLASS_NO_COPY(VmaMutexLock) +public: + VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { if(m_pMutex) { m_pMutex->Lock(); } } + ~VmaMutexLock() + { if(m_pMutex) { m_pMutex->Unlock(); } } +private: + VMA_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. +struct VmaMutexLockRead +{ + VMA_CLASS_NO_COPY(VmaMutexLockRead) +public: + VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { if(m_pMutex) { m_pMutex->LockRead(); } } + ~VmaMutexLockRead() { if(m_pMutex) { m_pMutex->UnlockRead(); } } +private: + VMA_RW_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. +struct VmaMutexLockWrite +{ + VMA_CLASS_NO_COPY(VmaMutexLockWrite) +public: + VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? 
&mutex : VMA_NULL)
+    { if(m_pMutex) { m_pMutex->LockWrite(); } }
+    ~VmaMutexLockWrite() { if(m_pMutex) { m_pMutex->UnlockWrite(); } }
+private:
+    VMA_RW_MUTEX* m_pMutex;
+};
+
+#if VMA_DEBUG_GLOBAL_MUTEX
+    static VMA_MUTEX gDebugGlobalMutex;
+    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
+#else
+    #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
+#endif
+
+// Minimum size of a free suballocation to register it in the free suballocation collection.
+static const VkDeviceSize VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER = 16;
+
+/*
+Performs binary search and returns an iterator to the first element that is greater than or
+equal to (key), according to comparison (cmp).
+
+Cmp should return true if its first argument is less than its second argument.
+
+The returned value is the found element, if present in the collection, or the place where
+a new element with value (key) should be inserted.
+*/
+template <typename CmpLess, typename IterT, typename KeyT>
+static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess& cmp)
+{
+    size_t down = 0, up = (end - beg);
+    while(down < up)
+    {
+        const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
+        if(cmp(*(beg+mid), key))
+        {
+            down = mid + 1;
+        }
+        else
+        {
+            up = mid;
+        }
+    }
+    return beg + down;
+}
+
+template<typename CmpLess, typename IterT, typename KeyT>
+IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
+{
+    IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
+        beg, end, value, cmp);
+    if(it == end ||
+        (!cmp(*it, value) && !cmp(value, *it)))
+    {
+        return it;
+    }
+    return end;
+}
+
+/*
+Returns true if all pointers in the array are non-null and unique.
+Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
+T must be a pointer type, e.g. VmaAllocation, VmaPool.
+*/
+template<typename T>
+static bool VmaValidatePointerArray(uint32_t count, const T* arr)
+{
+    for(uint32_t i = 0; i < count; ++i)
+    {
+        const T iPtr = arr[i];
+        if(iPtr == VMA_NULL)
+        {
+            return false;
+        }
+        for(uint32_t j = i + 1; j < count; ++j)
+        {
+            if(iPtr == arr[j])
+            {
+                return false;
+            }
+        }
+    }
+    return true;
+}
+
+template<typename MainT, typename NewT>
+static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
+{
+    newStruct->pNext = mainStruct->pNext;
+    mainStruct->pNext = newStruct;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Memory allocation
+
+static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
+{
+    void* result = VMA_NULL;
+    if((pAllocationCallbacks != VMA_NULL) &&
+        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
+    {
+        result = (*pAllocationCallbacks->pfnAllocation)(
+            pAllocationCallbacks->pUserData,
+            size,
+            alignment,
+            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+    }
+    else
+    {
+        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
+    }
+    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
+    return result;
+}
+
+static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
+{
+    if((pAllocationCallbacks != VMA_NULL) &&
+        (pAllocationCallbacks->pfnFree != VMA_NULL))
+    {
+        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
+    }
+    else
+    {
+        VMA_SYSTEM_ALIGNED_FREE(ptr);
+    }
+}
+
+template<typename T>
+static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
+{
+    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
+{
+    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
+}
+
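+// Illustrative use of VmaBinaryFindFirstNotLess (added commentary, not part of
+// the original header): with a sorted array {1, 3, 5, 7} and cmp(a, b) = a < b,
+// searching for 5 yields an iterator to the element 5, and searching for 4
+// yields the same position - the correct insertion point for a new element.
+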
+#define vma_new(allocator, type)   new(VmaAllocate<type>(allocator))(type)
+
+#define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)
+
+template<typename T>
+static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
+{
+    ptr->~T();
+    VmaFree(pAllocationCallbacks, ptr);
+}
+
+template<typename T>
+static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
+{
+    if(ptr != VMA_NULL)
+    {
+        for(size_t i = count; i--; )
+        {
+            ptr[i].~T();
+        }
+        VmaFree(pAllocationCallbacks, ptr);
+    }
+}
+
+static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
+{
+    if(srcStr != VMA_NULL)
+    {
+        const size_t len = strlen(srcStr);
+        char* const result = vma_new_array(allocs, char, len + 1);
+        memcpy(result, srcStr, len + 1);
+        return result;
+    }
+    else
+    {
+        return VMA_NULL;
+    }
+}
+
+static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
+{
+    if(str != VMA_NULL)
+    {
+        const size_t len = strlen(str);
+        vma_delete_array(allocs, str, len + 1);
+    }
+}
+
+// STL-compatible allocator.
+template<typename T>
+class VmaStlAllocator
+{
+public:
+    const VkAllocationCallbacks* const m_pCallbacks;
+    typedef T value_type;
+
+    VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) { }
+    template<typename U> VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) { }
+
+    T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
+    void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
+
+    template<typename U>
+    bool operator==(const VmaStlAllocator<U>& rhs) const
+    {
+        return m_pCallbacks == rhs.m_pCallbacks;
+    }
+    template<typename U>
+    bool operator!=(const VmaStlAllocator<U>& rhs) const
+    {
+        return m_pCallbacks != rhs.m_pCallbacks;
+    }
+
+    VmaStlAllocator& operator=(const VmaStlAllocator& x) = delete;
+};
+
+#if VMA_USE_STL_VECTOR
+
+#define VmaVector std::vector
+
+template<typename T, typename allocatorT>
+static void VmaVectorInsert(std::vector<T, allocatorT>& vec, size_t index, const T& item)
+{
+    vec.insert(vec.begin() + index, item);
+}
+
+template<typename T, typename allocatorT>
+static void VmaVectorRemove(std::vector<T, allocatorT>& vec, size_t index)
+{
+    vec.erase(vec.begin() + index);
+}
+
+#else // #if VMA_USE_STL_VECTOR
+
+/* Class with interface compatible with a subset of std::vector.
+T must be POD because constructors and destructors are not called and memcpy is
+used for these objects. */
+template<typename T, typename AllocatorT>
+class VmaVector
+{
+public:
+    typedef T value_type;
+
+    VmaVector(const AllocatorT& allocator) :
+        m_Allocator(allocator),
+        m_pArray(VMA_NULL),
+        m_Count(0),
+        m_Capacity(0)
+    {
+    }
+
+    VmaVector(size_t count, const AllocatorT& allocator) :
+        m_Allocator(allocator),
+        m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
+        m_Count(count),
+        m_Capacity(count)
+    {
+    }
+
+    // This version of the constructor is here for compatibility with pre-C++14 std::vector.
+    // value is unused.
+    VmaVector(size_t count, const T& value, const AllocatorT& allocator)
+        : VmaVector(count, allocator) {}
+
+    VmaVector(const VmaVector<T, AllocatorT>& src) :
+        m_Allocator(src.m_Allocator),
+        m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
+        m_Count(src.m_Count),
+        m_Capacity(src.m_Count)
+    {
+        if(m_Count != 0)
+        {
+            memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
+        }
+    }
+
+    ~VmaVector()
+    {
+        VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+    }
+
+    VmaVector& operator=(const VmaVector<T, AllocatorT>& rhs)
+    {
+        if(&rhs != this)
+        {
+            resize(rhs.m_Count);
+            if(m_Count != 0)
+            {
+                memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
+            }
+        }
+        return *this;
+    }
+
+    bool empty() const { return m_Count == 0; }
+    size_t size() const { return m_Count; }
+    T* data() { return m_pArray; }
+    const T* data() const { return m_pArray; }
+
+    T& operator[](size_t index)
+    {
+        VMA_HEAVY_ASSERT(index < m_Count);
+        return m_pArray[index];
+    }
+    const T& operator[](size_t index) const
+    {
+        VMA_HEAVY_ASSERT(index < m_Count);
+        return m_pArray[index];
+    }
+
+    T& front()
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        return m_pArray[0];
+    }
+    const T& front() const
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        return m_pArray[0];
+    }
+    T& back()
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        return m_pArray[m_Count - 1];
+    }
+    const T& back() const
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        return m_pArray[m_Count - 1];
+    }
+
+    void reserve(size_t newCapacity, bool freeMemory = false)
+    {
+        newCapacity = VMA_MAX(newCapacity, m_Count);
+
+        if((newCapacity < m_Capacity) && !freeMemory)
+        {
+            newCapacity = m_Capacity;
+        }
+
+        if(newCapacity != m_Capacity)
+        {
+            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+            if(m_Count != 0)
+            {
+                memcpy(newArray, m_pArray, m_Count * sizeof(T));
+            }
+            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+            m_Capacity = newCapacity;
+            m_pArray = newArray;
+        }
+    }
+
+    void resize(size_t newCount, bool freeMemory = false)
+    {
+        size_t newCapacity = m_Capacity;
+        if(newCount > m_Capacity)
+        {
+            newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
+        }
+        else if(freeMemory)
+        {
+            newCapacity = newCount;
+        }
+
+        if(newCapacity != m_Capacity)
+        {
+            T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
+            const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
+            if(elementsToCopy != 0)
+            {
+                memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
+            }
+            VmaFree(m_Allocator.m_pCallbacks, m_pArray);
+            m_Capacity = newCapacity;
+            m_pArray = newArray;
+        }
+
+        m_Count = newCount;
+    }
+
+    void clear(bool freeMemory = false)
+    {
+        resize(0, freeMemory);
+    }
+
+    void insert(size_t index, const T& src)
+    {
+        VMA_HEAVY_ASSERT(index <= m_Count);
+        const size_t oldCount = size();
+        resize(oldCount + 1);
+        if(index < oldCount)
+        {
+            memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
+        }
+        m_pArray[index] = src;
+    }
+
+    void remove(size_t index)
+    {
+        VMA_HEAVY_ASSERT(index < m_Count);
+        const size_t oldCount = size();
+        if(index < oldCount - 1)
+        {
+            memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
+        }
+        resize(oldCount - 1);
+    }
+
+    void push_back(const T& src)
+    {
+        const size_t newIndex = size();
+        resize(newIndex + 1);
+        m_pArray[newIndex] = src;
+    }
+
+    void pop_back()
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        resize(size() - 1);
+    }
+
+    void push_front(const T& src)
+    {
+        insert(0, src);
+    }
+
+    void pop_front()
+    {
+        VMA_HEAVY_ASSERT(m_Count > 0);
+        remove(0);
+    }
+
+    typedef T* iterator;
+
+    iterator begin() { return m_pArray; }
+    iterator end() { return m_pArray + m_Count; }
+
+private:
+    AllocatorT m_Allocator;
+    T* m_pArray;
+    size_t m_Count;
+    size_t m_Capacity;
+};
+
+template<typename T, typename allocatorT>
+static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
+{
+    vec.insert(index, item);
+}
+
+template<typename T, typename allocatorT>
+static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
+{
+    vec.remove(index);
+}
+
+#endif // #if VMA_USE_STL_VECTOR
+
+template<typename CmpLess, typename VectorT>
+size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
+{
+    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+        vector.data(),
+        vector.data() + vector.size(),
+        value,
+        CmpLess()) - vector.data();
+    VmaVectorInsert(vector, indexToInsert, value);
+    return indexToInsert;
+}
+
+template<typename CmpLess, typename VectorT>
+bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
+{
+    CmpLess comparator;
+    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
+        vector.begin(),
+        vector.end(),
+        value,
+        comparator);
+    if((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
+    {
+        size_t indexToRemove = it - vector.begin();
+        VmaVectorRemove(vector, indexToRemove);
+        return true;
+    }
+    return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaSmallVector
+
+/*
+This is a vector (a variable-sized array), optimized for the case when the array is small.
+
+It contains some number of elements in-place, which allows it to avoid heap allocation
+when the actual number of elements is below that threshold. This allows normal "small"
+cases to be fast without losing generality for large inputs.
+*/
+
+template<typename T, typename AllocatorT, size_t N>
+class VmaSmallVector
+{
+public:
+    typedef T value_type;
+
+    VmaSmallVector(const AllocatorT& allocator) :
+        m_Count(0),
+        m_DynamicArray(allocator)
+    {
+    }
+    VmaSmallVector(size_t count, const AllocatorT& allocator) :
+        m_Count(count),
+        m_DynamicArray(count > N ? count : 0, allocator)
count : 0, allocator) + { + } + template + VmaSmallVector(const VmaSmallVector& src) = delete; + template + VmaSmallVector& operator=(const VmaSmallVector& rhs) = delete; + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } + const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } + + T& operator[](size_t index) + { + VMA_HEAVY_ASSERT(index < m_Count); + return data()[index]; + } + const T& operator[](size_t index) const + { + VMA_HEAVY_ASSERT(index < m_Count); + return data()[index]; + } + + T& front() + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[0]; + } + const T& front() const + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[0]; + } + T& back() + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[m_Count - 1]; + } + const T& back() const + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[m_Count - 1]; + } + + void resize(size_t newCount, bool freeMemory = false) + { + if(newCount > N && m_Count > N) + { + // Any direction, staying in m_DynamicArray + m_DynamicArray.resize(newCount, freeMemory); + } + else if(newCount > N && m_Count <= N) + { + // Growing, moving from m_StaticArray to m_DynamicArray + m_DynamicArray.resize(newCount, freeMemory); + if(m_Count > 0) + { + memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T)); + } + } + else if(newCount <= N && m_Count > N) + { + // Shrinking, moving from m_DynamicArray to m_StaticArray + if(newCount > 0) + { + memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T)); + } + m_DynamicArray.resize(0, freeMemory); + } + else + { + // Any direction, staying in m_StaticArray - nothing to do here + } + m_Count = newCount; + } + + void clear(bool freeMemory = false) + { + m_DynamicArray.clear(freeMemory); + m_Count = 0; + } + + void insert(size_t index, const T& src) + { + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + T* const dataPtr = data(); + if(index < oldCount) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. + memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T)); + } + dataPtr[index] = src; + } + + void remove(size_t index) + { + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if(index < oldCount - 1) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray. + T* const dataPtr = data(); + memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); + } + + void push_back(const T& src) + { + const size_t newIndex = size(); + resize(newIndex + 1); + data()[newIndex] = src; + } + + void pop_back() + { + VMA_HEAVY_ASSERT(m_Count > 0); + resize(size() - 1); + } + + void push_front(const T& src) + { + insert(0, src); + } + + void pop_front() + { + VMA_HEAVY_ASSERT(m_Count > 0); + remove(0); + } + + typedef T* iterator; + + iterator begin() { return data(); } + iterator end() { return data() + m_Count; } + +private: + size_t m_Count; + T m_StaticArray[N]; // Used when m_Size <= N + VmaVector m_DynamicArray; // Used when m_Size > N +}; + +//////////////////////////////////////////////////////////////////////////////// +// class VmaPoolAllocator + +/* +Allocator for objects of type T using a list of arrays (pools) to speed up +allocation. 
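+
+(Editor's note: an illustrative usage sketch, not part of the original header.
+Assuming a trivially-copyable item type, the allocator below is driven like
+this -- MyItem and its constructor arguments are hypothetical stand-ins:
+
+    VmaPoolAllocator<MyItem> pool(pAllocationCallbacks, 32); // 32 = first block capacity
+    MyItem* item = pool.Alloc(42);  // placement-new of MyItem{42} in a pooled slot
+    pool.Free(item);                // runs ~MyItem() and recycles the slot
+)
+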
The number of elements that can be allocated is not bounded, because the
+allocator can create multiple blocks.
+*/
+template<typename T>
+class VmaPoolAllocator
+{
+ VMA_CLASS_NO_COPY(VmaPoolAllocator)
+public:
+ VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
+ ~VmaPoolAllocator();
+ template<typename... Types> T* Alloc(Types... args);
+ void Free(T* ptr);
+
+private:
+ union Item
+ {
+ uint32_t NextFreeIndex;
+ alignas(T) char Value[sizeof(T)];
+ };
+
+ struct ItemBlock
+ {
+ Item* pItems;
+ uint32_t Capacity;
+ uint32_t FirstFreeIndex;
+ };
+
+ const VkAllocationCallbacks* m_pAllocationCallbacks;
+ const uint32_t m_FirstBlockCapacity;
+ VmaVector< ItemBlock, VmaStlAllocator<ItemBlock> > m_ItemBlocks;
+
+ ItemBlock& CreateNewBlock();
+};
+
+template<typename T>
+VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) :
+ m_pAllocationCallbacks(pAllocationCallbacks),
+ m_FirstBlockCapacity(firstBlockCapacity),
+ m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
+{
+ VMA_ASSERT(m_FirstBlockCapacity > 1);
+}
+
+template<typename T>
+VmaPoolAllocator<T>::~VmaPoolAllocator()
+{
+ for(size_t i = m_ItemBlocks.size(); i--; )
+ vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
+ m_ItemBlocks.clear();
+}
+
+template<typename T>
+template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types... args)
+{
+ for(size_t i = m_ItemBlocks.size(); i--; )
+ {
+ ItemBlock& block = m_ItemBlocks[i];
+ // This block has some free items: Use first one.
+ if(block.FirstFreeIndex != UINT32_MAX)
+ {
+ Item* const pItem = &block.pItems[block.FirstFreeIndex];
+ block.FirstFreeIndex = pItem->NextFreeIndex;
+ T* result = (T*)&pItem->Value;
+ new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
+ return result;
+ }
+ }
+
+ // No block has free item: Create new one and use it.
+ ItemBlock& newBlock = CreateNewBlock();
+ Item* const pItem = &newBlock.pItems[0];
+ newBlock.FirstFreeIndex = pItem->NextFreeIndex;
+ T* result = (T*)&pItem->Value;
+ new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
+ return result;
+}
+
+template<typename T>
+void VmaPoolAllocator<T>::Free(T* ptr)
+{
+ // Search all memory blocks to find ptr.
+ for(size_t i = m_ItemBlocks.size(); i--; )
+ {
+ ItemBlock& block = m_ItemBlocks[i];
+
+ // Casting to union.
+ Item* pItemPtr;
+ memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
+
+ // Check if pItemPtr is in address range of this block.
+ if((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
+ {
+ ptr->~T(); // Explicit destructor call.
+ const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
+ pItemPtr->NextFreeIndex = block.FirstFreeIndex;
+ block.FirstFreeIndex = index;
+ return;
+ }
+ }
+ VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
+}
+
+template<typename T>
+typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
+{
+ const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
+ m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
+
+ const ItemBlock newBlock = {
+ vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
+ newBlockCapacity,
+ 0 };
+
+ m_ItemBlocks.push_back(newBlock);
+
+ // Setup singly-linked list of all free items in this block.
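+ // (Editor's note: after the loop below, the items form the chain
+ // 0 -> 1 -> ... -> capacity-1 -> UINT32_MAX, and FirstFreeIndex is already
+ // 0 from the aggregate initializer above, so the next Alloc() pops the
+ // head of the free list in O(1).)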
+ for(uint32_t i = 0; i < newBlockCapacity - 1; ++i) + newBlock.pItems[i].NextFreeIndex = i + 1; + newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; + return m_ItemBlocks.back(); +} + +//////////////////////////////////////////////////////////////////////////////// +// class VmaRawList, VmaList + +#if VMA_USE_STL_LIST + +#define VmaList std::list + +#else // #if VMA_USE_STL_LIST + +template +struct VmaListItem +{ + VmaListItem* pPrev; + VmaListItem* pNext; + T Value; +}; + +// Doubly linked list. +template +class VmaRawList +{ + VMA_CLASS_NO_COPY(VmaRawList) +public: + typedef VmaListItem ItemType; + + VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); + ~VmaRawList(); + void Clear(); + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + + ItemType* Front() { return m_pFront; } + const ItemType* Front() const { return m_pFront; } + ItemType* Back() { return m_pBack; } + const ItemType* Back() const { return m_pBack; } + + ItemType* PushBack(); + ItemType* PushFront(); + ItemType* PushBack(const T& value); + ItemType* PushFront(const T& value); + void PopBack(); + void PopFront(); + + // Item can be null - it means PushBack. + ItemType* InsertBefore(ItemType* pItem); + // Item can be null - it means PushFront. + ItemType* InsertAfter(ItemType* pItem); + + ItemType* InsertBefore(ItemType* pItem, const T& value); + ItemType* InsertAfter(ItemType* pItem, const T& value); + + void Remove(ItemType* pItem); + +private: + const VkAllocationCallbacks* const m_pAllocationCallbacks; + VmaPoolAllocator m_ItemAllocator; + ItemType* m_pFront; + ItemType* m_pBack; + size_t m_Count; +}; + +template +VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) : + m_pAllocationCallbacks(pAllocationCallbacks), + m_ItemAllocator(pAllocationCallbacks, 128), + m_pFront(VMA_NULL), + m_pBack(VMA_NULL), + m_Count(0) +{ +} + +template +VmaRawList::~VmaRawList() +{ + // Intentionally not calling Clear, because that would be unnecessary + // computations to return all items to m_ItemAllocator as free. 
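+ // (Editor's note: this also means ~T() never runs for items still in the
+ // list -- ~VmaPoolAllocator() releases the item blocks as raw arrays. The
+ // list is therefore only safe for element types that need no destruction,
+ // which appears to hold for every T it is instantiated with in this header.)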
+} + +template +void VmaRawList::Clear() +{ + if(IsEmpty() == false) + { + ItemType* pItem = m_pBack; + while(pItem != VMA_NULL) + { + ItemType* const pPrevItem = pItem->pPrev; + m_ItemAllocator.Free(pItem); + pItem = pPrevItem; + } + m_pFront = VMA_NULL; + m_pBack = VMA_NULL; + m_Count = 0; + } +} + +template +VmaListItem* VmaRawList::PushBack() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pNext = VMA_NULL; + if(IsEmpty()) + { + pNewItem->pPrev = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pPrev = m_pBack; + m_pBack->pNext = pNewItem; + m_pBack = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushFront() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pPrev = VMA_NULL; + if(IsEmpty()) + { + pNewItem->pNext = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pNext = m_pFront; + m_pFront->pPrev = pNewItem; + m_pFront = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack(const T& value) +{ + ItemType* const pNewItem = PushBack(); + pNewItem->Value = value; + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushFront(const T& value) +{ + ItemType* const pNewItem = PushFront(); + pNewItem->Value = value; + return pNewItem; +} + +template +void VmaRawList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pBackItem = m_pBack; + ItemType* const pPrevItem = pBackItem->pPrev; + if(pPrevItem != VMA_NULL) + { + pPrevItem->pNext = VMA_NULL; + } + m_pBack = pPrevItem; + m_ItemAllocator.Free(pBackItem); + --m_Count; +} + +template +void VmaRawList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pFrontItem = m_pFront; + ItemType* const pNextItem = pFrontItem->pNext; + if(pNextItem != VMA_NULL) + { + pNextItem->pPrev = VMA_NULL; + } + m_pFront = pNextItem; + m_ItemAllocator.Free(pFrontItem); + --m_Count; +} + +template +void VmaRawList::Remove(ItemType* pItem) +{ + VMA_HEAVY_ASSERT(pItem != VMA_NULL); + VMA_HEAVY_ASSERT(m_Count > 0); + + if(pItem->pPrev != VMA_NULL) + { + pItem->pPrev->pNext = pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = pItem->pNext; + } + + if(pItem->pNext != VMA_NULL) + { + pItem->pNext->pPrev = pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = pItem->pPrev; + } + + m_ItemAllocator.Free(pItem); + --m_Count; +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const prevItem = pItem->pPrev; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pPrev = prevItem; + newItem->pNext = pItem; + pItem->pPrev = newItem; + if(prevItem != VMA_NULL) + { + prevItem->pNext = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = newItem; + } + ++m_Count; + return newItem; + } + else + return PushBack(); +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const nextItem = pItem->pNext; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pNext = nextItem; + newItem->pPrev = pItem; + pItem->pNext = newItem; + if(nextItem != VMA_NULL) + { + nextItem->pPrev = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = newItem; + } + ++m_Count; + return newItem; + } + else + return PushFront(); +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) +{ + ItemType* const 
newItem = InsertBefore(pItem); + newItem->Value = value; + return newItem; +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertAfter(pItem); + newItem->Value = value; + return newItem; +} + +template +class VmaList +{ + VMA_CLASS_NO_COPY(VmaList) +public: + class iterator + { + public: + iterator() : + m_pList(VMA_NULL), + m_pItem(VMA_NULL) + { + } + + T& operator*() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return m_pItem->Value; + } + T* operator->() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return &m_pItem->Value; + } + + iterator& operator++() + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + m_pItem = m_pItem->pNext; + return *this; + } + iterator& operator--() + { + if(m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; + } + + iterator operator++(int) + { + iterator result = *this; + ++*this; + return result; + } + iterator operator--(int) + { + iterator result = *this; + --*this; + return result; + } + + bool operator==(const iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem == rhs.m_pItem; + } + bool operator!=(const iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem != rhs.m_pItem; + } + + private: + VmaRawList* m_pList; + VmaListItem* m_pItem; + + iterator(VmaRawList* pList, VmaListItem* pItem) : + m_pList(pList), + m_pItem(pItem) + { + } + + friend class VmaList; + }; + + class const_iterator + { + public: + const_iterator() : + m_pList(VMA_NULL), + m_pItem(VMA_NULL) + { + } + + const_iterator(const iterator& src) : + m_pList(src.m_pList), + m_pItem(src.m_pItem) + { + } + + const T& operator*() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return m_pItem->Value; + } + const T* operator->() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return &m_pItem->Value; + } + + const_iterator& operator++() + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + m_pItem = m_pItem->pNext; + return *this; + } + const_iterator& operator--() + { + if(m_pItem != VMA_NULL) + { + m_pItem = m_pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(!m_pList->IsEmpty()); + m_pItem = m_pList->Back(); + } + return *this; + } + + const_iterator operator++(int) + { + const_iterator result = *this; + ++*this; + return result; + } + const_iterator operator--(int) + { + const_iterator result = *this; + --*this; + return result; + } + + bool operator==(const const_iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem == rhs.m_pItem; + } + bool operator!=(const const_iterator& rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem != rhs.m_pItem; + } + + private: + const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : + m_pList(pList), + m_pItem(pItem) + { + } + + const VmaRawList* m_pList; + const VmaListItem* m_pItem; + + friend class VmaList; + }; + + VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) { } + + bool empty() const { return m_RawList.IsEmpty(); } + size_t size() const { return m_RawList.GetCount(); } + + iterator begin() { return iterator(&m_RawList, m_RawList.Front()); } + iterator end() { return iterator(&m_RawList, VMA_NULL); } + + const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); } + const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); } + + void clear() { m_RawList.Clear(); } + void 
push_back(const T& value) { m_RawList.PushBack(value); } + void erase(iterator it) { m_RawList.Remove(it.m_pItem); } + iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } + +private: + VmaRawList m_RawList; +}; + +#endif // #if VMA_USE_STL_LIST + +//////////////////////////////////////////////////////////////////////////////// +// class VmaMap + +// Unused in this version. +#if 0 + +#if VMA_USE_STL_UNORDERED_MAP + +#define VmaPair std::pair + +#define VMA_MAP_TYPE(KeyT, ValueT) \ + std::unordered_map< KeyT, ValueT, std::hash, std::equal_to, VmaStlAllocator< std::pair > > + +#else // #if VMA_USE_STL_UNORDERED_MAP + +template +struct VmaPair +{ + T1 first; + T2 second; + + VmaPair() : first(), second() { } + VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) { } +}; + +/* Class compatible with subset of interface of std::unordered_map. +KeyT, ValueT must be POD because they will be stored in VmaVector. +*/ +template +class VmaMap +{ +public: + typedef VmaPair PairType; + typedef PairType* iterator; + + VmaMap(const VmaStlAllocator& allocator) : m_Vector(allocator) { } + + iterator begin() { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + + void insert(const PairType& pair); + iterator find(const KeyT& key); + void erase(iterator it); + +private: + VmaVector< PairType, VmaStlAllocator > m_Vector; +}; + +#define VMA_MAP_TYPE(KeyT, ValueT) VmaMap + +template +struct VmaPairFirstLess +{ + bool operator()(const VmaPair& lhs, const VmaPair& rhs) const + { + return lhs.first < rhs.first; + } + bool operator()(const VmaPair& lhs, const FirstT& rhsFirst) const + { + return lhs.first < rhsFirst; + } +}; + +template +void VmaMap::insert(const PairType& pair) +{ + const size_t indexToInsert = VmaBinaryFindFirstNotLess( + m_Vector.data(), + m_Vector.data() + m_Vector.size(), + pair, + VmaPairFirstLess()) - m_Vector.data(); + VmaVectorInsert(m_Vector, indexToInsert, pair); +} + +template +VmaPair* VmaMap::find(const KeyT& key) +{ + PairType* it = VmaBinaryFindFirstNotLess( + m_Vector.data(), + m_Vector.data() + m_Vector.size(), + key, + VmaPairFirstLess()); + if((it != m_Vector.end()) && (it->first == key)) + { + return it; + } + else + { + return m_Vector.end(); + } +} + +template +void VmaMap::erase(iterator it) +{ + VmaVectorRemove(m_Vector, it - m_Vector.begin()); +} + +#endif // #if VMA_USE_STL_UNORDERED_MAP + +#endif // #if 0 + +//////////////////////////////////////////////////////////////////////////////// + +class VmaDeviceMemoryBlock; + +enum VMA_CACHE_OPERATION { VMA_CACHE_FLUSH, VMA_CACHE_INVALIDATE }; + +struct VmaAllocation_T +{ +private: + static const uint8_t MAP_COUNT_FLAG_PERSISTENT_MAP = 0x80; + + enum FLAGS + { + FLAG_USER_DATA_STRING = 0x01, + }; + +public: + enum ALLOCATION_TYPE + { + ALLOCATION_TYPE_NONE, + ALLOCATION_TYPE_BLOCK, + ALLOCATION_TYPE_DEDICATED, + }; + + /* + This struct is allocated using VmaPoolAllocator. + */ + + VmaAllocation_T(uint32_t currentFrameIndex, bool userDataString) : + m_Alignment{1}, + m_Size{0}, + m_pUserData{VMA_NULL}, + m_LastUseFrameIndex{currentFrameIndex}, + m_MemoryTypeIndex{0}, + m_Type{(uint8_t)ALLOCATION_TYPE_NONE}, + m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN}, + m_MapCount{0}, + m_Flags{userDataString ? 
(uint8_t)FLAG_USER_DATA_STRING : (uint8_t)0} + { +#if VMA_STATS_STRING_ENABLED + m_CreationFrameIndex = currentFrameIndex; + m_BufferImageUsage = 0; +#endif + } + + ~VmaAllocation_T() + { + VMA_ASSERT((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) == 0 && "Allocation was not unmapped before destruction."); + + // Check if owned string was freed. + VMA_ASSERT(m_pUserData == VMA_NULL); + } + + void InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VkDeviceSize offset, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped, + bool canBecomeLost) + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(block != VMA_NULL); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_Alignment = alignment; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_MapCount = mapped ? MAP_COUNT_FLAG_PERSISTENT_MAP : 0; + m_SuballocationType = (uint8_t)suballocationType; + m_BlockAllocation.m_Block = block; + m_BlockAllocation.m_Offset = offset; + m_BlockAllocation.m_CanBecomeLost = canBecomeLost; + } + + void InitLost() + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(m_LastUseFrameIndex.load() == VMA_FRAME_INDEX_LOST); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_MemoryTypeIndex = 0; + m_BlockAllocation.m_Block = VMA_NULL; + m_BlockAllocation.m_Offset = 0; + m_BlockAllocation.m_CanBecomeLost = true; + } + + void ChangeBlockAllocation( + VmaAllocator hAllocator, + VmaDeviceMemoryBlock* block, + VkDeviceSize offset); + + void ChangeOffset(VkDeviceSize newOffset); + + // pMappedData not null means allocation is created with MAPPED flag. + void InitDedicatedAllocation( + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size) + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(hMemory != VK_NULL_HANDLE); + m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; + m_Alignment = 0; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_SuballocationType = (uint8_t)suballocationType; + m_MapCount = (pMappedData != VMA_NULL) ? 
MAP_COUNT_FLAG_PERSISTENT_MAP : 0; + m_DedicatedAllocation.m_hMemory = hMemory; + m_DedicatedAllocation.m_pMappedData = pMappedData; + } + + ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } + VkDeviceSize GetAlignment() const { return m_Alignment; } + VkDeviceSize GetSize() const { return m_Size; } + bool IsUserDataString() const { return (m_Flags & FLAG_USER_DATA_STRING) != 0; } + void* GetUserData() const { return m_pUserData; } + void SetUserData(VmaAllocator hAllocator, void* pUserData); + VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } + + VmaDeviceMemoryBlock* GetBlock() const + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + return m_BlockAllocation.m_Block; + } + VkDeviceSize GetOffset() const; + VkDeviceMemory GetMemory() const; + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + bool IsPersistentMap() const { return (m_MapCount & MAP_COUNT_FLAG_PERSISTENT_MAP) != 0; } + void* GetMappedData() const; + bool CanBecomeLost() const; + + uint32_t GetLastUseFrameIndex() const + { + return m_LastUseFrameIndex.load(); + } + bool CompareExchangeLastUseFrameIndex(uint32_t& expected, uint32_t desired) + { + return m_LastUseFrameIndex.compare_exchange_weak(expected, desired); + } + /* + - If hAllocation.LastUseFrameIndex + frameInUseCount < allocator.CurrentFrameIndex, + makes it lost by setting LastUseFrameIndex = VMA_FRAME_INDEX_LOST and returns true. + - Else, returns false. + + If hAllocation is already lost, assert - you should not call it then. + If hAllocation was not created with CAN_BECOME_LOST_BIT, assert. + */ + bool MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + void DedicatedAllocCalcStatsInfo(VmaStatInfo& outInfo) + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_DEDICATED); + outInfo.blockCount = 1; + outInfo.allocationCount = 1; + outInfo.unusedRangeCount = 0; + outInfo.usedBytes = m_Size; + outInfo.unusedBytes = 0; + outInfo.allocationSizeMin = outInfo.allocationSizeMax = m_Size; + outInfo.unusedRangeSizeMin = UINT64_MAX; + outInfo.unusedRangeSizeMax = 0; + } + + void BlockAllocMap(); + void BlockAllocUnmap(); + VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); + void DedicatedAllocUnmap(VmaAllocator hAllocator); + +#if VMA_STATS_STRING_ENABLED + uint32_t GetCreationFrameIndex() const { return m_CreationFrameIndex; } + uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; } + + void InitBufferImageUsage(uint32_t bufferImageUsage) + { + VMA_ASSERT(m_BufferImageUsage == 0); + m_BufferImageUsage = bufferImageUsage; + } + + void PrintParameters(class VmaJsonWriter& json) const; +#endif + +private: + VkDeviceSize m_Alignment; + VkDeviceSize m_Size; + void* m_pUserData; + VMA_ATOMIC_UINT32 m_LastUseFrameIndex; + uint32_t m_MemoryTypeIndex; + uint8_t m_Type; // ALLOCATION_TYPE + uint8_t m_SuballocationType; // VmaSuballocationType + // Bit 0x80 is set when allocation was created with VMA_ALLOCATION_CREATE_MAPPED_BIT. + // Bits with mask 0x7F are reference counter for vmaMapMemory()/vmaUnmapMemory(). + uint8_t m_MapCount; + uint8_t m_Flags; // enum FLAGS + + // Allocation out of VmaDeviceMemoryBlock. + struct BlockAllocation + { + VmaDeviceMemoryBlock* m_Block; + VkDeviceSize m_Offset; + bool m_CanBecomeLost; + }; + + // Allocation for an object that has its own private VkDeviceMemory. + struct DedicatedAllocation + { + VkDeviceMemory m_hMemory; + void* m_pMappedData; // Not null means memory is mapped. 
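+ // (Editor's note: whether this struct or BlockAllocation above is the
+ // active member of the anonymous union below is discriminated by m_Type,
+ // in classic tagged-union fashion; GetBlock() asserts the tag before
+ // touching the corresponding member.)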
+ }; + + union + { + // Allocation out of VmaDeviceMemoryBlock. + BlockAllocation m_BlockAllocation; + // Allocation for an object that has its own private VkDeviceMemory. + DedicatedAllocation m_DedicatedAllocation; + }; + +#if VMA_STATS_STRING_ENABLED + uint32_t m_CreationFrameIndex; + uint32_t m_BufferImageUsage; // 0 if unknown. +#endif + + void FreeUserDataString(VmaAllocator hAllocator); +}; + +/* +Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as +allocated memory block or free. +*/ +struct VmaSuballocation +{ + VkDeviceSize offset; + VkDeviceSize size; + VmaAllocation hAllocation; + VmaSuballocationType type; +}; + +// Comparator for offsets. +struct VmaSuballocationOffsetLess +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset < rhs.offset; + } +}; +struct VmaSuballocationOffsetGreater +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset > rhs.offset; + } +}; + +typedef VmaList< VmaSuballocation, VmaStlAllocator > VmaSuballocationList; + +// Cost of one additional allocation lost, as equivalent in bytes. +static const VkDeviceSize VMA_LOST_ALLOCATION_COST = 1048576; + +enum class VmaAllocationRequestType +{ + Normal, + // Used by "Linear" algorithm. + UpperAddress, + EndOf1st, + EndOf2nd, +}; + +/* +Parameters of planned allocation inside a VmaDeviceMemoryBlock. + +If canMakeOtherLost was false: +- item points to a FREE suballocation. +- itemsToMakeLostCount is 0. + +If canMakeOtherLost was true: +- item points to first of sequence of suballocations, which are either FREE, + or point to VmaAllocations that can become lost. +- itemsToMakeLostCount is the number of VmaAllocations that need to be made lost for + the requested allocation to succeed. +*/ +struct VmaAllocationRequest +{ + VkDeviceSize offset; + VkDeviceSize sumFreeSize; // Sum size of free items that overlap with proposed allocation. + VkDeviceSize sumItemSize; // Sum size of items to make lost that overlap with proposed allocation. + VmaSuballocationList::iterator item; + size_t itemsToMakeLostCount; + void* customData; + VmaAllocationRequestType type; + + VkDeviceSize CalcCost() const + { + return sumItemSize + itemsToMakeLostCount * VMA_LOST_ALLOCATION_COST; + } +}; + +/* +Data structure used for bookkeeping of allocations and unused ranges of memory +in a single VkDeviceMemory block. +*/ +class VmaBlockMetadata +{ +public: + VmaBlockMetadata(VmaAllocator hAllocator); + virtual ~VmaBlockMetadata() { } + virtual void Init(VkDeviceSize size) { m_Size = size; } + + // Validates all data structures inside this object. If not valid, returns false. + virtual bool Validate() const = 0; + VkDeviceSize GetSize() const { return m_Size; } + virtual size_t GetAllocationCount() const = 0; + virtual VkDeviceSize GetSumFreeSize() const = 0; + virtual VkDeviceSize GetUnusedRangeSizeMax() const = 0; + // Returns true if this block is empty - contains only single free suballocation. + virtual bool IsEmpty() const = 0; + + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const = 0; + // Shouldn't modify blockCount. + virtual void AddPoolStats(VmaPoolStats& inoutStats) const = 0; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; +#endif + + // Tries to find a place for suballocation with given parameters inside this block. + // If succeeded, fills pAllocationRequest and returns true. + // If failed, returns false. 
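+ // (Editor's note: an illustrative caller-side sketch, assuming a concrete
+ // metadata object `meta` and valid frame/granularity parameters; this is
+ // roughly how VmaBlockVector drives the interface below -- `meta`,
+ // `frameIndex` and friends are hypothetical local names:
+ //
+ // VmaAllocationRequest request = {};
+ // if(meta->CreateAllocationRequest(frameIndex, frameInUseCount,
+ // granularity, size, alignment, false /*upperAddress*/, allocType,
+ // canMakeOtherLost, strategy, &request) &&
+ // meta->MakeRequestedAllocationsLost(frameIndex, frameInUseCount, &request))
+ // {
+ // meta->Alloc(request, allocType, size, hAllocation);
+ // }
+ // )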
+ virtual bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) = 0; + + virtual VkResult CheckCorruption(const void* pBlockData) = 0; + + // Makes actual allocation based on request. Request must already be checked and valid. + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) = 0; + + // Frees suballocation assigned to given memory region. + virtual void Free(const VmaAllocation allocation) = 0; + virtual void FreeAtOffset(VkDeviceSize offset) = 0; + +protected: + const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, + size_t allocationCount, + size_t unusedRangeCount) const; + void PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, + VmaAllocation hAllocation) const; + void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, + VkDeviceSize size) const; + void PrintDetailedMap_End(class VmaJsonWriter& json) const; +#endif + +private: + VkDeviceSize m_Size; + const VkAllocationCallbacks* m_pAllocationCallbacks; +}; + +#define VMA_VALIDATE(cond) do { if(!(cond)) { \ + VMA_ASSERT(0 && "Validation failed: " #cond); \ + return false; \ + } } while(false) + +class VmaBlockMetadata_Generic : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic) +public: + VmaBlockMetadata_Generic(VmaAllocator hAllocator); + virtual ~VmaBlockMetadata_Generic(); + virtual void Init(VkDeviceSize size); + + virtual bool Validate() const; + virtual size_t GetAllocationCount() const { return m_Suballocations.size() - m_FreeCount; } + virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; } + virtual VkDeviceSize GetUnusedRangeSizeMax() const; + virtual bool IsEmpty() const; + + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const; +#endif + + virtual bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + + virtual bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); + + virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + virtual VkResult CheckCorruption(const void* pBlockData); + + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + 
VmaAllocation hAllocation); + + virtual void Free(const VmaAllocation allocation); + virtual void FreeAtOffset(VkDeviceSize offset); + + //////////////////////////////////////////////////////////////////////////////// + // For defragmentation + + bool IsBufferImageGranularityConflictPossible( + VkDeviceSize bufferImageGranularity, + VmaSuballocationType& inOutPrevSuballocType) const; + +private: + friend class VmaDefragmentationAlgorithm_Generic; + friend class VmaDefragmentationAlgorithm_Fast; + + uint32_t m_FreeCount; + VkDeviceSize m_SumFreeSize; + VmaSuballocationList m_Suballocations; + // Suballocations that are free and have size greater than certain threshold. + // Sorted by size, ascending. + VmaVector< VmaSuballocationList::iterator, VmaStlAllocator< VmaSuballocationList::iterator > > m_FreeSuballocationsBySize; + + bool ValidateFreeSuballocationList() const; + + // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem. + // If yes, fills pOffset and returns true. If no, returns false. + bool CheckAllocation( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + bool canMakeOtherLost, + VkDeviceSize* pOffset, + size_t* itemsToMakeLostCount, + VkDeviceSize* pSumFreeSize, + VkDeviceSize* pSumItemSize) const; + // Given free suballocation, it merges it with following one, which must also be free. + void MergeFreeWithNext(VmaSuballocationList::iterator item); + // Releases given suballocation, making it free. + // Merges it with adjacent free suballocations if applicable. + // Returns iterator to new free suballocation at this place. + VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem); + // Given free suballocation, it inserts it into sorted list of + // m_FreeSuballocationsBySize if it's suitable. + void RegisterFreeSuballocation(VmaSuballocationList::iterator item); + // Given free suballocation, it removes it from sorted list of + // m_FreeSuballocationsBySize if it's suitable. + void UnregisterFreeSuballocation(VmaSuballocationList::iterator item); +}; + +/* +Allocations and their references in internal data structure look like this: + +if(m_2ndVectorMode == SECOND_VECTOR_EMPTY): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER): + + 0 +-------+ + | Alloc | 2nd[0] + +-------+ + | Alloc | 2nd[1] + +-------+ + | ... | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | +GetSize() +-------+ + +if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK): + + 0 +-------+ + | | + | | + | | + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount] + +-------+ + | Alloc | 1st[m_1stNullItemsBeginCount + 1] + +-------+ + | ... | + +-------+ + | Alloc | 1st[1st.size() - 1] + +-------+ + | | + | | + | | + +-------+ + | Alloc | 2nd[2nd.size() - 1] + +-------+ + | ... 
| + +-------+ + | Alloc | 2nd[1] + +-------+ + | Alloc | 2nd[0] +GetSize() +-------+ + +*/ +class VmaBlockMetadata_Linear : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear) +public: + VmaBlockMetadata_Linear(VmaAllocator hAllocator); + virtual ~VmaBlockMetadata_Linear(); + virtual void Init(VkDeviceSize size); + + virtual bool Validate() const; + virtual size_t GetAllocationCount() const; + virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize; } + virtual VkDeviceSize GetUnusedRangeSizeMax() const; + virtual bool IsEmpty() const { return GetAllocationCount() == 0; } + + virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const; + virtual void AddPoolStats(VmaPoolStats& inoutStats) const; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const; +#endif + + virtual bool CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + + virtual bool MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest); + + virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount); + + virtual VkResult CheckCorruption(const void* pBlockData); + + virtual void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation); + + virtual void Free(const VmaAllocation allocation); + virtual void FreeAtOffset(VkDeviceSize offset); + +private: + /* + There are two suballocation vectors, used in ping-pong way. + The one with index m_1stVectorIndex is called 1st. + The one with index (m_1stVectorIndex ^ 1) is called 2nd. + 2nd can be non-empty only when 1st is not empty. + When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. + */ + typedef VmaVector< VmaSuballocation, VmaStlAllocator > SuballocationVectorType; + + enum SECOND_VECTOR_MODE + { + SECOND_VECTOR_EMPTY, + /* + Suballocations in 2nd vector are created later than the ones in 1st, but they + all have smaller offset. + */ + SECOND_VECTOR_RING_BUFFER, + /* + Suballocations in 2nd vector are upper side of double stack. + They all have offsets higher than those in 1st vector. + Top of this stack means smaller offsets, but higher indices in this vector. + */ + SECOND_VECTOR_DOUBLE_STACK, + }; + + VkDeviceSize m_SumFreeSize; + SuballocationVectorType m_Suballocations0, m_Suballocations1; + uint32_t m_1stVectorIndex; + SECOND_VECTOR_MODE m_2ndVectorMode; + + SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + + // Number of items in 1st vector with hAllocation = null at the beginning. + size_t m_1stNullItemsBeginCount; + // Number of other items in 1st vector with hAllocation = null somewhere in the middle. 
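+ // (Editor's note: these null items are holes left where allocations were
+ // freed from the middle of the sequence; they are cheap to create, and
+ // CleanupAfterFree() compacts the vector once ShouldCompact1st() judges
+ // that the holes outweigh the live items, keeping Free() amortized O(1).)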
+ size_t m_1stNullItemsMiddleCount;
+ // Number of items in 2nd vector with hAllocation = null.
+ size_t m_2ndNullItemsCount;
+
+ bool ShouldCompact1st() const;
+ void CleanupAfterFree();
+
+ bool CreateAllocationRequest_LowerAddress(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest* pAllocationRequest);
+ bool CreateAllocationRequest_UpperAddress(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest* pAllocationRequest);
+};
+
+/*
+- GetSize() is the original size of the allocated memory block.
+- m_UsableSize is this size aligned down to a power of two.
+ All allocations and calculations happen relative to m_UsableSize.
+- GetUnusableSize() is the difference between them.
+ It is reported as a separate, unused range, not available for allocations.
+
+Node at level 0 has size = m_UsableSize.
+Each subsequent level contains nodes half the size of those on the previous level.
+m_LevelCount is the maximum number of levels to use in the current object.
+*/
+class VmaBlockMetadata_Buddy : public VmaBlockMetadata
+{
+ VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy)
+public:
+ VmaBlockMetadata_Buddy(VmaAllocator hAllocator);
+ virtual ~VmaBlockMetadata_Buddy();
+ virtual void Init(VkDeviceSize size);
+
+ virtual bool Validate() const;
+ virtual size_t GetAllocationCount() const { return m_AllocationCount; }
+ virtual VkDeviceSize GetSumFreeSize() const { return m_SumFreeSize + GetUnusableSize(); }
+ virtual VkDeviceSize GetUnusedRangeSizeMax() const;
+ virtual bool IsEmpty() const { return m_Root->type == Node::TYPE_FREE; }
+
+ virtual void CalcAllocationStatInfo(VmaStatInfo& outInfo) const;
+ virtual void AddPoolStats(VmaPoolStats& inoutStats) const;
+
+#if VMA_STATS_STRING_ENABLED
+ virtual void PrintDetailedMap(class VmaJsonWriter& json) const;
+#endif
+
+ virtual bool CreateAllocationRequest(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VkDeviceSize bufferImageGranularity,
+ VkDeviceSize allocSize,
+ VkDeviceSize allocAlignment,
+ bool upperAddress,
+ VmaSuballocationType allocType,
+ bool canMakeOtherLost,
+ uint32_t strategy,
+ VmaAllocationRequest* pAllocationRequest);
+
+ virtual bool MakeRequestedAllocationsLost(
+ uint32_t currentFrameIndex,
+ uint32_t frameInUseCount,
+ VmaAllocationRequest* pAllocationRequest);
+
+ virtual uint32_t MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount);
+
+ virtual VkResult CheckCorruption(const void* pBlockData) { return VK_ERROR_FEATURE_NOT_PRESENT; }
+
+ virtual void Alloc(
+ const VmaAllocationRequest& request,
+ VmaSuballocationType type,
+ VkDeviceSize allocSize,
+ VmaAllocation hAllocation);
+
+ virtual void Free(const VmaAllocation allocation) { FreeAtOffset(allocation, allocation->GetOffset()); }
+ virtual void FreeAtOffset(VkDeviceSize offset) { FreeAtOffset(VMA_NULL, offset); }
+
+private:
+ static const VkDeviceSize MIN_NODE_SIZE = 32;
+ static const size_t MAX_LEVELS = 30;
+
+ struct ValidationContext
+ {
+ size_t calculatedAllocationCount;
+ size_t calculatedFreeCount;
+ VkDeviceSize calculatedSumFreeSize;
+
+ ValidationContext() :
+ calculatedAllocationCount(0),
+ calculatedFreeCount(0),
+ calculatedSumFreeSize(0) {
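+ // (Editor's note: Validate() walks the tree via ValidateNode(), filling
+ // these counters, then cross-checks them against m_AllocationCount,
+ // m_FreeCount and m_SumFreeSize declared further below.)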
} + }; + + struct Node + { + VkDeviceSize offset; + enum TYPE + { + TYPE_FREE, + TYPE_ALLOCATION, + TYPE_SPLIT, + TYPE_COUNT + } type; + Node* parent; + Node* buddy; + + union + { + struct + { + Node* prev; + Node* next; + } free; + struct + { + VmaAllocation alloc; + } allocation; + struct + { + Node* leftChild; + } split; + }; + }; + + // Size of the memory block aligned down to a power of two. + VkDeviceSize m_UsableSize; + uint32_t m_LevelCount; + + Node* m_Root; + struct { + Node* front; + Node* back; + } m_FreeList[MAX_LEVELS]; + // Number of nodes in the tree with type == TYPE_ALLOCATION. + size_t m_AllocationCount; + // Number of nodes in the tree with type == TYPE_FREE. + size_t m_FreeCount; + // This includes space wasted due to internal fragmentation. Doesn't include unusable size. + VkDeviceSize m_SumFreeSize; + + VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; } + void DeleteNode(Node* node); + bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const; + uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const; + inline VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; } + // Alloc passed just for validation. Can be null. + void FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset); + void CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const; + // Adds node to the front of FreeList at given level. + // node->type must be FREE. + // node->free.prev, next can be undefined. + void AddToFreeListFront(uint32_t level, Node* node); + // Removes node from FreeList at given level. + // node->type must be FREE. + // node->free.prev, next stay untouched. + void RemoveFromFreeList(uint32_t level, Node* node); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const; +#endif +}; + +/* +Represents a single block of device memory (`VkDeviceMemory`) with all the +data about its regions (aka suballocations, #VmaAllocation), assigned and free. + +Thread-safety: This class must be externally synchronized. +*/ +class VmaDeviceMemoryBlock +{ + VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock) +public: + VmaBlockMetadata* m_pMetadata; + + VmaDeviceMemoryBlock(VmaAllocator hAllocator); + + ~VmaDeviceMemoryBlock() + { + VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped."); + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); + } + + // Always call after construction. + void Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm); + // Always call before destruction. + void Destroy(VmaAllocator allocator); + + VmaPool GetParentPool() const { return m_hParentPool; } + VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + uint32_t GetId() const { return m_Id; } + void* GetMappedData() const { return m_pMappedData; } + + // Validates all data structures inside this object. If not valid, returns false. + bool Validate() const; + + VkResult CheckCorruption(VmaAllocator hAllocator); + + // ppData can be null. 
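+ // (Editor's note: mapping is reference-counted per block. An illustrative
+ // sketch, assuming a valid allocator and an initialized block:
+ //
+ // void* pData = VMA_NULL;
+ // block.Map(hAllocator, 1, &pData); // first map calls vkMapMemory
+ // block.Map(hAllocator, 1, &pData); // nested map only bumps m_MapCount
+ // block.Unmap(hAllocator, 2); // count reaches 0 -> vkUnmapMemory
+ // )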
+ VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); + void Unmap(VmaAllocator hAllocator, uint32_t count); + + VkResult WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + VkResult ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + + VkResult BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + +private: + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + uint32_t m_MemoryTypeIndex; + uint32_t m_Id; + VkDeviceMemory m_hMemory; + + /* + Protects access to m_hMemory so it's not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. + Also protects m_MapCount, m_pMappedData. + Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. + */ + VMA_MUTEX m_Mutex; + uint32_t m_MapCount; + void* m_pMappedData; +}; + +struct VmaPointerLess +{ + bool operator()(const void* lhs, const void* rhs) const + { + return lhs < rhs; + } +}; + +struct VmaDefragmentationMove +{ + size_t srcBlockIndex; + size_t dstBlockIndex; + VkDeviceSize srcOffset; + VkDeviceSize dstOffset; + VkDeviceSize size; + VmaAllocation hAllocation; + VmaDeviceMemoryBlock* pSrcBlock; + VmaDeviceMemoryBlock* pDstBlock; +}; + +class VmaDefragmentationAlgorithm; + +/* +Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific +Vulkan memory type. + +Synchronized internally with a mutex. +*/ +struct VmaBlockVector +{ + VMA_CLASS_NO_COPY(VmaBlockVector) +public: + VmaBlockVector( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + uint32_t frameInUseCount, + bool explicitBlockSize, + uint32_t algorithm, + float priority); + ~VmaBlockVector(); + + VkResult CreateMinBlocks(); + + VmaAllocator GetAllocator() const { return m_hAllocator; } + VmaPool GetParentPool() const { return m_hParentPool; } + bool IsCustomPool() const { return m_hParentPool != VMA_NULL; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; } + VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } + uint32_t GetFrameInUseCount() const { return m_FrameInUseCount; } + uint32_t GetAlgorithm() const { return m_Algorithm; } + + void GetPoolStats(VmaPoolStats* pStats); + + bool IsEmpty(); + bool IsCorruptionDetectionEnabled() const; + + VkResult Allocate( + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + void Free(const VmaAllocation hAllocation); + + // Adds statistics of this BlockVector to pStats. + void AddStats(VmaStats* pStats); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + void MakePoolAllocationsLost( + uint32_t currentFrameIndex, + size_t* pLostAllocationCount); + VkResult CheckCorruption(); + + // Saves results in pCtx->res. 
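+ // (Editor's note: the incremental flow is roughly -- Defragment() plans the
+ // moves into pCtx->defragmentationMoves, ProcessDefragmentations() then
+ // hands them out in batches as VmaDefragmentationPassMoveInfo records, and
+ // CommitDefragmentations() applies whatever the caller actually performed.)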
+ void Defragment( + class VmaBlockVectorDefragmentationContext* pCtx, + VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags, + VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove, + VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer); + void DefragmentationEnd( + class VmaBlockVectorDefragmentationContext* pCtx, + uint32_t flags, + VmaDefragmentationStats* pStats); + + uint32_t ProcessDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves); + + void CommitDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationStats* pStats); + + //////////////////////////////////////////////////////////////////////////////// + // To be used only while the m_Mutex is locked. Used during defragmentation. + + size_t GetBlockCount() const { return m_Blocks.size(); } + VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } + size_t CalcAllocationCount() const; + bool IsBufferImageGranularityConflictPossible() const; + +private: + friend class VmaDefragmentationAlgorithm_Generic; + + const VmaAllocator m_hAllocator; + const VmaPool m_hParentPool; + const uint32_t m_MemoryTypeIndex; + const VkDeviceSize m_PreferredBlockSize; + const size_t m_MinBlockCount; + const size_t m_MaxBlockCount; + const VkDeviceSize m_BufferImageGranularity; + const uint32_t m_FrameInUseCount; + const bool m_ExplicitBlockSize; + const uint32_t m_Algorithm; + const float m_Priority; + VMA_RW_MUTEX m_Mutex; + + /* There can be at most one allocation that is completely empty (except when minBlockCount > 0) - + a hysteresis to avoid pessimistic case of alternating creation and destruction of a VkDeviceMemory. */ + bool m_HasEmptyBlock; + // Incrementally sorted by sumFreeSize, ascending. + VmaVector< VmaDeviceMemoryBlock*, VmaStlAllocator > m_Blocks; + uint32_t m_NextBlockId; + + VkDeviceSize CalcMaxBlockSize() const; + + // Finds and removes given block from vector. + void Remove(VmaDeviceMemoryBlock* pBlock); + + // Performs single step in sorting m_Blocks. They may not be fully sorted + // after this call. + void IncrementallySortBlocks(); + + VkResult AllocatePage( + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); + + // To be used only without CAN_MAKE_OTHER_LOST flag. + VkResult AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + uint32_t currentFrameIndex, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation); + + VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); + + // Saves result to pCtx->res. + void ApplyDefragmentationMovesCpu( + class VmaBlockVectorDefragmentationContext* pDefragCtx, + const VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves); + // Saves result to pCtx->res. + void ApplyDefragmentationMovesGpu( + class VmaBlockVectorDefragmentationContext* pDefragCtx, + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkCommandBuffer commandBuffer); + + /* + Used during defragmentation. pDefragmentationStats is optional. It's in/out + - updated with new data. 
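+(Editor's note: "empty" here means blocks whose metadata reports IsEmpty(),
+i.e. a single free suballocation spanning the whole block; at most one such
+block is kept alive as hysteresis, tracked by m_HasEmptyBlock above, except
+when minBlockCount forces more to stay.)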
+ */ + void FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats); + + void UpdateHasEmptyBlock(); +}; + +struct VmaPool_T +{ + VMA_CLASS_NO_COPY(VmaPool_T) +public: + VmaBlockVector m_BlockVector; + + VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize); + ~VmaPool_T(); + + uint32_t GetId() const { return m_Id; } + void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } + + const char* GetName() const { return m_Name; } + void SetName(const char* pName); + +#if VMA_STATS_STRING_ENABLED + //void PrintDetailedMap(class VmaStringBuilder& sb); +#endif + +private: + uint32_t m_Id; + char* m_Name; +}; + +/* +Performs defragmentation: + +- Updates `pBlockVector->m_pMetadata`. +- Updates allocations by calling ChangeBlockAllocation() or ChangeOffset(). +- Does not move actual data, only returns requested moves as `moves`. +*/ +class VmaDefragmentationAlgorithm +{ + VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm) +public: + VmaDefragmentationAlgorithm( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex) : + m_hAllocator(hAllocator), + m_pBlockVector(pBlockVector), + m_CurrentFrameIndex(currentFrameIndex) + { + } + virtual ~VmaDefragmentationAlgorithm() + { + } + + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) = 0; + virtual void AddAll() = 0; + + virtual VkResult Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags) = 0; + + virtual VkDeviceSize GetBytesMoved() const = 0; + virtual uint32_t GetAllocationsMoved() const = 0; + +protected: + VmaAllocator const m_hAllocator; + VmaBlockVector* const m_pBlockVector; + const uint32_t m_CurrentFrameIndex; + + struct AllocationInfo + { + VmaAllocation m_hAllocation; + VkBool32* m_pChanged; + + AllocationInfo() : + m_hAllocation(VK_NULL_HANDLE), + m_pChanged(VMA_NULL) + { + } + AllocationInfo(VmaAllocation hAlloc, VkBool32* pChanged) : + m_hAllocation(hAlloc), + m_pChanged(pChanged) + { + } + }; +}; + +class VmaDefragmentationAlgorithm_Generic : public VmaDefragmentationAlgorithm +{ + VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Generic) +public: + VmaDefragmentationAlgorithm_Generic( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported); + virtual ~VmaDefragmentationAlgorithm_Generic(); + + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged); + virtual void AddAll() { m_AllAllocations = true; } + + virtual VkResult Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags); + + virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; } + virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; } + +private: + uint32_t m_AllocationCount; + bool m_AllAllocations; + + VkDeviceSize m_BytesMoved; + uint32_t m_AllocationsMoved; + + struct AllocationInfoSizeGreater + { + bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const + { + return lhs.m_hAllocation->GetSize() > rhs.m_hAllocation->GetSize(); + } + }; + + struct AllocationInfoOffsetGreater + { + bool operator()(const AllocationInfo& lhs, const AllocationInfo& rhs) const + { + return lhs.m_hAllocation->GetOffset() > rhs.m_hAllocation->GetOffset(); + } + }; + + struct BlockInfo + { + size_t m_OriginalBlockIndex; + 
VmaDeviceMemoryBlock* m_pBlock; + bool m_HasNonMovableAllocations; + VmaVector< AllocationInfo, VmaStlAllocator > m_Allocations; + + BlockInfo(const VkAllocationCallbacks* pAllocationCallbacks) : + m_OriginalBlockIndex(SIZE_MAX), + m_pBlock(VMA_NULL), + m_HasNonMovableAllocations(true), + m_Allocations(pAllocationCallbacks) + { + } + + void CalcHasNonMovableAllocations() + { + const size_t blockAllocCount = m_pBlock->m_pMetadata->GetAllocationCount(); + const size_t defragmentAllocCount = m_Allocations.size(); + m_HasNonMovableAllocations = blockAllocCount != defragmentAllocCount; + } + + void SortAllocationsBySizeDescending() + { + VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoSizeGreater()); + } + + void SortAllocationsByOffsetDescending() + { + VMA_SORT(m_Allocations.begin(), m_Allocations.end(), AllocationInfoOffsetGreater()); + } + }; + + struct BlockPointerLess + { + bool operator()(const BlockInfo* pLhsBlockInfo, const VmaDeviceMemoryBlock* pRhsBlock) const + { + return pLhsBlockInfo->m_pBlock < pRhsBlock; + } + bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const + { + return pLhsBlockInfo->m_pBlock < pRhsBlockInfo->m_pBlock; + } + }; + + // 1. Blocks with some non-movable allocations go first. + // 2. Blocks with smaller sumFreeSize go first. + struct BlockInfoCompareMoveDestination + { + bool operator()(const BlockInfo* pLhsBlockInfo, const BlockInfo* pRhsBlockInfo) const + { + if(pLhsBlockInfo->m_HasNonMovableAllocations && !pRhsBlockInfo->m_HasNonMovableAllocations) + { + return true; + } + if(!pLhsBlockInfo->m_HasNonMovableAllocations && pRhsBlockInfo->m_HasNonMovableAllocations) + { + return false; + } + if(pLhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize() < pRhsBlockInfo->m_pBlock->m_pMetadata->GetSumFreeSize()) + { + return true; + } + return false; + } + }; + + typedef VmaVector< BlockInfo*, VmaStlAllocator > BlockInfoVector; + BlockInfoVector m_Blocks; + + VkResult DefragmentRound( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + bool freeOldAllocations); + + size_t CalcBlocksWithNonMovableCount() const; + + static bool MoveMakesSense( + size_t dstBlockIndex, VkDeviceSize dstOffset, + size_t srcBlockIndex, VkDeviceSize srcOffset); +}; + +class VmaDefragmentationAlgorithm_Fast : public VmaDefragmentationAlgorithm +{ + VMA_CLASS_NO_COPY(VmaDefragmentationAlgorithm_Fast) +public: + VmaDefragmentationAlgorithm_Fast( + VmaAllocator hAllocator, + VmaBlockVector* pBlockVector, + uint32_t currentFrameIndex, + bool overlappingMoveSupported); + virtual ~VmaDefragmentationAlgorithm_Fast(); + + virtual void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged) { ++m_AllocationCount; } + virtual void AddAll() { m_AllAllocations = true; } + + virtual VkResult Defragment( + VmaVector< VmaDefragmentationMove, VmaStlAllocator >& moves, + VkDeviceSize maxBytesToMove, + uint32_t maxAllocationsToMove, + VmaDefragmentationFlags flags); + + virtual VkDeviceSize GetBytesMoved() const { return m_BytesMoved; } + virtual uint32_t GetAllocationsMoved() const { return m_AllocationsMoved; } + +private: + struct BlockInfo + { + size_t origBlockIndex; + }; + + class FreeSpaceDatabase + { + public: + FreeSpaceDatabase() + { + FreeSpace s = {}; + s.blockInfoIndex = SIZE_MAX; + for(size_t i = 0; i < MAX_COUNT; ++i) + { + m_FreeSpaces[i] = s; + } + } + + void Register(size_t blockInfoIndex, VkDeviceSize offset, VkDeviceSize size) + { + if(size < 
+            if(size < VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+            {
+                return;
+            }
+
+            // Find first invalid or the smallest structure.
+            size_t bestIndex = SIZE_MAX;
+            for(size_t i = 0; i < MAX_COUNT; ++i)
+            {
+                // Empty structure.
+                if(m_FreeSpaces[i].blockInfoIndex == SIZE_MAX)
+                {
+                    bestIndex = i;
+                    break;
+                }
+                if(m_FreeSpaces[i].size < size &&
+                    (bestIndex == SIZE_MAX || m_FreeSpaces[bestIndex].size > m_FreeSpaces[i].size))
+                {
+                    bestIndex = i;
+                }
+            }
+
+            if(bestIndex != SIZE_MAX)
+            {
+                m_FreeSpaces[bestIndex].blockInfoIndex = blockInfoIndex;
+                m_FreeSpaces[bestIndex].offset = offset;
+                m_FreeSpaces[bestIndex].size = size;
+            }
+        }
+
+        bool Fetch(VkDeviceSize alignment, VkDeviceSize size,
+            size_t& outBlockInfoIndex, VkDeviceSize& outDstOffset)
+        {
+            size_t bestIndex = SIZE_MAX;
+            VkDeviceSize bestFreeSpaceAfter = 0;
+            for(size_t i = 0; i < MAX_COUNT; ++i)
+            {
+                // Structure is valid.
+                if(m_FreeSpaces[i].blockInfoIndex != SIZE_MAX)
+                {
+                    const VkDeviceSize dstOffset = VmaAlignUp(m_FreeSpaces[i].offset, alignment);
+                    // Allocation fits into this structure.
+                    if(dstOffset + size <= m_FreeSpaces[i].offset + m_FreeSpaces[i].size)
+                    {
+                        const VkDeviceSize freeSpaceAfter = (m_FreeSpaces[i].offset + m_FreeSpaces[i].size) -
+                            (dstOffset + size);
+                        if(bestIndex == SIZE_MAX || freeSpaceAfter > bestFreeSpaceAfter)
+                        {
+                            bestIndex = i;
+                            bestFreeSpaceAfter = freeSpaceAfter;
+                        }
+                    }
+                }
+            }
+
+            if(bestIndex != SIZE_MAX)
+            {
+                outBlockInfoIndex = m_FreeSpaces[bestIndex].blockInfoIndex;
+                outDstOffset = VmaAlignUp(m_FreeSpaces[bestIndex].offset, alignment);
+
+                if(bestFreeSpaceAfter >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+                {
+                    // Leave this structure for remaining empty space.
+                    const VkDeviceSize alignmentPlusSize = (outDstOffset - m_FreeSpaces[bestIndex].offset) + size;
+                    m_FreeSpaces[bestIndex].offset += alignmentPlusSize;
+                    m_FreeSpaces[bestIndex].size -= alignmentPlusSize;
+                }
+                else
+                {
+                    // This structure becomes invalid.
+                    m_FreeSpaces[bestIndex].blockInfoIndex = SIZE_MAX;
+                }
+
+                return true;
+            }
+
+            return false;
+        }
+
+    private:
+        static const size_t MAX_COUNT = 4;
+
+        struct FreeSpace
+        {
+            size_t blockInfoIndex; // SIZE_MAX means this structure is invalid.
+            VkDeviceSize offset;
+            VkDeviceSize size;
+        } m_FreeSpaces[MAX_COUNT];
+    };
+
+    const bool m_OverlappingMoveSupported;
+
+    uint32_t m_AllocationCount;
+    bool m_AllAllocations;
+
+    VkDeviceSize m_BytesMoved;
+    uint32_t m_AllocationsMoved;
+
+    VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> > m_BlockInfos;
+
+    void PreprocessMetadata();
+    void PostprocessMetadata();
+    void InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc);
+};
+
+struct VmaBlockDefragmentationContext
+{
+    enum BLOCK_FLAG
+    {
+        BLOCK_FLAG_USED = 0x00000001,
+    };
+    uint32_t flags;
+    VkBuffer hBuffer;
+};
+
+class VmaBlockVectorDefragmentationContext
+{
+    VMA_CLASS_NO_COPY(VmaBlockVectorDefragmentationContext)
+public:
+    VkResult res;
+    bool mutexLocked;
+    VmaVector< VmaBlockDefragmentationContext, VmaStlAllocator<VmaBlockDefragmentationContext> > blockContexts;
+    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> > defragmentationMoves;
+    uint32_t defragmentationMovesProcessed;
+    uint32_t defragmentationMovesCommitted;
+    bool hasDefragmentationPlan;
+
+    VmaBlockVectorDefragmentationContext(
+        VmaAllocator hAllocator,
+        VmaPool hCustomPool, // Optional.
+        VmaBlockVector* pBlockVector,
+        uint32_t currFrameIndex);
+    ~VmaBlockVectorDefragmentationContext();
+
+    VmaPool GetCustomPool() const { return m_hCustomPool; }
+    VmaBlockVector* GetBlockVector() const { return m_pBlockVector; }
+    VmaDefragmentationAlgorithm* GetAlgorithm() const { return m_pAlgorithm; }
+
+    void AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged);
+    void AddAll() { m_AllAllocations = true; }
+
+    void Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags);
+
+private:
+    const VmaAllocator m_hAllocator;
+    // Null if not from custom pool.
+    const VmaPool m_hCustomPool;
+    // Redundant, for convenience not to fetch from m_hCustomPool->m_BlockVector or m_hAllocator->m_pBlockVectors.
+    VmaBlockVector* const m_pBlockVector;
+    const uint32_t m_CurrFrameIndex;
+    // Owner of this object.
+    VmaDefragmentationAlgorithm* m_pAlgorithm;
+
+    struct AllocInfo
+    {
+        VmaAllocation hAlloc;
+        VkBool32* pChanged;
+    };
+    // Used between constructor and Begin.
+    VmaVector< AllocInfo, VmaStlAllocator<AllocInfo> > m_Allocations;
+    bool m_AllAllocations;
+};
+
+struct VmaDefragmentationContext_T
+{
+private:
+    VMA_CLASS_NO_COPY(VmaDefragmentationContext_T)
+public:
+    VmaDefragmentationContext_T(
+        VmaAllocator hAllocator,
+        uint32_t currFrameIndex,
+        uint32_t flags,
+        VmaDefragmentationStats* pStats);
+    ~VmaDefragmentationContext_T();
+
+    void AddPools(uint32_t poolCount, const VmaPool* pPools);
+    void AddAllocations(
+        uint32_t allocationCount,
+        const VmaAllocation* pAllocations,
+        VkBool32* pAllocationsChanged);
+
+    /*
+    Returns:
+    - `VK_SUCCESS` if succeeded and object can be destroyed immediately.
+    - `VK_NOT_READY` if succeeded but the object must remain alive until vmaDefragmentationEnd().
+    - Negative value if error occurred and object can be destroyed immediately.
+    */
+    VkResult Defragment(
+        VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
+        VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
+        VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags);
+
+    VkResult DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo);
+    VkResult DefragmentPassEnd();
+
+private:
+    const VmaAllocator m_hAllocator;
+    const uint32_t m_CurrFrameIndex;
+    const uint32_t m_Flags;
+    VmaDefragmentationStats* const m_pStats;
+
+    VkDeviceSize m_MaxCpuBytesToMove;
+    uint32_t m_MaxCpuAllocationsToMove;
+    VkDeviceSize m_MaxGpuBytesToMove;
+    uint32_t m_MaxGpuAllocationsToMove;
+
+    // Owner of these objects.
+    VmaBlockVectorDefragmentationContext* m_DefaultPoolContexts[VK_MAX_MEMORY_TYPES];
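+    // Illustrative sketch (not from the original header): the return contract of
+    // Defragment() above maps onto the public API roughly like this:
+    //
+    //   VmaDefragmentationContext ctx;
+    //   VkResult res = vmaDefragmentationBegin(allocator, &info, &stats, &ctx);
+    //   if(res == VK_NOT_READY)
+    //   {
+    //       // GPU moves were recorded into info.commandBuffer: submit it, wait
+    //       // for completion, and only then end the defragmentation.
+    //   }
+    //   // On VK_SUCCESS or a negative result the context can be ended right away.
+    //   vmaDefragmentationEnd(allocator, ctx);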
+    // Owner of these objects.
+    VmaVector< VmaBlockVectorDefragmentationContext*, VmaStlAllocator<VmaBlockVectorDefragmentationContext*> > m_CustomPoolContexts;
+};
+
+#if VMA_RECORDING_ENABLED
+
+class VmaRecorder
+{
+public:
+    VmaRecorder();
+    VkResult Init(const VmaRecordSettings& settings, bool useMutex);
+    void WriteConfiguration(
+        const VkPhysicalDeviceProperties& devProps,
+        const VkPhysicalDeviceMemoryProperties& memProps,
+        uint32_t vulkanApiVersion,
+        bool dedicatedAllocationExtensionEnabled,
+        bool bindMemory2ExtensionEnabled,
+        bool memoryBudgetExtensionEnabled,
+        bool deviceCoherentMemoryExtensionEnabled);
+    ~VmaRecorder();
+
+    void RecordCreateAllocator(uint32_t frameIndex);
+    void RecordDestroyAllocator(uint32_t frameIndex);
+    void RecordCreatePool(uint32_t frameIndex,
+        const VmaPoolCreateInfo& createInfo,
+        VmaPool pool);
+    void RecordDestroyPool(uint32_t frameIndex, VmaPool pool);
+    void RecordAllocateMemory(uint32_t frameIndex,
+        const VkMemoryRequirements& vkMemReq,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaAllocation allocation);
+    void RecordAllocateMemoryPages(uint32_t frameIndex,
+        const VkMemoryRequirements& vkMemReq,
+        const VmaAllocationCreateInfo& createInfo,
+        uint64_t allocationCount,
+        const VmaAllocation* pAllocations);
+    void RecordAllocateMemoryForBuffer(uint32_t frameIndex,
+        const VkMemoryRequirements& vkMemReq,
+        bool requiresDedicatedAllocation,
+        bool prefersDedicatedAllocation,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaAllocation allocation);
+    void RecordAllocateMemoryForImage(uint32_t frameIndex,
+        const VkMemoryRequirements& vkMemReq,
+        bool requiresDedicatedAllocation,
+        bool prefersDedicatedAllocation,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaAllocation allocation);
+    void RecordFreeMemory(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordFreeMemoryPages(uint32_t frameIndex,
+        uint64_t allocationCount,
+        const VmaAllocation* pAllocations);
+    void RecordSetAllocationUserData(uint32_t frameIndex,
+        VmaAllocation allocation,
+        const void* pUserData);
+    void RecordCreateLostAllocation(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordMapMemory(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordUnmapMemory(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordFlushAllocation(uint32_t frameIndex,
+        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+    void RecordInvalidateAllocation(uint32_t frameIndex,
+        VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size);
+    void RecordCreateBuffer(uint32_t frameIndex,
+        const VkBufferCreateInfo& bufCreateInfo,
+        const VmaAllocationCreateInfo& allocCreateInfo,
+        VmaAllocation allocation);
+    void RecordCreateImage(uint32_t frameIndex,
+        const VkImageCreateInfo& imageCreateInfo,
+        const VmaAllocationCreateInfo& allocCreateInfo,
+        VmaAllocation allocation);
+    void RecordDestroyBuffer(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordDestroyImage(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordTouchAllocation(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordGetAllocationInfo(uint32_t frameIndex,
+        VmaAllocation allocation);
+    void RecordMakePoolAllocationsLost(uint32_t frameIndex,
+        VmaPool pool);
+    void RecordDefragmentationBegin(uint32_t frameIndex,
+        const VmaDefragmentationInfo2& info,
+        VmaDefragmentationContext ctx);
+    void RecordDefragmentationEnd(uint32_t frameIndex,
+        VmaDefragmentationContext ctx);
+    void RecordSetPoolName(uint32_t frameIndex,
+        VmaPool pool,
+        const char* name);
+
+private:
+    struct CallParams
+    {
+        uint32_t threadId;
+        double time;
+    };
+
+    class UserDataString
+    {
+    public:
+        UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData);
+        const char* GetString() const { return m_Str; }
+
+    private:
+        char m_PtrStr[17];
+        const char* m_Str;
+    };
+
+    bool m_UseMutex;
+    VmaRecordFlags m_Flags;
+    FILE* m_File;
+    VMA_MUTEX m_FileMutex;
+    std::chrono::time_point<std::chrono::high_resolution_clock> m_RecordingStartTime;
+
+    void GetBasicParams(CallParams& outParams);
+
+    // T must be a pointer type, e.g. VmaAllocation, VmaPool.
+    template<typename T>
+    void PrintPointerList(uint64_t count, const T* pItems)
+    {
+        if(count)
+        {
+            fprintf(m_File, "%p", pItems[0]);
+            for(uint64_t i = 1; i < count; ++i)
+            {
+                fprintf(m_File, " %p", pItems[i]);
+            }
+        }
+    }
+
+    void PrintPointerList(uint64_t count, const VmaAllocation* pItems);
+    void Flush();
+};
+
+#endif // #if VMA_RECORDING_ENABLED
+
+/*
+Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
+*/
+class VmaAllocationObjectAllocator
+{
+    VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator)
+public:
+    VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks);
+
+    template<typename... Types> VmaAllocation Allocate(Types... args);
+    void Free(VmaAllocation hAlloc);
+
+private:
+    VMA_MUTEX m_Mutex;
+    VmaPoolAllocator<VmaAllocation_T> m_Allocator;
+};
+
+struct VmaCurrentBudgetData
+{
+    VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
+    VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
+
+#if VMA_MEMORY_BUDGET
+    VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
+    VMA_RW_MUTEX m_BudgetMutex;
+    uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
+    uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
+    uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
+#endif // #if VMA_MEMORY_BUDGET
+
+    VmaCurrentBudgetData()
+    {
+        for(uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
+        {
+            m_BlockBytes[heapIndex] = 0;
+            m_AllocationBytes[heapIndex] = 0;
+#if VMA_MEMORY_BUDGET
+            m_VulkanUsage[heapIndex] = 0;
+            m_VulkanBudget[heapIndex] = 0;
+            m_BlockBytesAtBudgetFetch[heapIndex] = 0;
+#endif
+        }
+
+#if VMA_MEMORY_BUDGET
+        m_OperationsSinceBudgetFetch = 0;
+#endif
+    }
+
+    void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
+    {
+        m_AllocationBytes[heapIndex] += allocationSize;
+#if VMA_MEMORY_BUDGET
+        ++m_OperationsSinceBudgetFetch;
+#endif
+    }
+
+    void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
+    {
+        VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); // DELME
+        m_AllocationBytes[heapIndex] -= allocationSize;
+#if VMA_MEMORY_BUDGET
+        ++m_OperationsSinceBudgetFetch;
+#endif
+    }
+};
+
+// Main allocator object.
+struct VmaAllocator_T
+{
+    VMA_CLASS_NO_COPY(VmaAllocator_T)
+public:
+    bool m_UseMutex;
+    uint32_t m_VulkanApiVersion;
+    bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
+    bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
+    bool m_UseExtMemoryBudget;
+    bool m_UseAmdDeviceCoherentMemory;
+    bool m_UseKhrBufferDeviceAddress;
+    bool m_UseExtMemoryPriority;
+    VkDevice m_hDevice;
+    VkInstance m_hInstance;
+    bool m_AllocationCallbacksSpecified;
+    VkAllocationCallbacks m_AllocationCallbacks;
+    VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
+    VmaAllocationObjectAllocator m_AllocationObjectAllocator;
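+    // Illustrative sketch (not from the original header): the per-heap counters in
+    // VmaCurrentBudgetData above are driven from the allocation paths, e.g.:
+    //
+    //   const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+    //   m_Budget.AddAllocation(heapIndex, size);    // when an allocation is made
+    //   m_Budget.RemoveAllocation(heapIndex, size); // when it is freed
+    //
+    // Both paths touch only VMA_ATOMIC_UINT64 counters, so no mutex is required.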
+    // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
+    uint32_t m_HeapSizeLimitMask;
+
+    VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
+    VkPhysicalDeviceMemoryProperties m_MemProps;
+
+    // Default pools.
+    VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
+
+    // Each vector is sorted by memory (handle value).
+    typedef VmaVector< VmaAllocation, VmaStlAllocator<VmaAllocation> > AllocationVectorType;
+    AllocationVectorType* m_pDedicatedAllocations[VK_MAX_MEMORY_TYPES];
+    VMA_RW_MUTEX m_DedicatedAllocationsMutex[VK_MAX_MEMORY_TYPES];
+
+    VmaCurrentBudgetData m_Budget;
+
+    VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
+    VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
+    ~VmaAllocator_T();
+
+    const VkAllocationCallbacks* GetAllocationCallbacks() const
+    {
+        return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : 0;
+    }
+    const VmaVulkanFunctions& GetVulkanFunctions() const
+    {
+        return m_VulkanFunctions;
+    }
+
+    VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
+
+    VkDeviceSize GetBufferImageGranularity() const
+    {
+        return VMA_MAX(
+            static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
+            m_PhysicalDeviceProperties.limits.bufferImageGranularity);
+    }
+
+    uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
+    uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
+
+    uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
+    {
+        VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
+        return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
+    }
+    // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
+    bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
+    {
+        return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
+            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+    }
+    // Minimum alignment for all allocations in specific memory type.
+    VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
+    {
+        return IsMemoryTypeNonCoherent(memTypeIndex) ?
+            VMA_MAX((VkDeviceSize)VMA_DEBUG_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
+            (VkDeviceSize)VMA_DEBUG_ALIGNMENT;
+    }
+
+    bool IsIntegratedGpu() const
+    {
+        return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
+    }
+
+    uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
+
+#if VMA_RECORDING_ENABLED
+    VmaRecorder* GetRecorder() const { return m_pRecorder; }
+#endif
+
+    void GetBufferMemoryRequirements(
+        VkBuffer hBuffer,
+        VkMemoryRequirements& memReq,
+        bool& requiresDedicatedAllocation,
+        bool& prefersDedicatedAllocation) const;
+    void GetImageMemoryRequirements(
+        VkImage hImage,
+        VkMemoryRequirements& memReq,
+        bool& requiresDedicatedAllocation,
+        bool& prefersDedicatedAllocation) const;
+
+    // Main allocation function.
+    VkResult AllocateMemory(
+        const VkMemoryRequirements& vkMemReq,
+        bool requiresDedicatedAllocation,
+        bool prefersDedicatedAllocation,
+        VkBuffer dedicatedBuffer,
+        VkBufferUsageFlags dedicatedBufferUsage, // UINT32_MAX when unknown.
+        VkImage dedicatedImage,
+        const VmaAllocationCreateInfo& createInfo,
+        VmaSuballocationType suballocType,
+        size_t allocationCount,
+        VmaAllocation* pAllocations);
+
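+    // Illustrative example (not from the original header): on a device with
+    // nonCoherentAtomSize = 64, GetMemoryTypeMinAlignment() above returns
+    // max(VMA_DEBUG_ALIGNMENT, 64) for a HOST_VISIBLE, non-HOST_COHERENT type, so
+    // that vkFlushMappedMemoryRanges()/vkInvalidateMappedMemoryRanges() ranges can
+    // be rounded out to atom boundaries without overlapping a neighboring
+    // allocation. For coherent types the plain VMA_DEBUG_ALIGNMENT is enough.
+
+    // Main deallocation function.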
+ void FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations); + + VkResult ResizeAllocation( + const VmaAllocation alloc, + VkDeviceSize newSize); + + void CalculateStats(VmaStats* pStats); + + void GetBudget( + VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + VkResult DefragmentationBegin( + const VmaDefragmentationInfo2& info, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext* pContext); + VkResult DefragmentationEnd( + VmaDefragmentationContext context); + + VkResult DefragmentationPassBegin( + VmaDefragmentationPassInfo* pInfo, + VmaDefragmentationContext context); + VkResult DefragmentationPassEnd( + VmaDefragmentationContext context); + + void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); + bool TouchAllocation(VmaAllocation hAllocation); + + VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); + void DestroyPool(VmaPool pool); + void GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats); + + void SetCurrentFrameIndex(uint32_t frameIndex); + uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } + + void MakePoolAllocationsLost( + VmaPool hPool, + size_t* pLostAllocationCount); + VkResult CheckPoolCorruption(VmaPool hPool); + VkResult CheckCorruption(uint32_t memoryTypeBits); + + void CreateLostAllocation(VmaAllocation* pAllocation); + + // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. + VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); + // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. + void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); + // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. + VkResult BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext); + // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. + VkResult BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext); + + VkResult Map(VmaAllocation hAllocation, void** ppData); + void Unmap(VmaAllocation hAllocation); + + VkResult BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + + VkResult FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op); + VkResult FlushOrInvalidateAllocations( + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, const VkDeviceSize* sizes, + VMA_CACHE_OPERATION op); + + void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern); + + /* + Returns bit mask of memory types that can support defragmentation on GPU as + they support creation of required buffer for copy operations. + */ + uint32_t GetGpuDefragmentationMemoryTypeBits(); + +private: + VkDeviceSize m_PreferredLargeHeapBlockSize; + + VkPhysicalDevice m_PhysicalDevice; + VMA_ATOMIC_UINT32 m_CurrentFrameIndex; + VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized. + + VMA_RW_MUTEX m_PoolsMutex; + // Protected by m_PoolsMutex. Sorted by pointer value. 
+    VmaVector<VmaPool, VmaStlAllocator<VmaPool> > m_Pools;
+    uint32_t m_NextPoolId;
+
+    VmaVulkanFunctions m_VulkanFunctions;
+
+    // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
+    uint32_t m_GlobalMemoryTypeBits;
+
+#if VMA_RECORDING_ENABLED
+    VmaRecorder* m_pRecorder;
+#endif
+
+    void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
+
+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+    void ImportVulkanFunctions_Static();
+#endif
+
+    void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
+
+#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+    void ImportVulkanFunctions_Dynamic();
+#endif
+
+    void ValidateVulkanFunctions();
+
+    VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
+
+    VkResult AllocateMemoryOfType(
+        VkDeviceSize size,
+        VkDeviceSize alignment,
+        bool dedicatedAllocation,
+        VkBuffer dedicatedBuffer,
+        VkBufferUsageFlags dedicatedBufferUsage,
+        VkImage dedicatedImage,
+        const VmaAllocationCreateInfo& createInfo,
+        uint32_t memTypeIndex,
+        VmaSuballocationType suballocType,
+        size_t allocationCount,
+        VmaAllocation* pAllocations);
+
+    // Helper function only to be used inside AllocateDedicatedMemory.
+    VkResult AllocateDedicatedMemoryPage(
+        VkDeviceSize size,
+        VmaSuballocationType suballocType,
+        uint32_t memTypeIndex,
+        const VkMemoryAllocateInfo& allocInfo,
+        bool map,
+        bool isUserDataString,
+        void* pUserData,
+        VmaAllocation* pAllocation);
+
+    // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
+    VkResult AllocateDedicatedMemory(
+        VkDeviceSize size,
+        VmaSuballocationType suballocType,
+        uint32_t memTypeIndex,
+        bool withinBudget,
+        bool map,
+        bool isUserDataString,
+        void* pUserData,
+        float priority,
+        VkBuffer dedicatedBuffer,
+        VkBufferUsageFlags dedicatedBufferUsage,
+        VkImage dedicatedImage,
+        size_t allocationCount,
+        VmaAllocation* pAllocations);
+
+    void FreeDedicatedMemory(const VmaAllocation allocation);
+
+    /*
+    Calculates and returns bit mask of memory types that can support defragmentation
+    on GPU as they support creation of required buffer for copy operations.
+    */
+    uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
+
+    uint32_t CalculateGlobalMemoryTypeBits() const;
+
+    bool GetFlushOrInvalidateRange(
+        VmaAllocation allocation,
+        VkDeviceSize offset, VkDeviceSize size,
+        VkMappedMemoryRange& outRange) const;
+
+#if VMA_MEMORY_BUDGET
+    void UpdateVulkanBudget();
+#endif // #if VMA_MEMORY_BUDGET
+};
+
+////////////////////////////////////////////////////////////////////////////////
+// Memory allocation #2 after VmaAllocator_T definition
+
+static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
+{
+    return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
+}
+
+static void VmaFree(VmaAllocator hAllocator, void* ptr)
+{
+    VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
+}
+
+template<typename T>
+static T* VmaAllocate(VmaAllocator hAllocator)
+{
+    return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
+{
+    return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static void vma_delete(VmaAllocator hAllocator, T* ptr)
+{
+    if(ptr != VMA_NULL)
+    {
+        ptr->~T();
+        VmaFree(hAllocator, ptr);
+    }
+}
+
+template<typename T>
+static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
+{
+    if(ptr != VMA_NULL)
+    {
+        for(size_t i = count; i--; )
+            ptr[i].~T();
+        VmaFree(hAllocator, ptr);
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaStringBuilder
+
+#if VMA_STATS_STRING_ENABLED
+
+class VmaStringBuilder
+{
+public:
+    VmaStringBuilder(VmaAllocator alloc) : m_Data(VmaStlAllocator<char>(alloc->GetAllocationCallbacks())) { }
+    size_t GetLength() const { return m_Data.size(); }
+    const char* GetData() const { return m_Data.data(); }
+
+    void Add(char ch) { m_Data.push_back(ch); }
+    void Add(const char* pStr);
+    void AddNewLine() { Add('\n'); }
+    void AddNumber(uint32_t num);
+    void AddNumber(uint64_t num);
+    void AddPointer(const void* ptr);
+
+private:
+    VmaVector< char, VmaStlAllocator<char> > m_Data;
+};
+
+void VmaStringBuilder::Add(const char* pStr)
+{
+    const size_t strLen = strlen(pStr);
+    if(strLen > 0)
+    {
+        const size_t oldCount = m_Data.size();
+        m_Data.resize(oldCount + strLen);
+        memcpy(m_Data.data() + oldCount, pStr, strLen);
+    }
+}
+
+void VmaStringBuilder::AddNumber(uint32_t num)
+{
+    char buf[11];
+    buf[10] = '\0';
+    char *p = &buf[10];
+    do
+    {
+        *--p = '0' + (num % 10);
+        num /= 10;
+    }
+    while(num);
+    Add(p);
+}
+
+void VmaStringBuilder::AddNumber(uint64_t num)
+{
+    char buf[21];
+    buf[20] = '\0';
+    char *p = &buf[20];
+    do
+    {
+        *--p = '0' + (num % 10);
+        num /= 10;
+    }
+    while(num);
+    Add(p);
+}
+
+void VmaStringBuilder::AddPointer(const void* ptr)
+{
+    char buf[21];
+    VmaPtrToStr(buf, sizeof(buf), ptr);
+    Add(buf);
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaJsonWriter
+
+#if VMA_STATS_STRING_ENABLED
+
+class VmaJsonWriter
+{
+    VMA_CLASS_NO_COPY(VmaJsonWriter)
+public:
+    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
+    ~VmaJsonWriter();
+
+    void BeginObject(bool singleLine = false);
+    void EndObject();
+
+    void BeginArray(bool singleLine = false);
+    void EndArray();
+
+    void WriteString(const char* pStr);
+    void BeginString(const char* pStr = VMA_NULL);
+    void ContinueString(const char* pStr);
+    void ContinueString(uint32_t n);
+    void ContinueString(uint64_t n);
+    void ContinueString_Pointer(const void* ptr);
+
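+    // Illustrative sketch (not from the original header): composing one string
+    // value from several parts with the Begin/Continue/End API declared here:
+    //
+    //   json.BeginString("Block ");
+    //   json.ContinueString(blockIndex); // appends decimal digits
+    //   json.EndString();                // closes the quoted JSON string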
+    void EndString(const char* pStr = VMA_NULL);
+
+    void WriteNumber(uint32_t n);
+    void WriteNumber(uint64_t n);
+    void WriteBool(bool b);
+    void WriteNull();
+
+private:
+    static const char* const INDENT;
+
+    enum COLLECTION_TYPE
+    {
+        COLLECTION_TYPE_OBJECT,
+        COLLECTION_TYPE_ARRAY,
+    };
+    struct StackItem
+    {
+        COLLECTION_TYPE type;
+        uint32_t valueCount;
+        bool singleLineMode;
+    };
+
+    VmaStringBuilder& m_SB;
+    VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
+    bool m_InsideString;
+
+    void BeginValue(bool isString);
+    void WriteIndent(bool oneLess = false);
+};
+
+const char* const VmaJsonWriter::INDENT = "  ";
+
+VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) :
+    m_SB(sb),
+    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
+    m_InsideString(false)
+{
+}
+
+VmaJsonWriter::~VmaJsonWriter()
+{
+    VMA_ASSERT(!m_InsideString);
+    VMA_ASSERT(m_Stack.empty());
+}
+
+void VmaJsonWriter::BeginObject(bool singleLine)
+{
+    VMA_ASSERT(!m_InsideString);
+
+    BeginValue(false);
+    m_SB.Add('{');
+
+    StackItem item;
+    item.type = COLLECTION_TYPE_OBJECT;
+    item.valueCount = 0;
+    item.singleLineMode = singleLine;
+    m_Stack.push_back(item);
+}
+
+void VmaJsonWriter::EndObject()
+{
+    VMA_ASSERT(!m_InsideString);
+
+    WriteIndent(true);
+    m_SB.Add('}');
+
+    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
+    m_Stack.pop_back();
+}
+
+void VmaJsonWriter::BeginArray(bool singleLine)
+{
+    VMA_ASSERT(!m_InsideString);
+
+    BeginValue(false);
+    m_SB.Add('[');
+
+    StackItem item;
+    item.type = COLLECTION_TYPE_ARRAY;
+    item.valueCount = 0;
+    item.singleLineMode = singleLine;
+    m_Stack.push_back(item);
+}
+
+void VmaJsonWriter::EndArray()
+{
+    VMA_ASSERT(!m_InsideString);
+
+    WriteIndent(true);
+    m_SB.Add(']');
+
+    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
+    m_Stack.pop_back();
+}
+
+void VmaJsonWriter::WriteString(const char* pStr)
+{
+    BeginString(pStr);
+    EndString();
+}
+
+void VmaJsonWriter::BeginString(const char* pStr)
+{
+    VMA_ASSERT(!m_InsideString);
+
+    BeginValue(true);
+    m_SB.Add('"');
+    m_InsideString = true;
+    if(pStr != VMA_NULL && pStr[0] != '\0')
+    {
+        ContinueString(pStr);
+    }
+}
+
+void VmaJsonWriter::ContinueString(const char* pStr)
+{
+    VMA_ASSERT(m_InsideString);
+
+    const size_t strLen = strlen(pStr);
+    for(size_t i = 0; i < strLen; ++i)
+    {
+        char ch = pStr[i];
+        if(ch == '\\')
+        {
+            m_SB.Add("\\\\");
+        }
+        else if(ch == '"')
+        {
+            m_SB.Add("\\\"");
+        }
+        else if(ch >= 32)
+        {
+            m_SB.Add(ch);
+        }
+        else switch(ch)
+        {
+        case '\b':
+            m_SB.Add("\\b");
+            break;
+        case '\f':
+            m_SB.Add("\\f");
+            break;
+        case '\n':
+            m_SB.Add("\\n");
+            break;
+        case '\r':
+            m_SB.Add("\\r");
+            break;
+        case '\t':
+            m_SB.Add("\\t");
+            break;
+        default:
+            VMA_ASSERT(0 && "Character not currently supported.");
+            break;
+        }
+    }
+}
+
+void VmaJsonWriter::ContinueString(uint32_t n)
+{
+    VMA_ASSERT(m_InsideString);
+    m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::ContinueString(uint64_t n)
+{
+    VMA_ASSERT(m_InsideString);
+    m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
+{
+    VMA_ASSERT(m_InsideString);
+    m_SB.AddPointer(ptr);
+}
+
+void VmaJsonWriter::EndString(const char* pStr)
+{
+    VMA_ASSERT(m_InsideString);
+    if(pStr != VMA_NULL && pStr[0] != '\0')
+    {
+        ContinueString(pStr);
+    }
+    m_SB.Add('"');
+    m_InsideString = false;
+}
+
+void VmaJsonWriter::WriteNumber(uint32_t n)
+{
+    VMA_ASSERT(!m_InsideString);
+    BeginValue(false);
+    m_SB.AddNumber(n);
+}
+
+void
VmaJsonWriter::WriteNumber(uint64_t n) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteBool(bool b) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add(b ? "true" : "false"); +} + +void VmaJsonWriter::WriteNull() +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add("null"); +} + +void VmaJsonWriter::BeginValue(bool isString) +{ + if(!m_Stack.empty()) + { + StackItem& currItem = m_Stack.back(); + if(currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 == 0) + { + VMA_ASSERT(isString); + } + + if(currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 != 0) + { + m_SB.Add(": "); + } + else if(currItem.valueCount > 0) + { + m_SB.Add(", "); + WriteIndent(); + } + else + { + WriteIndent(); + } + ++currItem.valueCount; + } +} + +void VmaJsonWriter::WriteIndent(bool oneLess) +{ + if(!m_Stack.empty() && !m_Stack.back().singleLineMode) + { + m_SB.AddNewLine(); + + size_t count = m_Stack.size(); + if(count > 0 && oneLess) + { + --count; + } + for(size_t i = 0; i < count; ++i) + { + m_SB.Add(INDENT); + } + } +} + +#endif // #if VMA_STATS_STRING_ENABLED + +//////////////////////////////////////////////////////////////////////////////// + +void VmaAllocation_T::SetUserData(VmaAllocator hAllocator, void* pUserData) +{ + if(IsUserDataString()) + { + VMA_ASSERT(pUserData == VMA_NULL || pUserData != m_pUserData); + + FreeUserDataString(hAllocator); + + if(pUserData != VMA_NULL) + { + m_pUserData = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), (const char*)pUserData); + } + } + else + { + m_pUserData = pUserData; + } +} + +void VmaAllocation_T::ChangeBlockAllocation( + VmaAllocator hAllocator, + VmaDeviceMemoryBlock* block, + VkDeviceSize offset) +{ + VMA_ASSERT(block != VMA_NULL); + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + + // Move mapping reference counter from old block to new block. 
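+    // Illustrative note (not from the original header): if the allocation is
+    // persistently mapped (MAP_COUNT_FLAG_PERSISTENT_MAP) and was additionally
+    // mapped twice by the user, mapRefCount below becomes 2 + 1 = 3, so the old
+    // block gives up three map references and the new block acquires three,
+    // keeping the VkDeviceMemory map counts balanced across the move.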
+ if(block != m_BlockAllocation.m_Block) + { + uint32_t mapRefCount = m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP; + if(IsPersistentMap()) + ++mapRefCount; + m_BlockAllocation.m_Block->Unmap(hAllocator, mapRefCount); + block->Map(hAllocator, mapRefCount, VMA_NULL); + } + + m_BlockAllocation.m_Block = block; + m_BlockAllocation.m_Offset = offset; +} + +void VmaAllocation_T::ChangeOffset(VkDeviceSize newOffset) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + m_BlockAllocation.m_Offset = newOffset; +} + +VkDeviceSize VmaAllocation_T::GetOffset() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Offset; + case ALLOCATION_TYPE_DEDICATED: + return 0; + default: + VMA_ASSERT(0); + return 0; + } +} + +VkDeviceMemory VmaAllocation_T::GetMemory() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetDeviceMemory(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hMemory; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +void* VmaAllocation_T::GetMappedData() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + if(m_MapCount != 0) + { + void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); + VMA_ASSERT(pBlockData != VMA_NULL); + return (char*)pBlockData + m_BlockAllocation.m_Offset; + } + else + { + return VMA_NULL; + } + break; + case ALLOCATION_TYPE_DEDICATED: + VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0)); + return m_DedicatedAllocation.m_pMappedData; + default: + VMA_ASSERT(0); + return VMA_NULL; + } +} + +bool VmaAllocation_T::CanBecomeLost() const +{ + switch(m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_CanBecomeLost; + case ALLOCATION_TYPE_DEDICATED: + return false; + default: + VMA_ASSERT(0); + return false; + } +} + +bool VmaAllocation_T::MakeLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ + VMA_ASSERT(CanBecomeLost()); + + /* + Warning: This is a carefully designed algorithm. + Do not modify unless you really know what you're doing :) + */ + uint32_t localLastUseFrameIndex = GetLastUseFrameIndex(); + for(;;) + { + if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) + { + VMA_ASSERT(0); + return false; + } + else if(localLastUseFrameIndex + frameInUseCount >= currentFrameIndex) + { + return false; + } + else // Last use time earlier than current time. + { + if(CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, VMA_FRAME_INDEX_LOST)) + { + // Setting hAllocation.LastUseFrameIndex atomic to VMA_FRAME_INDEX_LOST is enough to mark it as LOST. + // Calling code just needs to unregister this allocation in owning VmaDeviceMemoryBlock. + return true; + } + } + } +} + +#if VMA_STATS_STRING_ENABLED + +// Correspond to values of enum VmaSuballocationType. 
+static const char* VMA_SUBALLOCATION_TYPE_NAMES[] = { + "FREE", + "UNKNOWN", + "BUFFER", + "IMAGE_UNKNOWN", + "IMAGE_LINEAR", + "IMAGE_OPTIMAL", +}; + +void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const +{ + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); + + json.WriteString("Size"); + json.WriteNumber(m_Size); + + if(m_pUserData != VMA_NULL) + { + json.WriteString("UserData"); + if(IsUserDataString()) + { + json.WriteString((const char*)m_pUserData); + } + else + { + json.BeginString(); + json.ContinueString_Pointer(m_pUserData); + json.EndString(); + } + } + + json.WriteString("CreationFrameIndex"); + json.WriteNumber(m_CreationFrameIndex); + + json.WriteString("LastUseFrameIndex"); + json.WriteNumber(GetLastUseFrameIndex()); + + if(m_BufferImageUsage != 0) + { + json.WriteString("Usage"); + json.WriteNumber(m_BufferImageUsage); + } +} + +#endif + +void VmaAllocation_T::FreeUserDataString(VmaAllocator hAllocator) +{ + VMA_ASSERT(IsUserDataString()); + VmaFreeString(hAllocator->GetAllocationCallbacks(), (char*)m_pUserData); + m_pUserData = VMA_NULL; +} + +void VmaAllocation_T::BlockAllocMap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) + { + ++m_MapCount; + } + else + { + VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); + } +} + +void VmaAllocation_T::BlockAllocUnmap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) + { + --m_MapCount; + } + else + { + VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); + } +} + +VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + + if(m_MapCount != 0) + { + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) < 0x7F) + { + VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL); + *ppData = m_DedicatedAllocation.m_pMappedData; + ++m_MapCount; + return VK_SUCCESS; + } + else + { + VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously."); + return VK_ERROR_MEMORY_MAP_FAILED; + } + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + ppData); + if(result == VK_SUCCESS) + { + m_DedicatedAllocation.m_pMappedData = *ppData; + m_MapCount = 1; + } + return result; + } +} + +void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + + if((m_MapCount & ~MAP_COUNT_FLAG_PERSISTENT_MAP) != 0) + { + --m_MapCount; + if(m_MapCount == 0) + { + m_DedicatedAllocation.m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory); + } + } + else + { + VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped."); + } +} + +#if VMA_STATS_STRING_ENABLED + +static void VmaPrintStatInfo(VmaJsonWriter& json, const VmaStatInfo& stat) +{ + json.BeginObject(); + + json.WriteString("Blocks"); + json.WriteNumber(stat.blockCount); + + json.WriteString("Allocations"); + json.WriteNumber(stat.allocationCount); + + json.WriteString("UnusedRanges"); + json.WriteNumber(stat.unusedRangeCount); + + json.WriteString("UsedBytes"); + json.WriteNumber(stat.usedBytes); + + json.WriteString("UnusedBytes"); + json.WriteNumber(stat.unusedBytes); + + 
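+    // Illustrative sketch (not from the original header): for a block with
+    // stat = { blockCount = 1, allocationCount = 2, unusedRangeCount = 1,
+    // usedBytes = 768, unusedBytes = 256 }, this function emits roughly:
+    //
+    //   { "Blocks": 1, "Allocations": 2, "UnusedRanges": 1,
+    //     "UsedBytes": 768, "UnusedBytes": 256,
+    //     "AllocationSize": { "Min": ..., "Avg": ..., "Max": ... } }
+    //
+    // "UnusedRangeSize" is omitted because unusedRangeCount is not greater than 1.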
+    if(stat.allocationCount > 1)
+    {
+        json.WriteString("AllocationSize");
+        json.BeginObject(true);
+        json.WriteString("Min");
+        json.WriteNumber(stat.allocationSizeMin);
+        json.WriteString("Avg");
+        json.WriteNumber(stat.allocationSizeAvg);
+        json.WriteString("Max");
+        json.WriteNumber(stat.allocationSizeMax);
+        json.EndObject();
+    }
+
+    if(stat.unusedRangeCount > 1)
+    {
+        json.WriteString("UnusedRangeSize");
+        json.BeginObject(true);
+        json.WriteString("Min");
+        json.WriteNumber(stat.unusedRangeSizeMin);
+        json.WriteString("Avg");
+        json.WriteNumber(stat.unusedRangeSizeAvg);
+        json.WriteString("Max");
+        json.WriteNumber(stat.unusedRangeSizeMax);
+        json.EndObject();
+    }
+
+    json.EndObject();
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+struct VmaSuballocationItemSizeLess
+{
+    bool operator()(
+        const VmaSuballocationList::iterator lhs,
+        const VmaSuballocationList::iterator rhs) const
+    {
+        return lhs->size < rhs->size;
+    }
+    bool operator()(
+        const VmaSuballocationList::iterator lhs,
+        VkDeviceSize rhsSize) const
+    {
+        return lhs->size < rhsSize;
+    }
+};
+
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata
+
+VmaBlockMetadata::VmaBlockMetadata(VmaAllocator hAllocator) :
+    m_Size(0),
+    m_pAllocationCallbacks(hAllocator->GetAllocationCallbacks())
+{
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
+    VkDeviceSize unusedBytes,
+    size_t allocationCount,
+    size_t unusedRangeCount) const
+{
+    json.BeginObject();
+
+    json.WriteString("TotalBytes");
+    json.WriteNumber(GetSize());
+
+    json.WriteString("UnusedBytes");
+    json.WriteNumber(unusedBytes);
+
+    json.WriteString("Allocations");
+    json.WriteNumber((uint64_t)allocationCount);
+
+    json.WriteString("UnusedRanges");
+    json.WriteNumber((uint64_t)unusedRangeCount);
+
+    json.WriteString("Suballocations");
+    json.BeginArray();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+    VkDeviceSize offset,
+    VmaAllocation hAllocation) const
+{
+    json.BeginObject(true);
+
+    json.WriteString("Offset");
+    json.WriteNumber(offset);
+
+    hAllocation->PrintParameters(json);
+
+    json.EndObject();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+    VkDeviceSize offset,
+    VkDeviceSize size) const
+{
+    json.BeginObject(true);
+
+    json.WriteString("Offset");
+    json.WriteNumber(offset);
+
+    json.WriteString("Type");
+    json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
+
+    json.WriteString("Size");
+    json.WriteNumber(size);
+
+    json.EndObject();
+}
+
+void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
+{
+    json.EndArray();
+    json.EndObject();
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata_Generic
+
+VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(VmaAllocator hAllocator) :
+    VmaBlockMetadata(hAllocator),
+    m_FreeCount(0),
+    m_SumFreeSize(0),
+    m_Suballocations(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+    m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(hAllocator->GetAllocationCallbacks()))
+{
+}
+
+VmaBlockMetadata_Generic::~VmaBlockMetadata_Generic()
+{
+}
+
+void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
+{
+    VmaBlockMetadata::Init(size);
+
+    m_FreeCount = 1;
+    m_SumFreeSize = size;
+
+    VmaSuballocation suballoc = {};
+    suballoc.offset = 0;
+    suballoc.size = size;
+    suballoc.type =
VMA_SUBALLOCATION_TYPE_FREE;
+    suballoc.hAllocation = VK_NULL_HANDLE;
+
+    VMA_ASSERT(size > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER);
+    m_Suballocations.push_back(suballoc);
+    VmaSuballocationList::iterator suballocItem = m_Suballocations.end();
+    --suballocItem;
+    m_FreeSuballocationsBySize.push_back(suballocItem);
+}
+
+bool VmaBlockMetadata_Generic::Validate() const
+{
+    VMA_VALIDATE(!m_Suballocations.empty());
+
+    // Expected offset of new suballocation as calculated from previous ones.
+    VkDeviceSize calculatedOffset = 0;
+    // Expected number of free suballocations as calculated from traversing their list.
+    uint32_t calculatedFreeCount = 0;
+    // Expected sum size of free suballocations as calculated from traversing their list.
+    VkDeviceSize calculatedSumFreeSize = 0;
+    // Expected number of free suballocations that should be registered in
+    // m_FreeSuballocationsBySize calculated from traversing their list.
+    size_t freeSuballocationsToRegister = 0;
+    // True if previous visited suballocation was free.
+    bool prevFree = false;
+
+    for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin();
+        suballocItem != m_Suballocations.cend();
+        ++suballocItem)
+    {
+        const VmaSuballocation& subAlloc = *suballocItem;
+
+        // Actual offset of this suballocation doesn't match expected one.
+        VMA_VALIDATE(subAlloc.offset == calculatedOffset);
+
+        const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
+        // Two adjacent free suballocations are invalid. They should be merged.
+        VMA_VALIDATE(!prevFree || !currFree);
+
+        VMA_VALIDATE(currFree == (subAlloc.hAllocation == VK_NULL_HANDLE));
+
+        if(currFree)
+        {
+            calculatedSumFreeSize += subAlloc.size;
+            ++calculatedFreeCount;
+            if(subAlloc.size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+            {
+                ++freeSuballocationsToRegister;
+            }
+
+            // Margin required between allocations - every free space must be at least that large.
+            VMA_VALIDATE(subAlloc.size >= VMA_DEBUG_MARGIN);
+        }
+        else
+        {
+            VMA_VALIDATE(subAlloc.hAllocation->GetOffset() == subAlloc.offset);
+            VMA_VALIDATE(subAlloc.hAllocation->GetSize() == subAlloc.size);
+
+            // Margin required between allocations - previous allocation must be free.
+            VMA_VALIDATE(VMA_DEBUG_MARGIN == 0 || prevFree);
+        }
+
+        calculatedOffset += subAlloc.size;
+        prevFree = currFree;
+    }
+
+    // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
+    // match expected one.
+    VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
+
+    VkDeviceSize lastSize = 0;
+    for(size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
+    {
+        VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
+
+        // Only free suballocations can be registered in m_FreeSuballocationsBySize.
+        VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
+        // They must be sorted by size ascending.
+        VMA_VALIDATE(suballocItem->size >= lastSize);
+
+        lastSize = suballocItem->size;
+    }
+
+    // Check if totals match calculated values.
+ VMA_VALIDATE(ValidateFreeSuballocationList()); + VMA_VALIDATE(calculatedOffset == GetSize()); + VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize); + VMA_VALIDATE(calculatedFreeCount == m_FreeCount); + + return true; +} + +VkDeviceSize VmaBlockMetadata_Generic::GetUnusedRangeSizeMax() const +{ + if(!m_FreeSuballocationsBySize.empty()) + { + return m_FreeSuballocationsBySize.back()->size; + } + else + { + return 0; + } +} + +bool VmaBlockMetadata_Generic::IsEmpty() const +{ + return (m_Suballocations.size() == 1) && (m_FreeCount == 1); +} + +void VmaBlockMetadata_Generic::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +{ + outInfo.blockCount = 1; + + const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); + outInfo.allocationCount = rangeCount - m_FreeCount; + outInfo.unusedRangeCount = m_FreeCount; + + outInfo.unusedBytes = m_SumFreeSize; + outInfo.usedBytes = GetSize() - outInfo.unusedBytes; + + outInfo.allocationSizeMin = UINT64_MAX; + outInfo.allocationSizeMax = 0; + outInfo.unusedRangeSizeMin = UINT64_MAX; + outInfo.unusedRangeSizeMax = 0; + + for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); + suballocItem != m_Suballocations.cend(); + ++suballocItem) + { + const VmaSuballocation& suballoc = *suballocItem; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size); + outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size); + } + else + { + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, suballoc.size); + outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, suballoc.size); + } + } +} + +void VmaBlockMetadata_Generic::AddPoolStats(VmaPoolStats& inoutStats) const +{ + const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); + + inoutStats.size += GetSize(); + inoutStats.unusedSize += m_SumFreeSize; + inoutStats.allocationCount += rangeCount - m_FreeCount; + inoutStats.unusedRangeCount += m_FreeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax()); +} + +#if VMA_STATS_STRING_ENABLED + +void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json) const +{ + PrintDetailedMap_Begin(json, + m_SumFreeSize, // unusedBytes + m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount + m_FreeCount); // unusedRangeCount + + size_t i = 0; + for(VmaSuballocationList::const_iterator suballocItem = m_Suballocations.cbegin(); + suballocItem != m_Suballocations.cend(); + ++suballocItem, ++i) + { + if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + PrintDetailedMap_UnusedRange(json, suballocItem->offset, suballocItem->size); + } + else + { + PrintDetailedMap_Allocation(json, suballocItem->offset, suballocItem->hAllocation); + } + } + + PrintDetailedMap_End(json); +} + +#endif // #if VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Generic::CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(!upperAddress); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + + pAllocationRequest->type = VmaAllocationRequestType::Normal; + + // There is not enough total 
free space in this block to fulfill the request: Early return.
+    if(canMakeOtherLost == false &&
+        m_SumFreeSize < allocSize + 2 * VMA_DEBUG_MARGIN)
+    {
+        return false;
+    }
+
+    // New algorithm, efficiently searching freeSuballocationsBySize.
+    const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
+    if(freeSuballocCount > 0)
+    {
+        if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT)
+        {
+            // Find first free suballocation with size not less than allocSize + 2 * VMA_DEBUG_MARGIN.
+            VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
+                m_FreeSuballocationsBySize.data(),
+                m_FreeSuballocationsBySize.data() + freeSuballocCount,
+                allocSize + 2 * VMA_DEBUG_MARGIN,
+                VmaSuballocationItemSizeLess());
+            size_t index = it - m_FreeSuballocationsBySize.data();
+            for(; index < freeSuballocCount; ++index)
+            {
+                if(CheckAllocation(
+                    currentFrameIndex,
+                    frameInUseCount,
+                    bufferImageGranularity,
+                    allocSize,
+                    allocAlignment,
+                    allocType,
+                    m_FreeSuballocationsBySize[index],
+                    false, // canMakeOtherLost
+                    &pAllocationRequest->offset,
+                    &pAllocationRequest->itemsToMakeLostCount,
+                    &pAllocationRequest->sumFreeSize,
+                    &pAllocationRequest->sumItemSize))
+                {
+                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+                    return true;
+                }
+            }
+        }
+        else if(strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
+        {
+            for(VmaSuballocationList::iterator it = m_Suballocations.begin();
+                it != m_Suballocations.end();
+                ++it)
+            {
+                if(it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
+                    currentFrameIndex,
+                    frameInUseCount,
+                    bufferImageGranularity,
+                    allocSize,
+                    allocAlignment,
+                    allocType,
+                    it,
+                    false, // canMakeOtherLost
+                    &pAllocationRequest->offset,
+                    &pAllocationRequest->itemsToMakeLostCount,
+                    &pAllocationRequest->sumFreeSize,
+                    &pAllocationRequest->sumItemSize))
+                {
+                    pAllocationRequest->item = it;
+                    return true;
+                }
+            }
+        }
+        else // WORST_FIT, FIRST_FIT
+        {
+            // Search starting from biggest suballocations.
+            for(size_t index = freeSuballocCount; index--; )
+            {
+                if(CheckAllocation(
+                    currentFrameIndex,
+                    frameInUseCount,
+                    bufferImageGranularity,
+                    allocSize,
+                    allocAlignment,
+                    allocType,
+                    m_FreeSuballocationsBySize[index],
+                    false, // canMakeOtherLost
+                    &pAllocationRequest->offset,
+                    &pAllocationRequest->itemsToMakeLostCount,
+                    &pAllocationRequest->sumFreeSize,
+                    &pAllocationRequest->sumItemSize))
+                {
+                    pAllocationRequest->item = m_FreeSuballocationsBySize[index];
+                    return true;
+                }
+            }
+        }
+    }
+
+    if(canMakeOtherLost)
+    {
+        // Brute-force algorithm. TODO: Come up with something better.
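+        // Illustrative recap (not from the original header) of the strategy paths
+        // above: with free sizes [32, 64, 128, 256] and a request of 100 bytes,
+        // BEST_FIT binary-searches m_FreeSuballocationsBySize (sorted ascending)
+        // and tries 128 first, WORST_FIT/FIRST_FIT iterate from 256 downwards,
+        // and MIN_OFFSET walks suballocations in address order, which is what
+        // defragmentation wants when packing allocations towards offset 0.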
+ + bool found = false; + VmaAllocationRequest tmpAllocRequest = {}; + tmpAllocRequest.type = VmaAllocationRequestType::Normal; + for(VmaSuballocationList::iterator suballocIt = m_Suballocations.begin(); + suballocIt != m_Suballocations.end(); + ++suballocIt) + { + if(suballocIt->type == VMA_SUBALLOCATION_TYPE_FREE || + suballocIt->hAllocation->CanBecomeLost()) + { + if(CheckAllocation( + currentFrameIndex, + frameInUseCount, + bufferImageGranularity, + allocSize, + allocAlignment, + allocType, + suballocIt, + canMakeOtherLost, + &tmpAllocRequest.offset, + &tmpAllocRequest.itemsToMakeLostCount, + &tmpAllocRequest.sumFreeSize, + &tmpAllocRequest.sumItemSize)) + { + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { + *pAllocationRequest = tmpAllocRequest; + pAllocationRequest->item = suballocIt; + break; + } + if(!found || tmpAllocRequest.CalcCost() < pAllocationRequest->CalcCost()) + { + *pAllocationRequest = tmpAllocRequest; + pAllocationRequest->item = suballocIt; + found = true; + } + } + } + } + + return found; + } + + return false; +} + +bool VmaBlockMetadata_Generic::MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(pAllocationRequest && pAllocationRequest->type == VmaAllocationRequestType::Normal); + + while(pAllocationRequest->itemsToMakeLostCount > 0) + { + if(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++pAllocationRequest->item; + } + VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end()); + VMA_ASSERT(pAllocationRequest->item->hAllocation != VK_NULL_HANDLE); + VMA_ASSERT(pAllocationRequest->item->hAllocation->CanBecomeLost()); + if(pAllocationRequest->item->hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { + pAllocationRequest->item = FreeSuballocation(pAllocationRequest->item); + --pAllocationRequest->itemsToMakeLostCount; + } + else + { + return false; + } + } + + VMA_HEAVY_ASSERT(Validate()); + VMA_ASSERT(pAllocationRequest->item != m_Suballocations.end()); + VMA_ASSERT(pAllocationRequest->item->type == VMA_SUBALLOCATION_TYPE_FREE); + + return true; +} + +uint32_t VmaBlockMetadata_Generic::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ + uint32_t lostAllocationCount = 0; + for(VmaSuballocationList::iterator it = m_Suballocations.begin(); + it != m_Suballocations.end(); + ++it) + { + if(it->type != VMA_SUBALLOCATION_TYPE_FREE && + it->hAllocation->CanBecomeLost() && + it->hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { + it = FreeSuballocation(it); + ++lostAllocationCount; + } + } + return lostAllocationCount; +} + +VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData) +{ + for(VmaSuballocationList::iterator it = m_Suballocations.begin(); + it != m_Suballocations.end(); + ++it) + { + if(it->type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, it->offset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(!VmaValidateMagicValue(pBlockData, it->offset + it->size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Generic::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ + VMA_ASSERT(request.type == 
VmaAllocationRequestType::Normal); + VMA_ASSERT(request.item != m_Suballocations.end()); + VmaSuballocation& suballoc = *request.item; + // Given suballocation is a free block. + VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + // Given offset is inside this suballocation. + VMA_ASSERT(request.offset >= suballoc.offset); + const VkDeviceSize paddingBegin = request.offset - suballoc.offset; + VMA_ASSERT(suballoc.size >= paddingBegin + allocSize); + const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - allocSize; + + // Unregister this free suballocation from m_FreeSuballocationsBySize and update + // it to become used. + UnregisterFreeSuballocation(request.item); + + suballoc.offset = request.offset; + suballoc.size = allocSize; + suballoc.type = type; + suballoc.hAllocation = hAllocation; + + // If there are any free bytes remaining at the end, insert new free suballocation after current one. + if(paddingEnd) + { + VmaSuballocation paddingSuballoc = {}; + paddingSuballoc.offset = request.offset + allocSize; + paddingSuballoc.size = paddingEnd; + paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + VmaSuballocationList::iterator next = request.item; + ++next; + const VmaSuballocationList::iterator paddingEndItem = + m_Suballocations.insert(next, paddingSuballoc); + RegisterFreeSuballocation(paddingEndItem); + } + + // If there are any free bytes remaining at the beginning, insert new free suballocation before current one. + if(paddingBegin) + { + VmaSuballocation paddingSuballoc = {}; + paddingSuballoc.offset = request.offset - paddingBegin; + paddingSuballoc.size = paddingBegin; + paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + const VmaSuballocationList::iterator paddingBeginItem = + m_Suballocations.insert(request.item, paddingSuballoc); + RegisterFreeSuballocation(paddingBeginItem); + } + + // Update totals. 
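+    // Illustrative example (not from the original header): carving a 128-byte
+    // allocation at offset 64 out of a free suballocation [0, 256) yields
+    // paddingBegin = 64 and paddingEnd = 64, so the single free range is replaced
+    // by one used range plus two new free ranges; the net effect below is
+    // m_FreeCount - 1 + 2 and m_SumFreeSize - 128.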
+ m_FreeCount = m_FreeCount - 1; + if(paddingBegin > 0) + { + ++m_FreeCount; + } + if(paddingEnd > 0) + { + ++m_FreeCount; + } + m_SumFreeSize -= allocSize; +} + +void VmaBlockMetadata_Generic::Free(const VmaAllocation allocation) +{ + for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin(); + suballocItem != m_Suballocations.end(); + ++suballocItem) + { + VmaSuballocation& suballoc = *suballocItem; + if(suballoc.hAllocation == allocation) + { + FreeSuballocation(suballocItem); + VMA_HEAVY_ASSERT(Validate()); + return; + } + } + VMA_ASSERT(0 && "Not found!"); +} + +void VmaBlockMetadata_Generic::FreeAtOffset(VkDeviceSize offset) +{ + for(VmaSuballocationList::iterator suballocItem = m_Suballocations.begin(); + suballocItem != m_Suballocations.end(); + ++suballocItem) + { + VmaSuballocation& suballoc = *suballocItem; + if(suballoc.offset == offset) + { + FreeSuballocation(suballocItem); + return; + } + } + VMA_ASSERT(0 && "Not found!"); +} + +bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const +{ + VkDeviceSize lastSize = 0; + for(size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i) + { + const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i]; + + VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE); + VMA_VALIDATE(it->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER); + VMA_VALIDATE(it->size >= lastSize); + lastSize = it->size; + } + return true; +} + +bool VmaBlockMetadata_Generic::CheckAllocation( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + bool canMakeOtherLost, + VkDeviceSize* pOffset, + size_t* itemsToMakeLostCount, + VkDeviceSize* pSumFreeSize, + VkDeviceSize* pSumItemSize) const +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(suballocItem != m_Suballocations.cend()); + VMA_ASSERT(pOffset != VMA_NULL); + + *itemsToMakeLostCount = 0; + *pSumFreeSize = 0; + *pSumItemSize = 0; + + if(canMakeOtherLost) + { + if(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + *pSumFreeSize = suballocItem->size; + } + else + { + if(suballocItem->hAllocation->CanBecomeLost() && + suballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++*itemsToMakeLostCount; + *pSumItemSize = suballocItem->size; + } + else + { + return false; + } + } + + // Remaining size is too small for this request: Early return. + if(GetSize() - suballocItem->offset < allocSize) + { + return false; + } + + // Start from offset equal to beginning of this suballocation. + *pOffset = suballocItem->offset; + + // Apply VMA_DEBUG_MARGIN at the beginning. + if(VMA_DEBUG_MARGIN > 0) + { + *pOffset += VMA_DEBUG_MARGIN; + } + + // Apply alignment. + *pOffset = VmaAlignUp(*pOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. 
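+        // Illustrative example (not from the original header): with
+        // bufferImageGranularity = 4096, a linear buffer about to be placed at
+        // *pOffset = 4000 would share the page [0, 4096) with a preceding
+        // optimal-tiling image ending at 3900; VmaIsBufferImageGranularityConflict()
+        // then forces *pOffset up to the next page boundary, 4096.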
+ if(bufferImageGranularity > 1) + { + bool bufferImageGranularityConflict = false; + VmaSuballocationList::const_iterator prevSuballocItem = suballocItem; + while(prevSuballocItem != m_Suballocations.cbegin()) + { + --prevSuballocItem; + const VmaSuballocation& prevSuballoc = *prevSuballocItem; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity); + } + } + + // Now that we have final *pOffset, check if we are past suballocItem. + // If yes, return false - this function should be called for another suballocItem as starting point. + if(*pOffset >= suballocItem->offset + suballocItem->size) + { + return false; + } + + // Calculate padding at the beginning based on current offset. + const VkDeviceSize paddingBegin = *pOffset - suballocItem->offset; + + // Calculate required margin at the end. + const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN; + + const VkDeviceSize totalSize = paddingBegin + allocSize + requiredEndMargin; + // Another early return check. + if(suballocItem->offset + totalSize > GetSize()) + { + return false; + } + + // Advance lastSuballocItem until desired size is reached. + // Update itemsToMakeLostCount. + VmaSuballocationList::const_iterator lastSuballocItem = suballocItem; + if(totalSize > suballocItem->size) + { + VkDeviceSize remainingSize = totalSize - suballocItem->size; + while(remainingSize > 0) + { + ++lastSuballocItem; + if(lastSuballocItem == m_Suballocations.cend()) + { + return false; + } + if(lastSuballocItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + *pSumFreeSize += lastSuballocItem->size; + } + else + { + VMA_ASSERT(lastSuballocItem->hAllocation != VK_NULL_HANDLE); + if(lastSuballocItem->hAllocation->CanBecomeLost() && + lastSuballocItem->hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++*itemsToMakeLostCount; + *pSumItemSize += lastSuballocItem->size; + } + else + { + return false; + } + } + remainingSize = (lastSuballocItem->size < remainingSize) ? + remainingSize - lastSuballocItem->size : 0; + } + } + + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, we must mark more allocations lost or fail. + if(bufferImageGranularity > 1) + { + VmaSuballocationList::const_iterator nextSuballocItem = lastSuballocItem; + ++nextSuballocItem; + while(nextSuballocItem != m_Suballocations.cend()) + { + const VmaSuballocation& nextSuballoc = *nextSuballocItem; + if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + VMA_ASSERT(nextSuballoc.hAllocation != VK_NULL_HANDLE); + if(nextSuballoc.hAllocation->CanBecomeLost() && + nextSuballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++*itemsToMakeLostCount; + } + else + { + return false; + } + } + } + else + { + // Already on next page. + break; + } + ++nextSuballocItem; + } + } + } + else + { + const VmaSuballocation& suballoc = *suballocItem; + VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + *pSumFreeSize = suballoc.size; + + // Size of this suballocation is too small for this request: Early return. 
+ if(suballoc.size < allocSize) + { + return false; + } + + // Start from offset equal to beginning of this suballocation. + *pOffset = suballoc.offset; + + // Apply VMA_DEBUG_MARGIN at the beginning. + if(VMA_DEBUG_MARGIN > 0) + { + *pOffset += VMA_DEBUG_MARGIN; + } + + // Apply alignment. + *pOffset = VmaAlignUp(*pOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1) + { + bool bufferImageGranularityConflict = false; + VmaSuballocationList::const_iterator prevSuballocItem = suballocItem; + while(prevSuballocItem != m_Suballocations.cbegin()) + { + --prevSuballocItem; + const VmaSuballocation& prevSuballoc = *prevSuballocItem; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, *pOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + *pOffset = VmaAlignUp(*pOffset, bufferImageGranularity); + } + } + + // Calculate padding at the beginning based on current offset. + const VkDeviceSize paddingBegin = *pOffset - suballoc.offset; + + // Calculate required margin at the end. + const VkDeviceSize requiredEndMargin = VMA_DEBUG_MARGIN; + + // Fail if requested size plus margin before and after is bigger than size of this suballocation. + if(paddingBegin + allocSize + requiredEndMargin > suballoc.size) + { + return false; + } + + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if(bufferImageGranularity > 1) + { + VmaSuballocationList::const_iterator nextSuballocItem = suballocItem; + ++nextSuballocItem; + while(nextSuballocItem != m_Suballocations.cend()) + { + const VmaSuballocation& nextSuballoc = *nextSuballocItem; + if(VmaBlocksOnSamePage(*pOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + ++nextSuballocItem; + } + } + } + + // All tests passed: Success. pOffset is already filled. + return true; +} + +void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item) +{ + VMA_ASSERT(item != m_Suballocations.end()); + VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaSuballocationList::iterator nextItem = item; + ++nextItem; + VMA_ASSERT(nextItem != m_Suballocations.end()); + VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE); + + item->size += nextItem->size; + --m_FreeCount; + m_Suballocations.erase(nextItem); +} + +VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem) +{ + // Change this suballocation to be marked as free. + VmaSuballocation& suballoc = *suballocItem; + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + suballoc.hAllocation = VK_NULL_HANDLE; + + // Update totals. + ++m_FreeCount; + m_SumFreeSize += suballoc.size; + + // Merge with previous and/or next suballocation if it's also free. 
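+    // Coalescing keeps the list free of adjacent free ranges: freeing
+    // [32..64) between free [0..32) and free [64..128) collapses all three
+    // into one free suballocation [0..128). The merged neighbor is absorbed
+    // into the surviving item, whose size changes, so the survivor must be
+    // re-inserted into m_FreeSuballocationsBySize, which is sorted by size.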
+    bool mergeWithNext = false;
+    bool mergeWithPrev = false;
+
+    VmaSuballocationList::iterator nextItem = suballocItem;
+    ++nextItem;
+    if((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
+    {
+        mergeWithNext = true;
+    }
+
+    VmaSuballocationList::iterator prevItem = suballocItem;
+    if(suballocItem != m_Suballocations.begin())
+    {
+        --prevItem;
+        if(prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            mergeWithPrev = true;
+        }
+    }
+
+    if(mergeWithNext)
+    {
+        UnregisterFreeSuballocation(nextItem);
+        MergeFreeWithNext(suballocItem);
+    }
+
+    if(mergeWithPrev)
+    {
+        UnregisterFreeSuballocation(prevItem);
+        MergeFreeWithNext(prevItem);
+        RegisterFreeSuballocation(prevItem);
+        return prevItem;
+    }
+    else
+    {
+        RegisterFreeSuballocation(suballocItem);
+        return suballocItem;
+    }
+}
+
+void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
+{
+    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+    VMA_ASSERT(item->size > 0);
+
+    // You may want to enable this validation at the beginning or at the end of
+    // this function, depending on what you want to check.
+    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+
+    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+    {
+        if(m_FreeSuballocationsBySize.empty())
+        {
+            m_FreeSuballocationsBySize.push_back(item);
+        }
+        else
+        {
+            VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
+        }
+    }
+
+    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+}
+
+
+void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
+{
+    VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
+    VMA_ASSERT(item->size > 0);
+
+    // You may want to enable this validation at the beginning or at the end of
+    // this function, depending on what you want to check.
+    VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+
+    if(item->size >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+    {
+        VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
+            m_FreeSuballocationsBySize.data(),
+            m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
+            item,
+            VmaSuballocationItemSizeLess());
+        for(size_t index = it - m_FreeSuballocationsBySize.data();
+            index < m_FreeSuballocationsBySize.size();
+            ++index)
+        {
+            if(m_FreeSuballocationsBySize[index] == item)
+            {
+                VmaVectorRemove(m_FreeSuballocationsBySize, index);
+                return;
+            }
+            VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
+        }
+        VMA_ASSERT(0 && "Not found.");
+    }
+
+    //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
+}
+
+bool VmaBlockMetadata_Generic::IsBufferImageGranularityConflictPossible(
+    VkDeviceSize bufferImageGranularity,
+    VmaSuballocationType& inOutPrevSuballocType) const
+{
+    if(bufferImageGranularity == 1 || IsEmpty())
+    {
+        return false;
+    }
+
+    VkDeviceSize minAlignment = VK_WHOLE_SIZE;
+    bool typeConflictFound = false;
+    for(VmaSuballocationList::const_iterator it = m_Suballocations.cbegin();
+        it != m_Suballocations.cend();
+        ++it)
+    {
+        const VmaSuballocationType suballocType = it->type;
+        if(suballocType != VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            minAlignment = VMA_MIN(minAlignment, it->hAllocation->GetAlignment());
+            if(VmaIsBufferImageGranularityConflict(inOutPrevSuballocType, suballocType))
+            {
+                typeConflictFound = true;
+            }
+            inOutPrevSuballocType = suballocType;
+        }
+    }
+
+    return typeConflictFound || minAlignment >= bufferImageGranularity;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata_Linear
+
+VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(VmaAllocator hAllocator) :
+    VmaBlockMetadata(hAllocator),
+    m_SumFreeSize(0),
+    m_Suballocations0(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+    m_Suballocations1(VmaStlAllocator<VmaSuballocation>(hAllocator->GetAllocationCallbacks())),
+    m_1stVectorIndex(0),
+    m_2ndVectorMode(SECOND_VECTOR_EMPTY),
+    m_1stNullItemsBeginCount(0),
+    m_1stNullItemsMiddleCount(0),
+    m_2ndNullItemsCount(0)
+{
+}
+
+VmaBlockMetadata_Linear::~VmaBlockMetadata_Linear()
+{
+}
+
+void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
+{
+    VmaBlockMetadata::Init(size);
+    m_SumFreeSize = size;
+}
+
+bool VmaBlockMetadata_Linear::Validate() const
+{
+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
+    VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
+    VMA_VALIDATE(!suballocations1st.empty() ||
+        suballocations2nd.empty() ||
+        m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
+
+    if(!suballocations1st.empty())
+    {
+        // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
+        VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].hAllocation != VK_NULL_HANDLE);
+        // Null item at the end should be just pop_back().
+        VMA_VALIDATE(suballocations1st.back().hAllocation != VK_NULL_HANDLE);
+    }
+    if(!suballocations2nd.empty())
+    {
+        // Null item at the end should be just pop_back().
+ VMA_VALIDATE(suballocations2nd.back().hAllocation != VK_NULL_HANDLE); + } + + VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); + VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); + + VkDeviceSize sumUsedSize = 0; + const size_t suballoc1stCount = suballocations1st.size(); + VkDeviceSize offset = VMA_DEBUG_MARGIN; + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for(size_t i = 0; i < suballoc2ndCount; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); + VMA_VALIDATE(suballoc.offset >= offset); + + if(!currFree) + { + VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); + VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + for(size_t i = 0; i < m_1stNullItemsBeginCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation == VK_NULL_HANDLE); + } + + size_t nullItem1stCount = m_1stNullItemsBeginCount; + + for(size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); + VMA_VALIDATE(suballoc.offset >= offset); + VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); + + if(!currFree) + { + VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); + VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); + sumUsedSize += suballoc.size; + } + else + { + ++nullItem1stCount; + } + + offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN; + } + VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for(size_t i = suballoc2ndCount; i--; ) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VMA_VALIDATE(currFree == (suballoc.hAllocation == VK_NULL_HANDLE)); + VMA_VALIDATE(suballoc.offset >= offset); + + if(!currFree) + { + VMA_VALIDATE(suballoc.hAllocation->GetOffset() == suballoc.offset); + VMA_VALIDATE(suballoc.hAllocation->GetSize() == suballoc.size); + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + VMA_DEBUG_MARGIN; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + VMA_VALIDATE(offset <= GetSize()); + VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); + + return true; +} + +size_t VmaBlockMetadata_Linear::GetAllocationCount() const +{ + return AccessSuballocations1st().size() - (m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount) + + AccessSuballocations2nd().size() - m_2ndNullItemsCount; +} + +VkDeviceSize VmaBlockMetadata_Linear::GetUnusedRangeSizeMax() const +{ + const VkDeviceSize size = GetSize(); + + /* + We don't consider gaps inside allocation 
vectors with freed allocations because
+    they are not suitable for reuse in a linear allocator. We consider only space that
+    is available for new allocations.
+    */
+    if(IsEmpty())
+    {
+        return size;
+    }
+
+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+
+    switch(m_2ndVectorMode)
+    {
+    case SECOND_VECTOR_EMPTY:
+        /*
+        Available space is after end of 1st, as well as before beginning of 1st (which
+        would make it a ring buffer).
+        */
+        {
+            const size_t suballocations1stCount = suballocations1st.size();
+            VMA_ASSERT(suballocations1stCount > m_1stNullItemsBeginCount);
+            const VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
+            const VmaSuballocation& lastSuballoc = suballocations1st[suballocations1stCount - 1];
+            return VMA_MAX(
+                firstSuballoc.offset,
+                size - (lastSuballoc.offset + lastSuballoc.size));
+        }
+        break;
+
+    case SECOND_VECTOR_RING_BUFFER:
+        /*
+        Available space is only between end of 2nd and beginning of 1st.
+        */
+        {
+            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+            const VmaSuballocation& lastSuballoc2nd = suballocations2nd.back();
+            const VmaSuballocation& firstSuballoc1st = suballocations1st[m_1stNullItemsBeginCount];
+            return firstSuballoc1st.offset - (lastSuballoc2nd.offset + lastSuballoc2nd.size);
+        }
+        break;
+
+    case SECOND_VECTOR_DOUBLE_STACK:
+        /*
+        Available space is only between end of 1st and top of 2nd.
+        */
+        {
+            const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+            const VmaSuballocation& topSuballoc2nd = suballocations2nd.back();
+            const VmaSuballocation& lastSuballoc1st = suballocations1st.back();
+            return topSuballoc2nd.offset - (lastSuballoc1st.offset + lastSuballoc1st.size);
+        }
+        break;
+
+    default:
+        VMA_ASSERT(0);
+        return 0;
+    }
+}
+
+void VmaBlockMetadata_Linear::CalcAllocationStatInfo(VmaStatInfo& outInfo) const
+{
+    const VkDeviceSize size = GetSize();
+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+    const size_t suballoc1stCount = suballocations1st.size();
+    const size_t suballoc2ndCount = suballocations2nd.size();
+
+    outInfo.blockCount = 1;
+    outInfo.allocationCount = (uint32_t)GetAllocationCount();
+    outInfo.unusedRangeCount = 0;
+    outInfo.usedBytes = 0;
+    outInfo.allocationSizeMin = UINT64_MAX;
+    outInfo.allocationSizeMax = 0;
+    outInfo.unusedRangeSizeMin = UINT64_MAX;
+    outInfo.unusedRangeSizeMax = 0;
+
+    VkDeviceSize lastOffset = 0;
+
+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+    {
+        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+        size_t nextAlloc2ndIndex = 0;
+        while(lastOffset < freeSpace2ndTo1stEnd)
+        {
+            // Find next non-null allocation or move nextAllocIndex to the end.
+            while(nextAlloc2ndIndex < suballoc2ndCount &&
+                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+            {
+                ++nextAlloc2ndIndex;
+            }
+
+            // Found non-null allocation.
+            if(nextAlloc2ndIndex < suballoc2ndCount)
+            {
+                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+                // 1. Process free space before this allocation.
+                if(lastOffset < suballoc.offset)
+                {
+                    // There is free space from lastOffset to suballoc.offset.
+                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+                    ++outInfo.unusedRangeCount;
+                    outInfo.unusedBytes += unusedRangeSize;
+                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+                }
+
+                // 2. Process this allocation.
+                // There is allocation with suballoc.offset, suballoc.size.
+                outInfo.usedBytes += suballoc.size;
+                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
+                // 3. Prepare for next iteration.
+                lastOffset = suballoc.offset + suballoc.size;
+                ++nextAlloc2ndIndex;
+            }
+            // We are at the end.
+            else
+            {
+                // There is free space from lastOffset to freeSpace2ndTo1stEnd.
+                if(lastOffset < freeSpace2ndTo1stEnd)
+                {
+                    const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
+                    ++outInfo.unusedRangeCount;
+                    outInfo.unusedBytes += unusedRangeSize;
+                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+                }
+
+                // End of loop.
+                lastOffset = freeSpace2ndTo1stEnd;
+            }
+        }
+    }
+
+    size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
+    const VkDeviceSize freeSpace1stTo2ndEnd =
+        m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
+    while(lastOffset < freeSpace1stTo2ndEnd)
+    {
+        // Find next non-null allocation or move nextAllocIndex to the end.
+        while(nextAlloc1stIndex < suballoc1stCount &&
+            suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE)
+        {
+            ++nextAlloc1stIndex;
+        }
+
+        // Found non-null allocation.
+        if(nextAlloc1stIndex < suballoc1stCount)
+        {
+            const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
+
+            // 1. Process free space before this allocation.
+            if(lastOffset < suballoc.offset)
+            {
+                // There is free space from lastOffset to suballoc.offset.
+                const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+                ++outInfo.unusedRangeCount;
+                outInfo.unusedBytes += unusedRangeSize;
+                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+            }
+
+            // 2. Process this allocation.
+            // There is allocation with suballoc.offset, suballoc.size.
+            outInfo.usedBytes += suballoc.size;
+            outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+            outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
+            // 3. Prepare for next iteration.
+            lastOffset = suballoc.offset + suballoc.size;
+            ++nextAlloc1stIndex;
+        }
+        // We are at the end.
+        else
+        {
+            // There is free space from lastOffset to freeSpace1stTo2ndEnd.
+            if(lastOffset < freeSpace1stTo2ndEnd)
+            {
+                const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
+                ++outInfo.unusedRangeCount;
+                outInfo.unusedBytes += unusedRangeSize;
+                outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+            }
+
+            // End of loop.
+            lastOffset = freeSpace1stTo2ndEnd;
+        }
+    }
+
+    if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
+    {
+        size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
+        while(lastOffset < size)
+        {
+            // Find next non-null allocation or move nextAllocIndex to the end.
+            while(nextAlloc2ndIndex != SIZE_MAX &&
+                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+            {
+                --nextAlloc2ndIndex;
+            }
+
+            // Found non-null allocation.
+            if(nextAlloc2ndIndex != SIZE_MAX)
+            {
+                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+                // 1. Process free space before this allocation.
+                if(lastOffset < suballoc.offset)
+                {
+                    // There is free space from lastOffset to suballoc.offset.
+                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+                    ++outInfo.unusedRangeCount;
+                    outInfo.unusedBytes += unusedRangeSize;
+                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+                }
+
+                // 2. Process this allocation.
+                // There is allocation with suballoc.offset, suballoc.size.
+                outInfo.usedBytes += suballoc.size;
+                outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, suballoc.size);
+                outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, suballoc.size);
+
+                // 3. Prepare for next iteration.
+                lastOffset = suballoc.offset + suballoc.size;
+                --nextAlloc2ndIndex;
+            }
+            // We are at the end.
+            else
+            {
+                // There is free space from lastOffset to size.
+                if(lastOffset < size)
+                {
+                    const VkDeviceSize unusedRangeSize = size - lastOffset;
+                    ++outInfo.unusedRangeCount;
+                    outInfo.unusedBytes += unusedRangeSize;
+                    outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+                    outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+                }
+
+                // End of loop.
+                lastOffset = size;
+            }
+        }
+    }
+
+    outInfo.unusedBytes = size - outInfo.usedBytes;
+}
+
+void VmaBlockMetadata_Linear::AddPoolStats(VmaPoolStats& inoutStats) const
+{
+    const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+    const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+    const VkDeviceSize size = GetSize();
+    const size_t suballoc1stCount = suballocations1st.size();
+    const size_t suballoc2ndCount = suballocations2nd.size();
+
+    inoutStats.size += size;
+
+    VkDeviceSize lastOffset = 0;
+
+    if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+    {
+        const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
+        size_t nextAlloc2ndIndex = 0;
+        while(lastOffset < freeSpace2ndTo1stEnd)
+        {
+            // Find next non-null allocation or move nextAlloc2ndIndex to the end.
+            while(nextAlloc2ndIndex < suballoc2ndCount &&
+                suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE)
+            {
+                ++nextAlloc2ndIndex;
+            }
+
+            // Found non-null allocation.
+            if(nextAlloc2ndIndex < suballoc2ndCount)
+            {
+                const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
+
+                // 1. Process free space before this allocation.
+                if(lastOffset < suballoc.offset)
+                {
+                    // There is free space from lastOffset to suballoc.offset.
+                    const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
+                    inoutStats.unusedSize += unusedRangeSize;
+                    ++inoutStats.unusedRangeCount;
+                    inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize);
+                }
+
+                // 2. Process this allocation.
+                // There is allocation with suballoc.offset, suballoc.size.
+                ++inoutStats.allocationCount;
+
+                // 3. Prepare for next iteration.
+                lastOffset = suballoc.offset + suballoc.size;
+                ++nextAlloc2ndIndex;
+            }
+            // We are at the end.
+ else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while(lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while(lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to size. 
+ const VkDeviceSize unusedRangeSize = size - lastOffset; + inoutStats.unusedSize += unusedRangeSize; + ++inoutStats.unusedRangeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + // FIRST PASS + + size_t unusedRangeCount = 0; + VkDeviceSize usedBytes = 0; + + VkDeviceSize lastOffset = 0; + + size_t alloc2ndCount = 0; + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while(lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + size_t alloc1stCount = 0; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while(lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc1stCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + ++unusedRangeCount; + } + + // End of loop. 
+ lastOffset = freeSpace1stTo2ndEnd; + } + } + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while(lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to size. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = size; + } + } + } + + const VkDeviceSize unusedBytes = size - usedBytes; + PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); + + // SECOND PASS + lastOffset = 0; + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while(lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + nextAlloc1stIndex = m_1stNullItemsBeginCount; + while(lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while(nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].hAllocation == VK_NULL_HANDLE) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if(nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. 
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if(lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while(lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while(nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].hAllocation == VK_NULL_HANDLE) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if(nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if(lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.hAllocation); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if(lastOffset < size) + { + // There is free space from lastOffset to size. + const VkDeviceSize unusedRangeSize = size - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } + + PrintDetailedMap_End(json); +} +#endif // #if VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Linear::CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + return upperAddress ? 
+ CreateAllocationRequest_UpperAddress( + currentFrameIndex, frameInUseCount, bufferImageGranularity, + allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest) : + CreateAllocationRequest_LowerAddress( + currentFrameIndex, frameInUseCount, bufferImageGranularity, + allocSize, allocAlignment, allocType, canMakeOtherLost, strategy, pAllocationRequest); +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize size = GetSize(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); + return false; + } + + // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). + if(allocSize > size) + { + return false; + } + VkDeviceSize resultBaseOffset = size - allocSize; + if(!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset - allocSize; + if(allocSize > lastSuballoc.offset) + { + return false; + } + } + + // Start from offset equal to end of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply VMA_DEBUG_MARGIN at the end. + if(VMA_DEBUG_MARGIN > 0) + { + if(resultOffset < VMA_DEBUG_MARGIN) + { + return false; + } + resultOffset -= VMA_DEBUG_MARGIN; + } + + // Apply alignment. + resultOffset = VmaAlignDown(resultOffset, allocAlignment); + + // Check next suballocations from 2nd for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1 && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); + } + } + + // There is enough free space. + const VkDeviceSize endOf1st = !suballocations1st.empty() ? + suballocations1st.back().offset + suballocations1st.back().size : + 0; + if(endOf1st + VMA_DEBUG_MARGIN <= resultOffset) + { + // Check previous suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if(bufferImageGranularity > 1) + { + for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
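+        // Record the request. sumFreeSize is the whole gap this allocation is
+        // carved from: resultBaseOffset + allocSize is the bottom of the
+        // existing upper stack (or the block size if the 2nd vector is empty),
+        // and endOf1st is where the lower stack ends.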
+ pAllocationRequest->offset = resultOffset; + pAllocationRequest->sumFreeSize = resultBaseOffset + allocSize - endOf1st; + pAllocationRequest->sumItemSize = 0; + // pAllocationRequest->item unused. + pAllocationRequest->itemsToMakeLostCount = 0; + pAllocationRequest->type = VmaAllocationRequestType::UpperAddress; + return true; + } + + return false; +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize size = GetSize(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + // Try to allocate at the end of 1st vector. + + VkDeviceSize resultBaseOffset = 0; + if(!suballocations1st.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations1st.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply VMA_DEBUG_MARGIN at the beginning. + if(VMA_DEBUG_MARGIN > 0) + { + resultOffset += VMA_DEBUG_MARGIN; + } + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1 && !suballocations1st.empty()) + { + bool bufferImageGranularityConflict = false; + for(size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? + suballocations2nd.back().offset : size; + + // There is enough free space at the end after alignment. + if(resultOffset + allocSize + VMA_DEBUG_MARGIN <= freeSpaceEnd) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if(bufferImageGranularity > 1 && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + for(size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on previous page. + break; + } + } + } + + // All tests passed: Success. + pAllocationRequest->offset = resultOffset; + pAllocationRequest->sumFreeSize = freeSpaceEnd - resultBaseOffset; + pAllocationRequest->sumItemSize = 0; + // pAllocationRequest->item, customData unused. 
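+            // The linear allocator distinguishes three placements: EndOf1st
+            // (append to the lower stack, as here), EndOf2nd (wrap around
+            // before the start of 1st, turning the block into a ring buffer),
+            // and UpperAddress (push onto the upper stack of a double stack).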
+ pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; + pAllocationRequest->itemsToMakeLostCount = 0; + return true; + } + } + + // Wrap-around to end of 2nd vector. Try to allocate there, watching for the + // beginning of 1st vector as the end of free space. + if(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(!suballocations1st.empty()); + + VkDeviceSize resultBaseOffset = 0; + if(!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply VMA_DEBUG_MARGIN at the beginning. + if(VMA_DEBUG_MARGIN > 0) + { + resultOffset += VMA_DEBUG_MARGIN; + } + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if(bufferImageGranularity > 1 && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for(size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; + if(VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if(VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if(bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + pAllocationRequest->itemsToMakeLostCount = 0; + pAllocationRequest->sumItemSize = 0; + size_t index1st = m_1stNullItemsBeginCount; + + if(canMakeOtherLost) + { + while(index1st < suballocations1st.size() && + resultOffset + allocSize + VMA_DEBUG_MARGIN > suballocations1st[index1st].offset) + { + // Next colliding allocation at the beginning of 1st vector found. Try to make it lost. + const VmaSuballocation& suballoc = suballocations1st[index1st]; + if(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE) + { + // No problem. + } + else + { + VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE); + if(suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++pAllocationRequest->itemsToMakeLostCount; + pAllocationRequest->sumItemSize += suballoc.size; + } + else + { + return false; + } + } + ++index1st; + } + + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, we must mark more allocations lost or fail. + if(bufferImageGranularity > 1) + { + while(index1st < suballocations1st.size()) + { + const VmaSuballocation& suballoc = suballocations1st[index1st]; + if(VmaBlocksOnSamePage(resultOffset, allocSize, suballoc.offset, bufferImageGranularity)) + { + if(suballoc.hAllocation != VK_NULL_HANDLE) + { + // Not checking actual VmaIsBufferImageGranularityConflict(allocType, suballoc.type). + if(suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->GetLastUseFrameIndex() + frameInUseCount < currentFrameIndex) + { + ++pAllocationRequest->itemsToMakeLostCount; + pAllocationRequest->sumItemSize += suballoc.size; + } + else + { + return false; + } + } + } + else + { + // Already on next page. 
+                        break;
+                    }
+                    ++index1st;
+                }
+            }
+
+            // Special case: There is not enough room at the end for this allocation, even after making all from the 1st lost.
+            if(index1st == suballocations1st.size() &&
+                resultOffset + allocSize + VMA_DEBUG_MARGIN > size)
+            {
+                // TODO: Known limitation: this case is not implemented yet, so the allocation fails.
+                VMA_DEBUG_LOG("Unsupported special case in custom pool with linear allocation algorithm used as ring buffer with allocations that can be lost.");
+            }
+        }
+
+        // There is enough free space at the end after alignment.
+        if((index1st == suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= size) ||
+            (index1st < suballocations1st.size() && resultOffset + allocSize + VMA_DEBUG_MARGIN <= suballocations1st[index1st].offset))
+        {
+            // Check next suballocations for BufferImageGranularity conflicts.
+            // If conflict exists, allocation cannot be made here.
+            if(bufferImageGranularity > 1)
+            {
+                for(size_t nextSuballocIndex = index1st;
+                    nextSuballocIndex < suballocations1st.size();
+                    nextSuballocIndex++)
+                {
+                    const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
+                    if(VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
+                    {
+                        if(VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
+                        {
+                            return false;
+                        }
+                    }
+                    else
+                    {
+                        // Already on next page.
+                        break;
+                    }
+                }
+            }
+
+            // All tests passed: Success.
+            pAllocationRequest->offset = resultOffset;
+            pAllocationRequest->sumFreeSize =
+                (index1st < suballocations1st.size() ? suballocations1st[index1st].offset : size)
+                - resultBaseOffset
+                - pAllocationRequest->sumItemSize;
+            pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
+            // pAllocationRequest->item, customData unused.
+            return true;
+        }
+    }
+
+    return false;
+}
+
+bool VmaBlockMetadata_Linear::MakeRequestedAllocationsLost(
+    uint32_t currentFrameIndex,
+    uint32_t frameInUseCount,
+    VmaAllocationRequest* pAllocationRequest)
+{
+    if(pAllocationRequest->itemsToMakeLostCount == 0)
+    {
+        return true;
+    }
+
+    VMA_ASSERT(m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER);
+
+    // We always start from 1st.
+    SuballocationVectorType* suballocations = &AccessSuballocations1st();
+    size_t index = m_1stNullItemsBeginCount;
+    size_t madeLostCount = 0;
+    while(madeLostCount < pAllocationRequest->itemsToMakeLostCount)
+    {
+        if(index == suballocations->size())
+        {
+            index = 0;
+            // If we get to the end of 1st, we wrap around to the beginning of 2nd
+            // (when used as a ring buffer) or back to the beginning of 1st.
+            if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+            {
+                suballocations = &AccessSuballocations2nd();
+            }
+            // else: m_2ndVectorMode == SECOND_VECTOR_EMPTY:
+            // suballocations continues pointing at AccessSuballocations1st().
+            VMA_ASSERT(!suballocations->empty());
+        }
+        VmaSuballocation& suballoc = (*suballocations)[index];
+        if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            VMA_ASSERT(suballoc.hAllocation != VK_NULL_HANDLE);
+            VMA_ASSERT(suballoc.hAllocation->CanBecomeLost());
+            if(suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount))
+            {
+                suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
+                suballoc.hAllocation = VK_NULL_HANDLE;
+                m_SumFreeSize += suballoc.size;
+                if(suballocations == &AccessSuballocations1st())
+                {
+                    ++m_1stNullItemsMiddleCount;
+                }
+                else
+                {
+                    ++m_2ndNullItemsCount;
+                }
+                ++madeLostCount;
+            }
+            else
+            {
+                return false;
+            }
+        }
+        ++index;
+    }
+
+    CleanupAfterFree();
+    //VMA_HEAVY_ASSERT(Validate()); // Already called by CleanupAfterFree().
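+    // "Lost" allocations are how a full block can still serve new requests:
+    // an allocation created as able to become lost, and not used for more
+    // than frameInUseCount frames relative to currentFrameIndex, is
+    // invalidated above and its suballocation turned into a free item that
+    // the pending request can occupy.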
+ + return true; +} + +uint32_t VmaBlockMetadata_Linear::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ + uint32_t lostAllocationCount = 0; + + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + VmaSuballocation& suballoc = suballocations1st[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + suballoc.hAllocation = VK_NULL_HANDLE; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += suballoc.size; + ++lostAllocationCount; + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + VmaSuballocation& suballoc = suballocations2nd[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE && + suballoc.hAllocation->CanBecomeLost() && + suballoc.hAllocation->MakeLost(currentFrameIndex, frameInUseCount)) + { + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + suballoc.hAllocation = VK_NULL_HANDLE; + ++m_2ndNullItemsCount; + m_SumFreeSize += suballoc.size; + ++lostAllocationCount; + } + } + + if(lostAllocationCount) + { + CleanupAfterFree(); + } + + return lostAllocationCount; +} + +VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for(size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for(size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + if(suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if(!VmaValidateMagicValue(pBlockData, suballoc.offset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if(!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Linear::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ + const VmaSuballocation newSuballoc = { request.offset, allocSize, hAllocation, type }; + + switch(request.type) + { + case VmaAllocationRequestType::UpperAddress: + { + VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && + "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + suballocations2nd.push_back(newSuballoc); + m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; + } 
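+        // Once an allocation lands in the 2nd vector via UpperAddress, the
+        // block is latched into SECOND_VECTOR_DOUBLE_STACK mode and cannot be
+        // used as a ring buffer until it becomes completely empty again.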
+ break; + case VmaAllocationRequestType::EndOf1st: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + + VMA_ASSERT(suballocations1st.empty() || + request.offset >= suballocations1st.back().offset + suballocations1st.back().size); + // Check if it fits before the end of the block. + VMA_ASSERT(request.offset + allocSize <= GetSize()); + + suballocations1st.push_back(newSuballoc); + } + break; + case VmaAllocationRequestType::EndOf2nd: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. + VMA_ASSERT(!suballocations1st.empty() && + request.offset + allocSize <= suballocations1st[m_1stNullItemsBeginCount].offset); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + switch(m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + // First allocation from second part ring buffer. + VMA_ASSERT(suballocations2nd.empty()); + m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; + break; + case SECOND_VECTOR_RING_BUFFER: + // 2-part ring buffer is already started. + VMA_ASSERT(!suballocations2nd.empty()); + break; + case SECOND_VECTOR_DOUBLE_STACK: + VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); + break; + default: + VMA_ASSERT(0); + } + + suballocations2nd.push_back(newSuballoc); + } + break; + default: + VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); + } + + m_SumFreeSize -= newSuballoc.size; +} + +void VmaBlockMetadata_Linear::Free(const VmaAllocation allocation) +{ + FreeAtOffset(allocation->GetOffset()); +} + +void VmaBlockMetadata_Linear::FreeAtOffset(VkDeviceSize offset) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(!suballocations1st.empty()) + { + // First allocation: Mark it as next empty at the beginning. + VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + if(firstSuballoc.offset == offset) + { + firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + firstSuballoc.hAllocation = VK_NULL_HANDLE; + m_SumFreeSize += firstSuballoc.size; + ++m_1stNullItemsBeginCount; + CleanupAfterFree(); + return; + } + } + + // Last allocation in 2-part ring buffer or top of upper stack (same logic). + if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + VmaSuballocation& lastSuballoc = suballocations2nd.back(); + if(lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations2nd.pop_back(); + CleanupAfterFree(); + return; + } + } + // Last allocation in 1st vector. + else if(m_2ndVectorMode == SECOND_VECTOR_EMPTY) + { + VmaSuballocation& lastSuballoc = suballocations1st.back(); + if(lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations1st.pop_back(); + CleanupAfterFree(); + return; + } + } + + // Item from the middle of 1st vector. + { + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. 
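+        // The used part of the 1st vector is sorted by offset (allocations
+        // are only appended), so a binary search with
+        // VmaSuballocationOffsetLess finds the item in O(log n). It is marked
+        // free in place rather than erased; CleanupAfterFree() compacts such
+        // holes lazily.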
+ SuballocationVectorType::iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if(it != suballocations1st.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->hAllocation = VK_NULL_HANDLE; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + if(m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { + // Item from the middle of 2nd vector. + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if(it != suballocations2nd.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->hAllocation = VK_NULL_HANDLE; + ++m_2ndNullItemsCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); +} + +bool VmaBlockMetadata_Linear::ShouldCompact1st() const +{ + const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; + const size_t suballocCount = AccessSuballocations1st().size(); + return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3; +} + +void VmaBlockMetadata_Linear::CleanupAfterFree() +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if(IsEmpty()) + { + suballocations1st.clear(); + suballocations2nd.clear(); + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + m_2ndNullItemsCount = 0; + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + } + else + { + const size_t suballoc1stCount = suballocations1st.size(); + const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount; + VMA_ASSERT(nullItem1stCount <= suballoc1stCount); + + // Find more null items at the beginning of 1st vector. + while(m_1stNullItemsBeginCount < suballoc1stCount && + suballocations1st[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE) + { + ++m_1stNullItemsBeginCount; + --m_1stNullItemsMiddleCount; + } + + // Find more null items at the end of 1st vector. + while(m_1stNullItemsMiddleCount > 0 && + suballocations1st.back().hAllocation == VK_NULL_HANDLE) + { + --m_1stNullItemsMiddleCount; + suballocations1st.pop_back(); + } + + // Find more null items at the end of 2nd vector. + while(m_2ndNullItemsCount > 0 && + suballocations2nd.back().hAllocation == VK_NULL_HANDLE) + { + --m_2ndNullItemsCount; + suballocations2nd.pop_back(); + } + + // Find more null items at the beginning of 2nd vector. 
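+ // (Unlike the 1st vector, the 2nd vector keeps no "begin null count", so
+ // leading null items are erased immediately; VmaVectorRemove shifts all
+ // remaining elements, at O(n) cost per removal.)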
+ while(m_2ndNullItemsCount > 0 &&
+ suballocations2nd[0].hAllocation == VK_NULL_HANDLE)
+ {
+ --m_2ndNullItemsCount;
+ VmaVectorRemove(suballocations2nd, 0);
+ }
+
+ if(ShouldCompact1st())
+ {
+ const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
+ size_t srcIndex = m_1stNullItemsBeginCount;
+ for(size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
+ {
+ while(suballocations1st[srcIndex].hAllocation == VK_NULL_HANDLE)
+ {
+ ++srcIndex;
+ }
+ if(dstIndex != srcIndex)
+ {
+ suballocations1st[dstIndex] = suballocations1st[srcIndex];
+ }
+ ++srcIndex;
+ }
+ suballocations1st.resize(nonNullItemCount);
+ m_1stNullItemsBeginCount = 0;
+ m_1stNullItemsMiddleCount = 0;
+ }
+
+ // 2nd vector became empty.
+ if(suballocations2nd.empty())
+ {
+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+ }
+
+ // 1st vector became empty.
+ if(suballocations1st.size() - m_1stNullItemsBeginCount == 0)
+ {
+ suballocations1st.clear();
+ m_1stNullItemsBeginCount = 0;
+
+ if(!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
+ {
+ // Swap 1st with 2nd. Now 2nd is empty.
+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+ m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
+ while(m_1stNullItemsBeginCount < suballocations2nd.size() &&
+ suballocations2nd[m_1stNullItemsBeginCount].hAllocation == VK_NULL_HANDLE)
+ {
+ ++m_1stNullItemsBeginCount;
+ --m_1stNullItemsMiddleCount;
+ }
+ m_2ndNullItemsCount = 0;
+ m_1stVectorIndex ^= 1;
+ }
+ }
+ }
+
+ VMA_HEAVY_ASSERT(Validate());
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+// class VmaBlockMetadata_Buddy
+
+VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(VmaAllocator hAllocator) :
+ VmaBlockMetadata(hAllocator),
+ m_Root(VMA_NULL),
+ m_AllocationCount(0),
+ m_FreeCount(1),
+ m_SumFreeSize(0)
+{
+ memset(m_FreeList, 0, sizeof(m_FreeList));
+}
+
+VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
+{
+ DeleteNode(m_Root);
+}
+
+void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
+{
+ VmaBlockMetadata::Init(size);
+
+ m_UsableSize = VmaPrevPow2(size);
+ m_SumFreeSize = m_UsableSize;
+
+ // Calculate m_LevelCount.
+ m_LevelCount = 1;
+ while(m_LevelCount < MAX_LEVELS &&
+ LevelToNodeSize(m_LevelCount) >= MIN_NODE_SIZE)
+ {
+ ++m_LevelCount;
+ }
+
+ Node* rootNode = vma_new(GetAllocationCallbacks(), Node)();
+ rootNode->offset = 0;
+ rootNode->type = Node::TYPE_FREE;
+ rootNode->parent = VMA_NULL;
+ rootNode->buddy = VMA_NULL;
+
+ m_Root = rootNode;
+ AddToFreeListFront(0, rootNode);
+}
+
+bool VmaBlockMetadata_Buddy::Validate() const
+{
+ // Validate tree.
+ ValidationContext ctx;
+ if(!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
+ {
+ VMA_VALIDATE(false && "ValidateNode failed.");
+ }
+ VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
+ VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
+
+ // Validate free node lists.
+ for(uint32_t level = 0; level < m_LevelCount; ++level)
+ {
+ VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
+ m_FreeList[level].front->free.prev == VMA_NULL);
+
+ for(Node* node = m_FreeList[level].front;
+ node != VMA_NULL;
+ node = node->free.next)
+ {
+ VMA_VALIDATE(node->type == Node::TYPE_FREE);
+
+ if(node->free.next == VMA_NULL)
+ {
+ VMA_VALIDATE(m_FreeList[level].back == node);
+ }
+ else
+ {
+ VMA_VALIDATE(node->free.next->free.prev == node);
+ }
+ }
+ }
+
+ // Validate that free lists at higher levels are empty.
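+ // (Levels in [m_LevelCount, MAX_LEVELS) are unused for this block size, so a
+ // node linked there would indicate free-list corruption.)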
+ for(uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level) + { + VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL); + } + + return true; +} + +VkDeviceSize VmaBlockMetadata_Buddy::GetUnusedRangeSizeMax() const +{ + for(uint32_t level = 0; level < m_LevelCount; ++level) + { + if(m_FreeList[level].front != VMA_NULL) + { + return LevelToNodeSize(level); + } + } + return 0; +} + +void VmaBlockMetadata_Buddy::CalcAllocationStatInfo(VmaStatInfo& outInfo) const +{ + const VkDeviceSize unusableSize = GetUnusableSize(); + + outInfo.blockCount = 1; + + outInfo.allocationCount = outInfo.unusedRangeCount = 0; + outInfo.usedBytes = outInfo.unusedBytes = 0; + + outInfo.allocationSizeMax = outInfo.unusedRangeSizeMax = 0; + outInfo.allocationSizeMin = outInfo.unusedRangeSizeMin = UINT64_MAX; + outInfo.allocationSizeAvg = outInfo.unusedRangeSizeAvg = 0; // Unused. + + CalcAllocationStatInfoNode(outInfo, m_Root, LevelToNodeSize(0)); + + if(unusableSize > 0) + { + ++outInfo.unusedRangeCount; + outInfo.unusedBytes += unusableSize; + outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusableSize); + outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusableSize); + } +} + +void VmaBlockMetadata_Buddy::AddPoolStats(VmaPoolStats& inoutStats) const +{ + const VkDeviceSize unusableSize = GetUnusableSize(); + + inoutStats.size += GetSize(); + inoutStats.unusedSize += m_SumFreeSize + unusableSize; + inoutStats.allocationCount += m_AllocationCount; + inoutStats.unusedRangeCount += m_FreeCount; + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, GetUnusedRangeSizeMax()); + + if(unusableSize > 0) + { + ++inoutStats.unusedRangeCount; + // Not updating inoutStats.unusedRangeSizeMax with unusableSize because this space is not available for allocations. + } +} + +#if VMA_STATS_STRING_ENABLED + +void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json) const +{ + // TODO optimize + VmaStatInfo stat; + CalcAllocationStatInfo(stat); + + PrintDetailedMap_Begin( + json, + stat.unusedBytes, + stat.allocationCount, + stat.unusedRangeCount); + + PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0)); + + const VkDeviceSize unusableSize = GetUnusableSize(); + if(unusableSize > 0) + { + PrintDetailedMap_UnusedRange(json, + m_UsableSize, // offset + unusableSize); // size + } + + PrintDetailedMap_End(json); +} + +#endif // #if VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Buddy::CreateAllocationRequest( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VkDeviceSize bufferImageGranularity, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + bool canMakeOtherLost, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm."); + + // Simple way to respect bufferImageGranularity. May be optimized some day. + // Whenever it might be an OPTIMAL image... 
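+ // Example: with bufferImageGranularity = 4096, a 1000-byte request that might
+ // be an OPTIMAL image is padded below to alignment 4096 and size 4096, which
+ // satisfies the granularity rule without tracking neighboring suballocations.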
+ if(allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL) + { + allocAlignment = VMA_MAX(allocAlignment, bufferImageGranularity); + allocSize = VMA_MAX(allocSize, bufferImageGranularity); + } + + if(allocSize > m_UsableSize) + { + return false; + } + + const uint32_t targetLevel = AllocSizeToLevel(allocSize); + for(uint32_t level = targetLevel + 1; level--; ) + { + for(Node* freeNode = m_FreeList[level].front; + freeNode != VMA_NULL; + freeNode = freeNode->free.next) + { + if(freeNode->offset % allocAlignment == 0) + { + pAllocationRequest->type = VmaAllocationRequestType::Normal; + pAllocationRequest->offset = freeNode->offset; + pAllocationRequest->sumFreeSize = LevelToNodeSize(level); + pAllocationRequest->sumItemSize = 0; + pAllocationRequest->itemsToMakeLostCount = 0; + pAllocationRequest->customData = (void*)(uintptr_t)level; + return true; + } + } + } + + return false; +} + +bool VmaBlockMetadata_Buddy::MakeRequestedAllocationsLost( + uint32_t currentFrameIndex, + uint32_t frameInUseCount, + VmaAllocationRequest* pAllocationRequest) +{ + /* + Lost allocations are not supported in buddy allocator at the moment. + Support might be added in the future. + */ + return pAllocationRequest->itemsToMakeLostCount == 0; +} + +uint32_t VmaBlockMetadata_Buddy::MakeAllocationsLost(uint32_t currentFrameIndex, uint32_t frameInUseCount) +{ + /* + Lost allocations are not supported in buddy allocator at the moment. + Support might be added in the future. + */ + return 0; +} + +void VmaBlockMetadata_Buddy::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + VkDeviceSize allocSize, + VmaAllocation hAllocation) +{ + VMA_ASSERT(request.type == VmaAllocationRequestType::Normal); + + const uint32_t targetLevel = AllocSizeToLevel(allocSize); + uint32_t currLevel = (uint32_t)(uintptr_t)request.customData; + + Node* currNode = m_FreeList[currLevel].front; + VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); + while(currNode->offset != request.offset) + { + currNode = currNode->free.next; + VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE); + } + + // Go down, splitting free nodes. + while(currLevel < targetLevel) + { + // currNode is already first free node at currLevel. + // Remove it from list of free nodes at this currLevel. + RemoveFromFreeList(currLevel, currNode); + + const uint32_t childrenLevel = currLevel + 1; + + // Create two free sub-nodes. + Node* leftChild = vma_new(GetAllocationCallbacks(), Node)(); + Node* rightChild = vma_new(GetAllocationCallbacks(), Node)(); + + leftChild->offset = currNode->offset; + leftChild->type = Node::TYPE_FREE; + leftChild->parent = currNode; + leftChild->buddy = rightChild; + + rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel); + rightChild->type = Node::TYPE_FREE; + rightChild->parent = currNode; + rightChild->buddy = leftChild; + + // Convert current currNode to split type. + currNode->type = Node::TYPE_SPLIT; + currNode->split.leftChild = leftChild; + + // Add child nodes to free list. Order is important! + AddToFreeListFront(childrenLevel, rightChild); + AddToFreeListFront(childrenLevel, leftChild); + + ++m_FreeCount; + //m_SumFreeSize -= LevelToNodeSize(currLevel) % 2; // Useful only when level node sizes can be non power of 2. 
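+ // Example: allocating 64 B from a fully free 256 B block with targetLevel == 2
+ // splits the 256 B root into two 128 B buddies, then the left 128 B node into
+ // two 64 B buddies. Each iteration descends into the left child, which
+ // AddToFreeListFront above has just placed at m_FreeList[currLevel].front.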
+ ++currLevel;
+ currNode = m_FreeList[currLevel].front;
+
+ /*
+ We can be sure that currNode, as the left child of the node just split,
+ also fulfills the alignment requirement.
+ */
+ }
+
+ // Remove from free list.
+ VMA_ASSERT(currLevel == targetLevel &&
+ currNode != VMA_NULL &&
+ currNode->type == Node::TYPE_FREE);
+ RemoveFromFreeList(currLevel, currNode);
+
+ // Convert to allocation node.
+ currNode->type = Node::TYPE_ALLOCATION;
+ currNode->allocation.alloc = hAllocation;
+
+ ++m_AllocationCount;
+ --m_FreeCount;
+ m_SumFreeSize -= allocSize;
+}
+
+void VmaBlockMetadata_Buddy::DeleteNode(Node* node)
+{
+ if(node->type == Node::TYPE_SPLIT)
+ {
+ DeleteNode(node->split.leftChild->buddy);
+ DeleteNode(node->split.leftChild);
+ }
+
+ vma_delete(GetAllocationCallbacks(), node);
+}
+
+bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
+{
+ VMA_VALIDATE(level < m_LevelCount);
+ VMA_VALIDATE(curr->parent == parent);
+ VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
+ VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
+ switch(curr->type)
+ {
+ case Node::TYPE_FREE:
+ // curr->free.prev, next are validated separately.
+ ctx.calculatedSumFreeSize += levelNodeSize;
+ ++ctx.calculatedFreeCount;
+ break;
+ case Node::TYPE_ALLOCATION:
+ ++ctx.calculatedAllocationCount;
+ ctx.calculatedSumFreeSize += levelNodeSize - curr->allocation.alloc->GetSize();
+ VMA_VALIDATE(curr->allocation.alloc != VK_NULL_HANDLE);
+ break;
+ case Node::TYPE_SPLIT:
+ {
+ const uint32_t childrenLevel = level + 1;
+ const VkDeviceSize childrenLevelNodeSize = levelNodeSize / 2;
+ const Node* const leftChild = curr->split.leftChild;
+ VMA_VALIDATE(leftChild != VMA_NULL);
+ VMA_VALIDATE(leftChild->offset == curr->offset);
+ if(!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
+ {
+ VMA_VALIDATE(false && "ValidateNode for left child failed.");
+ }
+ const Node* const rightChild = leftChild->buddy;
+ VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
+ if(!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
+ {
+ VMA_VALIDATE(false && "ValidateNode for right child failed.");
+ }
+ }
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
+{
+ // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
+ uint32_t level = 0;
+ VkDeviceSize currLevelNodeSize = m_UsableSize;
+ VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
+ while(allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
+ {
+ ++level;
+ currLevelNodeSize = nextLevelNodeSize;
+ nextLevelNodeSize = currLevelNodeSize >> 1;
+ }
+ return level;
+}
+
+void VmaBlockMetadata_Buddy::FreeAtOffset(VmaAllocation alloc, VkDeviceSize offset)
+{
+ // Find node and level.
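+ // The tree is walked down from the root: at each TYPE_SPLIT node the offset
+ // selects the left or right buddy and levelNodeSize is halved, until the
+ // TYPE_ALLOCATION node that starts at the requested offset is reached.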
+ Node* node = m_Root;
+ VkDeviceSize nodeOffset = 0;
+ uint32_t level = 0;
+ VkDeviceSize levelNodeSize = LevelToNodeSize(0);
+ while(node->type == Node::TYPE_SPLIT)
+ {
+ const VkDeviceSize nextLevelSize = levelNodeSize >> 1;
+ if(offset < nodeOffset + nextLevelSize)
+ {
+ node = node->split.leftChild;
+ }
+ else
+ {
+ node = node->split.leftChild->buddy;
+ nodeOffset += nextLevelSize;
+ }
+ ++level;
+ levelNodeSize = nextLevelSize;
+ }
+
+ VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
+ VMA_ASSERT(alloc == VK_NULL_HANDLE || node->allocation.alloc == alloc);
+
+ ++m_FreeCount;
+ --m_AllocationCount;
+ m_SumFreeSize += alloc->GetSize();
+
+ node->type = Node::TYPE_FREE;
+
+ // Join free nodes if possible.
+ while(level > 0 && node->buddy->type == Node::TYPE_FREE)
+ {
+ RemoveFromFreeList(level, node->buddy);
+ Node* const parent = node->parent;
+
+ vma_delete(GetAllocationCallbacks(), node->buddy);
+ vma_delete(GetAllocationCallbacks(), node);
+ parent->type = Node::TYPE_FREE;
+
+ node = parent;
+ --level;
+ //m_SumFreeSize += LevelToNodeSize(level) % 2; // Useful only when level node sizes can be non power of 2.
+ --m_FreeCount;
+ }
+
+ AddToFreeListFront(level, node);
+}
+
+void VmaBlockMetadata_Buddy::CalcAllocationStatInfoNode(VmaStatInfo& outInfo, const Node* node, VkDeviceSize levelNodeSize) const
+{
+ switch(node->type)
+ {
+ case Node::TYPE_FREE:
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += levelNodeSize;
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, levelNodeSize);
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, levelNodeSize);
+ break;
+ case Node::TYPE_ALLOCATION:
+ {
+ const VkDeviceSize allocSize = node->allocation.alloc->GetSize();
+ ++outInfo.allocationCount;
+ outInfo.usedBytes += allocSize;
+ outInfo.allocationSizeMax = VMA_MAX(outInfo.allocationSizeMax, allocSize);
+ outInfo.allocationSizeMin = VMA_MIN(outInfo.allocationSizeMin, allocSize);
+
+ const VkDeviceSize unusedRangeSize = levelNodeSize - allocSize;
+ if(unusedRangeSize > 0)
+ {
+ ++outInfo.unusedRangeCount;
+ outInfo.unusedBytes += unusedRangeSize;
+ outInfo.unusedRangeSizeMax = VMA_MAX(outInfo.unusedRangeSizeMax, unusedRangeSize);
+ outInfo.unusedRangeSizeMin = VMA_MIN(outInfo.unusedRangeSizeMin, unusedRangeSize);
+ }
+ }
+ break;
+ case Node::TYPE_SPLIT:
+ {
+ const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
+ const Node* const leftChild = node->split.leftChild;
+ CalcAllocationStatInfoNode(outInfo, leftChild, childrenNodeSize);
+ const Node* const rightChild = leftChild->buddy;
+ CalcAllocationStatInfoNode(outInfo, rightChild, childrenNodeSize);
+ }
+ break;
+ default:
+ VMA_ASSERT(0);
+ }
+}
+
+void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
+{
+ VMA_ASSERT(node->type == Node::TYPE_FREE);
+
+ // List is empty.
+ Node* const frontNode = m_FreeList[level].front;
+ if(frontNode == VMA_NULL)
+ {
+ VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
+ node->free.prev = node->free.next = VMA_NULL;
+ m_FreeList[level].front = m_FreeList[level].back = node;
+ }
+ else
+ {
+ VMA_ASSERT(frontNode->free.prev == VMA_NULL);
+ node->free.prev = VMA_NULL;
+ node->free.next = frontNode;
+ frontNode->free.prev = node;
+ m_FreeList[level].front = node;
+ }
+}
+
+void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
+{
+ VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
+
+ // It is at the front.
+ if(node->free.prev == VMA_NULL) + { + VMA_ASSERT(m_FreeList[level].front == node); + m_FreeList[level].front = node->free.next; + } + else + { + Node* const prevFreeNode = node->free.prev; + VMA_ASSERT(prevFreeNode->free.next == node); + prevFreeNode->free.next = node->free.next; + } + + // It is at the back. + if(node->free.next == VMA_NULL) + { + VMA_ASSERT(m_FreeList[level].back == node); + m_FreeList[level].back = node->free.prev; + } + else + { + Node* const nextFreeNode = node->free.next; + VMA_ASSERT(nextFreeNode->free.prev == node); + nextFreeNode->free.prev = node->free.prev; + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const +{ + switch(node->type) + { + case Node::TYPE_FREE: + PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize); + break; + case Node::TYPE_ALLOCATION: + { + PrintDetailedMap_Allocation(json, node->offset, node->allocation.alloc); + const VkDeviceSize allocSize = node->allocation.alloc->GetSize(); + if(allocSize < levelNodeSize) + { + PrintDetailedMap_UnusedRange(json, node->offset + allocSize, levelNodeSize - allocSize); + } + } + break; + case Node::TYPE_SPLIT: + { + const VkDeviceSize childrenNodeSize = levelNodeSize / 2; + const Node* const leftChild = node->split.leftChild; + PrintDetailedMapNode(json, leftChild, childrenNodeSize); + const Node* const rightChild = leftChild->buddy; + PrintDetailedMapNode(json, rightChild, childrenNodeSize); + } + break; + default: + VMA_ASSERT(0); + } +} +#endif // #if VMA_STATS_STRING_ENABLED + + +//////////////////////////////////////////////////////////////////////////////// +// class VmaDeviceMemoryBlock + +VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) : + m_pMetadata(VMA_NULL), + m_MemoryTypeIndex(UINT32_MAX), + m_Id(0), + m_hMemory(VK_NULL_HANDLE), + m_MapCount(0), + m_pMappedData(VMA_NULL) +{ +} + +void VmaDeviceMemoryBlock::Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm) +{ + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); + + m_hParentPool = hParentPool; + m_MemoryTypeIndex = newMemoryTypeIndex; + m_Id = id; + m_hMemory = newMemory; + + switch(algorithm) + { + case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator); + break; + case VMA_POOL_CREATE_BUDDY_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Buddy)(hAllocator); + break; + default: + VMA_ASSERT(0); + // Fall-through. + case 0: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Generic)(hAllocator); + } + m_pMetadata->Init(newSize); +} + +void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) +{ + // This is the most important assert in the entire library. + // Hitting it means you have some memory leak - unreleased VmaAllocation objects. 
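+ // (A typical cause is destroying a pool or the allocator before freeing every
+ // allocation made from it, e.g. a missing vmaDestroyBuffer or vmaFreeMemory
+ // call.)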
+ VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); + + VMA_ASSERT(m_hMemory != VK_NULL_HANDLE); + allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory); + m_hMemory = VK_NULL_HANDLE; + + vma_delete(allocator, m_pMetadata); + m_pMetadata = VMA_NULL; +} + +bool VmaDeviceMemoryBlock::Validate() const +{ + VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && + (m_pMetadata->GetSize() != 0)); + + return m_pMetadata->Validate(); +} + +VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) +{ + void* pData = nullptr; + VkResult res = Map(hAllocator, 1, &pData); + if(res != VK_SUCCESS) + { + return res; + } + + res = m_pMetadata->CheckCorruption(pData); + + Unmap(hAllocator, 1); + + return res; +} + +VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) +{ + if(count == 0) + { + return VK_SUCCESS; + } + + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + if(m_MapCount != 0) + { + m_MapCount += count; + VMA_ASSERT(m_pMappedData != VMA_NULL); + if(ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + return VK_SUCCESS; + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + &m_pMappedData); + if(result == VK_SUCCESS) + { + if(ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + m_MapCount = count; + } + return result; + } +} + +void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) +{ + if(count == 0) + { + return; + } + + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + if(m_MapCount >= count) + { + m_MapCount -= count; + if(m_MapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + } + else + { + VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); + } +} + +VkResult VmaDeviceMemoryBlock::WriteMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if(res != VK_SUCCESS) + { + return res; + } + + VmaWriteMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN); + VmaWriteMagicValue(pData, allocOffset + allocSize); + + Unmap(hAllocator, 1); + + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::ValidateMagicValueAroundAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + VMA_ASSERT(allocOffset >= VMA_DEBUG_MARGIN); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if(res != VK_SUCCESS) + { + return res; + } + + if(!VmaValidateMagicValue(pData, allocOffset - VMA_DEBUG_MARGIN)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED BEFORE FREED ALLOCATION!"); + } + else if(!VmaValidateMagicValue(pData, allocOffset + allocSize)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); + } + + Unmap(hAllocator, 1); + + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == 
VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); +} + +VkResult VmaDeviceMemoryBlock::BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_Mutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); +} + +static void InitStatInfo(VmaStatInfo& outInfo) +{ + memset(&outInfo, 0, sizeof(outInfo)); + outInfo.allocationSizeMin = UINT64_MAX; + outInfo.unusedRangeSizeMin = UINT64_MAX; +} + +// Adds statistics srcInfo into inoutInfo, like: inoutInfo += srcInfo. +static void VmaAddStatInfo(VmaStatInfo& inoutInfo, const VmaStatInfo& srcInfo) +{ + inoutInfo.blockCount += srcInfo.blockCount; + inoutInfo.allocationCount += srcInfo.allocationCount; + inoutInfo.unusedRangeCount += srcInfo.unusedRangeCount; + inoutInfo.usedBytes += srcInfo.usedBytes; + inoutInfo.unusedBytes += srcInfo.unusedBytes; + inoutInfo.allocationSizeMin = VMA_MIN(inoutInfo.allocationSizeMin, srcInfo.allocationSizeMin); + inoutInfo.allocationSizeMax = VMA_MAX(inoutInfo.allocationSizeMax, srcInfo.allocationSizeMax); + inoutInfo.unusedRangeSizeMin = VMA_MIN(inoutInfo.unusedRangeSizeMin, srcInfo.unusedRangeSizeMin); + inoutInfo.unusedRangeSizeMax = VMA_MAX(inoutInfo.unusedRangeSizeMax, srcInfo.unusedRangeSizeMax); +} + +static void VmaPostprocessCalcStatInfo(VmaStatInfo& inoutInfo) +{ + inoutInfo.allocationSizeAvg = (inoutInfo.allocationCount > 0) ? + VmaRoundDiv(inoutInfo.usedBytes, inoutInfo.allocationCount) : 0; + inoutInfo.unusedRangeSizeAvg = (inoutInfo.unusedRangeCount > 0) ? + VmaRoundDiv(inoutInfo.unusedBytes, inoutInfo.unusedRangeCount) : 0; +} + +VmaPool_T::VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize) : + m_BlockVector( + hAllocator, + this, // hParentPool + createInfo.memoryTypeIndex, + createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, + createInfo.minBlockCount, + createInfo.maxBlockCount, + (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 
1 : hAllocator->GetBufferImageGranularity(),
+ createInfo.frameInUseCount,
+ createInfo.blockSize != 0, // explicitBlockSize
+ createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
+ createInfo.priority),
+ m_Id(0),
+ m_Name(VMA_NULL)
+{
+}
+
+VmaPool_T::~VmaPool_T()
+{
+}
+
+void VmaPool_T::SetName(const char* pName)
+{
+ const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
+ VmaFreeString(allocs, m_Name);
+
+ if(pName != VMA_NULL)
+ {
+ m_Name = VmaCreateStringCopy(allocs, pName);
+ }
+ else
+ {
+ m_Name = VMA_NULL;
+ }
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+VmaBlockVector::VmaBlockVector(
+ VmaAllocator hAllocator,
+ VmaPool hParentPool,
+ uint32_t memoryTypeIndex,
+ VkDeviceSize preferredBlockSize,
+ size_t minBlockCount,
+ size_t maxBlockCount,
+ VkDeviceSize bufferImageGranularity,
+ uint32_t frameInUseCount,
+ bool explicitBlockSize,
+ uint32_t algorithm,
+ float priority) :
+ m_hAllocator(hAllocator),
+ m_hParentPool(hParentPool),
+ m_MemoryTypeIndex(memoryTypeIndex),
+ m_PreferredBlockSize(preferredBlockSize),
+ m_MinBlockCount(minBlockCount),
+ m_MaxBlockCount(maxBlockCount),
+ m_BufferImageGranularity(bufferImageGranularity),
+ m_FrameInUseCount(frameInUseCount),
+ m_ExplicitBlockSize(explicitBlockSize),
+ m_Algorithm(algorithm),
+ m_Priority(priority),
+ m_HasEmptyBlock(false),
+ m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
+ m_NextBlockId(0)
+{
+}
+
+VmaBlockVector::~VmaBlockVector()
+{
+ for(size_t i = m_Blocks.size(); i--; )
+ {
+ m_Blocks[i]->Destroy(m_hAllocator);
+ vma_delete(m_hAllocator, m_Blocks[i]);
+ }
+}
+
+VkResult VmaBlockVector::CreateMinBlocks()
+{
+ for(size_t i = 0; i < m_MinBlockCount; ++i)
+ {
+ VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
+ if(res != VK_SUCCESS)
+ {
+ return res;
+ }
+ }
+ return VK_SUCCESS;
+}
+
+void VmaBlockVector::GetPoolStats(VmaPoolStats* pStats)
+{
+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ const size_t blockCount = m_Blocks.size();
+
+ pStats->size = 0;
+ pStats->unusedSize = 0;
+ pStats->allocationCount = 0;
+ pStats->unusedRangeCount = 0;
+ pStats->unusedRangeSizeMax = 0;
+ pStats->blockCount = blockCount;
+
+ for(uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+ {
+ const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+ VMA_ASSERT(pBlock);
+ VMA_HEAVY_ASSERT(pBlock->Validate());
+ pBlock->m_pMetadata->AddPoolStats(*pStats);
+ }
+}
+
+bool VmaBlockVector::IsEmpty()
+{
+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+ return m_Blocks.empty();
+}
+
+bool VmaBlockVector::IsCorruptionDetectionEnabled() const
+{
+ const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+ return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
+ (VMA_DEBUG_MARGIN > 0) &&
+ (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
+ (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
+}
+
+static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+
+VkResult VmaBlockVector::Allocate(
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ const VmaAllocationCreateInfo& createInfo,
+ VmaSuballocationType suballocType,
+ size_t allocationCount,
+ VmaAllocation* pAllocations)
+{
+ size_t allocIndex;
+ VkResult res = VK_SUCCESS;
+
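+ // With corruption detection enabled, size and alignment are rounded up to a
+ // multiple of sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE) so the margins
+ // written around the allocation stay aligned; e.g. with a 4-byte magic value,
+ // a 30-byte request becomes 32 bytes.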
+ if(IsCorruptionDetectionEnabled())
+ {
+ size = VmaAlignUp(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+ alignment = VmaAlignUp(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
+ }
+
+ {
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+ for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+ {
+ res = AllocatePage(
+ currentFrameIndex,
+ size,
+ alignment,
+ createInfo,
+ suballocType,
+ pAllocations + allocIndex);
+ if(res != VK_SUCCESS)
+ {
+ break;
+ }
+ }
+ }
+
+ if(res != VK_SUCCESS)
+ {
+ // Free all already created allocations.
+ while(allocIndex--)
+ {
+ Free(pAllocations[allocIndex]);
+ }
+ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+ }
+
+ return res;
+}
+
+VkResult VmaBlockVector::AllocatePage(
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ const VmaAllocationCreateInfo& createInfo,
+ VmaSuballocationType suballocType,
+ VmaAllocation* pAllocation)
+{
+ const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+ bool canMakeOtherLost = (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) != 0;
+ const bool mapped = (createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+ const bool isUserDataString = (createInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+
+ VkDeviceSize freeMemory;
+ {
+ const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+ VmaBudget heapBudget = {};
+ m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
+ freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
+ }
+
+ const bool canFallbackToDedicated = !IsCustomPool();
+ const bool canCreateNewBlock =
+ ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
+ (m_Blocks.size() < m_MaxBlockCount) &&
+ (freeMemory >= size || !canFallbackToDedicated);
+ uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
+
+ // If linearAlgorithm is used, canMakeOtherLost is available only when used as ring buffer.
+ // Which in turn is available only when maxBlockCount = 1.
+ if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT && m_MaxBlockCount > 1)
+ {
+ canMakeOtherLost = false;
+ }
+
+ // Upper address can only be used with linear allocator and within single memory block.
+ if(isUpperAddress &&
+ (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
+ {
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ // Validate strategy.
+ switch(strategy)
+ {
+ case 0:
+ strategy = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT;
+ break;
+ case VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT:
+ case VMA_ALLOCATION_CREATE_STRATEGY_WORST_FIT_BIT:
+ case VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT:
+ break;
+ default:
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ // Early reject: requested allocation size is larger than the maximum block size for this block vector.
+ if(size + 2 * VMA_DEBUG_MARGIN > m_PreferredBlockSize)
+ {
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+
+ /*
+ Under certain conditions, this whole section can be skipped for optimization, so
+ we move on directly to trying to allocate with canMakeOtherLost. That's the case
+ e.g. for custom pools with linear algorithm.
+ */
+ if(!canMakeOtherLost || canCreateNewBlock)
+ {
+ // 1. Search existing allocations. Try to allocate without making other allocations lost.
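+ // (The CAN_MAKE_OTHER_LOST bit is stripped from the flags for this pass, so
+ // CreateAllocationRequest can only succeed in genuinely free space.)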
+ VmaAllocationCreateFlags allocFlagsCopy = createInfo.flags; + allocFlagsCopy &= ~VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT; + + if(m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Use only last block. + if(!m_Blocks.empty()) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from last block #%u", pCurrBlock->GetId()); + return VK_SUCCESS; + } + } + } + else + { + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + return VK_SUCCESS; + } + } + } + else // WORST_FIT, FIRST_FIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Returned from existing block #%u", pCurrBlock->GetId()); + return VK_SUCCESS; + } + } + } + } + + // 2. Try to create new block. + if(canCreateNewBlock) + { + // Calculate optimal size for new block. + VkDeviceSize newBlockSize = m_PreferredBlockSize; + uint32_t newBlockSizeShift = 0; + const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; + + if(!m_ExplicitBlockSize) + { + // Allocate 1/8, 1/4, 1/2 as first blocks. + const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); + for(uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if(smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + } + else + { + break; + } + } + } + + size_t newBlockIndex = 0; + VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. + if(!m_ExplicitBlockSize) + { + while(res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if(smallerNewBlockSize >= size) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
+ CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + else + { + break; + } + } + } + + if(res == VK_SUCCESS) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; + VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); + + res = AllocateFromBlock( + pBlock, + currentFrameIndex, + size, + alignment, + allocFlagsCopy, + createInfo.pUserData, + suballocType, + strategy, + pAllocation); + if(res == VK_SUCCESS) + { + VMA_DEBUG_LOG(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize); + return VK_SUCCESS; + } + else + { + // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + } + } + + // 3. Try to allocate from existing blocks with making other allocations lost. + if(canMakeOtherLost) + { + uint32_t tryIndex = 0; + for(; tryIndex < VMA_ALLOCATION_TRY_COUNT; ++tryIndex) + { + VmaDeviceMemoryBlock* pBestRequestBlock = VMA_NULL; + VmaAllocationRequest bestRequest = {}; + VkDeviceSize bestRequestCost = VK_WHOLE_SIZE; + + // 1. Search existing allocations. + if(strategy == VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for(size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VmaAllocationRequest currRequest = {}; + if(pCurrBlock->m_pMetadata->CreateAllocationRequest( + currentFrameIndex, + m_FrameInUseCount, + m_BufferImageGranularity, + size, + alignment, + (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, + suballocType, + canMakeOtherLost, + strategy, + &currRequest)) + { + const VkDeviceSize currRequestCost = currRequest.CalcCost(); + if(pBestRequestBlock == VMA_NULL || + currRequestCost < bestRequestCost) + { + pBestRequestBlock = pCurrBlock; + bestRequest = currRequest; + bestRequestCost = currRequestCost; + + if(bestRequestCost == 0) + { + break; + } + } + } + } + } + else // WORST_FIT, FIRST_FIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for(size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VmaAllocationRequest currRequest = {}; + if(pCurrBlock->m_pMetadata->CreateAllocationRequest( + currentFrameIndex, + m_FrameInUseCount, + m_BufferImageGranularity, + size, + alignment, + (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, + suballocType, + canMakeOtherLost, + strategy, + &currRequest)) + { + const VkDeviceSize currRequestCost = currRequest.CalcCost(); + if(pBestRequestBlock == VMA_NULL || + currRequestCost < bestRequestCost || + strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { + pBestRequestBlock = pCurrBlock; + bestRequest = currRequest; + bestRequestCost = currRequestCost; + + if(bestRequestCost == 0 || + strategy == VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT) + { + break; + } + } + } + } + } + + if(pBestRequestBlock != VMA_NULL) + { + if(mapped) + { + VkResult res = pBestRequestBlock->Map(m_hAllocator, 1, VMA_NULL); + if(res != VK_SUCCESS) + { + return res; + } + } + + if(pBestRequestBlock->m_pMetadata->MakeRequestedAllocationsLost( + currentFrameIndex, + m_FrameInUseCount, + &bestRequest)) + { + // Allocate from this pBlock. 
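+ // (Reaching this point means MakeRequestedAllocationsLost has freed the
+ // required suballocations; the request is committed below with the same
+ // bookkeeping as a regular block allocation.)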
+ *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString);
+ pBestRequestBlock->m_pMetadata->Alloc(bestRequest, suballocType, size, *pAllocation);
+ UpdateHasEmptyBlock();
+ (*pAllocation)->InitBlockAllocation(
+ pBestRequestBlock,
+ bestRequest.offset,
+ alignment,
+ size,
+ m_MemoryTypeIndex,
+ suballocType,
+ mapped,
+ (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0);
+ VMA_HEAVY_ASSERT(pBestRequestBlock->Validate());
+ VMA_DEBUG_LOG(" Returned from existing block");
+ (*pAllocation)->SetUserData(m_hAllocator, createInfo.pUserData);
+ m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size);
+ if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+ {
+ m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+ }
+ if(IsCorruptionDetectionEnabled())
+ {
+ VkResult res = pBestRequestBlock->WriteMagicValueAroundAllocation(m_hAllocator, bestRequest.offset, size);
+ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+ }
+ return VK_SUCCESS;
+ }
+ // else: Some allocations must have been touched while we are here. Next try.
+ }
+ else
+ {
+ // Could not find place in any of the blocks - break outer loop.
+ break;
+ }
+ }
+ /* Maximum number of tries exceeded - a very unlikely event when many other
+ threads are simultaneously touching allocations, making it impossible to make
+ them lost at the same time as we try to allocate. */
+ if(tryIndex == VMA_ALLOCATION_TRY_COUNT)
+ {
+ return VK_ERROR_TOO_MANY_OBJECTS;
+ }
+ }
+
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+}
+
+void VmaBlockVector::Free(
+ const VmaAllocation hAllocation)
+{
+ VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
+
+ bool budgetExceeded = false;
+ {
+ const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+ VmaBudget heapBudget = {};
+ m_hAllocator->GetBudget(&heapBudget, heapIndex, 1);
+ budgetExceeded = heapBudget.usage >= heapBudget.budget;
+ }
+
+ // Scope for lock.
+ {
+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+ VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+
+ if(IsCorruptionDetectionEnabled())
+ {
+ VkResult res = pBlock->ValidateMagicValueAroundAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
+ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
+ }
+
+ if(hAllocation->IsPersistentMap())
+ {
+ pBlock->Unmap(m_hAllocator, 1);
+ }
+
+ pBlock->m_pMetadata->Free(hAllocation);
+ VMA_HEAVY_ASSERT(pBlock->Validate());
+
+ VMA_DEBUG_LOG(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
+
+ const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
+ // pBlock became empty after this deallocation.
+ if(pBlock->m_pMetadata->IsEmpty())
+ {
+ // Already has empty block. We don't want to have two, so delete this one.
+ if((m_HasEmptyBlock || budgetExceeded) && canDeleteBlock)
+ {
+ pBlockToDelete = pBlock;
+ Remove(pBlock);
+ }
+ // else: We now have an empty block - leave it.
+ }
+ // pBlock didn't become empty, but we have another empty block - find and free that one.
+ // (This is optional, heuristics.)
+ else if(m_HasEmptyBlock && canDeleteBlock)
+ {
+ VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
+ if(pLastBlock->m_pMetadata->IsEmpty())
+ {
+ pBlockToDelete = pLastBlock;
+ m_Blocks.pop_back();
+ }
+ }
+
+ UpdateHasEmptyBlock();
+ IncrementallySortBlocks();
+ }
+
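+ // The logic above keeps at most one empty block alive: a block that has just
+ // become empty is destroyed only if another empty block already exists or the
+ // heap budget is exceeded. This avoids ping-ponging between freeing and
+ // re-allocating a whole VkDeviceMemory block when the application repeatedly
+ // frees and allocates a single resource.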
+ // Destruction of a free block. Deferred until this point, outside of the
+ // mutex lock, for performance reasons.
+ if(pBlockToDelete != VMA_NULL)
+ {
+ VMA_DEBUG_LOG(" Deleted empty block");
+ pBlockToDelete->Destroy(m_hAllocator);
+ vma_delete(m_hAllocator, pBlockToDelete);
+ }
+}
+
+VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
+{
+ VkDeviceSize result = 0;
+ for(size_t i = m_Blocks.size(); i--; )
+ {
+ result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
+ if(result >= m_PreferredBlockSize)
+ {
+ break;
+ }
+ }
+ return result;
+}
+
+void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
+{
+ for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+ {
+ if(m_Blocks[blockIndex] == pBlock)
+ {
+ VmaVectorRemove(m_Blocks, blockIndex);
+ return;
+ }
+ }
+ VMA_ASSERT(0);
+}
+
+void VmaBlockVector::IncrementallySortBlocks()
+{
+ if(m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+ {
+ // Bubble sort only until first swap.
+ for(size_t i = 1; i < m_Blocks.size(); ++i)
+ {
+ if(m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
+ {
+ VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
+ return;
+ }
+ }
+ }
+}
+
+VkResult VmaBlockVector::AllocateFromBlock(
+ VmaDeviceMemoryBlock* pBlock,
+ uint32_t currentFrameIndex,
+ VkDeviceSize size,
+ VkDeviceSize alignment,
+ VmaAllocationCreateFlags allocFlags,
+ void* pUserData,
+ VmaSuballocationType suballocType,
+ uint32_t strategy,
+ VmaAllocation* pAllocation)
+{
+ VMA_ASSERT((allocFlags & VMA_ALLOCATION_CREATE_CAN_MAKE_OTHER_LOST_BIT) == 0);
+ const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+ const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+ const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+
+ VmaAllocationRequest currRequest = {};
+ if(pBlock->m_pMetadata->CreateAllocationRequest(
+ currentFrameIndex,
+ m_FrameInUseCount,
+ m_BufferImageGranularity,
+ size,
+ alignment,
+ isUpperAddress,
+ suballocType,
+ false, // canMakeOtherLost
+ strategy,
+ &currRequest))
+ {
+ // Allocate from pBlock.
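+ // (canMakeOtherLost was passed as false above, so a successful request must
+ // not require making any existing allocation lost.)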
+ VMA_ASSERT(currRequest.itemsToMakeLostCount == 0); + + if(mapped) + { + VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL); + if(res != VK_SUCCESS) + { + return res; + } + } + + *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(currentFrameIndex, isUserDataString); + pBlock->m_pMetadata->Alloc(currRequest, suballocType, size, *pAllocation); + UpdateHasEmptyBlock(); + (*pAllocation)->InitBlockAllocation( + pBlock, + currRequest.offset, + alignment, + size, + m_MemoryTypeIndex, + suballocType, + mapped, + (allocFlags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0); + VMA_HEAVY_ASSERT(pBlock->Validate()); + (*pAllocation)->SetUserData(m_hAllocator, pUserData); + m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), size); + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); + } + if(IsCorruptionDetectionEnabled()) + { + VkResult res = pBlock->WriteMagicValueAroundAllocation(m_hAllocator, currRequest.offset, size); + VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value."); + } + return VK_SUCCESS; + } + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) +{ + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.memoryTypeIndex = m_MemoryTypeIndex; + allocInfo.allocationSize = blockSize; + +#if VMA_BUFFER_DEVICE_ADDRESS + // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature. + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if(m_hAllocator->m_UseKhrBufferDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } +#endif // #if VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if(m_hAllocator->m_UseExtMemoryPriority) + { + priorityInfo.priority = m_Priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // #if VMA_MEMORY_PRIORITY + + VkDeviceMemory mem = VK_NULL_HANDLE; + VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem); + if(res < 0) + { + return res; + } + + // New VkDeviceMemory successfully created. + + // Create new Allocation for it. 
+ VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
+ pBlock->Init(
+ m_hAllocator,
+ m_hParentPool,
+ m_MemoryTypeIndex,
+ mem,
+ allocInfo.allocationSize,
+ m_NextBlockId++,
+ m_Algorithm);
+
+ m_Blocks.push_back(pBlock);
+ if(pNewBlockIndex != VMA_NULL)
+ {
+ *pNewBlockIndex = m_Blocks.size() - 1;
+ }
+
+ return VK_SUCCESS;
+}
+
+void VmaBlockVector::ApplyDefragmentationMovesCpu(
+ class VmaBlockVectorDefragmentationContext* pDefragCtx,
+ const VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves)
+{
+ const size_t blockCount = m_Blocks.size();
+ const bool isNonCoherent = m_hAllocator->IsMemoryTypeNonCoherent(m_MemoryTypeIndex);
+
+ enum BLOCK_FLAG
+ {
+ BLOCK_FLAG_USED = 0x00000001,
+ BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION = 0x00000002,
+ };
+
+ struct BlockInfo
+ {
+ uint32_t flags;
+ void* pMappedData;
+ };
+ VmaVector< BlockInfo, VmaStlAllocator<BlockInfo> >
+ blockInfo(blockCount, BlockInfo(), VmaStlAllocator<BlockInfo>(m_hAllocator->GetAllocationCallbacks()));
+ memset(blockInfo.data(), 0, blockCount * sizeof(BlockInfo));
+
+ // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
+ const size_t moveCount = moves.size();
+ for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+ {
+ const VmaDefragmentationMove& move = moves[moveIndex];
+ blockInfo[move.srcBlockIndex].flags |= BLOCK_FLAG_USED;
+ blockInfo[move.dstBlockIndex].flags |= BLOCK_FLAG_USED;
+ }
+
+ VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
+
+ // Go over all blocks. Get mapped pointer or map if necessary.
+ for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
+ {
+ BlockInfo& currBlockInfo = blockInfo[blockIndex];
+ VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+ if((currBlockInfo.flags & BLOCK_FLAG_USED) != 0)
+ {
+ currBlockInfo.pMappedData = pBlock->GetMappedData();
+ // It is not originally mapped - map it.
+ if(currBlockInfo.pMappedData == VMA_NULL)
+ {
+ pDefragCtx->res = pBlock->Map(m_hAllocator, 1, &currBlockInfo.pMappedData);
+ if(pDefragCtx->res == VK_SUCCESS)
+ {
+ currBlockInfo.flags |= BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION;
+ }
+ }
+ }
+ }
+
+ // Go over all moves. Do actual data transfer.
+ if(pDefragCtx->res == VK_SUCCESS)
+ {
+ const VkDeviceSize nonCoherentAtomSize = m_hAllocator->m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
+ VkMappedMemoryRange memRange = { VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE };
+
+ for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+ {
+ const VmaDefragmentationMove& move = moves[moveIndex];
+
+ const BlockInfo& srcBlockInfo = blockInfo[move.srcBlockIndex];
+ const BlockInfo& dstBlockInfo = blockInfo[move.dstBlockIndex];
+
+ VMA_ASSERT(srcBlockInfo.pMappedData && dstBlockInfo.pMappedData);
+
+ // Invalidate source.
+ if(isNonCoherent)
+ {
+ VmaDeviceMemoryBlock* const pSrcBlock = m_Blocks[move.srcBlockIndex];
+ memRange.memory = pSrcBlock->GetDeviceMemory();
+ memRange.offset = VmaAlignDown(move.srcOffset, nonCoherentAtomSize);
+ memRange.size = VMA_MIN(
+ VmaAlignUp(move.size + (move.srcOffset - memRange.offset), nonCoherentAtomSize),
+ pSrcBlock->m_pMetadata->GetSize() - memRange.offset);
+ (*m_hAllocator->GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
+ }
+
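+ // For non-coherent memory the invalidated range must be aligned to
+ // nonCoherentAtomSize. Example: with an atom size of 64, srcOffset 100 and
+ // size 40, the range becomes offset VmaAlignDown(100, 64) = 64 and size
+ // VmaAlignUp(40 + 36, 64) = 128, clamped to the end of the block.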
+ // THE PLACE WHERE ACTUAL DATA COPY HAPPENS.
+ memmove(
+ reinterpret_cast<char*>(dstBlockInfo.pMappedData) + move.dstOffset,
+ reinterpret_cast<char*>(srcBlockInfo.pMappedData) + move.srcOffset,
+ static_cast<size_t>(move.size));
+
+ if(IsCorruptionDetectionEnabled())
+ {
+ VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset - VMA_DEBUG_MARGIN);
+ VmaWriteMagicValue(dstBlockInfo.pMappedData, move.dstOffset + move.size);
+ }
+
+ // Flush destination.
+ if(isNonCoherent)
+ {
+ VmaDeviceMemoryBlock* const pDstBlock = m_Blocks[move.dstBlockIndex];
+ memRange.memory = pDstBlock->GetDeviceMemory();
+ memRange.offset = VmaAlignDown(move.dstOffset, nonCoherentAtomSize);
+ memRange.size = VMA_MIN(
+ VmaAlignUp(move.size + (move.dstOffset - memRange.offset), nonCoherentAtomSize),
+ pDstBlock->m_pMetadata->GetSize() - memRange.offset);
+ (*m_hAllocator->GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hAllocator->m_hDevice, 1, &memRange);
+ }
+ }
+ }
+
+ // Go over all blocks in reverse order. Unmap those that were mapped just for defragmentation.
+ // Regardless of pCtx->res == VK_SUCCESS.
+ for(size_t blockIndex = blockCount; blockIndex--; )
+ {
+ const BlockInfo& currBlockInfo = blockInfo[blockIndex];
+ if((currBlockInfo.flags & BLOCK_FLAG_MAPPED_FOR_DEFRAGMENTATION) != 0)
+ {
+ VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+ pBlock->Unmap(m_hAllocator, 1);
+ }
+ }
+}
+
+void VmaBlockVector::ApplyDefragmentationMovesGpu(
+ class VmaBlockVectorDefragmentationContext* pDefragCtx,
+ VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+ VkCommandBuffer commandBuffer)
+{
+ const size_t blockCount = m_Blocks.size();
+
+ pDefragCtx->blockContexts.resize(blockCount);
+ memset(pDefragCtx->blockContexts.data(), 0, blockCount * sizeof(VmaBlockDefragmentationContext));
+
+ // Go over all moves. Mark blocks that are used with BLOCK_FLAG_USED.
+ const size_t moveCount = moves.size();
+ for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+ {
+ const VmaDefragmentationMove& move = moves[moveIndex];
+
+ //if(move.type == VMA_ALLOCATION_TYPE_UNKNOWN)
+ {
+ // Old school moves still require us to map the whole block.
+ pDefragCtx->blockContexts[move.srcBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+ pDefragCtx->blockContexts[move.dstBlockIndex].flags |= VmaBlockDefragmentationContext::BLOCK_FLAG_USED;
+ }
+ }
+
+ VMA_ASSERT(pDefragCtx->res == VK_SUCCESS);
+
+ // Go over all blocks. Create and bind buffer for whole block if necessary.
+ {
+ VkBufferCreateInfo bufCreateInfo;
+ VmaFillGpuDefragmentationBufferCreateInfo(bufCreateInfo);
+
+ for(size_t blockIndex = 0; pDefragCtx->res == VK_SUCCESS && blockIndex < blockCount; ++blockIndex)
+ {
+ VmaBlockDefragmentationContext& currBlockCtx = pDefragCtx->blockContexts[blockIndex];
+ VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+ if((currBlockCtx.flags & VmaBlockDefragmentationContext::BLOCK_FLAG_USED) != 0)
+ {
+ bufCreateInfo.size = pBlock->m_pMetadata->GetSize();
+ pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkCreateBuffer)(
+ m_hAllocator->m_hDevice, &bufCreateInfo, m_hAllocator->GetAllocationCallbacks(), &currBlockCtx.hBuffer);
+ if(pDefragCtx->res == VK_SUCCESS)
+ {
+ pDefragCtx->res = (*m_hAllocator->GetVulkanFunctions().vkBindBufferMemory)(
+ m_hAllocator->m_hDevice, currBlockCtx.hBuffer, pBlock->GetDeviceMemory(), 0);
+ }
+ }
+ }
+ }
+
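+ // Each block touched by a move gets a temporary buffer bound at offset 0 that
+ // spans the whole VkDeviceMemory, so every move below reduces to a single
+ // vkCmdCopyBuffer region whose srcOffset/dstOffset are simply the
+ // block-relative offsets of the allocations.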
+    if(pDefragCtx->res == VK_SUCCESS)
+    {
+        for(size_t moveIndex = 0; moveIndex < moveCount; ++moveIndex)
+        {
+            const VmaDefragmentationMove& move = moves[moveIndex];
+
+            const VmaBlockDefragmentationContext& srcBlockCtx = pDefragCtx->blockContexts[move.srcBlockIndex];
+            const VmaBlockDefragmentationContext& dstBlockCtx = pDefragCtx->blockContexts[move.dstBlockIndex];
+
+            VMA_ASSERT(srcBlockCtx.hBuffer && dstBlockCtx.hBuffer);
+
+            VkBufferCopy region = {
+                move.srcOffset,
+                move.dstOffset,
+                move.size };
+            (*m_hAllocator->GetVulkanFunctions().vkCmdCopyBuffer)(
+                commandBuffer, srcBlockCtx.hBuffer, dstBlockCtx.hBuffer, 1, &region);
+        }
+    }
+
+    // Save buffers to defrag context for later destruction.
+    if(pDefragCtx->res == VK_SUCCESS && moveCount > 0)
+    {
+        pDefragCtx->res = VK_NOT_READY;
+    }
+}
+
+void VmaBlockVector::FreeEmptyBlocks(VmaDefragmentationStats* pDefragmentationStats)
+{
+    for(size_t blockIndex = m_Blocks.size(); blockIndex--; )
+    {
+        VmaDeviceMemoryBlock* pBlock = m_Blocks[blockIndex];
+        if(pBlock->m_pMetadata->IsEmpty())
+        {
+            if(m_Blocks.size() > m_MinBlockCount)
+            {
+                if(pDefragmentationStats != VMA_NULL)
+                {
+                    ++pDefragmentationStats->deviceMemoryBlocksFreed;
+                    pDefragmentationStats->bytesFreed += pBlock->m_pMetadata->GetSize();
+                }
+
+                VmaVectorRemove(m_Blocks, blockIndex);
+                pBlock->Destroy(m_hAllocator);
+                vma_delete(m_hAllocator, pBlock);
+            }
+            else
+            {
+                break;
+            }
+        }
+    }
+    UpdateHasEmptyBlock();
+}
+
+void VmaBlockVector::UpdateHasEmptyBlock()
+{
+    m_HasEmptyBlock = false;
+    for(size_t index = 0, count = m_Blocks.size(); index < count; ++index)
+    {
+        VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
+        if(pBlock->m_pMetadata->IsEmpty())
+        {
+            m_HasEmptyBlock = true;
+            break;
+        }
+    }
+}
+
+#if VMA_STATS_STRING_ENABLED
+
+void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
+{
+    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+    json.BeginObject();
+
+    if(IsCustomPool())
+    {
+        const char* poolName = m_hParentPool->GetName();
+        if(poolName != VMA_NULL && poolName[0] != '\0')
+        {
+            json.WriteString("Name");
+            json.WriteString(poolName);
+        }
+
+        json.WriteString("MemoryTypeIndex");
+        json.WriteNumber(m_MemoryTypeIndex);
+
+        json.WriteString("BlockSize");
+        json.WriteNumber(m_PreferredBlockSize);
+
+        json.WriteString("BlockCount");
+        json.BeginObject(true);
+        if(m_MinBlockCount > 0)
+        {
+            json.WriteString("Min");
+            json.WriteNumber((uint64_t)m_MinBlockCount);
+        }
+        if(m_MaxBlockCount < SIZE_MAX)
+        {
+            json.WriteString("Max");
+            json.WriteNumber((uint64_t)m_MaxBlockCount);
+        }
+        json.WriteString("Cur");
+        json.WriteNumber((uint64_t)m_Blocks.size());
+        json.EndObject();
+
+        if(m_FrameInUseCount > 0)
+        {
+            json.WriteString("FrameInUseCount");
+            json.WriteNumber(m_FrameInUseCount);
+        }
+
+        if(m_Algorithm != 0)
+        {
+            json.WriteString("Algorithm");
+            json.WriteString(VmaAlgorithmToStr(m_Algorithm));
+        }
+    }
+    else
+    {
+        json.WriteString("PreferredBlockSize");
+        json.WriteNumber(m_PreferredBlockSize);
+    }
+
+    json.WriteString("Blocks");
+    json.BeginObject();
+    for(size_t i = 0; i < m_Blocks.size(); ++i)
+    {
+        json.BeginString();
+        json.ContinueString(m_Blocks[i]->GetId());
+        json.EndString();
+
+        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
+    }
+    json.EndObject();
+
+    json.EndObject();
+}
+
+#endif // #if VMA_STATS_STRING_ENABLED
+
+void VmaBlockVector::Defragment(
+    class VmaBlockVectorDefragmentationContext* pCtx,
+    VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags,
+    VkDeviceSize& maxCpuBytesToMove, uint32_t& maxCpuAllocationsToMove,
+
VkDeviceSize& maxGpuBytesToMove, uint32_t& maxGpuAllocationsToMove, + VkCommandBuffer commandBuffer) +{ + pCtx->res = VK_SUCCESS; + + const VkMemoryPropertyFlags memPropFlags = + m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags; + const bool isHostVisible = (memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; + + const bool canDefragmentOnCpu = maxCpuBytesToMove > 0 && maxCpuAllocationsToMove > 0 && + isHostVisible; + const bool canDefragmentOnGpu = maxGpuBytesToMove > 0 && maxGpuAllocationsToMove > 0 && + !IsCorruptionDetectionEnabled() && + ((1u << m_MemoryTypeIndex) & m_hAllocator->GetGpuDefragmentationMemoryTypeBits()) != 0; + + // There are options to defragment this memory type. + if(canDefragmentOnCpu || canDefragmentOnGpu) + { + bool defragmentOnGpu; + // There is only one option to defragment this memory type. + if(canDefragmentOnGpu != canDefragmentOnCpu) + { + defragmentOnGpu = canDefragmentOnGpu; + } + // Both options are available: Heuristics to choose the best one. + else + { + defragmentOnGpu = (memPropFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0 || + m_hAllocator->IsIntegratedGpu(); + } + + bool overlappingMoveSupported = !defragmentOnGpu; + + if(m_hAllocator->m_UseMutex) + { + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) + { + if(!m_Mutex.TryLockWrite()) + { + pCtx->res = VK_ERROR_INITIALIZATION_FAILED; + return; + } + } + else + { + m_Mutex.LockWrite(); + pCtx->mutexLocked = true; + } + } + + pCtx->Begin(overlappingMoveSupported, flags); + + // Defragment. + + const VkDeviceSize maxBytesToMove = defragmentOnGpu ? maxGpuBytesToMove : maxCpuBytesToMove; + const uint32_t maxAllocationsToMove = defragmentOnGpu ? maxGpuAllocationsToMove : maxCpuAllocationsToMove; + pCtx->res = pCtx->GetAlgorithm()->Defragment(pCtx->defragmentationMoves, maxBytesToMove, maxAllocationsToMove, flags); + + // Accumulate statistics. + if(pStats != VMA_NULL) + { + const VkDeviceSize bytesMoved = pCtx->GetAlgorithm()->GetBytesMoved(); + const uint32_t allocationsMoved = pCtx->GetAlgorithm()->GetAllocationsMoved(); + pStats->bytesMoved += bytesMoved; + pStats->allocationsMoved += allocationsMoved; + VMA_ASSERT(bytesMoved <= maxBytesToMove); + VMA_ASSERT(allocationsMoved <= maxAllocationsToMove); + if(defragmentOnGpu) + { + maxGpuBytesToMove -= bytesMoved; + maxGpuAllocationsToMove -= allocationsMoved; + } + else + { + maxCpuBytesToMove -= bytesMoved; + maxCpuAllocationsToMove -= allocationsMoved; + } + } + + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL) + { + if(m_hAllocator->m_UseMutex) + m_Mutex.UnlockWrite(); + + if(pCtx->res >= VK_SUCCESS && !pCtx->defragmentationMoves.empty()) + pCtx->res = VK_NOT_READY; + + return; + } + + if(pCtx->res >= VK_SUCCESS) + { + if(defragmentOnGpu) + { + ApplyDefragmentationMovesGpu(pCtx, pCtx->defragmentationMoves, commandBuffer); + } + else + { + ApplyDefragmentationMovesCpu(pCtx, pCtx->defragmentationMoves); + } + } + } +} + +void VmaBlockVector::DefragmentationEnd( + class VmaBlockVectorDefragmentationContext* pCtx, + uint32_t flags, + VmaDefragmentationStats* pStats) +{ + if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL && m_hAllocator->m_UseMutex) + { + VMA_ASSERT(pCtx->mutexLocked == false); + + // Incremental defragmentation doesn't hold the lock, so when we enter here we don't actually have any + // lock protecting us. Since we mutate state here, we have to take the lock out now + m_Mutex.LockWrite(); + pCtx->mutexLocked = true; + } + + // If the mutex isn't locked we didn't do any work and there is nothing to delete. 
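+    // Otherwise, release the temporary whole-block buffers created by
+    // ApplyDefragmentationMovesGpu and, if the result is a success status, free any
+    // blocks that became empty.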
+ if(pCtx->mutexLocked || !m_hAllocator->m_UseMutex) + { + // Destroy buffers. + for(size_t blockIndex = pCtx->blockContexts.size(); blockIndex--;) + { + VmaBlockDefragmentationContext &blockCtx = pCtx->blockContexts[blockIndex]; + if(blockCtx.hBuffer) + { + (*m_hAllocator->GetVulkanFunctions().vkDestroyBuffer)(m_hAllocator->m_hDevice, blockCtx.hBuffer, m_hAllocator->GetAllocationCallbacks()); + } + } + + if(pCtx->res >= VK_SUCCESS) + { + FreeEmptyBlocks(pStats); + } + } + + if(pCtx->mutexLocked) + { + VMA_ASSERT(m_hAllocator->m_UseMutex); + m_Mutex.UnlockWrite(); + } +} + +uint32_t VmaBlockVector::ProcessDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationPassMoveInfo* pMove, uint32_t maxMoves) +{ + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + const uint32_t moveCount = VMA_MIN(uint32_t(pCtx->defragmentationMoves.size()) - pCtx->defragmentationMovesProcessed, maxMoves); + + for(uint32_t i = 0; i < moveCount; ++ i) + { + VmaDefragmentationMove& move = pCtx->defragmentationMoves[pCtx->defragmentationMovesProcessed + i]; + + pMove->allocation = move.hAllocation; + pMove->memory = move.pDstBlock->GetDeviceMemory(); + pMove->offset = move.dstOffset; + + ++ pMove; + } + + pCtx->defragmentationMovesProcessed += moveCount; + + return moveCount; +} + +void VmaBlockVector::CommitDefragmentations( + class VmaBlockVectorDefragmentationContext *pCtx, + VmaDefragmentationStats* pStats) +{ + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + + for(uint32_t i = pCtx->defragmentationMovesCommitted; i < pCtx->defragmentationMovesProcessed; ++ i) + { + const VmaDefragmentationMove &move = pCtx->defragmentationMoves[i]; + + move.pSrcBlock->m_pMetadata->FreeAtOffset(move.srcOffset); + move.hAllocation->ChangeBlockAllocation(m_hAllocator, move.pDstBlock, move.dstOffset); + } + + pCtx->defragmentationMovesCommitted = pCtx->defragmentationMovesProcessed; + FreeEmptyBlocks(pStats); +} + +size_t VmaBlockVector::CalcAllocationCount() const +{ + size_t result = 0; + for(size_t i = 0; i < m_Blocks.size(); ++i) + { + result += m_Blocks[i]->m_pMetadata->GetAllocationCount(); + } + return result; +} + +bool VmaBlockVector::IsBufferImageGranularityConflictPossible() const +{ + if(m_BufferImageGranularity == 1) + { + return false; + } + VmaSuballocationType lastSuballocType = VMA_SUBALLOCATION_TYPE_FREE; + for(size_t i = 0, count = m_Blocks.size(); i < count; ++i) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[i]; + VMA_ASSERT(m_Algorithm == 0); + VmaBlockMetadata_Generic* const pMetadata = (VmaBlockMetadata_Generic*)pBlock->m_pMetadata; + if(pMetadata->IsBufferImageGranularityConflictPossible(m_BufferImageGranularity, lastSuballocType)) + { + return true; + } + } + return false; +} + +void VmaBlockVector::MakePoolAllocationsLost( + uint32_t currentFrameIndex, + size_t* pLostAllocationCount) +{ + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + size_t lostAllocationCount = 0; + for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + lostAllocationCount += pBlock->m_pMetadata->MakeAllocationsLost(currentFrameIndex, m_FrameInUseCount); + } + if(pLostAllocationCount != VMA_NULL) + { + *pLostAllocationCount = lostAllocationCount; + } +} + +VkResult VmaBlockVector::CheckCorruption() +{ + if(!IsCorruptionDetectionEnabled()) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + 
for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+    {
+        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+        VMA_ASSERT(pBlock);
+        VkResult res = pBlock->CheckCorruption(m_hAllocator);
+        if(res != VK_SUCCESS)
+        {
+            return res;
+        }
+    }
+    return VK_SUCCESS;
+}
+
+void VmaBlockVector::AddStats(VmaStats* pStats)
+{
+    const uint32_t memTypeIndex = m_MemoryTypeIndex;
+    const uint32_t memHeapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(memTypeIndex);
+
+    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+    for(uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+    {
+        const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+        VMA_ASSERT(pBlock);
+        VMA_HEAVY_ASSERT(pBlock->Validate());
+        VmaStatInfo allocationStatInfo;
+        pBlock->m_pMetadata->CalcAllocationStatInfo(allocationStatInfo);
+        VmaAddStatInfo(pStats->total, allocationStatInfo);
+        VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
+        VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaDefragmentationAlgorithm_Generic members definition
+
+VmaDefragmentationAlgorithm_Generic::VmaDefragmentationAlgorithm_Generic(
+    VmaAllocator hAllocator,
+    VmaBlockVector* pBlockVector,
+    uint32_t currentFrameIndex,
+    bool overlappingMoveSupported) :
+    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
+    m_AllocationCount(0),
+    m_AllAllocations(false),
+    m_BytesMoved(0),
+    m_AllocationsMoved(0),
+    m_Blocks(VmaStlAllocator<BlockInfo*>(hAllocator->GetAllocationCallbacks()))
+{
+    // Create block info for each block.
+    const size_t blockCount = m_pBlockVector->m_Blocks.size();
+    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+    {
+        BlockInfo* pBlockInfo = vma_new(m_hAllocator, BlockInfo)(m_hAllocator->GetAllocationCallbacks());
+        pBlockInfo->m_OriginalBlockIndex = blockIndex;
+        pBlockInfo->m_pBlock = m_pBlockVector->m_Blocks[blockIndex];
+        m_Blocks.push_back(pBlockInfo);
+    }
+
+    // Sort them by m_pBlock pointer value.
+    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockPointerLess());
+}
+
+VmaDefragmentationAlgorithm_Generic::~VmaDefragmentationAlgorithm_Generic()
+{
+    for(size_t i = m_Blocks.size(); i--; )
+    {
+        vma_delete(m_hAllocator, m_Blocks[i]);
+    }
+}
+
+void VmaDefragmentationAlgorithm_Generic::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
+{
+    // Now, as we are inside VmaBlockVector::m_Mutex, we can make the final check that this allocation was not lost.
+    if(hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST)
+    {
+        VmaDeviceMemoryBlock* pBlock = hAlloc->GetBlock();
+        BlockInfoVector::iterator it = VmaBinaryFindFirstNotLess(m_Blocks.begin(), m_Blocks.end(), pBlock, BlockPointerLess());
+        if(it != m_Blocks.end() && (*it)->m_pBlock == pBlock)
+        {
+            AllocationInfo allocInfo = AllocationInfo(hAlloc, pChanged);
+            (*it)->m_Allocations.push_back(allocInfo);
+        }
+        else
+        {
+            VMA_ASSERT(0);
+        }
+
+        ++m_AllocationCount;
+    }
+}
+
+VkResult VmaDefragmentationAlgorithm_Generic::DefragmentRound(
+    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+    VkDeviceSize maxBytesToMove,
+    uint32_t maxAllocationsToMove,
+    bool freeOldAllocations)
+{
+    if(m_Blocks.empty())
+    {
+        return VK_SUCCESS;
+    }
+
+    // This is a choice based on research.
+ // Option 1: + uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT; + // Option 2: + //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT; + // Option 3: + //uint32_t strategy = VMA_ALLOCATION_CREATE_STRATEGY_MIN_FRAGMENTATION_BIT; + + size_t srcBlockMinIndex = 0; + // When FAST_ALGORITHM, move allocations from only last out of blocks that contain non-movable allocations. + /* + if(m_AlgorithmFlags & VMA_DEFRAGMENTATION_FAST_ALGORITHM_BIT) + { + const size_t blocksWithNonMovableCount = CalcBlocksWithNonMovableCount(); + if(blocksWithNonMovableCount > 0) + { + srcBlockMinIndex = blocksWithNonMovableCount - 1; + } + } + */ + + size_t srcBlockIndex = m_Blocks.size() - 1; + size_t srcAllocIndex = SIZE_MAX; + for(;;) + { + // 1. Find next allocation to move. + // 1.1. Start from last to first m_Blocks - they are sorted from most "destination" to most "source". + // 1.2. Then start from last to first m_Allocations. + while(srcAllocIndex >= m_Blocks[srcBlockIndex]->m_Allocations.size()) + { + if(m_Blocks[srcBlockIndex]->m_Allocations.empty()) + { + // Finished: no more allocations to process. + if(srcBlockIndex == srcBlockMinIndex) + { + return VK_SUCCESS; + } + else + { + --srcBlockIndex; + srcAllocIndex = SIZE_MAX; + } + } + else + { + srcAllocIndex = m_Blocks[srcBlockIndex]->m_Allocations.size() - 1; + } + } + + BlockInfo* pSrcBlockInfo = m_Blocks[srcBlockIndex]; + AllocationInfo& allocInfo = pSrcBlockInfo->m_Allocations[srcAllocIndex]; + + const VkDeviceSize size = allocInfo.m_hAllocation->GetSize(); + const VkDeviceSize srcOffset = allocInfo.m_hAllocation->GetOffset(); + const VkDeviceSize alignment = allocInfo.m_hAllocation->GetAlignment(); + const VmaSuballocationType suballocType = allocInfo.m_hAllocation->GetSuballocationType(); + + // 2. Try to find new place for this allocation in preceding or current block. + for(size_t dstBlockIndex = 0; dstBlockIndex <= srcBlockIndex; ++dstBlockIndex) + { + BlockInfo* pDstBlockInfo = m_Blocks[dstBlockIndex]; + VmaAllocationRequest dstAllocRequest; + if(pDstBlockInfo->m_pBlock->m_pMetadata->CreateAllocationRequest( + m_CurrentFrameIndex, + m_pBlockVector->GetFrameInUseCount(), + m_pBlockVector->GetBufferImageGranularity(), + size, + alignment, + false, // upperAddress + suballocType, + false, // canMakeOtherLost + strategy, + &dstAllocRequest) && + MoveMakesSense( + dstBlockIndex, dstAllocRequest.offset, srcBlockIndex, srcOffset)) + { + VMA_ASSERT(dstAllocRequest.itemsToMakeLostCount == 0); + + // Reached limit on number of allocations or bytes to move. 
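+                // Returning VK_SUCCESS rather than an error keeps the moves gathered so
+                // far valid; the caller applies them and can run another round or another
+                // defragmentation call to continue.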
+                if((m_AllocationsMoved + 1 > maxAllocationsToMove) ||
+                    (m_BytesMoved + size > maxBytesToMove))
+                {
+                    return VK_SUCCESS;
+                }
+
+                VmaDefragmentationMove move = {};
+                move.srcBlockIndex = pSrcBlockInfo->m_OriginalBlockIndex;
+                move.dstBlockIndex = pDstBlockInfo->m_OriginalBlockIndex;
+                move.srcOffset = srcOffset;
+                move.dstOffset = dstAllocRequest.offset;
+                move.size = size;
+                move.hAllocation = allocInfo.m_hAllocation;
+                move.pSrcBlock = pSrcBlockInfo->m_pBlock;
+                move.pDstBlock = pDstBlockInfo->m_pBlock;
+
+                moves.push_back(move);
+
+                pDstBlockInfo->m_pBlock->m_pMetadata->Alloc(
+                    dstAllocRequest,
+                    suballocType,
+                    size,
+                    allocInfo.m_hAllocation);
+
+                if(freeOldAllocations)
+                {
+                    pSrcBlockInfo->m_pBlock->m_pMetadata->FreeAtOffset(srcOffset);
+                    allocInfo.m_hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlockInfo->m_pBlock, dstAllocRequest.offset);
+                }
+
+                if(allocInfo.m_pChanged != VMA_NULL)
+                {
+                    *allocInfo.m_pChanged = VK_TRUE;
+                }
+
+                ++m_AllocationsMoved;
+                m_BytesMoved += size;
+
+                VmaVectorRemove(pSrcBlockInfo->m_Allocations, srcAllocIndex);
+
+                break;
+            }
+        }
+
+        // If not processed, this allocInfo remains in pBlockInfo->m_Allocations for next round.
+
+        if(srcAllocIndex > 0)
+        {
+            --srcAllocIndex;
+        }
+        else
+        {
+            if(srcBlockIndex > 0)
+            {
+                --srcBlockIndex;
+                srcAllocIndex = SIZE_MAX;
+            }
+            else
+            {
+                return VK_SUCCESS;
+            }
+        }
+    }
+}
+
+size_t VmaDefragmentationAlgorithm_Generic::CalcBlocksWithNonMovableCount() const
+{
+    size_t result = 0;
+    for(size_t i = 0; i < m_Blocks.size(); ++i)
+    {
+        if(m_Blocks[i]->m_HasNonMovableAllocations)
+        {
+            ++result;
+        }
+    }
+    return result;
+}
+
+VkResult VmaDefragmentationAlgorithm_Generic::Defragment(
+    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+    VkDeviceSize maxBytesToMove,
+    uint32_t maxAllocationsToMove,
+    VmaDefragmentationFlags flags)
+{
+    if(!m_AllAllocations && m_AllocationCount == 0)
+    {
+        return VK_SUCCESS;
+    }
+
+    const size_t blockCount = m_Blocks.size();
+    for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
+    {
+        BlockInfo* pBlockInfo = m_Blocks[blockIndex];
+
+        if(m_AllAllocations)
+        {
+            VmaBlockMetadata_Generic* pMetadata = (VmaBlockMetadata_Generic*)pBlockInfo->m_pBlock->m_pMetadata;
+            for(VmaSuballocationList::const_iterator it = pMetadata->m_Suballocations.begin();
+                it != pMetadata->m_Suballocations.end();
+                ++it)
+            {
+                if(it->type != VMA_SUBALLOCATION_TYPE_FREE)
+                {
+                    AllocationInfo allocInfo = AllocationInfo(it->hAllocation, VMA_NULL);
+                    pBlockInfo->m_Allocations.push_back(allocInfo);
+                }
+            }
+        }
+
+        pBlockInfo->CalcHasNonMovableAllocations();
+
+        // This is a choice based on research.
+        // Option 1:
+        pBlockInfo->SortAllocationsByOffsetDescending();
+        // Option 2:
+        //pBlockInfo->SortAllocationsBySizeDescending();
+    }
+
+    // Sort m_Blocks this time by the main criterion, from most "destination" to most "source" blocks.
+    VMA_SORT(m_Blocks.begin(), m_Blocks.end(), BlockInfoCompareMoveDestination());
+
+    // This is a choice based on research.
+    const uint32_t roundCount = 2;
+
+    // Execute defragmentation rounds (the main part).
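+    // Two rounds appear to be an empirical choice: moves from the first round free up
+    // space in "source" blocks, which can give the second round better destinations
+    // for allocations that could not be placed earlier.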
+    VkResult result = VK_SUCCESS;
+    for(uint32_t round = 0; (round < roundCount) && (result == VK_SUCCESS); ++round)
+    {
+        result = DefragmentRound(moves, maxBytesToMove, maxAllocationsToMove, !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL));
+    }
+
+    return result;
+}
+
+bool VmaDefragmentationAlgorithm_Generic::MoveMakesSense(
+    size_t dstBlockIndex, VkDeviceSize dstOffset,
+    size_t srcBlockIndex, VkDeviceSize srcOffset)
+{
+    if(dstBlockIndex < srcBlockIndex)
+    {
+        return true;
+    }
+    if(dstBlockIndex > srcBlockIndex)
+    {
+        return false;
+    }
+    if(dstOffset < srcOffset)
+    {
+        return true;
+    }
+    return false;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaDefragmentationAlgorithm_Fast
+
+VmaDefragmentationAlgorithm_Fast::VmaDefragmentationAlgorithm_Fast(
+    VmaAllocator hAllocator,
+    VmaBlockVector* pBlockVector,
+    uint32_t currentFrameIndex,
+    bool overlappingMoveSupported) :
+    VmaDefragmentationAlgorithm(hAllocator, pBlockVector, currentFrameIndex),
+    m_OverlappingMoveSupported(overlappingMoveSupported),
+    m_AllocationCount(0),
+    m_AllAllocations(false),
+    m_BytesMoved(0),
+    m_AllocationsMoved(0),
+    m_BlockInfos(VmaStlAllocator<BlockInfo>(hAllocator->GetAllocationCallbacks()))
+{
+    VMA_ASSERT(VMA_DEBUG_MARGIN == 0);
+}
+
+VmaDefragmentationAlgorithm_Fast::~VmaDefragmentationAlgorithm_Fast()
+{
+}
+
+VkResult VmaDefragmentationAlgorithm_Fast::Defragment(
+    VmaVector< VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove> >& moves,
+    VkDeviceSize maxBytesToMove,
+    uint32_t maxAllocationsToMove,
+    VmaDefragmentationFlags flags)
+{
+    VMA_ASSERT(m_AllAllocations || m_pBlockVector->CalcAllocationCount() == m_AllocationCount);
+
+    const size_t blockCount = m_pBlockVector->GetBlockCount();
+    if(blockCount == 0 || maxBytesToMove == 0 || maxAllocationsToMove == 0)
+    {
+        return VK_SUCCESS;
+    }
+
+    PreprocessMetadata();
+
+    // Sort blocks in order from most destination.
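+    // Ascending sort by sum of free size: the fullest blocks come first and act as
+    // destinations, while the emptiest blocks end up last, get drained, and can then
+    // be released by FreeEmptyBlocks().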
+ + m_BlockInfos.resize(blockCount); + for(size_t i = 0; i < blockCount; ++i) + { + m_BlockInfos[i].origBlockIndex = i; + } + + VMA_SORT(m_BlockInfos.begin(), m_BlockInfos.end(), [this](const BlockInfo& lhs, const BlockInfo& rhs) -> bool { + return m_pBlockVector->GetBlock(lhs.origBlockIndex)->m_pMetadata->GetSumFreeSize() < + m_pBlockVector->GetBlock(rhs.origBlockIndex)->m_pMetadata->GetSumFreeSize(); + }); + + // THE MAIN ALGORITHM + + FreeSpaceDatabase freeSpaceDb; + + size_t dstBlockInfoIndex = 0; + size_t dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; + VmaDeviceMemoryBlock* pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); + VmaBlockMetadata_Generic* pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata; + VkDeviceSize dstBlockSize = pDstMetadata->GetSize(); + VkDeviceSize dstOffset = 0; + + bool end = false; + for(size_t srcBlockInfoIndex = 0; !end && srcBlockInfoIndex < blockCount; ++srcBlockInfoIndex) + { + const size_t srcOrigBlockIndex = m_BlockInfos[srcBlockInfoIndex].origBlockIndex; + VmaDeviceMemoryBlock* const pSrcBlock = m_pBlockVector->GetBlock(srcOrigBlockIndex); + VmaBlockMetadata_Generic* const pSrcMetadata = (VmaBlockMetadata_Generic*)pSrcBlock->m_pMetadata; + for(VmaSuballocationList::iterator srcSuballocIt = pSrcMetadata->m_Suballocations.begin(); + !end && srcSuballocIt != pSrcMetadata->m_Suballocations.end(); ) + { + VmaAllocation_T* const pAlloc = srcSuballocIt->hAllocation; + const VkDeviceSize srcAllocAlignment = pAlloc->GetAlignment(); + const VkDeviceSize srcAllocSize = srcSuballocIt->size; + if(m_AllocationsMoved == maxAllocationsToMove || + m_BytesMoved + srcAllocSize > maxBytesToMove) + { + end = true; + break; + } + const VkDeviceSize srcAllocOffset = srcSuballocIt->offset; + + VmaDefragmentationMove move = {}; + // Try to place it in one of free spaces from the database. + size_t freeSpaceInfoIndex; + VkDeviceSize dstAllocOffset; + if(freeSpaceDb.Fetch(srcAllocAlignment, srcAllocSize, + freeSpaceInfoIndex, dstAllocOffset)) + { + size_t freeSpaceOrigBlockIndex = m_BlockInfos[freeSpaceInfoIndex].origBlockIndex; + VmaDeviceMemoryBlock* pFreeSpaceBlock = m_pBlockVector->GetBlock(freeSpaceOrigBlockIndex); + VmaBlockMetadata_Generic* pFreeSpaceMetadata = (VmaBlockMetadata_Generic*)pFreeSpaceBlock->m_pMetadata; + + // Same block + if(freeSpaceInfoIndex == srcBlockInfoIndex) + { + VMA_ASSERT(dstAllocOffset <= srcAllocOffset); + + // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset. + + VmaSuballocation suballoc = *srcSuballocIt; + suballoc.offset = dstAllocOffset; + suballoc.hAllocation->ChangeOffset(dstAllocOffset); + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + + VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; + ++nextSuballocIt; + pSrcMetadata->m_Suballocations.erase(srcSuballocIt); + srcSuballocIt = nextSuballocIt; + + InsertSuballoc(pFreeSpaceMetadata, suballoc); + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = freeSpaceOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + // Different block + else + { + // MOVE OPTION 2: Move the allocation to a different block. 
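+                    // The free-space database points into an earlier block whose layout is
+                    // already final, so the suballocation is inserted into that block's
+                    // metadata in offset order via InsertSuballoc rather than appended.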
+ + VMA_ASSERT(freeSpaceInfoIndex < srcBlockInfoIndex); + + VmaSuballocation suballoc = *srcSuballocIt; + suballoc.offset = dstAllocOffset; + suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pFreeSpaceBlock, dstAllocOffset); + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + + VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; + ++nextSuballocIt; + pSrcMetadata->m_Suballocations.erase(srcSuballocIt); + srcSuballocIt = nextSuballocIt; + + InsertSuballoc(pFreeSpaceMetadata, suballoc); + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = freeSpaceOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + } + else + { + dstAllocOffset = VmaAlignUp(dstOffset, srcAllocAlignment); + + // If the allocation doesn't fit before the end of dstBlock, forward to next block. + while(dstBlockInfoIndex < srcBlockInfoIndex && + dstAllocOffset + srcAllocSize > dstBlockSize) + { + // But before that, register remaining free space at the end of dst block. + freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, dstBlockSize - dstOffset); + + ++dstBlockInfoIndex; + dstOrigBlockIndex = m_BlockInfos[dstBlockInfoIndex].origBlockIndex; + pDstBlock = m_pBlockVector->GetBlock(dstOrigBlockIndex); + pDstMetadata = (VmaBlockMetadata_Generic*)pDstBlock->m_pMetadata; + dstBlockSize = pDstMetadata->GetSize(); + dstOffset = 0; + dstAllocOffset = 0; + } + + // Same block + if(dstBlockInfoIndex == srcBlockInfoIndex) + { + VMA_ASSERT(dstAllocOffset <= srcAllocOffset); + + const bool overlap = dstAllocOffset + srcAllocSize > srcAllocOffset; + + bool skipOver = overlap; + if(overlap && m_OverlappingMoveSupported && dstAllocOffset < srcAllocOffset) + { + // If destination and source place overlap, skip if it would move it + // by only < 1/64 of its size. + skipOver = (srcAllocOffset - dstAllocOffset) * 64 < srcAllocSize; + } + + if(skipOver) + { + freeSpaceDb.Register(dstBlockInfoIndex, dstOffset, srcAllocOffset - dstOffset); + + dstOffset = srcAllocOffset + srcAllocSize; + ++srcSuballocIt; + } + // MOVE OPTION 1: Move the allocation inside the same block by decreasing offset. + else + { + srcSuballocIt->offset = dstAllocOffset; + srcSuballocIt->hAllocation->ChangeOffset(dstAllocOffset); + dstOffset = dstAllocOffset + srcAllocSize; + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + ++srcSuballocIt; + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = dstOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + } + // Different block + else + { + // MOVE OPTION 2: Move the allocation to a different block. 
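+                    // Unlike option 1, the suballocation is detached from the source block's
+                    // metadata and appended to the destination block's list; the recorded
+                    // move later drives the actual data copy (memmove or vkCmdCopyBuffer).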
+ + VMA_ASSERT(dstBlockInfoIndex < srcBlockInfoIndex); + VMA_ASSERT(dstAllocOffset + srcAllocSize <= dstBlockSize); + + VmaSuballocation suballoc = *srcSuballocIt; + suballoc.offset = dstAllocOffset; + suballoc.hAllocation->ChangeBlockAllocation(m_hAllocator, pDstBlock, dstAllocOffset); + dstOffset = dstAllocOffset + srcAllocSize; + m_BytesMoved += srcAllocSize; + ++m_AllocationsMoved; + + VmaSuballocationList::iterator nextSuballocIt = srcSuballocIt; + ++nextSuballocIt; + pSrcMetadata->m_Suballocations.erase(srcSuballocIt); + srcSuballocIt = nextSuballocIt; + + pDstMetadata->m_Suballocations.push_back(suballoc); + + move.srcBlockIndex = srcOrigBlockIndex; + move.dstBlockIndex = dstOrigBlockIndex; + move.srcOffset = srcAllocOffset; + move.dstOffset = dstAllocOffset; + move.size = srcAllocSize; + + moves.push_back(move); + } + } + } + } + + m_BlockInfos.clear(); + + PostprocessMetadata(); + + return VK_SUCCESS; +} + +void VmaDefragmentationAlgorithm_Fast::PreprocessMetadata() +{ + const size_t blockCount = m_pBlockVector->GetBlockCount(); + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + VmaBlockMetadata_Generic* const pMetadata = + (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; + pMetadata->m_FreeCount = 0; + pMetadata->m_SumFreeSize = pMetadata->GetSize(); + pMetadata->m_FreeSuballocationsBySize.clear(); + for(VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin(); + it != pMetadata->m_Suballocations.end(); ) + { + if(it->type == VMA_SUBALLOCATION_TYPE_FREE) + { + VmaSuballocationList::iterator nextIt = it; + ++nextIt; + pMetadata->m_Suballocations.erase(it); + it = nextIt; + } + else + { + ++it; + } + } + } +} + +void VmaDefragmentationAlgorithm_Fast::PostprocessMetadata() +{ + const size_t blockCount = m_pBlockVector->GetBlockCount(); + for(size_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + VmaBlockMetadata_Generic* const pMetadata = + (VmaBlockMetadata_Generic*)m_pBlockVector->GetBlock(blockIndex)->m_pMetadata; + const VkDeviceSize blockSize = pMetadata->GetSize(); + + // No allocations in this block - entire area is free. + if(pMetadata->m_Suballocations.empty()) + { + pMetadata->m_FreeCount = 1; + //pMetadata->m_SumFreeSize is already set to blockSize. + VmaSuballocation suballoc = { + 0, // offset + blockSize, // size + VMA_NULL, // hAllocation + VMA_SUBALLOCATION_TYPE_FREE }; + pMetadata->m_Suballocations.push_back(suballoc); + pMetadata->RegisterFreeSuballocation(pMetadata->m_Suballocations.begin()); + } + // There are some allocations in this block. + else + { + VkDeviceSize offset = 0; + VmaSuballocationList::iterator it; + for(it = pMetadata->m_Suballocations.begin(); + it != pMetadata->m_Suballocations.end(); + ++it) + { + VMA_ASSERT(it->type != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(it->offset >= offset); + + // Need to insert preceding free space. + if(it->offset > offset) + { + ++pMetadata->m_FreeCount; + const VkDeviceSize freeSize = it->offset - offset; + VmaSuballocation suballoc = { + offset, // offset + freeSize, // size + VMA_NULL, // hAllocation + VMA_SUBALLOCATION_TYPE_FREE }; + VmaSuballocationList::iterator precedingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc); + if(freeSize >= VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER) + { + pMetadata->m_FreeSuballocationsBySize.push_back(precedingFreeIt); + } + } + + pMetadata->m_SumFreeSize -= it->size; + offset = it->offset + it->size; + } + + // Need to insert trailing free space. 
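+            // Example: with blockSize = 1024 and the last allocation ending at offset 768,
+            // a free suballocation { offset = 768, size = 256 } is appended below.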
+            if(offset < blockSize)
+            {
+                ++pMetadata->m_FreeCount;
+                const VkDeviceSize freeSize = blockSize - offset;
+                VmaSuballocation suballoc = {
+                    offset, // offset
+                    freeSize, // size
+                    VMA_NULL, // hAllocation
+                    VMA_SUBALLOCATION_TYPE_FREE };
+                VMA_ASSERT(it == pMetadata->m_Suballocations.end());
+                VmaSuballocationList::iterator trailingFreeIt = pMetadata->m_Suballocations.insert(it, suballoc);
+                if(freeSize > VMA_MIN_FREE_SUBALLOCATION_SIZE_TO_REGISTER)
+                {
+                    pMetadata->m_FreeSuballocationsBySize.push_back(trailingFreeIt);
+                }
+            }
+
+            VMA_SORT(
+                pMetadata->m_FreeSuballocationsBySize.begin(),
+                pMetadata->m_FreeSuballocationsBySize.end(),
+                VmaSuballocationItemSizeLess());
+        }
+
+        VMA_HEAVY_ASSERT(pMetadata->Validate());
+    }
+}
+
+void VmaDefragmentationAlgorithm_Fast::InsertSuballoc(VmaBlockMetadata_Generic* pMetadata, const VmaSuballocation& suballoc)
+{
+    // TODO: Optimize somehow. Remember iterator instead of searching for it linearly.
+    VmaSuballocationList::iterator it = pMetadata->m_Suballocations.begin();
+    while(it != pMetadata->m_Suballocations.end() &&
+        it->offset < suballoc.offset)
+    {
+        ++it;
+    }
+    pMetadata->m_Suballocations.insert(it, suballoc);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaBlockVectorDefragmentationContext
+
+VmaBlockVectorDefragmentationContext::VmaBlockVectorDefragmentationContext(
+    VmaAllocator hAllocator,
+    VmaPool hCustomPool,
+    VmaBlockVector* pBlockVector,
+    uint32_t currFrameIndex) :
+    res(VK_SUCCESS),
+    mutexLocked(false),
+    blockContexts(VmaStlAllocator<VmaBlockDefragmentationContext>(hAllocator->GetAllocationCallbacks())),
+    defragmentationMoves(VmaStlAllocator<VmaDefragmentationMove>(hAllocator->GetAllocationCallbacks())),
+    defragmentationMovesProcessed(0),
+    defragmentationMovesCommitted(0),
+    hasDefragmentationPlan(0),
+    m_hAllocator(hAllocator),
+    m_hCustomPool(hCustomPool),
+    m_pBlockVector(pBlockVector),
+    m_CurrFrameIndex(currFrameIndex),
+    m_pAlgorithm(VMA_NULL),
+    m_Allocations(VmaStlAllocator<AllocInfo>(hAllocator->GetAllocationCallbacks())),
+    m_AllAllocations(false)
+{
+}
+
+VmaBlockVectorDefragmentationContext::~VmaBlockVectorDefragmentationContext()
+{
+    vma_delete(m_hAllocator, m_pAlgorithm);
+}
+
+void VmaBlockVectorDefragmentationContext::AddAllocation(VmaAllocation hAlloc, VkBool32* pChanged)
+{
+    AllocInfo info = { hAlloc, pChanged };
+    m_Allocations.push_back(info);
+}
+
+void VmaBlockVectorDefragmentationContext::Begin(bool overlappingMoveSupported, VmaDefragmentationFlags flags)
+{
+    const bool allAllocations = m_AllAllocations ||
+        m_Allocations.size() == m_pBlockVector->CalcAllocationCount();
+
+    /********************************
+    HERE IS THE CHOICE OF DEFRAGMENTATION ALGORITHM.
+    ********************************/
+
+    /*
+    Fast algorithm is supported only when certain criteria are met:
+    - VMA_DEBUG_MARGIN is 0.
+    - All allocations in this block vector are movable.
+    - There is no possibility of image/buffer granularity conflict.
+    - The defragmentation is not incremental.
+    */
+    if(VMA_DEBUG_MARGIN == 0 &&
+        allAllocations &&
+        !m_pBlockVector->IsBufferImageGranularityConflictPossible() &&
+        !(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL))
+    {
+        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Fast)(
+            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+    }
+    else
+    {
+        m_pAlgorithm = vma_new(m_hAllocator, VmaDefragmentationAlgorithm_Generic)(
+            m_hAllocator, m_pBlockVector, m_CurrFrameIndex, overlappingMoveSupported);
+    }
+
+    if(allAllocations)
+    {
+        m_pAlgorithm->AddAll();
+    }
+    else
+    {
+        for(size_t i = 0, count = m_Allocations.size(); i < count; ++i)
+        {
+            m_pAlgorithm->AddAllocation(m_Allocations[i].hAlloc, m_Allocations[i].pChanged);
+        }
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaDefragmentationContext
+
+VmaDefragmentationContext_T::VmaDefragmentationContext_T(
+    VmaAllocator hAllocator,
+    uint32_t currFrameIndex,
+    uint32_t flags,
+    VmaDefragmentationStats* pStats) :
+    m_hAllocator(hAllocator),
+    m_CurrFrameIndex(currFrameIndex),
+    m_Flags(flags),
+    m_pStats(pStats),
+    m_CustomPoolContexts(VmaStlAllocator<VmaBlockVectorDefragmentationContext*>(hAllocator->GetAllocationCallbacks()))
+{
+    memset(m_DefaultPoolContexts, 0, sizeof(m_DefaultPoolContexts));
+}
+
+VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
+{
+    for(size_t i = m_CustomPoolContexts.size(); i--; )
+    {
+        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[i];
+        pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
+        vma_delete(m_hAllocator, pBlockVectorCtx);
+    }
+    for(size_t i = m_hAllocator->m_MemProps.memoryTypeCount; i--; )
+    {
+        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[i];
+        if(pBlockVectorCtx)
+        {
+            pBlockVectorCtx->GetBlockVector()->DefragmentationEnd(pBlockVectorCtx, m_Flags, m_pStats);
+            vma_delete(m_hAllocator, pBlockVectorCtx);
+        }
+    }
+}
+
+void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pPools)
+{
+    for(uint32_t poolIndex = 0; poolIndex < poolCount; ++poolIndex)
+    {
+        VmaPool pool = pPools[poolIndex];
+        VMA_ASSERT(pool);
+        // Pools with algorithm other than default are not defragmented.
+        if(pool->m_BlockVector.GetAlgorithm() == 0)
+        {
+            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
+
+            for(size_t i = m_CustomPoolContexts.size(); i--; )
+            {
+                if(m_CustomPoolContexts[i]->GetCustomPool() == pool)
+                {
+                    pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+                    break;
+                }
+            }
+
+            if(!pBlockVectorDefragCtx)
+            {
+                pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+                    m_hAllocator,
+                    pool,
+                    &pool->m_BlockVector,
+                    m_CurrFrameIndex);
+                m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+            }
+
+            pBlockVectorDefragCtx->AddAll();
+        }
+    }
+}
+
+void VmaDefragmentationContext_T::AddAllocations(
+    uint32_t allocationCount,
+    const VmaAllocation* pAllocations,
+    VkBool32* pAllocationsChanged)
+{
+    // Dispatch pAllocations among defragmentators. Create them when necessary.
+    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+    {
+        const VmaAllocation hAlloc = pAllocations[allocIndex];
+        VMA_ASSERT(hAlloc);
+        // DedicatedAlloc cannot be defragmented.
+        if((hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK) &&
+            // Lost allocation cannot be defragmented.
+            (hAlloc->GetLastUseFrameIndex() != VMA_FRAME_INDEX_LOST))
+        {
+            VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
+
+            const VmaPool hAllocPool = hAlloc->GetBlock()->GetParentPool();
+            // This allocation belongs to a custom pool.
+            if(hAllocPool != VK_NULL_HANDLE)
+            {
+                // Pools with algorithm other than default are not defragmented.
+                if(hAllocPool->m_BlockVector.GetAlgorithm() == 0)
+                {
+                    for(size_t i = m_CustomPoolContexts.size(); i--; )
+                    {
+                        if(m_CustomPoolContexts[i]->GetCustomPool() == hAllocPool)
+                        {
+                            pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+                            break;
+                        }
+                    }
+                    if(!pBlockVectorDefragCtx)
+                    {
+                        pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+                            m_hAllocator,
+                            hAllocPool,
+                            &hAllocPool->m_BlockVector,
+                            m_CurrFrameIndex);
+                        m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+                    }
+                }
+            }
+            // This allocation belongs to a default pool.
+            else
+            {
+                const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
+                pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
+                if(!pBlockVectorDefragCtx)
+                {
+                    pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+                        m_hAllocator,
+                        VMA_NULL, // hCustomPool
+                        m_hAllocator->m_pBlockVectors[memTypeIndex],
+                        m_CurrFrameIndex);
+                    m_DefaultPoolContexts[memTypeIndex] = pBlockVectorDefragCtx;
+                }
+            }
+
+            if(pBlockVectorDefragCtx)
+            {
+                VkBool32* const pChanged = (pAllocationsChanged != VMA_NULL) ?
+                    &pAllocationsChanged[allocIndex] : VMA_NULL;
+                pBlockVectorDefragCtx->AddAllocation(hAlloc, pChanged);
+            }
+        }
+    }
+}
+
+VkResult VmaDefragmentationContext_T::Defragment(
+    VkDeviceSize maxCpuBytesToMove, uint32_t maxCpuAllocationsToMove,
+    VkDeviceSize maxGpuBytesToMove, uint32_t maxGpuAllocationsToMove,
+    VkCommandBuffer commandBuffer, VmaDefragmentationStats* pStats, VmaDefragmentationFlags flags)
+{
+    if(pStats)
+    {
+        memset(pStats, 0, sizeof(VmaDefragmentationStats));
+    }
+
+    if(flags & VMA_DEFRAGMENTATION_FLAG_INCREMENTAL)
+    {
+        // For incremental defragmentations, we just earmark how much we can move.
+        // The real meat is in the defragmentation steps.
+        m_MaxCpuBytesToMove = maxCpuBytesToMove;
+        m_MaxCpuAllocationsToMove = maxCpuAllocationsToMove;
+
+        m_MaxGpuBytesToMove = maxGpuBytesToMove;
+        m_MaxGpuAllocationsToMove = maxGpuAllocationsToMove;
+
+        if(m_MaxCpuBytesToMove == 0 && m_MaxCpuAllocationsToMove == 0 &&
+            m_MaxGpuBytesToMove == 0 && m_MaxGpuAllocationsToMove == 0)
+            return VK_SUCCESS;
+
+        return VK_NOT_READY;
+    }
+
+    if(commandBuffer == VK_NULL_HANDLE)
+    {
+        maxGpuBytesToMove = 0;
+        maxGpuAllocationsToMove = 0;
+    }
+
+    VkResult res = VK_SUCCESS;
+
+    // Process default pools.
+    for(uint32_t memTypeIndex = 0;
+        memTypeIndex < m_hAllocator->GetMemoryTypeCount() && res >= VK_SUCCESS;
+        ++memTypeIndex)
+    {
+        VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex];
+        if(pBlockVectorCtx)
+        {
+            VMA_ASSERT(pBlockVectorCtx->GetBlockVector());
+            pBlockVectorCtx->GetBlockVector()->Defragment(
+                pBlockVectorCtx,
+                pStats, flags,
+                maxCpuBytesToMove, maxCpuAllocationsToMove,
+                maxGpuBytesToMove, maxGpuAllocationsToMove,
+                commandBuffer);
+            if(pBlockVectorCtx->res != VK_SUCCESS)
+            {
+                res = pBlockVectorCtx->res;
+            }
+        }
+    }
+
+    // Process custom pools.
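+    // The loop condition res >= VK_SUCCESS admits positive status codes such as
+    // VK_NOT_READY (used by the GPU path to signal pending work), so the remaining
+    // pools are still processed; only negative error codes stop the iteration.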
+ for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount && res >= VK_SUCCESS; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext* pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + pStats, flags, + maxCpuBytesToMove, maxCpuAllocationsToMove, + maxGpuBytesToMove, maxGpuAllocationsToMove, + commandBuffer); + if(pBlockVectorCtx->res != VK_SUCCESS) + { + res = pBlockVectorCtx->res; + } + } + + return res; +} + +VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassInfo* pInfo) +{ + VmaDefragmentationPassMoveInfo* pCurrentMove = pInfo->pMoves; + uint32_t movesLeft = pInfo->moveCount; + + // Process default pools. + for(uint32_t memTypeIndex = 0; + memTypeIndex < m_hAllocator->GetMemoryTypeCount(); + ++memTypeIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; + if(pBlockVectorCtx) + { + VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + m_pStats, m_Flags, + m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove, + m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove, + VK_NULL_HANDLE); + + if(pBlockVectorCtx->res < VK_SUCCESS) + continue; + + pBlockVectorCtx->hasDefragmentationPlan = true; + } + + const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations( + pBlockVectorCtx, + pCurrentMove, movesLeft); + + movesLeft -= processed; + pCurrentMove += processed; + } + } + + // Process custom pools. + for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + pBlockVectorCtx->GetBlockVector()->Defragment( + pBlockVectorCtx, + m_pStats, m_Flags, + m_MaxCpuBytesToMove, m_MaxCpuAllocationsToMove, + m_MaxGpuBytesToMove, m_MaxGpuAllocationsToMove, + VK_NULL_HANDLE); + + if(pBlockVectorCtx->res < VK_SUCCESS) + continue; + + pBlockVectorCtx->hasDefragmentationPlan = true; + } + + const uint32_t processed = pBlockVectorCtx->GetBlockVector()->ProcessDefragmentations( + pBlockVectorCtx, + pCurrentMove, movesLeft); + + movesLeft -= processed; + pCurrentMove += processed; + } + + pInfo->moveCount = pInfo->moveCount - movesLeft; + + return VK_SUCCESS; +} +VkResult VmaDefragmentationContext_T::DefragmentPassEnd() +{ + VkResult res = VK_SUCCESS; + + // Process default pools. + for(uint32_t memTypeIndex = 0; + memTypeIndex < m_hAllocator->GetMemoryTypeCount(); + ++memTypeIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_DefaultPoolContexts[memTypeIndex]; + if(pBlockVectorCtx) + { + VMA_ASSERT(pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + res = VK_NOT_READY; + continue; + } + + pBlockVectorCtx->GetBlockVector()->CommitDefragmentations( + pBlockVectorCtx, m_pStats); + + if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted) + res = VK_NOT_READY; + } + } + + // Process custom pools. 
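+    // Same commit logic as for the default pools above: any context whose moves are
+    // not yet fully committed leaves res at VK_NOT_READY, telling the caller that
+    // another defragmentation pass is needed.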
+ for(size_t customCtxIndex = 0, customCtxCount = m_CustomPoolContexts.size(); + customCtxIndex < customCtxCount; + ++customCtxIndex) + { + VmaBlockVectorDefragmentationContext *pBlockVectorCtx = m_CustomPoolContexts[customCtxIndex]; + VMA_ASSERT(pBlockVectorCtx && pBlockVectorCtx->GetBlockVector()); + + if(!pBlockVectorCtx->hasDefragmentationPlan) + { + res = VK_NOT_READY; + continue; + } + + pBlockVectorCtx->GetBlockVector()->CommitDefragmentations( + pBlockVectorCtx, m_pStats); + + if(pBlockVectorCtx->defragmentationMoves.size() != pBlockVectorCtx->defragmentationMovesCommitted) + res = VK_NOT_READY; + } + + return res; +} + +//////////////////////////////////////////////////////////////////////////////// +// VmaRecorder + +#if VMA_RECORDING_ENABLED + +VmaRecorder::VmaRecorder() : + m_UseMutex(true), + m_Flags(0), + m_File(VMA_NULL), + m_RecordingStartTime(std::chrono::high_resolution_clock::now()) +{ +} + +VkResult VmaRecorder::Init(const VmaRecordSettings& settings, bool useMutex) +{ + m_UseMutex = useMutex; + m_Flags = settings.flags; + +#if defined(_WIN32) + // Open file for writing. + errno_t err = fopen_s(&m_File, settings.pFilePath, "wb"); + + if(err != 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } +#else + // Open file for writing. + m_File = fopen(settings.pFilePath, "wb"); + + if(m_File == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } +#endif + + // Write header. + fprintf(m_File, "%s\n", "Vulkan Memory Allocator,Calls recording"); + fprintf(m_File, "%s\n", "1,8"); + + return VK_SUCCESS; +} + +VmaRecorder::~VmaRecorder() +{ + if(m_File != VMA_NULL) + { + fclose(m_File); + } +} + +void VmaRecorder::RecordCreateAllocator(uint32_t frameIndex) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaCreateAllocator\n", callParams.threadId, callParams.time, frameIndex); + Flush(); +} + +void VmaRecorder::RecordDestroyAllocator(uint32_t frameIndex) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyAllocator\n", callParams.threadId, callParams.time, frameIndex); + Flush(); +} + +void VmaRecorder::RecordCreatePool(uint32_t frameIndex, const VmaPoolCreateInfo& createInfo, VmaPool pool) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaCreatePool,%u,%u,%llu,%llu,%llu,%u,%p\n", callParams.threadId, callParams.time, frameIndex, + createInfo.memoryTypeIndex, + createInfo.flags, + createInfo.blockSize, + (uint64_t)createInfo.minBlockCount, + (uint64_t)createInfo.maxBlockCount, + createInfo.frameInUseCount, + pool); + Flush(); +} + +void VmaRecorder::RecordDestroyPool(uint32_t frameIndex, VmaPool pool) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyPool,%p\n", callParams.threadId, callParams.time, frameIndex, + pool); + Flush(); +} + +void VmaRecorder::RecordAllocateMemory(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemory,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + 
vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordAllocateMemoryPages(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + const VmaAllocationCreateInfo& createInfo, + uint64_t allocationCount, + const VmaAllocation* pAllocations) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryPages,%llu,%llu,%u,%u,%u,%u,%u,%u,%p,", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool); + PrintPointerList(allocationCount, pAllocations); + fprintf(m_File, ",%s\n", userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordAllocateMemoryForBuffer(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForBuffer,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + requiresDedicatedAllocation ? 1 : 0, + prefersDedicatedAllocation ? 1 : 0, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordAllocateMemoryForImage(uint32_t frameIndex, + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + const VmaAllocationCreateInfo& createInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(createInfo.flags, createInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaAllocateMemoryForImage,%llu,%llu,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + vkMemReq.size, + vkMemReq.alignment, + vkMemReq.memoryTypeBits, + requiresDedicatedAllocation ? 1 : 0, + prefersDedicatedAllocation ? 
1 : 0, + createInfo.flags, + createInfo.usage, + createInfo.requiredFlags, + createInfo.preferredFlags, + createInfo.memoryTypeBits, + createInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordFreeMemory(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaFreeMemory,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordFreeMemoryPages(uint32_t frameIndex, + uint64_t allocationCount, + const VmaAllocation* pAllocations) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaFreeMemoryPages,", callParams.threadId, callParams.time, frameIndex); + PrintPointerList(allocationCount, pAllocations); + fprintf(m_File, "\n"); + Flush(); +} + +void VmaRecorder::RecordSetAllocationUserData(uint32_t frameIndex, + VmaAllocation allocation, + const void* pUserData) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr( + allocation->IsUserDataString() ? VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT : 0, + pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaSetAllocationUserData,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordCreateLostAllocation(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaCreateLostAllocation,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordMapMemory(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaMapMemory,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordUnmapMemory(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaUnmapMemory,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordFlushAllocation(uint32_t frameIndex, + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaFlushAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex, + allocation, + offset, + size); + Flush(); +} + +void VmaRecorder::RecordInvalidateAllocation(uint32_t frameIndex, + VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaInvalidateAllocation,%p,%llu,%llu\n", callParams.threadId, callParams.time, frameIndex, + allocation, + offset, + size); + Flush(); +} + +void VmaRecorder::RecordCreateBuffer(uint32_t frameIndex, + const VkBufferCreateInfo& bufCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock 
lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaCreateBuffer,%u,%llu,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + bufCreateInfo.flags, + bufCreateInfo.size, + bufCreateInfo.usage, + bufCreateInfo.sharingMode, + allocCreateInfo.flags, + allocCreateInfo.usage, + allocCreateInfo.requiredFlags, + allocCreateInfo.preferredFlags, + allocCreateInfo.memoryTypeBits, + allocCreateInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordCreateImage(uint32_t frameIndex, + const VkImageCreateInfo& imageCreateInfo, + const VmaAllocationCreateInfo& allocCreateInfo, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + UserDataString userDataStr(allocCreateInfo.flags, allocCreateInfo.pUserData); + fprintf(m_File, "%u,%.3f,%u,vmaCreateImage,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%u,%p,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + imageCreateInfo.flags, + imageCreateInfo.imageType, + imageCreateInfo.format, + imageCreateInfo.extent.width, + imageCreateInfo.extent.height, + imageCreateInfo.extent.depth, + imageCreateInfo.mipLevels, + imageCreateInfo.arrayLayers, + imageCreateInfo.samples, + imageCreateInfo.tiling, + imageCreateInfo.usage, + imageCreateInfo.sharingMode, + imageCreateInfo.initialLayout, + allocCreateInfo.flags, + allocCreateInfo.usage, + allocCreateInfo.requiredFlags, + allocCreateInfo.preferredFlags, + allocCreateInfo.memoryTypeBits, + allocCreateInfo.pool, + allocation, + userDataStr.GetString()); + Flush(); +} + +void VmaRecorder::RecordDestroyBuffer(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyBuffer,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordDestroyImage(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDestroyImage,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordTouchAllocation(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaTouchAllocation,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordGetAllocationInfo(uint32_t frameIndex, + VmaAllocation allocation) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaGetAllocationInfo,%p\n", callParams.threadId, callParams.time, frameIndex, + allocation); + Flush(); +} + +void VmaRecorder::RecordMakePoolAllocationsLost(uint32_t frameIndex, + VmaPool pool) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaMakePoolAllocationsLost,%p\n", callParams.threadId, callParams.time, frameIndex, + pool); + Flush(); +} + +void VmaRecorder::RecordDefragmentationBegin(uint32_t frameIndex, + const VmaDefragmentationInfo2& info, + VmaDefragmentationContext ctx) +{ + CallParams callParams; + 
GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationBegin,%u,", callParams.threadId, callParams.time, frameIndex, + info.flags); + PrintPointerList(info.allocationCount, info.pAllocations); + fprintf(m_File, ","); + PrintPointerList(info.poolCount, info.pPools); + fprintf(m_File, ",%llu,%u,%llu,%u,%p,%p\n", + info.maxCpuBytesToMove, + info.maxCpuAllocationsToMove, + info.maxGpuBytesToMove, + info.maxGpuAllocationsToMove, + info.commandBuffer, + ctx); + Flush(); +} + +void VmaRecorder::RecordDefragmentationEnd(uint32_t frameIndex, + VmaDefragmentationContext ctx) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaDefragmentationEnd,%p\n", callParams.threadId, callParams.time, frameIndex, + ctx); + Flush(); +} + +void VmaRecorder::RecordSetPoolName(uint32_t frameIndex, + VmaPool pool, + const char* name) +{ + CallParams callParams; + GetBasicParams(callParams); + + VmaMutexLock lock(m_FileMutex, m_UseMutex); + fprintf(m_File, "%u,%.3f,%u,vmaSetPoolName,%p,%s\n", callParams.threadId, callParams.time, frameIndex, + pool, name != VMA_NULL ? name : ""); + Flush(); +} + +VmaRecorder::UserDataString::UserDataString(VmaAllocationCreateFlags allocFlags, const void* pUserData) +{ + if(pUserData != VMA_NULL) + { + if((allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0) + { + m_Str = (const char*)pUserData; + } + else + { + // If VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is not specified, convert the string's memory address to a string and store it. + snprintf(m_PtrStr, 17, "%p", pUserData); + m_Str = m_PtrStr; + } + } + else + { + m_Str = ""; + } +} + +void VmaRecorder::WriteConfiguration( + const VkPhysicalDeviceProperties& devProps, + const VkPhysicalDeviceMemoryProperties& memProps, + uint32_t vulkanApiVersion, + bool dedicatedAllocationExtensionEnabled, + bool bindMemory2ExtensionEnabled, + bool memoryBudgetExtensionEnabled, + bool deviceCoherentMemoryExtensionEnabled) +{ + fprintf(m_File, "Config,Begin\n"); + + fprintf(m_File, "VulkanApiVersion,%u,%u\n", VK_VERSION_MAJOR(vulkanApiVersion), VK_VERSION_MINOR(vulkanApiVersion)); + + fprintf(m_File, "PhysicalDevice,apiVersion,%u\n", devProps.apiVersion); + fprintf(m_File, "PhysicalDevice,driverVersion,%u\n", devProps.driverVersion); + fprintf(m_File, "PhysicalDevice,vendorID,%u\n", devProps.vendorID); + fprintf(m_File, "PhysicalDevice,deviceID,%u\n", devProps.deviceID); + fprintf(m_File, "PhysicalDevice,deviceType,%u\n", devProps.deviceType); + fprintf(m_File, "PhysicalDevice,deviceName,%s\n", devProps.deviceName); + + fprintf(m_File, "PhysicalDeviceLimits,maxMemoryAllocationCount,%u\n", devProps.limits.maxMemoryAllocationCount); + fprintf(m_File, "PhysicalDeviceLimits,bufferImageGranularity,%llu\n", devProps.limits.bufferImageGranularity); + fprintf(m_File, "PhysicalDeviceLimits,nonCoherentAtomSize,%llu\n", devProps.limits.nonCoherentAtomSize); + + fprintf(m_File, "PhysicalDeviceMemory,HeapCount,%u\n", memProps.memoryHeapCount); + for(uint32_t i = 0; i < memProps.memoryHeapCount; ++i) + { + fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,size,%llu\n", i, memProps.memoryHeaps[i].size); + fprintf(m_File, "PhysicalDeviceMemory,Heap,%u,flags,%u\n", i, memProps.memoryHeaps[i].flags); + } + fprintf(m_File, "PhysicalDeviceMemory,TypeCount,%u\n", memProps.memoryTypeCount); + for(uint32_t i = 0; i < memProps.memoryTypeCount; ++i) + { + fprintf(m_File, 
"PhysicalDeviceMemory,Type,%u,heapIndex,%u\n", i, memProps.memoryTypes[i].heapIndex); + fprintf(m_File, "PhysicalDeviceMemory,Type,%u,propertyFlags,%u\n", i, memProps.memoryTypes[i].propertyFlags); + } + + fprintf(m_File, "Extension,VK_KHR_dedicated_allocation,%u\n", dedicatedAllocationExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_KHR_bind_memory2,%u\n", bindMemory2ExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_EXT_memory_budget,%u\n", memoryBudgetExtensionEnabled ? 1 : 0); + fprintf(m_File, "Extension,VK_AMD_device_coherent_memory,%u\n", deviceCoherentMemoryExtensionEnabled ? 1 : 0); + + fprintf(m_File, "Macro,VMA_DEBUG_ALWAYS_DEDICATED_MEMORY,%u\n", VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_ALIGNMENT,%llu\n", (VkDeviceSize)VMA_DEBUG_ALIGNMENT); + fprintf(m_File, "Macro,VMA_DEBUG_MARGIN,%llu\n", (VkDeviceSize)VMA_DEBUG_MARGIN); + fprintf(m_File, "Macro,VMA_DEBUG_INITIALIZE_ALLOCATIONS,%u\n", VMA_DEBUG_INITIALIZE_ALLOCATIONS ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_DETECT_CORRUPTION,%u\n", VMA_DEBUG_DETECT_CORRUPTION ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_GLOBAL_MUTEX,%u\n", VMA_DEBUG_GLOBAL_MUTEX ? 1 : 0); + fprintf(m_File, "Macro,VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY,%llu\n", (VkDeviceSize)VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY); + fprintf(m_File, "Macro,VMA_SMALL_HEAP_MAX_SIZE,%llu\n", (VkDeviceSize)VMA_SMALL_HEAP_MAX_SIZE); + fprintf(m_File, "Macro,VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE,%llu\n", (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + + fprintf(m_File, "Config,End\n"); +} + +void VmaRecorder::GetBasicParams(CallParams& outParams) +{ + #if defined(_WIN32) + outParams.threadId = GetCurrentThreadId(); + #else + // Use C++11 features to get thread id and convert it to uint32_t. + // There is room for optimization since sstream is quite slow. + // Is there a better way to convert std::this_thread::get_id() to uint32_t? + std::thread::id thread_id = std::this_thread::get_id(); + std::stringstream thread_id_to_string_converter; + thread_id_to_string_converter << thread_id; + std::string thread_id_as_string = thread_id_to_string_converter.str(); + outParams.threadId = static_cast(std::stoi(thread_id_as_string.c_str())); + #endif + + auto current_time = std::chrono::high_resolution_clock::now(); + + outParams.time = std::chrono::duration(current_time - m_RecordingStartTime).count(); +} + +void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems) +{ + if(count) + { + fprintf(m_File, "%p", pItems[0]); + for(uint64_t i = 1; i < count; ++i) + { + fprintf(m_File, " %p", pItems[i]); + } + } +} + +void VmaRecorder::Flush() +{ + if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0) + { + fflush(m_File); + } +} + +#endif // #if VMA_RECORDING_ENABLED + +//////////////////////////////////////////////////////////////////////////////// +// VmaAllocationObjectAllocator + +VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) : + m_Allocator(pAllocationCallbacks, 1024) +{ +} + +template VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... 
+
+void VmaRecorder::PrintPointerList(uint64_t count, const VmaAllocation* pItems)
+{
+    if(count)
+    {
+        fprintf(m_File, "%p", pItems[0]);
+        for(uint64_t i = 1; i < count; ++i)
+        {
+            fprintf(m_File, " %p", pItems[i]);
+        }
+    }
+}
+
+void VmaRecorder::Flush()
+{
+    if((m_Flags & VMA_RECORD_FLUSH_AFTER_CALL_BIT) != 0)
+    {
+        fflush(m_File);
+    }
+}
+
+#endif // #if VMA_RECORDING_ENABLED
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaAllocationObjectAllocator
+
+VmaAllocationObjectAllocator::VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) :
+    m_Allocator(pAllocationCallbacks, 1024)
+{
+}
+
+template<typename... Types> VmaAllocation VmaAllocationObjectAllocator::Allocate(Types... args)
+{
+    VmaMutexLock mutexLock(m_Mutex);
+    return m_Allocator.Alloc(std::forward<Types>(args)...);
+}
+
+void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
+{
+    VmaMutexLock mutexLock(m_Mutex);
+    m_Allocator.Free(hAlloc);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// VmaAllocator_T
+
+VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
+    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
+    m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
+    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
+    m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
+    m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
+    m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
+    m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
+    m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
+    m_hDevice(pCreateInfo->device),
+    m_hInstance(pCreateInfo->instance),
+    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
+    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
+        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
+    m_AllocationObjectAllocator(&m_AllocationCallbacks),
+    m_HeapSizeLimitMask(0),
+    m_PreferredLargeHeapBlockSize(0),
+    m_PhysicalDevice(pCreateInfo->physicalDevice),
+    m_CurrentFrameIndex(0),
+    m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
+    m_Pools(VmaStlAllocator<VmaPool>(GetAllocationCallbacks())),
+    m_NextPoolId(0),
+    m_GlobalMemoryTypeBits(UINT32_MAX)
+#if VMA_RECORDING_ENABLED
+    ,m_pRecorder(VMA_NULL)
+#endif
+{
+    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+    {
+        m_UseKhrDedicatedAllocation = false;
+        m_UseKhrBindMemory2 = false;
+    }
+
+    if(VMA_DEBUG_DETECT_CORRUPTION)
+    {
+        // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
+        VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
+    }
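+    // [Editor's note] For instance, the default VMA_DEBUG_MARGIN of 0 and a
+    // common override such as 16 both satisfy this assert (16 % 4 == 0),
+    // while a margin such as 6 would trip it.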
+
+    VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
+
+    if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
+    {
+#if !(VMA_DEDICATED_ALLOCATION)
+        if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
+        {
+            VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
+        }
+#endif
+#if !(VMA_BIND_MEMORY2)
+        if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
+        {
+            VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
+        }
+#endif
+    }
+#if !(VMA_MEMORY_BUDGET)
+    if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
+    {
+        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
+    }
+#endif
+#if !(VMA_BUFFER_DEVICE_ADDRESS)
+    if(m_UseKhrBufferDeviceAddress)
+    {
+        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
+    }
+#endif
+#if VMA_VULKAN_VERSION < 1002000
+    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
+    {
+        VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
+    }
+#endif
+#if VMA_VULKAN_VERSION < 1001000
+    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+    {
+        VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
+    }
+#endif
+#if !(VMA_MEMORY_PRIORITY)
+    if(m_UseExtMemoryPriority)
+    {
+        VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
+    }
+#endif
+
+    memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks));
+    memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
+    memset(&m_MemProps, 0, sizeof(m_MemProps));
+
+    memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
+    memset(&m_pDedicatedAllocations, 0, sizeof(m_pDedicatedAllocations));
+    memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
+
+    if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
+    {
+        m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
+        m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
+        m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
+    }
+
+    ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
+
+    (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
+    (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
+
+    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_ALIGNMENT));
+    VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
+    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
+    VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
+
+    m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
+        pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
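+    // [Editor's note] With the defaults in this header that is 256 MiB;
+    // CalcPreferredBlockSize() below further reduces it to heapSize / 8 for
+    // heaps no larger than VMA_SMALL_HEAP_MAX_SIZE (1 GiB by default).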
+
+    m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
+
+    if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
+    {
+        for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
+        {
+            const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
+            if(limit != VK_WHOLE_SIZE)
+            {
+                m_HeapSizeLimitMask |= 1u << heapIndex;
+                if(limit < m_MemProps.memoryHeaps[heapIndex].size)
+                {
+                    m_MemProps.memoryHeaps[heapIndex].size = limit;
+                }
+            }
+        }
+    }
+
+    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+    {
+        const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
+
+        m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
+            this,
+            VK_NULL_HANDLE, // hParentPool
+            memTypeIndex,
+            preferredBlockSize,
+            0,
+            SIZE_MAX,
+            GetBufferImageGranularity(),
+            pCreateInfo->frameInUseCount,
+            false, // explicitBlockSize
+            false, // linearAlgorithm
+            0.5f); // priority (0.5 is the default per Vulkan spec)
+        // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
+        // because minBlockCount is 0.
+        m_pDedicatedAllocations[memTypeIndex] = vma_new(this, AllocationVectorType)(VmaStlAllocator<VmaAllocation>(GetAllocationCallbacks()));
+
+    }
+}
+
+VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
+{
+    VkResult res = VK_SUCCESS;
+
+    if(pCreateInfo->pRecordSettings != VMA_NULL &&
+        !VmaStrIsEmpty(pCreateInfo->pRecordSettings->pFilePath))
+    {
+#if VMA_RECORDING_ENABLED
+        m_pRecorder = vma_new(this, VmaRecorder)();
+        res = m_pRecorder->Init(*pCreateInfo->pRecordSettings, m_UseMutex);
+        if(res != VK_SUCCESS)
+        {
+            return res;
+        }
+        m_pRecorder->WriteConfiguration(
+            m_PhysicalDeviceProperties,
+            m_MemProps,
+            m_VulkanApiVersion,
+            m_UseKhrDedicatedAllocation,
+            m_UseKhrBindMemory2,
+            m_UseExtMemoryBudget,
+            m_UseAmdDeviceCoherentMemory);
+        m_pRecorder->RecordCreateAllocator(GetCurrentFrameIndex());
+#else
+        VMA_ASSERT(0 && "VmaAllocatorCreateInfo::pRecordSettings used, but not supported due to VMA_RECORDING_ENABLED not defined to 1.");
+        return VK_ERROR_FEATURE_NOT_PRESENT;
+#endif
+    }
+
+#if VMA_MEMORY_BUDGET
+    if(m_UseExtMemoryBudget)
+    {
+        UpdateVulkanBudget();
+    }
+#endif // #if VMA_MEMORY_BUDGET
+
+    return res;
+}
+
+VmaAllocator_T::~VmaAllocator_T()
+{
+#if VMA_RECORDING_ENABLED
+    if(m_pRecorder != VMA_NULL)
+    {
+        m_pRecorder->RecordDestroyAllocator(GetCurrentFrameIndex());
+        vma_delete(this, m_pRecorder);
+    }
+#endif
+
+    VMA_ASSERT(m_Pools.empty());
+
+    for(size_t i = GetMemoryTypeCount(); i--; )
+    {
+        if(m_pDedicatedAllocations[i] != VMA_NULL && !m_pDedicatedAllocations[i]->empty())
+        {
+            VMA_ASSERT(0 && "Unfreed dedicated allocations found.");
+        }
+
+        vma_delete(this, m_pDedicatedAllocations[i]);
+        vma_delete(this, m_pBlockVectors[i]);
+    }
+}
+
+void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
+{
+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+    ImportVulkanFunctions_Static();
+#endif
+
+    if(pVulkanFunctions != VMA_NULL)
+    {
+        ImportVulkanFunctions_Custom(pVulkanFunctions);
+    }
+
+#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
+    ImportVulkanFunctions_Dynamic();
+#endif
+
+    ValidateVulkanFunctions();
+}
+
+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
+
+void VmaAllocator_T::ImportVulkanFunctions_Static()
+{
+    // Vulkan 1.0
+    m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties; +
m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties; + m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; + m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory; + m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory; + m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory; + m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges; + m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges; + m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; + m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; + m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; + m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; + m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; + m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; + m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; + m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; + m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; + + // Vulkan 1.1 +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; + m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; + m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2; + m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2; + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; + } +#endif +} + +#endif // #if VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions) +{ + VMA_ASSERT(pVulkanFunctions != VMA_NULL); + +#define VMA_COPY_IF_NOT_NULL(funcName) \ + if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; + + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); + VMA_COPY_IF_NOT_NULL(vkAllocateMemory); + VMA_COPY_IF_NOT_NULL(vkFreeMemory); + VMA_COPY_IF_NOT_NULL(vkMapMemory); + VMA_COPY_IF_NOT_NULL(vkUnmapMemory); + VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory); + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkCreateBuffer); + VMA_COPY_IF_NOT_NULL(vkDestroyBuffer); + VMA_COPY_IF_NOT_NULL(vkCreateImage); + VMA_COPY_IF_NOT_NULL(vkDestroyImage); + VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); + 
VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); +#endif + +#if VMA_MEMORY_BUDGET + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); +#endif + +#undef VMA_COPY_IF_NOT_NULL +} + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Dynamic() +{ +#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)vkGetInstanceProcAddr(m_hInstance, functionNameString); +#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)vkGetDeviceProcAddr(m_hDevice, functionNameString); + + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); + VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory"); + VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory"); + VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory"); + VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory"); + VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory"); + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer"); + VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer"); + VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage"); + VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage"); + VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); + +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2"); + } +#endif + +#if VMA_DEDICATED_ALLOCATION + if(m_UseKhrDedicatedAllocation) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR"); + } +#endif + +#if VMA_BIND_MEMORY2 + if(m_UseKhrBindMemory2) + { + 
VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); + } +#endif // #if VMA_BIND_MEMORY2 + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } +#endif // #if VMA_MEMORY_BUDGET + +#undef VMA_FETCH_DEVICE_FUNC +#undef VMA_FETCH_INSTANCE_FUNC +} + +#endif // #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ValidateVulkanFunctions() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) + { + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); + } +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) + { + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); + } +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); + } +#endif +} + +VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex) +{ + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE; + return VmaAlignUp(isSmallHeap ? 
(heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
+}
+
+VkResult VmaAllocator_T::AllocateMemoryOfType(
+    VkDeviceSize size,
+    VkDeviceSize alignment,
+    bool dedicatedAllocation,
+    VkBuffer dedicatedBuffer,
+    VkBufferUsageFlags dedicatedBufferUsage,
+    VkImage dedicatedImage,
+    const VmaAllocationCreateInfo& createInfo,
+    uint32_t memTypeIndex,
+    VmaSuballocationType suballocType,
+    size_t allocationCount,
+    VmaAllocation* pAllocations)
+{
+    VMA_ASSERT(pAllocations != VMA_NULL);
+    VMA_DEBUG_LOG("  AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
+
+    VmaAllocationCreateInfo finalCreateInfo = createInfo;
+
+    // If memory type is not HOST_VISIBLE, disable MAPPED.
+    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+        (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+    {
+        finalCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
+    }
+    // If memory is lazily allocated, it should be always dedicated.
+    if(finalCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
+    {
+        finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+    }
+
+    VmaBlockVector* const blockVector = m_pBlockVectors[memTypeIndex];
+    VMA_ASSERT(blockVector);
+
+    const VkDeviceSize preferredBlockSize = blockVector->GetPreferredBlockSize();
+    bool preferDedicatedMemory =
+        VMA_DEBUG_ALWAYS_DEDICATED_MEMORY ||
+        dedicatedAllocation ||
+        // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
+        size > preferredBlockSize / 2;
+
+    if(preferDedicatedMemory &&
+        (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
+        finalCreateInfo.pool == VK_NULL_HANDLE)
+    {
+        finalCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+    }
+
+    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
+    {
+        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+        {
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+        else
+        {
+            return AllocateDedicatedMemory(
+                size,
+                suballocType,
+                memTypeIndex,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+                finalCreateInfo.pUserData,
+                finalCreateInfo.priority,
+                dedicatedBuffer,
+                dedicatedBufferUsage,
+                dedicatedImage,
+                allocationCount,
+                pAllocations);
+        }
+    }
+    else
+    {
+        VkResult res = blockVector->Allocate(
+            m_CurrentFrameIndex.load(),
+            size,
+            alignment,
+            finalCreateInfo,
+            suballocType,
+            allocationCount,
+            pAllocations);
+        if(res == VK_SUCCESS)
+        {
+            return res;
+        }
+
+        // Block allocation failed: try dedicated memory.
+        if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
+        {
+            return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+        }
+        else
+        {
+            res = AllocateDedicatedMemory(
+                size,
+                suballocType,
+                memTypeIndex,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+                finalCreateInfo.pUserData,
+                finalCreateInfo.priority,
+                dedicatedBuffer,
+                dedicatedBufferUsage,
+                dedicatedImage,
+                allocationCount,
+                pAllocations);
+            if(res == VK_SUCCESS)
+            {
+                // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
+ VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); + return VK_SUCCESS; + } + else + { + // Everything failed: Return error code. + VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } + } + } +} + +VkResult VmaAllocator_T::AllocateDedicatedMemory( + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + bool withinBudget, + bool map, + bool isUserDataString, + void* pUserData, + float priority, + VkBuffer dedicatedBuffer, + VkBufferUsageFlags dedicatedBufferUsage, + VkImage dedicatedImage, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + VMA_ASSERT(allocationCount > 0 && pAllocations); + + if(withinBudget) + { + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + VmaBudget heapBudget = {}; + GetBudget(&heapBudget, heapIndex, 1); + if(heapBudget.usage + size * allocationCount > heapBudget.budget) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.memoryTypeIndex = memTypeIndex; + allocInfo.allocationSize = size; + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + if(dedicatedBuffer != VK_NULL_HANDLE) + { + VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); + dedicatedAllocInfo.buffer = dedicatedBuffer; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + dedicatedAllocInfo.image = dedicatedImage; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + } +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + +#if VMA_BUFFER_DEVICE_ADDRESS + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if(m_UseKhrBufferDeviceAddress) + { + bool canContainBufferWithDeviceAddress = true; + if(dedicatedBuffer != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = dedicatedBufferUsage == UINT32_MAX || // Usage flags unknown + (dedicatedBufferUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0; + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = false; + } + if(canContainBufferWithDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } + } +#endif // #if VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if(m_UseExtMemoryPriority) + { + priorityInfo.priority = priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // #if VMA_MEMORY_PRIORITY + + size_t allocIndex; + VkResult res = VK_SUCCESS; + for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = AllocateDedicatedMemoryPage( + size, + suballocType, + memTypeIndex, + allocInfo, + map, + isUserDataString, + pUserData, + pAllocations + allocIndex); + if(res != VK_SUCCESS) + { + break; + } + } + + if(res == VK_SUCCESS) + { + // Register them in m_pDedicatedAllocations. 
+        {
+            VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+            AllocationVectorType* pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex];
+            VMA_ASSERT(pDedicatedAllocations);
+            for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+            {
+                VmaVectorInsertSorted(*pDedicatedAllocations, pAllocations[allocIndex]);
+            }
+        }
+
+        VMA_DEBUG_LOG("    Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
+    }
+    else
+    {
+        // Free all already created allocations.
+        while(allocIndex--)
+        {
+            VmaAllocation currAlloc = pAllocations[allocIndex];
+            VkDeviceMemory hMemory = currAlloc->GetMemory();
+
+            /*
+            There is no need to call this, because Vulkan spec allows skipping vkUnmapMemory
+            before vkFreeMemory.
+
+            if(currAlloc->GetMappedData() != VMA_NULL)
+            {
+                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+            }
+            */
+
+            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
+            m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
+            currAlloc->SetUserData(this, VMA_NULL);
+            m_AllocationObjectAllocator.Free(currAlloc);
+        }
+
+        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+    }
+
+    return res;
+}
+
+VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
+    VkDeviceSize size,
+    VmaSuballocationType suballocType,
+    uint32_t memTypeIndex,
+    const VkMemoryAllocateInfo& allocInfo,
+    bool map,
+    bool isUserDataString,
+    void* pUserData,
+    VmaAllocation* pAllocation)
+{
+    VkDeviceMemory hMemory = VK_NULL_HANDLE;
+    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
+    if(res < 0)
+    {
+        VMA_DEBUG_LOG("    vkAllocateMemory FAILED");
+        return res;
+    }
+
+    void* pMappedData = VMA_NULL;
+    if(map)
+    {
+        res = (*m_VulkanFunctions.vkMapMemory)(
+            m_hDevice,
+            hMemory,
+            0,
+            VK_WHOLE_SIZE,
+            0,
+            &pMappedData);
+        if(res < 0)
+        {
+            VMA_DEBUG_LOG("    vkMapMemory FAILED");
+            FreeVulkanMemory(memTypeIndex, size, hMemory);
+            return res;
+        }
+    }
+
+    *pAllocation = m_AllocationObjectAllocator.Allocate(m_CurrentFrameIndex.load(), isUserDataString);
+    (*pAllocation)->InitDedicatedAllocation(memTypeIndex, hMemory, suballocType, pMappedData, size);
+    (*pAllocation)->SetUserData(this, pUserData);
+    m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
+    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+    {
+        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+    }
+
+    return VK_SUCCESS;
+}
+
+void VmaAllocator_T::GetBufferMemoryRequirements(
+    VkBuffer hBuffer,
+    VkMemoryRequirements& memReq,
+    bool& requiresDedicatedAllocation,
+    bool& prefersDedicatedAllocation) const
+{
+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+    {
+        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+        memReqInfo.buffer = hBuffer;
+
+        VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
+
+        VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
+        VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
+
+        (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
+
+        memReq = memReq2.memoryRequirements;
+        requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
+        prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
+    }
+    else
+#endif // #if VMA_DEDICATED_ALLOCATION ||
VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +void VmaAllocator_T::GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.image = hImage; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +VkResult VmaAllocator_T::AllocateMemory( + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkBufferUsageFlags dedicatedBufferUsage, + VkImage dedicatedImage, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + + VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); + + if(vkMemReq.size == 0) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if((createInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if((createInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (createInfo.flags & VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_MAPPED_BIT together with VMA_ALLOCATION_CREATE_CAN_BECOME_LOST_BIT is invalid."); + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if(requiresDedicatedAllocation) + { + if((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT specified while dedicated allocation is required."); + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if(createInfo.pool != VK_NULL_HANDLE) + { + VMA_ASSERT(0 && "Pool specified while dedicated allocation is required."); + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + if((createInfo.pool != VK_NULL_HANDLE) && + ((createInfo.flags & (VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT)) != 0)) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT when pool != null is invalid."); + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + + if(createInfo.pool != VK_NULL_HANDLE) + { + const VkDeviceSize alignmentForPool = VMA_MAX( + 
vkMemReq.alignment,
+            GetMemoryTypeMinAlignment(createInfo.pool->m_BlockVector.GetMemoryTypeIndex()));
+
+        VmaAllocationCreateInfo createInfoForPool = createInfo;
+        // If memory type is not HOST_VISIBLE, disable MAPPED.
+        if((createInfoForPool.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
+            (m_MemProps.memoryTypes[createInfo.pool->m_BlockVector.GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
+        {
+            createInfoForPool.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
+        }
+
+        return createInfo.pool->m_BlockVector.Allocate(
+            m_CurrentFrameIndex.load(),
+            vkMemReq.size,
+            alignmentForPool,
+            createInfoForPool,
+            suballocType,
+            allocationCount,
+            pAllocations);
+    }
+    else
+    {
+        // Bit mask of Vulkan memory types acceptable for this allocation.
+        uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
+        uint32_t memTypeIndex = UINT32_MAX;
+        VkResult res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
+        if(res == VK_SUCCESS)
+        {
+            VkDeviceSize alignmentForMemType = VMA_MAX(
+                vkMemReq.alignment,
+                GetMemoryTypeMinAlignment(memTypeIndex));
+
+            res = AllocateMemoryOfType(
+                vkMemReq.size,
+                alignmentForMemType,
+                requiresDedicatedAllocation || prefersDedicatedAllocation,
+                dedicatedBuffer,
+                dedicatedBufferUsage,
+                dedicatedImage,
+                createInfo,
+                memTypeIndex,
+                suballocType,
+                allocationCount,
+                pAllocations);
+            // Succeeded on first try.
+            if(res == VK_SUCCESS)
+            {
+                return res;
+            }
+            // Allocation from this memory type failed. Try other compatible memory types.
+            else
+            {
+                for(;;)
+                {
+                    // Remove old memTypeIndex from list of possibilities.
+                    memoryTypeBits &= ~(1u << memTypeIndex);
+                    // Find alternative memTypeIndex.
+                    res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfo, &memTypeIndex);
+                    if(res == VK_SUCCESS)
+                    {
+                        alignmentForMemType = VMA_MAX(
+                            vkMemReq.alignment,
+                            GetMemoryTypeMinAlignment(memTypeIndex));
+
+                        res = AllocateMemoryOfType(
+                            vkMemReq.size,
+                            alignmentForMemType,
+                            requiresDedicatedAllocation || prefersDedicatedAllocation,
+                            dedicatedBuffer,
+                            dedicatedBufferUsage,
+                            dedicatedImage,
+                            createInfo,
+                            memTypeIndex,
+                            suballocType,
+                            allocationCount,
+                            pAllocations);
+                        // Allocation from this alternative memory type succeeded.
+                        if(res == VK_SUCCESS)
+                        {
+                            return res;
+                        }
+                        // else: Allocation from this memory type failed. Try next one - next loop iteration.
+                    }
+                    // No other matching memory type index could be found.
+                    else
+                    {
+                        // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
+                        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+                    }
+                }
+            }
+        }
+        // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
+        else
+            return res;
+    }
+}
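+// [Editor's note] A worked example of the fallback loop above (illustrative
+// only): suppose vkMemReq.memoryTypeBits is 0b0111 and type 0 is chosen first
+// but its heap is exhausted. The loop clears bit 0 (leaving 0b0110), asks
+// vmaFindMemoryTypeIndex() again, and retries with type 1 or 2; only when no
+// candidate bit remains does it return VK_ERROR_OUT_OF_DEVICE_MEMORY.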
+
+void VmaAllocator_T::FreeMemory(
+    size_t allocationCount,
+    const VmaAllocation* pAllocations)
+{
+    VMA_ASSERT(pAllocations);
+
+    for(size_t allocIndex = allocationCount; allocIndex--; )
+    {
+        VmaAllocation allocation = pAllocations[allocIndex];
+
+        if(allocation != VK_NULL_HANDLE)
+        {
+            if(TouchAllocation(allocation))
+            {
+                if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+                {
+                    FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
+                }
+
+                switch(allocation->GetType())
+                {
+                case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+                    {
+                        VmaBlockVector* pBlockVector = VMA_NULL;
+                        VmaPool hPool = allocation->GetBlock()->GetParentPool();
+                        if(hPool != VK_NULL_HANDLE)
+                        {
+                            pBlockVector = &hPool->m_BlockVector;
+                        }
+                        else
+                        {
+                            const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+                            pBlockVector = m_pBlockVectors[memTypeIndex];
+                        }
+                        pBlockVector->Free(allocation);
+                    }
+                    break;
+                case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+                    FreeDedicatedMemory(allocation);
+                    break;
+                default:
+                    VMA_ASSERT(0);
+                }
+            }
+
+            // Do this regardless of whether the allocation is lost. Lost allocations still count toward Budget.AllocationBytes.
+            m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
+            allocation->SetUserData(this, VMA_NULL);
+            m_AllocationObjectAllocator.Free(allocation);
+        }
+    }
+}
+
+VkResult VmaAllocator_T::ResizeAllocation(
+    const VmaAllocation alloc,
+    VkDeviceSize newSize)
+{
+    // This function is deprecated and so it does nothing. It's left for backward compatibility.
+    if(newSize == 0 || alloc->GetLastUseFrameIndex() == VMA_FRAME_INDEX_LOST)
+    {
+        return VK_ERROR_VALIDATION_FAILED_EXT;
+    }
+    if(newSize == alloc->GetSize())
+    {
+        return VK_SUCCESS;
+    }
+    return VK_ERROR_OUT_OF_POOL_MEMORY;
+}
+
+void VmaAllocator_T::CalculateStats(VmaStats* pStats)
+{
+    // Initialize.
+    InitStatInfo(pStats->total);
+    for(size_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
+        InitStatInfo(pStats->memoryType[i]);
+    for(size_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
+        InitStatInfo(pStats->memoryHeap[i]);
+
+    // Process default pools.
+    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+    {
+        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+        VMA_ASSERT(pBlockVector);
+        pBlockVector->AddStats(pStats);
+    }
+
+    // Process custom pools.
+    {
+        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+        for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex)
+        {
+            m_Pools[poolIndex]->m_BlockVector.AddStats(pStats);
+        }
+    }
+
+    // Process dedicated allocations.
+    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+    {
+        const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+        VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex);
+        AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex];
+        VMA_ASSERT(pDedicatedAllocVector);
+        for(size_t allocIndex = 0, allocCount = pDedicatedAllocVector->size(); allocIndex < allocCount; ++allocIndex)
+        {
+            VmaStatInfo allocationStatInfo;
+            (*pDedicatedAllocVector)[allocIndex]->DedicatedAllocCalcStatsInfo(allocationStatInfo);
+            VmaAddStatInfo(pStats->total, allocationStatInfo);
+            VmaAddStatInfo(pStats->memoryType[memTypeIndex], allocationStatInfo);
+            VmaAddStatInfo(pStats->memoryHeap[memHeapIndex], allocationStatInfo);
+        }
+    }
+
+    // Postprocess.
+ VmaPostprocessCalcStatInfo(pStats->total); + for(size_t i = 0; i < GetMemoryTypeCount(); ++i) + VmaPostprocessCalcStatInfo(pStats->memoryType[i]); + for(size_t i = 0; i < GetMemoryHeapCount(); ++i) + VmaPostprocessCalcStatInfo(pStats->memoryHeap[i]); +} + +void VmaAllocator_T::GetBudget(VmaBudget* outBudget, uint32_t firstHeap, uint32_t heapCount) +{ +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + if(m_Budget.m_OperationsSinceBudgetFetch < 30) + { + VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); + for(uint32_t i = 0; i < heapCount; ++i, ++outBudget) + { + const uint32_t heapIndex = firstHeap + i; + + outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + if(m_Budget.m_VulkanUsage[heapIndex] + outBudget->blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + { + outBudget->usage = m_Budget.m_VulkanUsage[heapIndex] + + outBudget->blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + else + { + outBudget->usage = 0; + } + + // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. + outBudget->budget = VMA_MIN( + m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); + } + } + else + { + UpdateVulkanBudget(); // Outside of mutex lock + GetBudget(outBudget, firstHeap, heapCount); // Recursion + } + } + else +#endif + { + for(uint32_t i = 0; i < heapCount; ++i, ++outBudget) + { + const uint32_t heapIndex = firstHeap + i; + + outBudget->blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudget->allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + outBudget->usage = outBudget->blockBytes; + outBudget->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. + } + } +} + +static const uint32_t VMA_VENDOR_ID_AMD = 4098; + +VkResult VmaAllocator_T::DefragmentationBegin( + const VmaDefragmentationInfo2& info, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext* pContext) +{ + if(info.pAllocationsChanged != VMA_NULL) + { + memset(info.pAllocationsChanged, 0, info.allocationCount * sizeof(VkBool32)); + } + + *pContext = vma_new(this, VmaDefragmentationContext_T)( + this, m_CurrentFrameIndex.load(), info.flags, pStats); + + (*pContext)->AddPools(info.poolCount, info.pPools); + (*pContext)->AddAllocations( + info.allocationCount, info.pAllocations, info.pAllocationsChanged); + + VkResult res = (*pContext)->Defragment( + info.maxCpuBytesToMove, info.maxCpuAllocationsToMove, + info.maxGpuBytesToMove, info.maxGpuAllocationsToMove, + info.commandBuffer, pStats, info.flags); + + if(res != VK_NOT_READY) + { + vma_delete(this, *pContext); + *pContext = VMA_NULL; + } + + return res; +} + +VkResult VmaAllocator_T::DefragmentationEnd( + VmaDefragmentationContext context) +{ + vma_delete(this, context); + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::DefragmentationPassBegin( + VmaDefragmentationPassInfo* pInfo, + VmaDefragmentationContext context) +{ + return context->DefragmentPassBegin(pInfo); +} +VkResult VmaAllocator_T::DefragmentationPassEnd( + VmaDefragmentationContext context) +{ + return context->DefragmentPassEnd(); + +} + +void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) +{ + if(hAllocation->CanBecomeLost()) + { + /* + Warning: This is a carefully designed algorithm. 
+ Do not modify unless you really know what you're doing :) + */ + const uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); + uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); + for(;;) + { + if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) + { + pAllocationInfo->memoryType = UINT32_MAX; + pAllocationInfo->deviceMemory = VK_NULL_HANDLE; + pAllocationInfo->offset = 0; + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = VMA_NULL; + pAllocationInfo->pUserData = hAllocation->GetUserData(); + return; + } + else if(localLastUseFrameIndex == localCurrFrameIndex) + { + pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); + pAllocationInfo->deviceMemory = hAllocation->GetMemory(); + pAllocationInfo->offset = hAllocation->GetOffset(); + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = VMA_NULL; + pAllocationInfo->pUserData = hAllocation->GetUserData(); + return; + } + else // Last use time earlier than current time. + { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { + localLastUseFrameIndex = localCurrFrameIndex; + } + } + } + } + else + { +#if VMA_STATS_STRING_ENABLED + uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); + uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); + for(;;) + { + VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST); + if(localLastUseFrameIndex == localCurrFrameIndex) + { + break; + } + else // Last use time earlier than current time. + { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { + localLastUseFrameIndex = localCurrFrameIndex; + } + } + } +#endif + + pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); + pAllocationInfo->deviceMemory = hAllocation->GetMemory(); + pAllocationInfo->offset = hAllocation->GetOffset(); + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = hAllocation->GetMappedData(); + pAllocationInfo->pUserData = hAllocation->GetUserData(); + } +} + +bool VmaAllocator_T::TouchAllocation(VmaAllocation hAllocation) +{ + // This is a stripped-down version of VmaAllocator_T::GetAllocationInfo. + if(hAllocation->CanBecomeLost()) + { + uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); + uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); + for(;;) + { + if(localLastUseFrameIndex == VMA_FRAME_INDEX_LOST) + { + return false; + } + else if(localLastUseFrameIndex == localCurrFrameIndex) + { + return true; + } + else // Last use time earlier than current time. + { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { + localLastUseFrameIndex = localCurrFrameIndex; + } + } + } + } + else + { +#if VMA_STATS_STRING_ENABLED + uint32_t localCurrFrameIndex = m_CurrentFrameIndex.load(); + uint32_t localLastUseFrameIndex = hAllocation->GetLastUseFrameIndex(); + for(;;) + { + VMA_ASSERT(localLastUseFrameIndex != VMA_FRAME_INDEX_LOST); + if(localLastUseFrameIndex == localCurrFrameIndex) + { + break; + } + else // Last use time earlier than current time. 
+ { + if(hAllocation->CompareExchangeLastUseFrameIndex(localLastUseFrameIndex, localCurrFrameIndex)) + { + localLastUseFrameIndex = localCurrFrameIndex; + } + } + } +#endif + + return true; + } +} + +VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) +{ + VMA_DEBUG_LOG(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags); + + VmaPoolCreateInfo newCreateInfo = *pCreateInfo; + + if(newCreateInfo.maxBlockCount == 0) + { + newCreateInfo.maxBlockCount = SIZE_MAX; + } + if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + // Memory type index out of range or forbidden. + if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || + ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); + + *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); + + VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); + if(res != VK_SUCCESS) + { + vma_delete(this, *pPool); + *pPool = VMA_NULL; + return res; + } + + // Add to m_Pools. + { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + (*pPool)->SetId(m_NextPoolId++); + VmaVectorInsertSorted(m_Pools, *pPool); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::DestroyPool(VmaPool pool) +{ + // Remove from m_Pools. + { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + bool success = VmaVectorRemoveSorted(m_Pools, pool); + VMA_ASSERT(success && "Pool not found in Allocator."); + } + + vma_delete(this, pool); +} + +void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats) +{ + pool->m_BlockVector.GetPoolStats(pPoolStats); +} + +void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) +{ + m_CurrentFrameIndex.store(frameIndex); + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET +} + +void VmaAllocator_T::MakePoolAllocationsLost( + VmaPool hPool, + size_t* pLostAllocationCount) +{ + hPool->m_BlockVector.MakePoolAllocationsLost( + m_CurrentFrameIndex.load(), + pLostAllocationCount); +} + +VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) +{ + return hPool->m_BlockVector.CheckCorruption(); +} + +VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) +{ + VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT; + + // Process default pools. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if(((1u << memTypeIndex) & memoryTypeBits) != 0) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(pBlockVector); + VkResult localRes = pBlockVector->CheckCorruption(); + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; + } + } + } + + // Process custom pools. 
+ { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(size_t poolIndex = 0, poolCount = m_Pools.size(); poolIndex < poolCount; ++poolIndex) + { + if(((1u << m_Pools[poolIndex]->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) + { + VkResult localRes = m_Pools[poolIndex]->m_BlockVector.CheckCorruption(); + switch(localRes) + { + case VK_ERROR_FEATURE_NOT_PRESENT: + break; + case VK_SUCCESS: + finalRes = VK_SUCCESS; + break; + default: + return localRes; + } + } + } + } + + return finalRes; +} + +void VmaAllocator_T::CreateLostAllocation(VmaAllocation* pAllocation) +{ + *pAllocation = m_AllocationObjectAllocator.Allocate(VMA_FRAME_INDEX_LOST, false); + (*pAllocation)->InitLost(); +} + +VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory) +{ + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex); + + // HeapSizeLimit is in effect for this heap. + if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0) + { + const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; + VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex]; + for(;;) + { + const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize; + if(blockBytesAfterAllocation > heapSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) + { + break; + } + } + } + else + { + m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize; + } + + // VULKAN CALL vkAllocateMemory. + VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); + + if(res == VK_SUCCESS) + { +#if VMA_MEMORY_BUDGET + ++m_Budget.m_OperationsSinceBudgetFetch; +#endif + + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData); + } + } + else + { + m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize; + } + + return res; +} + +void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) +{ + // Informative callback. + if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) + { + (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData); + } + + // VULKAN CALL vkFreeMemory. 
+ (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks()); + + m_Budget.m_BlockBytes[MemoryTypeIndexToHeapIndex(memoryType)] -= size; +} + +VkResult VmaAllocator_T::BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL) + { + VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.buffer = buffer; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext) +{ + if(pNext != VMA_NULL) + { +#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 + if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) + { + VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; + bindBufferMemoryInfo.pNext = pNext; + bindBufferMemoryInfo.image = image; + bindBufferMemoryInfo.memory = memory; + bindBufferMemoryInfo.memoryOffset = memoryOffset; + return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo); + } + else +#endif // #if VMA_BIND_MEMORY2 + { + return VK_ERROR_EXTENSION_NOT_PRESENT; + } + } + else + { + return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset); + } +} + +VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) +{ + if(hAllocation->CanBecomeLost()) + { + return VK_ERROR_MEMORY_MAP_FAILED; + } + + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + char *pBytes = VMA_NULL; + VkResult res = pBlock->Map(this, 1, (void**)&pBytes); + if(res == VK_SUCCESS) + { + *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); + hAllocation->BlockAllocMap(); + } + return res; + } + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + return hAllocation->DedicatedAllocMap(this, ppData); + default: + VMA_ASSERT(0); + return VK_ERROR_MEMORY_MAP_FAILED; + } +} + +void VmaAllocator_T::Unmap(VmaAllocation hAllocation) +{ + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + hAllocation->BlockAllocUnmap(); + pBlock->Unmap(this, 1); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + hAllocation->DedicatedAllocUnmap(this); + break; + default: + VMA_ASSERT(0); + } +} + +VkResult VmaAllocator_T::BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VkResult res = VK_SUCCESS; + switch(hAllocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext); + 
break;
+    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+    {
+        VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+        VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block. Is the allocation lost?");
+        res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
+        break;
+    }
+    default:
+        VMA_ASSERT(0);
+    }
+    return res;
+}
+
+VkResult VmaAllocator_T::BindImageMemory(
+    VmaAllocation hAllocation,
+    VkDeviceSize allocationLocalOffset,
+    VkImage hImage,
+    const void* pNext)
+{
+    VkResult res = VK_SUCCESS;
+    switch(hAllocation->GetType())
+    {
+    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+        res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
+        break;
+    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+    {
+        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+        VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block. Is the allocation lost?");
+        res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
+        break;
+    }
+    default:
+        VMA_ASSERT(0);
+    }
+    return res;
+}
+
+VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
+    VmaAllocation hAllocation,
+    VkDeviceSize offset, VkDeviceSize size,
+    VMA_CACHE_OPERATION op)
+{
+    VkResult res = VK_SUCCESS;
+
+    VkMappedMemoryRange memRange = {};
+    if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
+    {
+        switch(op)
+        {
+        case VMA_CACHE_FLUSH:
+            res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+            break;
+        case VMA_CACHE_INVALIDATE:
+            res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+    }
+    // else: Just ignore this call.
+    return res;
+}
+
+VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
+    uint32_t allocationCount,
+    const VmaAllocation* allocations,
+    const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+    VMA_CACHE_OPERATION op)
+{
+    typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
+    typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
+    RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
+
+    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+    {
+        const VmaAllocation alloc = allocations[allocIndex];
+        const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
+        const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
+        VkMappedMemoryRange newRange;
+        if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
+        {
+            ranges.push_back(newRange);
+        }
+    }
+
+    VkResult res = VK_SUCCESS;
+    if(!ranges.empty())
+    {
+        switch(op)
+        {
+        case VMA_CACHE_FLUSH:
+            res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+            break;
+        case VMA_CACHE_INVALIDATE:
+            res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+    }
+    // else: Just ignore this call.
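+    // Note: all collected ranges are submitted to the driver in a single
+    // vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges call.
+    // A minimal usage sketch of the public wrapper (hypothetical handles;
+    // assumes host-visible, non-coherent allocations that are mapped):
+    //
+    //     VkDeviceSize offsets[2] = { 0, 0 };
+    //     VkDeviceSize sizes[2]   = { VK_WHOLE_SIZE, VK_WHOLE_SIZE };
+    //     vmaFlushAllocations(allocator, 2, allocs, offsets, sizes);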
+ return res; +} + +void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) +{ + VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + { + VmaMutexLockWrite lock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); + AllocationVectorType* const pDedicatedAllocations = m_pDedicatedAllocations[memTypeIndex]; + VMA_ASSERT(pDedicatedAllocations); + bool success = VmaVectorRemoveSorted(*pDedicatedAllocations, allocation); + VMA_ASSERT(success); + } + + VkDeviceMemory hMemory = allocation->GetMemory(); + + /* + There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory + before vkFreeMemory. + + if(allocation->GetMappedData() != VMA_NULL) + { + (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory); + } + */ + + FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory); + + VMA_DEBUG_LOG(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex); +} + +uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const +{ + VkBufferCreateInfo dummyBufCreateInfo; + VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo); + + uint32_t memoryTypeBits = 0; + + // Create buffer. + VkBuffer buf = VK_NULL_HANDLE; + VkResult res = (*GetVulkanFunctions().vkCreateBuffer)( + m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf); + if(res == VK_SUCCESS) + { + // Query for supported memory types. + VkMemoryRequirements memReq; + (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq); + memoryTypeBits = memReq.memoryTypeBits; + + // Destroy buffer. + (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks()); + } + + return memoryTypeBits; +} + +uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const +{ + // Make sure memory information is already fetched. + VMA_ASSERT(GetMemoryTypeCount() > 0); + + uint32_t memoryTypeBits = UINT32_MAX; + + if(!m_UseAmdDeviceCoherentMemory) + { + // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + { + memoryTypeBits &= ~(1u << memTypeIndex); + } + } + } + + return memoryTypeBits; +} + +bool VmaAllocator_T::GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const +{ + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) + { + const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; + const VkDeviceSize allocationSize = allocation->GetSize(); + VMA_ASSERT(offset <= allocationSize); + + outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; + outRange.pNext = VMA_NULL; + outRange.memory = allocation->GetMemory(); + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + outRange.size = allocationSize - outRange.offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + outRange.size = VMA_MIN( + VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize), + allocationSize - outRange.offset); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + // 1. Still within this allocation. 
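+            // Both offset and size of the returned range must be multiples of
+            // nonCoherentAtomSize. Worked example with an atom size of 64,
+            // offset 100, size 8: AlignDown(100, 64) = 64, and the size grows
+            // to AlignUp(8 + (100 - 64), 64) = 64, so bytes [64, 128) are
+            // flushed, covering the requested [100, 108); the result is then
+            // clamped so it never reaches past the allocation or block.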
+ outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + size = allocationSize - offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + } + outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize); + + // 2. Adjust to whole block. + const VkDeviceSize allocationOffset = allocation->GetOffset(); + VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); + const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize(); + outRange.offset += allocationOffset; + outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset); + + break; + } + default: + VMA_ASSERT(0); + } + return true; + } + return false; +} + +#if VMA_MEMORY_BUDGET + +void VmaAllocator_T::UpdateVulkanBudget() +{ + VMA_ASSERT(m_UseExtMemoryBudget); + + VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; + + VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; + VmaPnextChainPushFront(&memProps, &budgetProps); + + GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); + + { + VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); + + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; + m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; + m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); + + // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size. + if(m_Budget.m_VulkanBudget[heapIndex] == 0) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
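+                // Assumption behind this fallback: a conservative budget of
+                // 80% of the heap is close to what well-behaved drivers tend
+                // to report through VK_EXT_memory_budget.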
+ } + else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; + } + if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) + { + m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + } + m_Budget.m_OperationsSinceBudgetFetch = 0; + } +} + +#endif // #if VMA_MEMORY_BUDGET + +void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) +{ + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && + !hAllocation->CanBecomeLost() && + (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + void* pData = VMA_NULL; + VkResult res = Map(hAllocation, &pData); + if(res == VK_SUCCESS) + { + memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); + FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); + Unmap(hAllocation); + } + else + { + VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); + } + } +} + +uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() +{ + uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); + if(memoryTypeBits == UINT32_MAX) + { + memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); + m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); + } + return memoryTypeBits; +} + +#if VMA_STATS_STRING_ENABLED + +void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) +{ + bool dedicatedAllocationsStarted = false; + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaMutexLockRead dedicatedAllocationsLock(m_DedicatedAllocationsMutex[memTypeIndex], m_UseMutex); + AllocationVectorType* const pDedicatedAllocVector = m_pDedicatedAllocations[memTypeIndex]; + VMA_ASSERT(pDedicatedAllocVector); + if(pDedicatedAllocVector->empty() == false) + { + if(dedicatedAllocationsStarted == false) + { + dedicatedAllocationsStarted = true; + json.WriteString("DedicatedAllocations"); + json.BeginObject(); + } + + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + + json.BeginArray(); + + for(size_t i = 0; i < pDedicatedAllocVector->size(); ++i) + { + json.BeginObject(true); + const VmaAllocation hAlloc = (*pDedicatedAllocVector)[i]; + hAlloc->PrintParameters(json); + json.EndObject(); + } + + json.EndArray(); + } + } + if(dedicatedAllocationsStarted) + { + json.EndObject(); + } + + { + bool allocationsStarted = false; + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if(m_pBlockVectors[memTypeIndex]->IsEmpty() == false) + { + if(allocationsStarted == false) + { + allocationsStarted = true; + json.WriteString("DefaultPools"); + json.BeginObject(); + } + + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + + m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json); + } + } + if(allocationsStarted) + { + json.EndObject(); + } + } + + // Custom pools + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + const size_t poolCount = m_Pools.size(); + if(poolCount > 0) + { + json.WriteString("Pools"); + json.BeginObject(); + for(size_t poolIndex = 0; poolIndex < poolCount; ++poolIndex) + { + json.BeginString(); + json.ContinueString(m_Pools[poolIndex]->GetId()); + json.EndString(); + + m_Pools[poolIndex]->m_BlockVector.PrintDetailedMap(json); + } + json.EndObject(); + } + } +} + 
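+// The JSON assembled by PrintDetailedMap() is emitted as part of
+// vmaBuildStatsString() below when the caller passes detailedMap == VK_TRUE;
+// the string is intended for offline inspection of pools, blocks and
+// suballocations rather than for programmatic consumption.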
+#endif // #if VMA_STATS_STRING_ENABLED + +//////////////////////////////////////////////////////////////////////////////// +// Public interface + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* pCreateInfo, + VmaAllocator* pAllocator) +{ + VMA_ASSERT(pCreateInfo && pAllocator); + VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || + (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3)); + VMA_DEBUG_LOG("vmaCreateAllocator"); + *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); + return (*pAllocator)->Init(pCreateInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator allocator) +{ + if(allocator != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyAllocator"); + VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; + vma_delete(&allocationCallbacks, allocator); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo) +{ + VMA_ASSERT(allocator && pAllocatorInfo); + pAllocatorInfo->instance = allocator->m_hInstance; + pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice(); + pAllocatorInfo->device = allocator->m_hDevice; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator allocator, + const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceProperties); + *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator allocator, + const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); + *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* pFlags) +{ + VMA_ASSERT(allocator && pFlags); + VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); + *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator allocator, + uint32_t frameIndex) +{ + VMA_ASSERT(allocator); + VMA_ASSERT(frameIndex != VMA_FRAME_INDEX_LOST); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->SetCurrentFrameIndex(frameIndex); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStats( + VmaAllocator allocator, + VmaStats* pStats) +{ + VMA_ASSERT(allocator && pStats); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->CalculateStats(pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetBudget( + VmaAllocator allocator, + VmaBudget* pBudget) +{ + VMA_ASSERT(allocator && pBudget); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->GetBudget(pBudget, 0, allocator->GetMemoryHeapCount()); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator allocator, + char** ppStatsString, + VkBool32 detailedMap) +{ + VMA_ASSERT(allocator && ppStatsString); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VmaStringBuilder sb(allocator); + { + VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb); + json.BeginObject(); + + VmaBudget budget[VK_MAX_MEMORY_HEAPS]; + allocator->GetBudget(budget, 0, allocator->GetMemoryHeapCount()); + + VmaStats stats; + allocator->CalculateStats(&stats); + + json.WriteString("Total"); + VmaPrintStatInfo(json, stats.total); + + for(uint32_t heapIndex = 0; 
heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex) + { + json.BeginString("Heap "); + json.ContinueString(heapIndex); + json.EndString(); + json.BeginObject(); + + json.WriteString("Size"); + json.WriteNumber(allocator->m_MemProps.memoryHeaps[heapIndex].size); + + json.WriteString("Flags"); + json.BeginArray(true); + if((allocator->m_MemProps.memoryHeaps[heapIndex].flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0) + { + json.WriteString("DEVICE_LOCAL"); + } + json.EndArray(); + + json.WriteString("Budget"); + json.BeginObject(); + { + json.WriteString("BlockBytes"); + json.WriteNumber(budget[heapIndex].blockBytes); + json.WriteString("AllocationBytes"); + json.WriteNumber(budget[heapIndex].allocationBytes); + json.WriteString("Usage"); + json.WriteNumber(budget[heapIndex].usage); + json.WriteString("Budget"); + json.WriteNumber(budget[heapIndex].budget); + } + json.EndObject(); + + if(stats.memoryHeap[heapIndex].blockCount > 0) + { + json.WriteString("Stats"); + VmaPrintStatInfo(json, stats.memoryHeap[heapIndex]); + } + + for(uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex) + { + if(allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex) + { + json.BeginString("Type "); + json.ContinueString(typeIndex); + json.EndString(); + + json.BeginObject(); + + json.WriteString("Flags"); + json.BeginArray(true); + VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags; + if((flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) != 0) + { + json.WriteString("DEVICE_LOCAL"); + } + if((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + json.WriteString("HOST_VISIBLE"); + } + if((flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0) + { + json.WriteString("HOST_COHERENT"); + } + if((flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) != 0) + { + json.WriteString("HOST_CACHED"); + } + if((flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) != 0) + { + json.WriteString("LAZILY_ALLOCATED"); + } + if((flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) != 0) + { + json.WriteString(" PROTECTED"); + } + if((flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + { + json.WriteString(" DEVICE_COHERENT"); + } + if((flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) != 0) + { + json.WriteString(" DEVICE_UNCACHED"); + } + json.EndArray(); + + if(stats.memoryType[typeIndex].blockCount > 0) + { + json.WriteString("Stats"); + VmaPrintStatInfo(json, stats.memoryType[typeIndex]); + } + + json.EndObject(); + } + } + + json.EndObject(); + } + if(detailedMap == VK_TRUE) + { + allocator->PrintDetailedMap(json); + } + + json.EndObject(); + } + + const size_t len = sb.GetLength(); + char* const pChars = vma_new_array(allocator, char, len + 1); + if(len > 0) + { + memcpy(pChars, sb.GetData(), len); + } + pChars[len] = '\0'; + *ppStatsString = pChars; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator allocator, + char* pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(allocator); + size_t len = strlen(pStatsString); + vma_delete_array(allocator, pStatsString, len + 1); + } +} + +#endif // #if VMA_STATS_STRING_ENABLED + +/* +This function is not protected by any mutex because it just reads immutable data. 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + memoryTypeBits &= allocator->GetGlobalMemoryTypeBits(); + + if(pAllocationCreateInfo->memoryTypeBits != 0) + { + memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; + } + + uint32_t requiredFlags = pAllocationCreateInfo->requiredFlags; + uint32_t preferredFlags = pAllocationCreateInfo->preferredFlags; + uint32_t notPreferredFlags = 0; + + // Convert usage to requiredFlags and preferredFlags. + switch(pAllocationCreateInfo->usage) + { + case VMA_MEMORY_USAGE_UNKNOWN: + break; + case VMA_MEMORY_USAGE_GPU_ONLY: + if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_CPU_ONLY: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + break; + case VMA_MEMORY_USAGE_CPU_TO_GPU: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + if(!allocator->IsIntegratedGpu() || (preferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + preferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_GPU_TO_CPU: + requiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + preferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + break; + case VMA_MEMORY_USAGE_CPU_COPY: + notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + break; + case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: + requiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + break; + default: + VMA_ASSERT(0); + break; + } + + // Avoid DEVICE_COHERENT unless explicitly requested. + if(((pAllocationCreateInfo->requiredFlags | pAllocationCreateInfo->preferredFlags) & + (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) + { + notPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY; + } + + *pMemoryTypeIndex = UINT32_MAX; + uint32_t minCost = UINT32_MAX; + for(uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < allocator->GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) + { + // This memory type is acceptable according to memoryTypeBits bitmask. + if((memTypeBit & memoryTypeBits) != 0) + { + const VkMemoryPropertyFlags currFlags = + allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; + // This memory type contains requiredFlags. + if((requiredFlags & ~currFlags) == 0) + { + // Calculate cost as number of bits from preferredFlags not present in this memory type. + uint32_t currCost = VmaCountBitsSet(preferredFlags & ~currFlags) + + VmaCountBitsSet(currFlags & notPreferredFlags); + // Remember memory type with lowest cost. + if(currCost < minCost) + { + *pMemoryTypeIndex = memTypeIndex; + if(currCost == 0) + { + return VK_SUCCESS; + } + minCost = currCost; + } + } + } + } + return (*pMemoryTypeIndex != UINT32_MAX) ? 
VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pBufferCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + VkBuffer hBuffer = VK_NULL_HANDLE; + VkResult res = allocator->GetVulkanFunctions().vkCreateBuffer( + hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + allocator->GetVulkanFunctions().vkGetBufferMemoryRequirements( + hDev, hBuffer, &memReq); + + res = vmaFindMemoryTypeIndex( + allocator, + memReq.memoryTypeBits, + pAllocationCreateInfo, + pMemoryTypeIndex); + + allocator->GetVulkanFunctions().vkDestroyBuffer( + hDev, hBuffer, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pImageCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + VkImage hImage = VK_NULL_HANDLE; + VkResult res = allocator->GetVulkanFunctions().vkCreateImage( + hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + allocator->GetVulkanFunctions().vkGetImageMemoryRequirements( + hDev, hImage, &memReq); + + res = vmaFindMemoryTypeIndex( + allocator, + memReq.memoryTypeBits, + pAllocationCreateInfo, + pMemoryTypeIndex); + + allocator->GetVulkanFunctions().vkDestroyImage( + hDev, hImage, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator allocator, + const VmaPoolCreateInfo* pCreateInfo, + VmaPool* pPool) +{ + VMA_ASSERT(allocator && pCreateInfo && pPool); + + VMA_DEBUG_LOG("vmaCreatePool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult res = allocator->CreatePool(pCreateInfo, pPool); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreatePool(allocator->GetCurrentFrameIndex(), *pCreateInfo, *pPool); + } +#endif + + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator allocator, + VmaPool pool) +{ + VMA_ASSERT(allocator); + + if(pool == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyPool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDestroyPool(allocator->GetCurrentFrameIndex(), pool); + } +#endif + + allocator->DestroyPool(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStats( + VmaAllocator allocator, + VmaPool pool, + VmaPoolStats* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetPoolStats(pool, pPoolStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaMakePoolAllocationsLost( + VmaAllocator allocator, + VmaPool pool, + size_t* pLostAllocationCount) +{ + VMA_ASSERT(allocator && pool); + + 
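+    // Part of the VMA_ALLOCATION_CREATE_CAN_BECOME_LOST protocol: allocations
+    // in the pool that have not been used (touched) recently, relative to the
+    // frame index set via vmaSetCurrentFrameIndex(), are marked lost so their
+    // memory can be reused.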
VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordMakePoolAllocationsLost(allocator->GetCurrentFrameIndex(), pool); + } +#endif + + allocator->MakePoolAllocationsLost(pool, pLostAllocationCount); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VMA_DEBUG_LOG("vmaCheckPoolCorruption"); + + return allocator->CheckPoolCorruption(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char** ppName) +{ + VMA_ASSERT(allocator && pool && ppName); + + VMA_DEBUG_LOG("vmaGetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *ppName = pool->GetName(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char* pName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaSetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + pool->SetName(pName); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordSetPoolName(allocator->GetCurrentFrameIndex(), pool, pName); + } +#endif +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + UINT32_MAX, // dedicatedBufferUsage + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemory( + allocator->GetCurrentFrameIndex(), + *pVkMemoryRequirements, + *pCreateInfo, + *pAllocation); + } +#endif + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo) +{ + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); + + VMA_DEBUG_LOG("vmaAllocateMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + UINT32_MAX, // dedicatedBufferUsage + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + allocationCount, + pAllocations); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryPages( + allocator->GetCurrentFrameIndex(), + *pVkMemoryRequirements, + *pCreateInfo, + (uint64_t)allocationCount, + pAllocations); + } +#endif + + if(pAllocationInfo != VMA_NULL && 
result == VK_SUCCESS) + { + for(size_t i = 0; i < allocationCount; ++i) + { + allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); + } + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator allocator, + VkBuffer buffer, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(buffer, vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + buffer, // dedicatedBuffer + UINT32_MAX, // dedicatedBufferUsage + VK_NULL_HANDLE, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryForBuffer( + allocator->GetCurrentFrameIndex(), + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pCreateInfo, + *pAllocation); + } +#endif + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator allocator, + VkImage image, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(image, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + UINT32_MAX, // dedicatedBufferUsage + image, // dedicatedImage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordAllocateMemoryForImage( + allocator->GetCurrentFrameIndex(), + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pCreateInfo, + *pAllocation); + } +#endif + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaFreeMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordFreeMemory( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + allocator->FreeMemory( + 1, // allocationCount + &allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator allocator, + size_t 
allocationCount, + const VmaAllocation* pAllocations) +{ + if(allocationCount == 0) + { + return; + } + + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaFreeMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordFreeMemoryPages( + allocator->GetCurrentFrameIndex(), + (uint64_t)allocationCount, + pAllocations); + } +#endif + + allocator->FreeMemory(allocationCount, pAllocations); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaResizeAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize newSize) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaResizeAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->ResizeAllocation(allocation, newSize); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordGetAllocationInfo( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + allocator->GetAllocationInfo(allocation, pAllocationInfo); +} + +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaTouchAllocation( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordTouchAllocation( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + return allocator->TouchAllocation(allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator allocator, + VmaAllocation allocation, + void* pUserData) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocation->SetUserData(allocator, pUserData); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordSetAllocationUserData( + allocator->GetCurrentFrameIndex(), + allocation, + pUserData); + } +#endif +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCreateLostAllocation( + VmaAllocator allocator, + VmaAllocation* pAllocation) +{ + VMA_ASSERT(allocator && pAllocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + + allocator->CreateLostAllocation(pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreateLostAllocation( + allocator->GetCurrentFrameIndex(), + *pAllocation); + } +#endif +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator allocator, + VmaAllocation allocation, + void** ppData) +{ + VMA_ASSERT(allocator && allocation && ppData); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult res = allocator->Map(allocation, ppData); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordMapMemory( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordUnmapMemory( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + allocator->Unmap(allocation); +} + +VMA_CALL_PRE VkResult 
VMA_CALL_POST vmaFlushAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaFlushAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordFlushAllocation( + allocator->GetCurrentFrameIndex(), + allocation, offset, size); + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(VmaAllocator allocator, VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaInvalidateAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordInvalidateAllocation( + allocator->GetCurrentFrameIndex(), + allocation, offset, size); + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaFlushAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + //TODO + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaInvalidateAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + //TODO + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(VmaAllocator allocator, uint32_t memoryTypeBits) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaCheckCorruption"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CheckCorruption(memoryTypeBits); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragment( + VmaAllocator allocator, + const VmaAllocation* pAllocations, + size_t allocationCount, + VkBool32* pAllocationsChanged, + const VmaDefragmentationInfo *pDefragmentationInfo, + VmaDefragmentationStats* pDefragmentationStats) +{ + // Deprecated interface, reimplemented using new one. 
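+    // The legacy parameters are translated onto the CPU-side limits of
+    // VmaDefragmentationInfo2 below; the GPU-side fields and command buffer
+    // stay zero, so this path never records GPU work. A rough sketch of the
+    // equivalent direct use of the newer API (allocs/count caller-provided):
+    //
+    //     VmaDefragmentationInfo2 info = {};
+    //     info.allocationCount = count;
+    //     info.pAllocations = allocs;
+    //     info.maxCpuBytesToMove = VK_WHOLE_SIZE;
+    //     info.maxCpuAllocationsToMove = UINT32_MAX;
+    //     VmaDefragmentationContext ctx = VK_NULL_HANDLE;
+    //     if(vmaDefragmentationBegin(allocator, &info, NULL, &ctx) == VK_NOT_READY)
+    //         vmaDefragmentationEnd(allocator, ctx);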
+ + VmaDefragmentationInfo2 info2 = {}; + info2.allocationCount = (uint32_t)allocationCount; + info2.pAllocations = pAllocations; + info2.pAllocationsChanged = pAllocationsChanged; + if(pDefragmentationInfo != VMA_NULL) + { + info2.maxCpuAllocationsToMove = pDefragmentationInfo->maxAllocationsToMove; + info2.maxCpuBytesToMove = pDefragmentationInfo->maxBytesToMove; + } + else + { + info2.maxCpuAllocationsToMove = UINT32_MAX; + info2.maxCpuBytesToMove = VK_WHOLE_SIZE; + } + // info2.flags, maxGpuAllocationsToMove, maxGpuBytesToMove, commandBuffer deliberately left zero. + + VmaDefragmentationContext ctx; + VkResult res = vmaDefragmentationBegin(allocator, &info2, pDefragmentationStats, &ctx); + if(res == VK_NOT_READY) + { + res = vmaDefragmentationEnd( allocator, ctx); + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationBegin( + VmaAllocator allocator, + const VmaDefragmentationInfo2* pInfo, + VmaDefragmentationStats* pStats, + VmaDefragmentationContext *pContext) +{ + VMA_ASSERT(allocator && pInfo && pContext); + + // Degenerate case: Nothing to defragment. + if(pInfo->allocationCount == 0 && pInfo->poolCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(pInfo->allocationCount == 0 || pInfo->pAllocations != VMA_NULL); + VMA_ASSERT(pInfo->poolCount == 0 || pInfo->pPools != VMA_NULL); + VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->allocationCount, pInfo->pAllocations)); + VMA_HEAVY_ASSERT(VmaValidatePointerArray(pInfo->poolCount, pInfo->pPools)); + + VMA_DEBUG_LOG("vmaDefragmentationBegin"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult res = allocator->DefragmentationBegin(*pInfo, pStats, pContext); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDefragmentationBegin( + allocator->GetCurrentFrameIndex(), *pInfo, *pContext); + } +#endif + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaDefragmentationEnd( + VmaAllocator allocator, + VmaDefragmentationContext context) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaDefragmentationEnd"); + + if(context != VK_NULL_HANDLE) + { + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDefragmentationEnd( + allocator->GetCurrentFrameIndex(), context); + } +#endif + + return allocator->DefragmentationEnd(context); + } + else + { + return VK_SUCCESS; + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator allocator, + VmaDefragmentationContext context, + VmaDefragmentationPassInfo* pInfo + ) +{ + VMA_ASSERT(allocator); + VMA_ASSERT(pInfo); + + VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(context == VK_NULL_HANDLE) + { + pInfo->moveCount = 0; + return VK_SUCCESS; + } + + return allocator->DefragmentationPassBegin(pInfo, context); +} +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator allocator, + VmaDefragmentationContext context) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaEndDefragmentationPass"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(context == VK_NULL_HANDLE) + return VK_SUCCESS; + + return allocator->DefragmentationPassEnd(context); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkBuffer buffer) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, 0, buffer, 
VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer buffer, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkImage image) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, 0, image, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkImage image, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_VALIDATION_FAILED_EXT; + } + + VMA_DEBUG_LOG("vmaCreateBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + pBufferCreateInfo->usage, // dedicatedBufferUsage + VK_NULL_HANDLE, // dedicatedImage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreateBuffer( + allocator->GetCurrentFrameIndex(), + *pBufferCreateInfo, + *pAllocationCreateInfo, + *pAllocation); + } +#endif + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. 
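+            // From the application's point of view the whole sequence above is
+            // a single call. A minimal hedged sketch (size and usage are
+            // placeholder values chosen purely for illustration):
+            //
+            //     VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+            //     bufInfo.size = 65536;
+            //     bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+            //     VmaAllocationCreateInfo allocCreateInfo = {};
+            //     allocCreateInfo.usage = VMA_MEMORY_USAGE_GPU_ONLY;
+            //     VkBuffer buf; VmaAllocation alloc;
+            //     vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo, &buf, &alloc, NULL);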
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator allocator, + VkBuffer buffer, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordDestroyBuffer( + allocator->GetCurrentFrameIndex(), + allocation); + } +#endif + + if(buffer != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); + } + + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); + + if(pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_VALIDATION_FAILED_EXT; + } + + VMA_DEBUG_LOG("vmaCreateImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pImage = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if(res >= 0) + { + VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; + + // 2. Allocate memory using allocator. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(*pImage, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + UINT32_MAX, // dedicatedBufferUsage + *pImage, // dedicatedImage + *pAllocationCreateInfo, + suballocType, + 1, // allocationCount + pAllocation); + +#if VMA_RECORDING_ENABLED + if(allocator->GetRecorder() != VMA_NULL) + { + allocator->GetRecorder()->RecordCreateImage( + allocator->GetCurrentFrameIndex(), + *pImageCreateInfo, + *pAllocationCreateInfo, + *pAllocation); + } +#endif + + if(res >= 0) + { + // 3. Bind image with memory. 
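+            // As with buffers, binding is skipped when
+            // VMA_ALLOCATION_CREATE_DONT_BIND_BIT is set, leaving the caller
+            // to bind later through vmaBindImageMemory()/vmaBindImageMemory2().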
+            if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
+            {
+                res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
+            }
+            if(res >= 0)
+            {
+                // All steps succeeded.
+                #if VMA_STATS_STRING_ENABLED
+                    (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
+                #endif
+                if(pAllocationInfo != VMA_NULL)
+                {
+                    allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
+                }
+
+                return VK_SUCCESS;
+            }
+            allocator->FreeMemory(
+                1, // allocationCount
+                pAllocation);
+            *pAllocation = VK_NULL_HANDLE;
+            (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+            *pImage = VK_NULL_HANDLE;
+            return res;
+        }
+        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
+        *pImage = VK_NULL_HANDLE;
+        return res;
+    }
+    return res;
+}
+
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+    VmaAllocator allocator,
+    VkImage image,
+    VmaAllocation allocation)
+{
+    VMA_ASSERT(allocator);
+
+    if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
+    {
+        return;
+    }
+
+    VMA_DEBUG_LOG("vmaDestroyImage");
+
+    VMA_DEBUG_GLOBAL_MUTEX_LOCK
+
+#if VMA_RECORDING_ENABLED
+    if(allocator->GetRecorder() != VMA_NULL)
+    {
+        allocator->GetRecorder()->RecordDestroyImage(
+            allocator->GetCurrentFrameIndex(),
+            allocation);
+    }
+#endif
+
+    if(image != VK_NULL_HANDLE)
+    {
+        (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
+    }
+    if(allocation != VK_NULL_HANDLE)
+    {
+        allocator->FreeMemory(
+            1, // allocationCount
+            &allocation);
+    }
+}
+
+#endif // #ifdef VMA_IMPLEMENTATION
diff --git a/vma-rs/vulkan/vk_icd.h b/vma-rs/vulkan/vk_icd.h
new file mode 100644
index 0000000..5dff59a
--- /dev/null
+++ b/vma-rs/vulkan/vk_icd.h
@@ -0,0 +1,183 @@
+//
+// File: vk_icd.h
+//
+/*
+ * Copyright (c) 2015-2016 The Khronos Group Inc.
+ * Copyright (c) 2015-2016 Valve Corporation
+ * Copyright (c) 2015-2016 LunarG, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef VKICD_H
+#define VKICD_H
+
+#include "vulkan.h"
+#include <stdbool.h>
+
+// Loader-ICD version negotiation API. Versions add the following features:
+// Version 0 - Initial. Doesn't support vk_icdGetInstanceProcAddr
+//             or vk_icdNegotiateLoaderICDInterfaceVersion.
+// Version 1 - Add support for vk_icdGetInstanceProcAddr.
+// Version 2 - Add Loader/ICD Interface version negotiation
+//             via vk_icdNegotiateLoaderICDInterfaceVersion.
+// Version 3 - Add ICD creation/destruction of KHR_surface objects.
+// Version 4 - Add unknown physical device extension querying via
+//             vk_icdGetPhysicalDeviceProcAddr.
+// Version 5 - Tells ICDs that the loader is now paying attention to the
+//             application version of Vulkan passed into the ApplicationInfo
+//             structure during vkCreateInstance. This will tell the ICD
+//             that if the loader is older, it should automatically fail a
+//             call for any API version > 1.0. Otherwise, the loader will
+//             manually determine if it can support the expected version.
+#define CURRENT_LOADER_ICD_INTERFACE_VERSION 5
+#define MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION 0
+#define MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION 4
+typedef VkResult(VKAPI_PTR *PFN_vkNegotiateLoaderICDInterfaceVersion)(uint32_t *pVersion);
+
+// This is defined in vk_layer.h which will be found by the loader, but if an ICD is building against this
+// file directly, it won't be found.
+#ifndef PFN_GetPhysicalDeviceProcAddr
+typedef PFN_vkVoidFunction(VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char *pName);
+#endif
+
+/*
+ * The ICD must reserve space for a pointer for the loader's dispatch
+ * table, at the start of <each object>.
+ * The ICD must initialize this variable using the SET_LOADER_MAGIC_VALUE macro.
+ */
+
+#define ICD_LOADER_MAGIC 0x01CDC0DE
+
+typedef union {
+    uintptr_t loaderMagic;
+    void *loaderData;
+} VK_LOADER_DATA;
+
+static inline void set_loader_magic_value(void *pNewObject) {
+    VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject;
+    loader_info->loaderMagic = ICD_LOADER_MAGIC;
+}
+
+static inline bool valid_loader_magic_value(void *pNewObject) {
+    const VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject;
+    return (loader_info->loaderMagic & 0xffffffff) == ICD_LOADER_MAGIC;
+}
+
+/*
+ * Windows and Linux ICDs will treat VkSurfaceKHR as a pointer to a struct that
+ * contains the platform-specific connection and surface information.
+ */
+typedef enum {
+    VK_ICD_WSI_PLATFORM_MIR,
+    VK_ICD_WSI_PLATFORM_WAYLAND,
+    VK_ICD_WSI_PLATFORM_WIN32,
+    VK_ICD_WSI_PLATFORM_XCB,
+    VK_ICD_WSI_PLATFORM_XLIB,
+    VK_ICD_WSI_PLATFORM_ANDROID,
+    VK_ICD_WSI_PLATFORM_MACOS,
+    VK_ICD_WSI_PLATFORM_IOS,
+    VK_ICD_WSI_PLATFORM_DISPLAY,
+    VK_ICD_WSI_PLATFORM_HEADLESS,
+    VK_ICD_WSI_PLATFORM_METAL,
+} VkIcdWsiPlatform;
+
+typedef struct {
+    VkIcdWsiPlatform platform;
+} VkIcdSurfaceBase;
+
+#ifdef VK_USE_PLATFORM_MIR_KHR
+typedef struct {
+    VkIcdSurfaceBase base;
+    MirConnection *connection;
+    MirSurface *mirSurface;
+} VkIcdSurfaceMir;
+#endif // VK_USE_PLATFORM_MIR_KHR
+
+#ifdef VK_USE_PLATFORM_WAYLAND_KHR
+typedef struct {
+    VkIcdSurfaceBase base;
+    struct wl_display *display;
+    struct wl_surface *surface;
+} VkIcdSurfaceWayland;
+#endif // VK_USE_PLATFORM_WAYLAND_KHR
+
+#ifdef VK_USE_PLATFORM_WIN32_KHR
+typedef struct {
+    VkIcdSurfaceBase base;
+    HINSTANCE hinstance;
+    HWND hwnd;
+} VkIcdSurfaceWin32;
+#endif // VK_USE_PLATFORM_WIN32_KHR
+
+#ifdef VK_USE_PLATFORM_XCB_KHR
+typedef struct {
+    VkIcdSurfaceBase base;
+    xcb_connection_t *connection;
+    xcb_window_t window;
+} VkIcdSurfaceXcb;
+#endif // VK_USE_PLATFORM_XCB_KHR
+
+#ifdef VK_USE_PLATFORM_XLIB_KHR
+typedef struct {
+    VkIcdSurfaceBase base;
+    Display *dpy;
+    Window window;
+} VkIcdSurfaceXlib;
+#endif // VK_USE_PLATFORM_XLIB_KHR
+
+#ifdef VK_USE_PLATFORM_ANDROID_KHR
+typedef struct {
+    VkIcdSurfaceBase base;
+    struct ANativeWindow *window;
+} VkIcdSurfaceAndroid;
+#endif // VK_USE_PLATFORM_ANDROID_KHR
+
+#ifdef VK_USE_PLATFORM_MACOS_MVK
+typedef struct {
+    VkIcdSurfaceBase base;
+    const void *pView;
+} VkIcdSurfaceMacOS;
+#endif // VK_USE_PLATFORM_MACOS_MVK
+
+#ifdef VK_USE_PLATFORM_IOS_MVK
+typedef struct {
+    VkIcdSurfaceBase base;
+    const void *pView;
+} VkIcdSurfaceIOS;
+#endif // VK_USE_PLATFORM_IOS_MVK
+
+typedef struct {
+    VkIcdSurfaceBase base;
+    VkDisplayModeKHR displayMode;
+    uint32_t planeIndex;
+    uint32_t planeStackIndex;
+    VkSurfaceTransformFlagBitsKHR transform;
+    float globalAlpha;
+
VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkIcdSurfaceDisplay; + +typedef struct { + VkIcdSurfaceBase base; +} VkIcdSurfaceHeadless; + +#ifdef VK_USE_PLATFORM_METAL_EXT +typedef struct { + VkIcdSurfaceBase base; + const CAMetalLayer *pLayer; +} VkIcdSurfaceMetal; +#endif // VK_USE_PLATFORM_METAL_EXT + +#endif // VKICD_H diff --git a/vma-rs/vulkan/vk_layer.h b/vma-rs/vulkan/vk_layer.h new file mode 100644 index 0000000..fa76520 --- /dev/null +++ b/vma-rs/vulkan/vk_layer.h @@ -0,0 +1,202 @@ +// +// File: vk_layer.h +// +/* + * Copyright (c) 2015-2017 The Khronos Group Inc. + * Copyright (c) 2015-2017 Valve Corporation + * Copyright (c) 2015-2017 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* Need to define dispatch table + * Core struct can then have ptr to dispatch table at the top + * Along with object ptrs for current and next OBJ + */ +#pragma once + +#include "vulkan.h" +#if defined(__GNUC__) && __GNUC__ >= 4 +#define VK_LAYER_EXPORT __attribute__((visibility("default"))) +#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590) +#define VK_LAYER_EXPORT __attribute__((visibility("default"))) +#else +#define VK_LAYER_EXPORT +#endif + +#define MAX_NUM_UNKNOWN_EXTS 250 + + // Loader-Layer version negotiation API. Versions add the following features: + // Versions 0/1 - Initial. Doesn't support vk_layerGetPhysicalDeviceProcAddr + // or vk_icdNegotiateLoaderLayerInterfaceVersion. + // Version 2 - Add support for vk_layerGetPhysicalDeviceProcAddr and + // vk_icdNegotiateLoaderLayerInterfaceVersion. +#define CURRENT_LOADER_LAYER_INTERFACE_VERSION 2 +#define MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION 1 + +#define VK_CURRENT_CHAIN_VERSION 1 + +// Typedef for use in the interfaces below +typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName); + +// Version negotiation values +typedef enum VkNegotiateLayerStructType { + LAYER_NEGOTIATE_UNINTIALIZED = 0, + LAYER_NEGOTIATE_INTERFACE_STRUCT = 1, +} VkNegotiateLayerStructType; + +// Version negotiation structures +typedef struct VkNegotiateLayerInterface { + VkNegotiateLayerStructType sType; + void *pNext; + uint32_t loaderLayerInterfaceVersion; + PFN_vkGetInstanceProcAddr pfnGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr pfnGetDeviceProcAddr; + PFN_GetPhysicalDeviceProcAddr pfnGetPhysicalDeviceProcAddr; +} VkNegotiateLayerInterface; + +// Version negotiation functions +typedef VkResult (VKAPI_PTR *PFN_vkNegotiateLoaderLayerInterfaceVersion)(VkNegotiateLayerInterface *pVersionStruct); + +// Function prototype for unknown physical device extension command +typedef VkResult(VKAPI_PTR *PFN_PhysDevExt)(VkPhysicalDevice phys_device); + +// ------------------------------------------------------------------------------------------------ +// CreateInstance and CreateDevice support structures + +/* Sub type of structure for instance and device loader ext of CreateInfo. 
+ * When sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO + * or sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO + * then VkLayerFunction indicates struct type pointed to by pNext + */ +typedef enum VkLayerFunction_ { + VK_LAYER_LINK_INFO = 0, + VK_LOADER_DATA_CALLBACK = 1, + VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK = 2 +} VkLayerFunction; + +typedef struct VkLayerInstanceLink_ { + struct VkLayerInstanceLink_ *pNext; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; + PFN_GetPhysicalDeviceProcAddr pfnNextGetPhysicalDeviceProcAddr; +} VkLayerInstanceLink; + +/* + * When creating the device chain the loader needs to pass + * down information about it's device structure needed at + * the end of the chain. Passing the data via the + * VkLayerDeviceInfo avoids issues with finding the + * exact instance being used. + */ +typedef struct VkLayerDeviceInfo_ { + void *device_info; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; +} VkLayerDeviceInfo; + +typedef VkResult (VKAPI_PTR *PFN_vkSetInstanceLoaderData)(VkInstance instance, + void *object); +typedef VkResult (VKAPI_PTR *PFN_vkSetDeviceLoaderData)(VkDevice device, + void *object); +typedef VkResult (VKAPI_PTR *PFN_vkLayerCreateDevice)(VkInstance instance, VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo, + const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA); +typedef void (VKAPI_PTR *PFN_vkLayerDestroyDevice)(VkDevice physicalDevice, const VkAllocationCallbacks *pAllocator, PFN_vkDestroyDevice destroyFunction); +typedef struct { + VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO + const void *pNext; + VkLayerFunction function; + union { + VkLayerInstanceLink *pLayerInfo; + PFN_vkSetInstanceLoaderData pfnSetInstanceLoaderData; + struct { + PFN_vkLayerCreateDevice pfnLayerCreateDevice; + PFN_vkLayerDestroyDevice pfnLayerDestroyDevice; + } layerDevice; + } u; +} VkLayerInstanceCreateInfo; + +typedef struct VkLayerDeviceLink_ { + struct VkLayerDeviceLink_ *pNext; + PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; + PFN_vkGetDeviceProcAddr pfnNextGetDeviceProcAddr; +} VkLayerDeviceLink; + +typedef struct { + VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO + const void *pNext; + VkLayerFunction function; + union { + VkLayerDeviceLink *pLayerInfo; + PFN_vkSetDeviceLoaderData pfnSetDeviceLoaderData; + } u; +} VkLayerDeviceCreateInfo; + +#ifdef __cplusplus +extern "C" { +#endif + +VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct); + +typedef enum VkChainType { + VK_CHAIN_TYPE_UNKNOWN = 0, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES = 1, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES = 2, + VK_CHAIN_TYPE_ENUMERATE_INSTANCE_VERSION = 3, +} VkChainType; + +typedef struct VkChainHeader { + VkChainType type; + uint32_t version; + uint32_t size; +} VkChainHeader; + +typedef struct VkEnumerateInstanceExtensionPropertiesChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceExtensionPropertiesChain *, const char *, uint32_t *, + VkExtensionProperties *); + const struct VkEnumerateInstanceExtensionPropertiesChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties) const { + return pfnNextLayer(pNextLink, pLayerName, pPropertyCount, pProperties); + } +#endif +} 
VkEnumerateInstanceExtensionPropertiesChain; + +typedef struct VkEnumerateInstanceLayerPropertiesChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceLayerPropertiesChain *, uint32_t *, VkLayerProperties *); + const struct VkEnumerateInstanceLayerPropertiesChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(uint32_t *pPropertyCount, VkLayerProperties *pProperties) const { + return pfnNextLayer(pNextLink, pPropertyCount, pProperties); + } +#endif +} VkEnumerateInstanceLayerPropertiesChain; + +typedef struct VkEnumerateInstanceVersionChain { + VkChainHeader header; + VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceVersionChain *, uint32_t *); + const struct VkEnumerateInstanceVersionChain *pNextLink; + +#if defined(__cplusplus) + inline VkResult CallDown(uint32_t *pApiVersion) const { + return pfnNextLayer(pNextLink, pApiVersion); + } +#endif +} VkEnumerateInstanceVersionChain; + +#ifdef __cplusplus +} +#endif diff --git a/vma-rs/vulkan/vk_platform.h b/vma-rs/vulkan/vk_platform.h new file mode 100644 index 0000000..7289299 --- /dev/null +++ b/vma-rs/vulkan/vk_platform.h @@ -0,0 +1,92 @@ +// +// File: vk_platform.h +// +/* +** Copyright (c) 2014-2017 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + + +#ifndef VK_PLATFORM_H_ +#define VK_PLATFORM_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif // __cplusplus + +/* +*************************************************************************************************** +* Platform-specific directives and type declarations +*************************************************************************************************** +*/ + +/* Platform-specific calling convention macros. + * + * Platforms should define these so that Vulkan clients call Vulkan commands + * with the same calling conventions that the Vulkan implementation expects. + * + * VKAPI_ATTR - Placed before the return type in function declarations. + * Useful for C++11 and GCC/Clang-style function attribute syntax. + * VKAPI_CALL - Placed after the return type in function declarations. + * Useful for MSVC-style calling convention syntax. + * VKAPI_PTR - Placed between the '(' and '*' in function pointer types. + * + * Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void); + * Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void); + */ +#if defined(_WIN32) + // On Windows, Vulkan commands use the stdcall convention + #define VKAPI_ATTR + #define VKAPI_CALL __stdcall + #define VKAPI_PTR VKAPI_CALL +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7 + #error "Vulkan isn't supported for the 'armeabi' NDK ABI" +#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE) + // On Android 32-bit ARM targets, Vulkan functions use the "hardfloat" + // calling convention, i.e. float parameters are passed in registers. 
This + // is true even if the rest of the application passes floats on the stack, + // as it does by default when compiling for the armeabi-v7a NDK ABI. + #define VKAPI_ATTR __attribute__((pcs("aapcs-vfp"))) + #define VKAPI_CALL + #define VKAPI_PTR VKAPI_ATTR +#else + // On other platforms, use the default calling convention + #define VKAPI_ATTR + #define VKAPI_CALL + #define VKAPI_PTR +#endif + +#include + +#if !defined(VK_NO_STDINT_H) + #if defined(_MSC_VER) && (_MSC_VER < 1600) + typedef signed __int8 int8_t; + typedef unsigned __int8 uint8_t; + typedef signed __int16 int16_t; + typedef unsigned __int16 uint16_t; + typedef signed __int32 int32_t; + typedef unsigned __int32 uint32_t; + typedef signed __int64 int64_t; + typedef unsigned __int64 uint64_t; + #else + #include + #endif +#endif // !defined(VK_NO_STDINT_H) + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif diff --git a/vma-rs/vulkan/vk_sdk_platform.h b/vma-rs/vulkan/vk_sdk_platform.h new file mode 100644 index 0000000..96d8676 --- /dev/null +++ b/vma-rs/vulkan/vk_sdk_platform.h @@ -0,0 +1,69 @@ +// +// File: vk_sdk_platform.h +// +/* + * Copyright (c) 2015-2016 The Khronos Group Inc. + * Copyright (c) 2015-2016 Valve Corporation + * Copyright (c) 2015-2016 LunarG, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef VK_SDK_PLATFORM_H +#define VK_SDK_PLATFORM_H + +#if defined(_WIN32) +#define NOMINMAX +#ifndef __cplusplus +#undef inline +#define inline __inline +#endif // __cplusplus + +#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) +// C99: +// Microsoft didn't implement C99 in Visual Studio; but started adding it with +// VS2013. However, VS2013 still didn't have snprintf(). The following is a +// work-around (Note: The _CRT_SECURE_NO_WARNINGS macro must be set in the +// "CMakeLists.txt" file). +// NOTE: This is fixed in Visual Studio 2015. +#define snprintf _snprintf +#endif + +#define strdup _strdup + +#endif // _WIN32 + +// Check for noexcept support using clang, with fallback to Windows or GCC version numbers +#ifndef NOEXCEPT +#if defined(__clang__) +#if __has_feature(cxx_noexcept) +#define HAS_NOEXCEPT +#endif +#else +#if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46 +#define HAS_NOEXCEPT +#else +#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026 && defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS +#define HAS_NOEXCEPT +#endif +#endif +#endif + +#ifdef HAS_NOEXCEPT +#define NOEXCEPT noexcept +#else +#define NOEXCEPT +#endif +#endif + +#endif // VK_SDK_PLATFORM_H diff --git a/vma-rs/vulkan/vulkan.h b/vma-rs/vulkan/vulkan.h new file mode 100644 index 0000000..5f853f9 --- /dev/null +++ b/vma-rs/vulkan/vulkan.h @@ -0,0 +1,86 @@ +#ifndef VULKAN_H_ +#define VULKAN_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. 
+** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +#include "vk_platform.h" +#include "vulkan_core.h" + +#ifdef VK_USE_PLATFORM_ANDROID_KHR +#include "vulkan_android.h" +#endif + +#ifdef VK_USE_PLATFORM_FUCHSIA +#include +#include "vulkan_fuchsia.h" +#endif + +#ifdef VK_USE_PLATFORM_IOS_MVK +#include "vulkan_ios.h" +#endif + + +#ifdef VK_USE_PLATFORM_MACOS_MVK +#include "vulkan_macos.h" +#endif + +#ifdef VK_USE_PLATFORM_METAL_EXT +#include "vulkan_metal.h" +#endif + +#ifdef VK_USE_PLATFORM_VI_NN +#include "vulkan_vi.h" +#endif + + +#ifdef VK_USE_PLATFORM_WAYLAND_KHR +#include +#include "vulkan_wayland.h" +#endif + + +#ifdef VK_USE_PLATFORM_WIN32_KHR +#include +#include "vulkan_win32.h" +#endif + + +#ifdef VK_USE_PLATFORM_XCB_KHR +#include +#include "vulkan_xcb.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_KHR +#include +#include "vulkan_xlib.h" +#endif + + +#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT +#include +#include +#include "vulkan_xlib_xrandr.h" +#endif + + +#ifdef VK_USE_PLATFORM_GGP +#include +#include "vulkan_ggp.h" +#endif + +#endif // VULKAN_H_ diff --git a/vma-rs/vulkan/vulkan_android.h b/vma-rs/vulkan/vulkan_android.h new file mode 100644 index 0000000..9b8d3e2 --- /dev/null +++ b/vma-rs/vulkan/vulkan_android.h @@ -0,0 +1,122 @@ +#ifndef VULKAN_ANDROID_H_ +#define VULKAN_ANDROID_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_android_surface 1 +struct ANativeWindow; +#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6 +#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface" +typedef VkFlags VkAndroidSurfaceCreateFlagsKHR; +typedef struct VkAndroidSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkAndroidSurfaceCreateFlagsKHR flags; + struct ANativeWindow* window; +} VkAndroidSurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR( + VkInstance instance, + const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_ANDROID_external_memory_android_hardware_buffer 1 +struct AHardwareBuffer; +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 3 +#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME "VK_ANDROID_external_memory_android_hardware_buffer" +typedef struct VkAndroidHardwareBufferUsageANDROID { + VkStructureType sType; + void* pNext; + uint64_t androidHardwareBufferUsage; +} VkAndroidHardwareBufferUsageANDROID; + +typedef struct VkAndroidHardwareBufferPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkDeviceSize allocationSize; + uint32_t memoryTypeBits; +} VkAndroidHardwareBufferPropertiesANDROID; + +typedef struct VkAndroidHardwareBufferFormatPropertiesANDROID { + VkStructureType sType; + void* pNext; + VkFormat format; + uint64_t externalFormat; + VkFormatFeatureFlags formatFeatures; + VkComponentMapping samplerYcbcrConversionComponents; + VkSamplerYcbcrModelConversion suggestedYcbcrModel; + VkSamplerYcbcrRange suggestedYcbcrRange; + VkChromaLocation suggestedXChromaOffset; + VkChromaLocation suggestedYChromaOffset; +} VkAndroidHardwareBufferFormatPropertiesANDROID; + +typedef struct VkImportAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + struct AHardwareBuffer* buffer; +} VkImportAndroidHardwareBufferInfoANDROID; + +typedef struct VkMemoryGetAndroidHardwareBufferInfoANDROID { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; +} VkMemoryGetAndroidHardwareBufferInfoANDROID; + +typedef struct VkExternalFormatANDROID { + VkStructureType sType; + void* pNext; + uint64_t externalFormat; +} VkExternalFormatANDROID; + +typedef VkResult (VKAPI_PTR *PFN_vkGetAndroidHardwareBufferPropertiesANDROID)(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryAndroidHardwareBufferANDROID)(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetAndroidHardwareBufferPropertiesANDROID( + VkDevice device, + const struct AHardwareBuffer* buffer, + VkAndroidHardwareBufferPropertiesANDROID* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryAndroidHardwareBufferANDROID( + VkDevice device, + const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, + struct AHardwareBuffer** pBuffer); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_core.h b/vma-rs/vulkan/vulkan_core.h new file mode 100644 index 0000000..ea96fc4 --- /dev/null 
+++ b/vma-rs/vulkan/vulkan_core.h @@ -0,0 +1,10722 @@ +#ifndef VULKAN_CORE_H_ +#define VULKAN_CORE_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_VERSION_1_0 1 +#include "vk_platform.h" +#define VK_MAKE_VERSION(major, minor, patch) \ + (((major) << 22) | ((minor) << 12) | (patch)) + +// DEPRECATED: This define has been removed. Specific version defines (e.g. VK_API_VERSION_1_0), or the VK_MAKE_VERSION macro, should be used instead. +//#define VK_API_VERSION VK_MAKE_VERSION(1, 0, 0) // Patch version should always be set to 0 + +// Vulkan 1.0 version number +#define VK_API_VERSION_1_0 VK_MAKE_VERSION(1, 0, 0)// Patch version should always be set to 0 + +#define VK_VERSION_MAJOR(version) ((uint32_t)(version) >> 22) +#define VK_VERSION_MINOR(version) (((uint32_t)(version) >> 12) & 0x3ff) +#define VK_VERSION_PATCH(version) ((uint32_t)(version) & 0xfff) +// Version of this file +#define VK_HEADER_VERSION 131 + + +#define VK_NULL_HANDLE 0 + + +#define VK_DEFINE_HANDLE(object) typedef struct object##_T* object; + + +#if !defined(VK_DEFINE_NON_DISPATCHABLE_HANDLE) +#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__) + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object; +#else + #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object; +#endif +#endif + +typedef uint32_t VkFlags; +typedef uint32_t VkBool32; +typedef uint64_t VkDeviceSize; +typedef uint32_t VkSampleMask; +VK_DEFINE_HANDLE(VkInstance) +VK_DEFINE_HANDLE(VkPhysicalDevice) +VK_DEFINE_HANDLE(VkDevice) +VK_DEFINE_HANDLE(VkQueue) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSemaphore) +VK_DEFINE_HANDLE(VkCommandBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFence) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeviceMemory) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImage) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkEvent) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkQueryPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkBufferView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkImageView) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkShaderModule) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineCache) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipelineLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkRenderPass) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPipeline) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSetLayout) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSampler) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorPool) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorSet) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkFramebuffer) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkCommandPool) +#define VK_LOD_CLAMP_NONE 1000.0f +#define VK_REMAINING_MIP_LEVELS (~0U) +#define 
VK_REMAINING_ARRAY_LAYERS (~0U) +#define VK_WHOLE_SIZE (~0ULL) +#define VK_ATTACHMENT_UNUSED (~0U) +#define VK_TRUE 1 +#define VK_FALSE 0 +#define VK_QUEUE_FAMILY_IGNORED (~0U) +#define VK_SUBPASS_EXTERNAL (~0U) +#define VK_MAX_PHYSICAL_DEVICE_NAME_SIZE 256 +#define VK_UUID_SIZE 16 +#define VK_MAX_MEMORY_TYPES 32 +#define VK_MAX_MEMORY_HEAPS 16 +#define VK_MAX_EXTENSION_NAME_SIZE 256 +#define VK_MAX_DESCRIPTION_SIZE 256 + +typedef enum VkPipelineCacheHeaderVersion { + VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1, + VK_PIPELINE_CACHE_HEADER_VERSION_BEGIN_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE, + VK_PIPELINE_CACHE_HEADER_VERSION_END_RANGE = VK_PIPELINE_CACHE_HEADER_VERSION_ONE, + VK_PIPELINE_CACHE_HEADER_VERSION_RANGE_SIZE = (VK_PIPELINE_CACHE_HEADER_VERSION_ONE - VK_PIPELINE_CACHE_HEADER_VERSION_ONE + 1), + VK_PIPELINE_CACHE_HEADER_VERSION_MAX_ENUM = 0x7FFFFFFF +} VkPipelineCacheHeaderVersion; + +typedef enum VkResult { + VK_SUCCESS = 0, + VK_NOT_READY = 1, + VK_TIMEOUT = 2, + VK_EVENT_SET = 3, + VK_EVENT_RESET = 4, + VK_INCOMPLETE = 5, + VK_ERROR_OUT_OF_HOST_MEMORY = -1, + VK_ERROR_OUT_OF_DEVICE_MEMORY = -2, + VK_ERROR_INITIALIZATION_FAILED = -3, + VK_ERROR_DEVICE_LOST = -4, + VK_ERROR_MEMORY_MAP_FAILED = -5, + VK_ERROR_LAYER_NOT_PRESENT = -6, + VK_ERROR_EXTENSION_NOT_PRESENT = -7, + VK_ERROR_FEATURE_NOT_PRESENT = -8, + VK_ERROR_INCOMPATIBLE_DRIVER = -9, + VK_ERROR_TOO_MANY_OBJECTS = -10, + VK_ERROR_FORMAT_NOT_SUPPORTED = -11, + VK_ERROR_FRAGMENTED_POOL = -12, + VK_ERROR_UNKNOWN = -13, + VK_ERROR_OUT_OF_POOL_MEMORY = -1000069000, + VK_ERROR_INVALID_EXTERNAL_HANDLE = -1000072003, + VK_ERROR_FRAGMENTATION = -1000161000, + VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS = -1000257000, + VK_ERROR_SURFACE_LOST_KHR = -1000000000, + VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1000000001, + VK_SUBOPTIMAL_KHR = 1000001003, + VK_ERROR_OUT_OF_DATE_KHR = -1000001004, + VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1000003001, + VK_ERROR_VALIDATION_FAILED_EXT = -1000011001, + VK_ERROR_INVALID_SHADER_NV = -1000012000, + VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1000158000, + VK_ERROR_NOT_PERMITTED_EXT = -1000174001, + VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1000255000, + VK_ERROR_OUT_OF_POOL_MEMORY_KHR = VK_ERROR_OUT_OF_POOL_MEMORY, + VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR = VK_ERROR_INVALID_EXTERNAL_HANDLE, + VK_ERROR_FRAGMENTATION_EXT = VK_ERROR_FRAGMENTATION, + VK_ERROR_INVALID_DEVICE_ADDRESS_EXT = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS_KHR = VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS, + VK_RESULT_BEGIN_RANGE = VK_ERROR_UNKNOWN, + VK_RESULT_END_RANGE = VK_INCOMPLETE, + VK_RESULT_RANGE_SIZE = (VK_INCOMPLETE - VK_ERROR_UNKNOWN + 1), + VK_RESULT_MAX_ENUM = 0x7FFFFFFF +} VkResult; + +typedef enum VkStructureType { + VK_STRUCTURE_TYPE_APPLICATION_INFO = 0, + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2, + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3, + VK_STRUCTURE_TYPE_SUBMIT_INFO = 4, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5, + VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6, + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7, + VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8, + VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9, + VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11, + VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12, + VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13, + VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14, + VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15, + 
VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16, + VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19, + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23, + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24, + VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26, + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28, + VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29, + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30, + VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35, + VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36, + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38, + VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42, + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45, + VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46, + VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47, + VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000, + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001, + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002, + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000, + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001, + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES = 1000120000, + VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 1000156001, + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002, + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004, + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000, + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001, + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES = 1000063000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES = 49, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES = 50, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES = 51, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES = 52, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO = 1000147000, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2 = 1000109000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2 = 1000109001, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2 = 1000109002, + VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2 = 1000109003, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2 = 1000109004, + VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO = 1000109005, + VK_STRUCTURE_TYPE_SUBPASS_END_INFO = 1000109006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES = 1000177000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES = 1000196000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES = 1000180000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES = 1000082000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES = 1000197000, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO = 1000161000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES = 1000161001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES = 1000161002, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO = 1000161003, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT = 1000161004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES = 1000199000, + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE = 1000199001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES = 1000221000, + VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO = 1000246000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES = 1000130000, + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO = 1000130001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES = 1000211000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES = 1000108000, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO = 1000108001, + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO = 1000108002, + VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO = 1000108003, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES = 1000253000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES = 1000175000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES = 1000241000, + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT = 1000241001, + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT = 1000241002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES = 1000261000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES = 1000207000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES = 1000207001, + VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO = 1000207002, + VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO = 1000207003, + VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO = 1000207004, + VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO = 1000207005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES = 1000257000, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO = 1000244001, + VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO = 1000257002, + VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO = 1000257003, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO = 1000257004, + VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000, + VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007, + VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009, + VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010, + VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012, + VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000, + VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001, + VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000, + VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 
1000004000, + VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000, + VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000, + VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000, + VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000, + VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001, + VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001, + VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT = 1000028000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT = 1000028001, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT = 1000028002, + VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX = 1000030000, + VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000, + VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP = 1000049000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV = 1000050000, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000, + VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000, + VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = 1000066000, + VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001, + VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002, + VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000, + VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001, + VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002, + VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001, + VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003, + VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000, + VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT = 1000081000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT = 1000081001, + VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT = 1000081002, + VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000, + VK_STRUCTURE_TYPE_OBJECT_TABLE_CREATE_INFO_NVX = 1000086000, + VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX = 1000086001, 
+ VK_STRUCTURE_TYPE_CMD_PROCESS_COMMANDS_INFO_NVX = 1000086002, + VK_STRUCTURE_TYPE_CMD_RESERVE_SPACE_FOR_COMMANDS_INFO_NVX = 1000086003, + VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_LIMITS_NVX = 1000086004, + VK_STRUCTURE_TYPE_DEVICE_GENERATED_COMMANDS_FEATURES_NVX = 1000086005, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000, + VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000, + VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001, + VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002, + VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003, + VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000, + VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT = 1000102000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT = 1000102001, + VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000, + VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000, + VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000, + VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001, + VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002, + VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000, + VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR = 1000116000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR = 1000116001, + VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR = 1000116002, + VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR = 1000116003, + VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR = 1000116004, + VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR = 1000116005, + VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR = 1000116006, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001, + VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002, + VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR = 1000121000, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR = 1000121001, + VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR = 1000121002, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR = 1000121003, + VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR = 1000121004, + VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000, + VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000, + VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001, + VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000128003, + VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001, + VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 
1000129002, + VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003, + VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004, + VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = 1000138000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = 1000138001, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = 1000138002, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = 1000138003, + VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000, + VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001, + VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003, + VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV = 1000154000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV = 1000154001, + VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT = 1000158000, + VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT = 1000158002, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT = 1000158003, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT = 1000158004, + VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158005, + VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000, + VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV = 1000164000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV = 1000164001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005, + VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001, + VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003, + VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004, + VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005, + VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009, + VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011, + VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000, + VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT = 1000170000, + 
VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT = 1000170001, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000, + VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000, + VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR = 1000181000, + VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD = 1000183000, + VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = 1000184000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000, + VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002, + VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000, + VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = 1000192000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = 1000201000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV = 1000202000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV = 1000202001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = 1000203000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV = 1000204000, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV = 1000205000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV = 1000205002, + VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV = 1000206000, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV = 1000206001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL = 1000209000, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO_INTEL = 1000210000, + VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL = 1000210001, + VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL = 1000210002, + VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL = 1000210003, + VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL = 1000210004, + VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL = 1000210005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT = 1000212000, + VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD = 1000213000, + VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD = 1000213001, + VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA = 1000214000, + VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT = 1000218000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001, + VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT = 1000218002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = 1000225000, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = 1000225001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = 1000225002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000, + 
VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT = 1000238000, + VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT = 1000238001, + VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR = 1000239000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV = 1000240000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT = 1000244000, + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT = 1000244002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT = 1000245000, + VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT = 1000247000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV = 1000249000, + VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV = 1000250000, + VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV = 1000250001, + VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV = 1000250002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT = 1000251000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT = 1000252000, + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT = 1000255000, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT = 1000255002, + VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT = 1000255001, + VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT = 1000256000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = 1000259000, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = 1000259001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = 1000259002, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = 1000265000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR = 1000269000, + VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR = 1000269001, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR = 1000269002, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 1000269004, + VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = 1000276000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT = 1000281000, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = 1000281001, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES, + VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2_KHR = 
VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO, + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES, + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO, + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO, + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES, + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT16_INT8_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES, + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES, + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, + VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES2_EXT = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT, + 
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES,
+    VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO,
+    VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO_KHR = VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO,
+    VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO,
+    VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2,
+    VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2,
+    VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2,
+    VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2_KHR = VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2,
+    VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2,
+    VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO,
+    VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR = VK_STRUCTURE_TYPE_SUBPASS_END_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO,
+    VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES,
+    VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES,
+    VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO,
+    VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES,
+    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,
+    VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES,
+    VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,
+    VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
+    VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2_KHR = VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2,
+    VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,
+    VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2_KHR = VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2,
+    VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO,
+    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO,
+    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO,
+    VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO,
+    VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO_KHR = VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES,
+    VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES_KHR = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES,
+    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,
+    VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT_EXT = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES,
+    VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT_KHR = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES,
+    VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE_KHR = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES,
+    VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO_KHR = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
+    VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO,
+    VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO_KHR = VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES,
+    VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT,
+    VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT_KHR = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_ADDRESS_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT,
+    VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
+    VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO_EXT = VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_KHR = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES,
+    VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
+    VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO_KHR = VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO,
+    VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO_KHR = VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO,
+    VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO_KHR = VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO,
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES_EXT = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES,
+    VK_STRUCTURE_TYPE_BEGIN_RANGE = VK_STRUCTURE_TYPE_APPLICATION_INFO,
+    VK_STRUCTURE_TYPE_END_RANGE = VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO,
+    VK_STRUCTURE_TYPE_RANGE_SIZE = (VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO - VK_STRUCTURE_TYPE_APPLICATION_INFO + 1),
+    VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkStructureType;
+
+typedef enum VkSystemAllocationScope {
+    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0,
+    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1,
+    VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2,
+    VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3,
+    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4,
+    VK_SYSTEM_ALLOCATION_SCOPE_BEGIN_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_COMMAND,
+    VK_SYSTEM_ALLOCATION_SCOPE_END_RANGE = VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE,
+    VK_SYSTEM_ALLOCATION_SCOPE_RANGE_SIZE = (VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE - VK_SYSTEM_ALLOCATION_SCOPE_COMMAND + 1),
+    VK_SYSTEM_ALLOCATION_SCOPE_MAX_ENUM = 0x7FFFFFFF
+} VkSystemAllocationScope;
+
+typedef enum VkInternalAllocationType {
+    VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0,
+    VK_INTERNAL_ALLOCATION_TYPE_BEGIN_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE,
+    VK_INTERNAL_ALLOCATION_TYPE_END_RANGE = VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE,
+    VK_INTERNAL_ALLOCATION_TYPE_RANGE_SIZE = (VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE - VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE + 1),
+    VK_INTERNAL_ALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkInternalAllocationType;
+
+typedef enum VkFormat {
+    VK_FORMAT_UNDEFINED = 0,
+    VK_FORMAT_R4G4_UNORM_PACK8 = 1,
+    VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2,
+    VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3,
+    VK_FORMAT_R5G6B5_UNORM_PACK16 = 4,
+    VK_FORMAT_B5G6R5_UNORM_PACK16 = 5,
+    VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6,
+    VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7,
+    VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8,
+    VK_FORMAT_R8_UNORM = 9,
+    VK_FORMAT_R8_SNORM = 10,
+    VK_FORMAT_R8_USCALED = 11,
+    VK_FORMAT_R8_SSCALED = 12,
+    VK_FORMAT_R8_UINT = 13,
+    VK_FORMAT_R8_SINT = 14,
+    VK_FORMAT_R8_SRGB = 15,
+    VK_FORMAT_R8G8_UNORM = 16,
+    VK_FORMAT_R8G8_SNORM = 17,
+    VK_FORMAT_R8G8_USCALED = 18,
+    VK_FORMAT_R8G8_SSCALED = 19,
+    VK_FORMAT_R8G8_UINT = 20,
+    VK_FORMAT_R8G8_SINT = 21,
+    VK_FORMAT_R8G8_SRGB = 22,
+    VK_FORMAT_R8G8B8_UNORM = 23,
+    VK_FORMAT_R8G8B8_SNORM = 24,
+    VK_FORMAT_R8G8B8_USCALED = 25,
+    VK_FORMAT_R8G8B8_SSCALED = 26,
+    VK_FORMAT_R8G8B8_UINT = 27,
+    VK_FORMAT_R8G8B8_SINT = 28,
+    VK_FORMAT_R8G8B8_SRGB = 29,
+    VK_FORMAT_B8G8R8_UNORM = 30,
+    VK_FORMAT_B8G8R8_SNORM = 31,
+    VK_FORMAT_B8G8R8_USCALED = 32,
+    VK_FORMAT_B8G8R8_SSCALED = 33,
+    VK_FORMAT_B8G8R8_UINT = 34,
+    VK_FORMAT_B8G8R8_SINT = 35,
+    VK_FORMAT_B8G8R8_SRGB = 36,
+    VK_FORMAT_R8G8B8A8_UNORM = 37,
+    VK_FORMAT_R8G8B8A8_SNORM = 38,
+    VK_FORMAT_R8G8B8A8_USCALED = 39,
+    VK_FORMAT_R8G8B8A8_SSCALED = 40,
+    VK_FORMAT_R8G8B8A8_UINT = 41,
+    VK_FORMAT_R8G8B8A8_SINT = 42,
+    VK_FORMAT_R8G8B8A8_SRGB = 43,
+    VK_FORMAT_B8G8R8A8_UNORM = 44,
+    VK_FORMAT_B8G8R8A8_SNORM = 45,
+    VK_FORMAT_B8G8R8A8_USCALED = 46,
+    VK_FORMAT_B8G8R8A8_SSCALED = 47,
+    VK_FORMAT_B8G8R8A8_UINT = 48,
+    VK_FORMAT_B8G8R8A8_SINT = 49,
+    VK_FORMAT_B8G8R8A8_SRGB = 50,
+    VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51,
+    VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52,
+    VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53,
+    VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54,
+    VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55,
+    VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56,
+    VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57,
+    VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58,
+    VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59,
+    VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60,
+    VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61,
+    VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62,
+    VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63,
+    VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64,
+    VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65,
+    VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66,
+    VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67,
+    VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68,
+    VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69,
+    VK_FORMAT_R16_UNORM = 70,
+    VK_FORMAT_R16_SNORM = 71,
+    VK_FORMAT_R16_USCALED = 72,
+    VK_FORMAT_R16_SSCALED = 73,
+    VK_FORMAT_R16_UINT = 74,
+    VK_FORMAT_R16_SINT = 75,
+    VK_FORMAT_R16_SFLOAT = 76,
+    VK_FORMAT_R16G16_UNORM = 77,
+    VK_FORMAT_R16G16_SNORM = 78,
+    VK_FORMAT_R16G16_USCALED = 79,
+    VK_FORMAT_R16G16_SSCALED = 80,
+    VK_FORMAT_R16G16_UINT = 81,
+    VK_FORMAT_R16G16_SINT = 82,
+    VK_FORMAT_R16G16_SFLOAT = 83,
+    VK_FORMAT_R16G16B16_UNORM = 84,
+    VK_FORMAT_R16G16B16_SNORM = 85,
+    VK_FORMAT_R16G16B16_USCALED = 86,
+    VK_FORMAT_R16G16B16_SSCALED = 87,
+    VK_FORMAT_R16G16B16_UINT = 88,
+    VK_FORMAT_R16G16B16_SINT = 89,
+    VK_FORMAT_R16G16B16_SFLOAT = 90,
+    VK_FORMAT_R16G16B16A16_UNORM = 91,
+    VK_FORMAT_R16G16B16A16_SNORM = 92,
+    VK_FORMAT_R16G16B16A16_USCALED = 93,
+    VK_FORMAT_R16G16B16A16_SSCALED = 94,
+    VK_FORMAT_R16G16B16A16_UINT = 95,
+    VK_FORMAT_R16G16B16A16_SINT = 96,
+    VK_FORMAT_R16G16B16A16_SFLOAT = 97,
+    VK_FORMAT_R32_UINT = 98,
+    VK_FORMAT_R32_SINT = 99,
+    VK_FORMAT_R32_SFLOAT = 100,
+    VK_FORMAT_R32G32_UINT = 101,
+    VK_FORMAT_R32G32_SINT = 102,
+    VK_FORMAT_R32G32_SFLOAT = 103,
+    VK_FORMAT_R32G32B32_UINT = 104,
+    VK_FORMAT_R32G32B32_SINT = 105,
+    VK_FORMAT_R32G32B32_SFLOAT = 106,
+    VK_FORMAT_R32G32B32A32_UINT = 107,
+    VK_FORMAT_R32G32B32A32_SINT = 108,
+    VK_FORMAT_R32G32B32A32_SFLOAT = 109,
+    VK_FORMAT_R64_UINT = 110,
+    VK_FORMAT_R64_SINT = 111,
+    VK_FORMAT_R64_SFLOAT = 112,
+    VK_FORMAT_R64G64_UINT = 113,
+    VK_FORMAT_R64G64_SINT = 114,
+    VK_FORMAT_R64G64_SFLOAT = 115,
+    VK_FORMAT_R64G64B64_UINT = 116,
+    VK_FORMAT_R64G64B64_SINT = 117,
+    VK_FORMAT_R64G64B64_SFLOAT = 118,
+    VK_FORMAT_R64G64B64A64_UINT = 119,
+    VK_FORMAT_R64G64B64A64_SINT = 120,
+    VK_FORMAT_R64G64B64A64_SFLOAT = 121,
+    VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122,
+    VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123,
+    VK_FORMAT_D16_UNORM = 124,
+    VK_FORMAT_X8_D24_UNORM_PACK32 = 125,
+    VK_FORMAT_D32_SFLOAT = 126,
+    VK_FORMAT_S8_UINT = 127,
+    VK_FORMAT_D16_UNORM_S8_UINT = 128,
+    VK_FORMAT_D24_UNORM_S8_UINT = 129,
+    VK_FORMAT_D32_SFLOAT_S8_UINT = 130,
+    VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131,
+    VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132,
+    VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133,
+    VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134,
+    VK_FORMAT_BC2_UNORM_BLOCK = 135,
+    VK_FORMAT_BC2_SRGB_BLOCK = 136,
+    VK_FORMAT_BC3_UNORM_BLOCK = 137,
+    VK_FORMAT_BC3_SRGB_BLOCK = 138,
+    VK_FORMAT_BC4_UNORM_BLOCK = 139,
+    VK_FORMAT_BC4_SNORM_BLOCK = 140,
+    VK_FORMAT_BC5_UNORM_BLOCK = 141,
+    VK_FORMAT_BC5_SNORM_BLOCK = 142,
+    VK_FORMAT_BC6H_UFLOAT_BLOCK = 143,
+    VK_FORMAT_BC6H_SFLOAT_BLOCK = 144,
+    VK_FORMAT_BC7_UNORM_BLOCK = 145,
+    VK_FORMAT_BC7_SRGB_BLOCK = 146,
+    VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147,
+    VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148,
+    VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149,
+    VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150,
+    VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151,
+    VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152,
+    VK_FORMAT_EAC_R11_UNORM_BLOCK = 153,
+    VK_FORMAT_EAC_R11_SNORM_BLOCK = 154,
+    VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155,
+    VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156,
+    VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157,
+    VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158,
+    VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159,
+    VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160,
+    VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161,
+    VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162,
+    VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163,
+    VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164,
+    VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165,
+    VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166,
+    VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167,
+    VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168,
+    VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169,
+    VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170,
+    VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171,
+    VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172,
+    VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173,
+    VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174,
+    VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175,
+    VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176,
+    VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177,
+    VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178,
+    VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179,
+    VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180,
+    VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181,
+    VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182,
+    VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183,
+    VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184,
+    VK_FORMAT_G8B8G8R8_422_UNORM = 1000156000,
+    VK_FORMAT_B8G8R8G8_422_UNORM = 1000156001,
+    VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM = 1000156002,
+    VK_FORMAT_G8_B8R8_2PLANE_420_UNORM = 1000156003,
+    VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM = 1000156004,
+    VK_FORMAT_G8_B8R8_2PLANE_422_UNORM = 1000156005,
+    VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM = 1000156006,
+    VK_FORMAT_R10X6_UNORM_PACK16 = 1000156007,
+    VK_FORMAT_R10X6G10X6_UNORM_2PACK16 = 1000156008,
+    VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16 = 1000156009,
+    VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 = 1000156010,
+    VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 = 1000156011,
+    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 = 1000156012,
+    VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 = 1000156013,
+    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 = 1000156014,
+    VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 = 1000156015,
+    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 = 1000156016,
+    VK_FORMAT_R12X4_UNORM_PACK16 = 1000156017,
+    VK_FORMAT_R12X4G12X4_UNORM_2PACK16 = 1000156018,
+    VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16 = 1000156019,
+    VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 = 1000156020,
+    VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 = 1000156021,
+    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 = 1000156022,
+    VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 = 1000156023,
+    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 = 1000156024,
+    VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 = 1000156025,
+    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 = 1000156026,
+    VK_FORMAT_G16B16G16R16_422_UNORM = 1000156027,
+    VK_FORMAT_B16G16R16G16_422_UNORM = 1000156028,
+    VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM = 1000156029,
+    VK_FORMAT_G16_B16R16_2PLANE_420_UNORM = 1000156030,
+    VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM = 1000156031,
+    VK_FORMAT_G16_B16R16_2PLANE_422_UNORM = 1000156032,
+    VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM = 1000156033,
+    VK_FORMAT_PVRTC1_2BPP_UNORM_BLOCK_IMG = 1000054000,
+    VK_FORMAT_PVRTC1_4BPP_UNORM_BLOCK_IMG = 1000054001,
+    VK_FORMAT_PVRTC2_2BPP_UNORM_BLOCK_IMG = 1000054002,
+    VK_FORMAT_PVRTC2_4BPP_UNORM_BLOCK_IMG = 1000054003,
+    VK_FORMAT_PVRTC1_2BPP_SRGB_BLOCK_IMG = 1000054004,
+    VK_FORMAT_PVRTC1_4BPP_SRGB_BLOCK_IMG = 1000054005,
+    VK_FORMAT_PVRTC2_2BPP_SRGB_BLOCK_IMG = 1000054006,
+    VK_FORMAT_PVRTC2_4BPP_SRGB_BLOCK_IMG = 1000054007,
+    VK_FORMAT_ASTC_4x4_SFLOAT_BLOCK_EXT = 1000066000,
+    VK_FORMAT_ASTC_5x4_SFLOAT_BLOCK_EXT = 1000066001,
+    VK_FORMAT_ASTC_5x5_SFLOAT_BLOCK_EXT = 1000066002,
+    VK_FORMAT_ASTC_6x5_SFLOAT_BLOCK_EXT = 1000066003,
+    VK_FORMAT_ASTC_6x6_SFLOAT_BLOCK_EXT = 1000066004,
+    VK_FORMAT_ASTC_8x5_SFLOAT_BLOCK_EXT = 1000066005,
+    VK_FORMAT_ASTC_8x6_SFLOAT_BLOCK_EXT = 1000066006,
+    VK_FORMAT_ASTC_8x8_SFLOAT_BLOCK_EXT = 1000066007,
+    VK_FORMAT_ASTC_10x5_SFLOAT_BLOCK_EXT = 1000066008,
+    VK_FORMAT_ASTC_10x6_SFLOAT_BLOCK_EXT = 1000066009,
+    VK_FORMAT_ASTC_10x8_SFLOAT_BLOCK_EXT = 1000066010,
+    VK_FORMAT_ASTC_10x10_SFLOAT_BLOCK_EXT = 1000066011,
+    VK_FORMAT_ASTC_12x10_SFLOAT_BLOCK_EXT = 1000066012,
+    VK_FORMAT_ASTC_12x12_SFLOAT_BLOCK_EXT = 1000066013,
+    VK_FORMAT_G8B8G8R8_422_UNORM_KHR = VK_FORMAT_G8B8G8R8_422_UNORM,
+    VK_FORMAT_B8G8R8G8_422_UNORM_KHR = VK_FORMAT_B8G8R8G8_422_UNORM,
+    VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM,
+    VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM,
+    VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_422_UNORM,
+    VK_FORMAT_G8_B8R8_2PLANE_422_UNORM_KHR = VK_FORMAT_G8_B8R8_2PLANE_422_UNORM,
+    VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM_KHR = VK_FORMAT_G8_B8_R8_3PLANE_444_UNORM,
+    VK_FORMAT_R10X6_UNORM_PACK16_KHR = VK_FORMAT_R10X6_UNORM_PACK16,
+    VK_FORMAT_R10X6G10X6_UNORM_2PACK16_KHR = VK_FORMAT_R10X6G10X6_UNORM_2PACK16,
+    VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16_KHR = VK_FORMAT_R10X6G10X6B10X6A10X6_UNORM_4PACK16,
+    VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_G10X6B10X6G10X6R10X6_422_UNORM_4PACK16,
+    VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16_KHR = VK_FORMAT_B10X6G10X6R10X6G10X6_422_UNORM_4PACK16,
+    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16,
+    VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16,
+    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16,
+    VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16,
+    VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16,
+    VK_FORMAT_R12X4_UNORM_PACK16_KHR = VK_FORMAT_R12X4_UNORM_PACK16,
+    VK_FORMAT_R12X4G12X4_UNORM_2PACK16_KHR = VK_FORMAT_R12X4G12X4_UNORM_2PACK16,
+    VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16_KHR = VK_FORMAT_R12X4G12X4B12X4A12X4_UNORM_4PACK16,
+    VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_G12X4B12X4G12X4R12X4_422_UNORM_4PACK16,
+    VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16_KHR = VK_FORMAT_B12X4G12X4R12X4G12X4_422_UNORM_4PACK16,
+    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16,
+    VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16,
+    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16,
+    VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16,
+    VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16_KHR = VK_FORMAT_G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16,
+    VK_FORMAT_G16B16G16R16_422_UNORM_KHR = VK_FORMAT_G16B16G16R16_422_UNORM,
+    VK_FORMAT_B16G16R16G16_422_UNORM_KHR = VK_FORMAT_B16G16R16G16_422_UNORM,
+    VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_420_UNORM,
+    VK_FORMAT_G16_B16R16_2PLANE_420_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_420_UNORM,
+    VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_422_UNORM,
+    VK_FORMAT_G16_B16R16_2PLANE_422_UNORM_KHR = VK_FORMAT_G16_B16R16_2PLANE_422_UNORM,
+    VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM_KHR = VK_FORMAT_G16_B16_R16_3PLANE_444_UNORM,
+    VK_FORMAT_BEGIN_RANGE = VK_FORMAT_UNDEFINED,
+    VK_FORMAT_END_RANGE = VK_FORMAT_ASTC_12x12_SRGB_BLOCK,
+    VK_FORMAT_RANGE_SIZE = (VK_FORMAT_ASTC_12x12_SRGB_BLOCK - VK_FORMAT_UNDEFINED + 1),
+    VK_FORMAT_MAX_ENUM = 0x7FFFFFFF
+} VkFormat;
+
+typedef enum VkImageType {
+    VK_IMAGE_TYPE_1D = 0,
+    VK_IMAGE_TYPE_2D = 1,
+    VK_IMAGE_TYPE_3D = 2,
+    VK_IMAGE_TYPE_BEGIN_RANGE = VK_IMAGE_TYPE_1D,
+    VK_IMAGE_TYPE_END_RANGE = VK_IMAGE_TYPE_3D,
+    VK_IMAGE_TYPE_RANGE_SIZE = (VK_IMAGE_TYPE_3D - VK_IMAGE_TYPE_1D + 1),
+    VK_IMAGE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageType;
+
+typedef enum VkImageTiling {
+    VK_IMAGE_TILING_OPTIMAL = 0,
+    VK_IMAGE_TILING_LINEAR = 1,
+    VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT = 1000158000,
+    VK_IMAGE_TILING_BEGIN_RANGE = VK_IMAGE_TILING_OPTIMAL,
+    VK_IMAGE_TILING_END_RANGE = VK_IMAGE_TILING_LINEAR,
+    VK_IMAGE_TILING_RANGE_SIZE = (VK_IMAGE_TILING_LINEAR - VK_IMAGE_TILING_OPTIMAL + 1),
+    VK_IMAGE_TILING_MAX_ENUM = 0x7FFFFFFF
+} VkImageTiling;
+
+typedef enum VkPhysicalDeviceType {
+    VK_PHYSICAL_DEVICE_TYPE_OTHER = 0,
+    VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1,
+    VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2,
+    VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3,
+    VK_PHYSICAL_DEVICE_TYPE_CPU = 4,
+    VK_PHYSICAL_DEVICE_TYPE_BEGIN_RANGE = VK_PHYSICAL_DEVICE_TYPE_OTHER,
+    VK_PHYSICAL_DEVICE_TYPE_END_RANGE = VK_PHYSICAL_DEVICE_TYPE_CPU,
+    VK_PHYSICAL_DEVICE_TYPE_RANGE_SIZE = (VK_PHYSICAL_DEVICE_TYPE_CPU - VK_PHYSICAL_DEVICE_TYPE_OTHER + 1),
+    VK_PHYSICAL_DEVICE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkPhysicalDeviceType;
+
+typedef enum VkQueryType {
+    VK_QUERY_TYPE_OCCLUSION = 0,
+    VK_QUERY_TYPE_PIPELINE_STATISTICS = 1,
+    VK_QUERY_TYPE_TIMESTAMP = 2,
+    VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1000028004,
+    VK_QUERY_TYPE_PERFORMANCE_QUERY_KHR = 1000116000,
+    VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_NV = 1000165000,
+    VK_QUERY_TYPE_PERFORMANCE_QUERY_INTEL = 1000210000,
+    VK_QUERY_TYPE_BEGIN_RANGE = VK_QUERY_TYPE_OCCLUSION,
+    VK_QUERY_TYPE_END_RANGE = VK_QUERY_TYPE_TIMESTAMP,
+    VK_QUERY_TYPE_RANGE_SIZE = (VK_QUERY_TYPE_TIMESTAMP - VK_QUERY_TYPE_OCCLUSION + 1),
+    VK_QUERY_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkQueryType;
+
+typedef enum VkSharingMode {
+    VK_SHARING_MODE_EXCLUSIVE = 0,
+    VK_SHARING_MODE_CONCURRENT = 1,
+    VK_SHARING_MODE_BEGIN_RANGE = VK_SHARING_MODE_EXCLUSIVE,
+    VK_SHARING_MODE_END_RANGE = VK_SHARING_MODE_CONCURRENT,
+    VK_SHARING_MODE_RANGE_SIZE = (VK_SHARING_MODE_CONCURRENT - VK_SHARING_MODE_EXCLUSIVE + 1),
+    VK_SHARING_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSharingMode;
+
+typedef enum VkImageLayout {
+    VK_IMAGE_LAYOUT_UNDEFINED = 0,
+    VK_IMAGE_LAYOUT_GENERAL = 1,
+    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2,
+    VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3,
+    VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4,
+    VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5,
+    VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6,
+    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7,
+    VK_IMAGE_LAYOUT_PREINITIALIZED = 8,
+    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL = 1000117000,
+    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL = 1000117001,
+    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL = 1000241000,
+    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL = 1000241001,
+    VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL = 1000241002,
+    VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL = 1000241003,
+    VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1000001002,
+    VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR = 1000111000,
+    VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV = 1000164003,
+    VK_IMAGE_LAYOUT_FRAGMENT_DENSITY_MAP_OPTIMAL_EXT = 1000218000,
+    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL,
+    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL,
+    VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL,
+    VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL,
+    VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL,
+    VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL_KHR = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL,
+    VK_IMAGE_LAYOUT_BEGIN_RANGE = VK_IMAGE_LAYOUT_UNDEFINED,
+    VK_IMAGE_LAYOUT_END_RANGE = VK_IMAGE_LAYOUT_PREINITIALIZED,
+    VK_IMAGE_LAYOUT_RANGE_SIZE = (VK_IMAGE_LAYOUT_PREINITIALIZED - VK_IMAGE_LAYOUT_UNDEFINED + 1),
+    VK_IMAGE_LAYOUT_MAX_ENUM = 0x7FFFFFFF
+} VkImageLayout;
+
+typedef enum VkImageViewType {
+    VK_IMAGE_VIEW_TYPE_1D = 0,
+    VK_IMAGE_VIEW_TYPE_2D = 1,
+    VK_IMAGE_VIEW_TYPE_3D = 2,
+    VK_IMAGE_VIEW_TYPE_CUBE = 3,
+    VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4,
+    VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5,
+    VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6,
+    VK_IMAGE_VIEW_TYPE_BEGIN_RANGE = VK_IMAGE_VIEW_TYPE_1D,
+    VK_IMAGE_VIEW_TYPE_END_RANGE = VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,
+    VK_IMAGE_VIEW_TYPE_RANGE_SIZE = (VK_IMAGE_VIEW_TYPE_CUBE_ARRAY - VK_IMAGE_VIEW_TYPE_1D + 1),
+    VK_IMAGE_VIEW_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkImageViewType;
+
+typedef enum VkComponentSwizzle {
+    VK_COMPONENT_SWIZZLE_IDENTITY = 0,
+    VK_COMPONENT_SWIZZLE_ZERO = 1,
+    VK_COMPONENT_SWIZZLE_ONE = 2,
+    VK_COMPONENT_SWIZZLE_R = 3,
+    VK_COMPONENT_SWIZZLE_G = 4,
+    VK_COMPONENT_SWIZZLE_B = 5,
+    VK_COMPONENT_SWIZZLE_A = 6,
+    VK_COMPONENT_SWIZZLE_BEGIN_RANGE = VK_COMPONENT_SWIZZLE_IDENTITY,
+    VK_COMPONENT_SWIZZLE_END_RANGE = VK_COMPONENT_SWIZZLE_A,
+    VK_COMPONENT_SWIZZLE_RANGE_SIZE = (VK_COMPONENT_SWIZZLE_A - VK_COMPONENT_SWIZZLE_IDENTITY + 1),
+    VK_COMPONENT_SWIZZLE_MAX_ENUM = 0x7FFFFFFF
+} VkComponentSwizzle;
+
+typedef enum VkVertexInputRate {
+    VK_VERTEX_INPUT_RATE_VERTEX = 0,
+    VK_VERTEX_INPUT_RATE_INSTANCE = 1,
+    VK_VERTEX_INPUT_RATE_BEGIN_RANGE = VK_VERTEX_INPUT_RATE_VERTEX,
+    VK_VERTEX_INPUT_RATE_END_RANGE = VK_VERTEX_INPUT_RATE_INSTANCE,
+    VK_VERTEX_INPUT_RATE_RANGE_SIZE = (VK_VERTEX_INPUT_RATE_INSTANCE - VK_VERTEX_INPUT_RATE_VERTEX + 1),
+    VK_VERTEX_INPUT_RATE_MAX_ENUM = 0x7FFFFFFF
+} VkVertexInputRate;
+
+typedef enum VkPrimitiveTopology {
+    VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0,
+    VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1,
+    VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2,
+    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3,
+    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4,
+    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5,
+    VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6,
+    VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7,
+    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8,
+    VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9,
+    VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10,
+    VK_PRIMITIVE_TOPOLOGY_BEGIN_RANGE = VK_PRIMITIVE_TOPOLOGY_POINT_LIST,
+    VK_PRIMITIVE_TOPOLOGY_END_RANGE = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST,
+    VK_PRIMITIVE_TOPOLOGY_RANGE_SIZE = (VK_PRIMITIVE_TOPOLOGY_PATCH_LIST - VK_PRIMITIVE_TOPOLOGY_POINT_LIST + 1),
+    VK_PRIMITIVE_TOPOLOGY_MAX_ENUM = 0x7FFFFFFF
+} VkPrimitiveTopology;
+
+typedef enum VkPolygonMode {
+    VK_POLYGON_MODE_FILL = 0,
+    VK_POLYGON_MODE_LINE = 1,
+    VK_POLYGON_MODE_POINT = 2,
+    VK_POLYGON_MODE_FILL_RECTANGLE_NV = 1000153000,
+    VK_POLYGON_MODE_BEGIN_RANGE = VK_POLYGON_MODE_FILL,
+    VK_POLYGON_MODE_END_RANGE = VK_POLYGON_MODE_POINT,
+    VK_POLYGON_MODE_RANGE_SIZE = (VK_POLYGON_MODE_POINT - VK_POLYGON_MODE_FILL + 1),
+    VK_POLYGON_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkPolygonMode;
+
+typedef enum VkFrontFace {
+    VK_FRONT_FACE_COUNTER_CLOCKWISE = 0,
+    VK_FRONT_FACE_CLOCKWISE = 1,
+    VK_FRONT_FACE_BEGIN_RANGE = VK_FRONT_FACE_COUNTER_CLOCKWISE,
+    VK_FRONT_FACE_END_RANGE = VK_FRONT_FACE_CLOCKWISE,
+    VK_FRONT_FACE_RANGE_SIZE = (VK_FRONT_FACE_CLOCKWISE - VK_FRONT_FACE_COUNTER_CLOCKWISE + 1),
+    VK_FRONT_FACE_MAX_ENUM = 0x7FFFFFFF
+} VkFrontFace;
+
+typedef enum VkCompareOp {
+    VK_COMPARE_OP_NEVER = 0,
+    VK_COMPARE_OP_LESS = 1,
+    VK_COMPARE_OP_EQUAL = 2,
+    VK_COMPARE_OP_LESS_OR_EQUAL = 3,
+    VK_COMPARE_OP_GREATER = 4,
+    VK_COMPARE_OP_NOT_EQUAL = 5,
+    VK_COMPARE_OP_GREATER_OR_EQUAL = 6,
+    VK_COMPARE_OP_ALWAYS = 7,
+    VK_COMPARE_OP_BEGIN_RANGE = VK_COMPARE_OP_NEVER,
+    VK_COMPARE_OP_END_RANGE = VK_COMPARE_OP_ALWAYS,
+    VK_COMPARE_OP_RANGE_SIZE = (VK_COMPARE_OP_ALWAYS - VK_COMPARE_OP_NEVER + 1),
+    VK_COMPARE_OP_MAX_ENUM = 0x7FFFFFFF
+} VkCompareOp;
+
+typedef enum VkStencilOp {
+    VK_STENCIL_OP_KEEP = 0,
+    VK_STENCIL_OP_ZERO = 1,
+    VK_STENCIL_OP_REPLACE = 2,
+    VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3,
+    VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4,
+    VK_STENCIL_OP_INVERT = 5,
+    VK_STENCIL_OP_INCREMENT_AND_WRAP = 6,
+    VK_STENCIL_OP_DECREMENT_AND_WRAP = 7,
+    VK_STENCIL_OP_BEGIN_RANGE = VK_STENCIL_OP_KEEP,
+    VK_STENCIL_OP_END_RANGE = VK_STENCIL_OP_DECREMENT_AND_WRAP,
+    VK_STENCIL_OP_RANGE_SIZE = (VK_STENCIL_OP_DECREMENT_AND_WRAP - VK_STENCIL_OP_KEEP + 1),
+    VK_STENCIL_OP_MAX_ENUM = 0x7FFFFFFF
+} VkStencilOp;
+
+typedef enum VkLogicOp {
+    VK_LOGIC_OP_CLEAR = 0,
+    VK_LOGIC_OP_AND = 1,
+    VK_LOGIC_OP_AND_REVERSE = 2,
+    VK_LOGIC_OP_COPY = 3,
+    VK_LOGIC_OP_AND_INVERTED = 4,
+    VK_LOGIC_OP_NO_OP = 5,
+    VK_LOGIC_OP_XOR = 6,
+    VK_LOGIC_OP_OR = 7,
+    VK_LOGIC_OP_NOR = 8,
+    VK_LOGIC_OP_EQUIVALENT = 9,
+    VK_LOGIC_OP_INVERT = 10,
+    VK_LOGIC_OP_OR_REVERSE = 11,
+    VK_LOGIC_OP_COPY_INVERTED = 12,
+    VK_LOGIC_OP_OR_INVERTED = 13,
+    VK_LOGIC_OP_NAND = 14,
+    VK_LOGIC_OP_SET = 15,
+    VK_LOGIC_OP_BEGIN_RANGE = VK_LOGIC_OP_CLEAR,
+    VK_LOGIC_OP_END_RANGE = VK_LOGIC_OP_SET,
+    VK_LOGIC_OP_RANGE_SIZE = (VK_LOGIC_OP_SET - VK_LOGIC_OP_CLEAR + 1),
+    VK_LOGIC_OP_MAX_ENUM = 0x7FFFFFFF
+} VkLogicOp;
+
+typedef enum VkBlendFactor {
+    VK_BLEND_FACTOR_ZERO = 0,
+    VK_BLEND_FACTOR_ONE = 1,
+    VK_BLEND_FACTOR_SRC_COLOR = 2,
+    VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3,
+    VK_BLEND_FACTOR_DST_COLOR = 4,
+    VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5,
+    VK_BLEND_FACTOR_SRC_ALPHA = 6,
+    VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+    VK_BLEND_FACTOR_DST_ALPHA = 8,
+    VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9,
+    VK_BLEND_FACTOR_CONSTANT_COLOR = 10,
+    VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11,
+    VK_BLEND_FACTOR_CONSTANT_ALPHA = 12,
+    VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13,
+    VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14,
+    VK_BLEND_FACTOR_SRC1_COLOR = 15,
+    VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16,
+    VK_BLEND_FACTOR_SRC1_ALPHA = 17,
+    VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18,
+    VK_BLEND_FACTOR_BEGIN_RANGE = VK_BLEND_FACTOR_ZERO,
+    VK_BLEND_FACTOR_END_RANGE = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA,
+    VK_BLEND_FACTOR_RANGE_SIZE = (VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA - VK_BLEND_FACTOR_ZERO + 1),
+    VK_BLEND_FACTOR_MAX_ENUM = 0x7FFFFFFF
+} VkBlendFactor;
+
+typedef enum VkBlendOp {
+    VK_BLEND_OP_ADD = 0,
+    VK_BLEND_OP_SUBTRACT = 1,
+    VK_BLEND_OP_REVERSE_SUBTRACT = 2,
+    VK_BLEND_OP_MIN = 3,
+    VK_BLEND_OP_MAX = 4,
+    VK_BLEND_OP_ZERO_EXT = 1000148000,
+    VK_BLEND_OP_SRC_EXT = 1000148001,
+    VK_BLEND_OP_DST_EXT = 1000148002,
+    VK_BLEND_OP_SRC_OVER_EXT = 1000148003,
+    VK_BLEND_OP_DST_OVER_EXT = 1000148004,
+    VK_BLEND_OP_SRC_IN_EXT = 1000148005,
+    VK_BLEND_OP_DST_IN_EXT = 1000148006,
+    VK_BLEND_OP_SRC_OUT_EXT = 1000148007,
+    VK_BLEND_OP_DST_OUT_EXT = 1000148008,
+    VK_BLEND_OP_SRC_ATOP_EXT = 1000148009,
+    VK_BLEND_OP_DST_ATOP_EXT = 1000148010,
+    VK_BLEND_OP_XOR_EXT = 1000148011,
+    VK_BLEND_OP_MULTIPLY_EXT = 1000148012,
+    VK_BLEND_OP_SCREEN_EXT = 1000148013,
+    VK_BLEND_OP_OVERLAY_EXT = 1000148014,
+    VK_BLEND_OP_DARKEN_EXT = 1000148015,
+    VK_BLEND_OP_LIGHTEN_EXT = 1000148016,
+    VK_BLEND_OP_COLORDODGE_EXT = 1000148017,
+    VK_BLEND_OP_COLORBURN_EXT = 1000148018,
+    VK_BLEND_OP_HARDLIGHT_EXT = 1000148019,
+    VK_BLEND_OP_SOFTLIGHT_EXT = 1000148020,
+    VK_BLEND_OP_DIFFERENCE_EXT = 1000148021,
+    VK_BLEND_OP_EXCLUSION_EXT = 1000148022,
+    VK_BLEND_OP_INVERT_EXT = 1000148023,
+    VK_BLEND_OP_INVERT_RGB_EXT = 1000148024,
+    VK_BLEND_OP_LINEARDODGE_EXT = 1000148025,
+    VK_BLEND_OP_LINEARBURN_EXT = 1000148026,
+    VK_BLEND_OP_VIVIDLIGHT_EXT = 1000148027,
+    VK_BLEND_OP_LINEARLIGHT_EXT = 1000148028,
+    VK_BLEND_OP_PINLIGHT_EXT = 1000148029,
+    VK_BLEND_OP_HARDMIX_EXT = 1000148030,
+    VK_BLEND_OP_HSL_HUE_EXT = 1000148031,
+    VK_BLEND_OP_HSL_SATURATION_EXT = 1000148032,
+    VK_BLEND_OP_HSL_COLOR_EXT = 1000148033,
+    VK_BLEND_OP_HSL_LUMINOSITY_EXT = 1000148034,
+    VK_BLEND_OP_PLUS_EXT = 1000148035,
+    VK_BLEND_OP_PLUS_CLAMPED_EXT = 1000148036,
+    VK_BLEND_OP_PLUS_CLAMPED_ALPHA_EXT = 1000148037,
+    VK_BLEND_OP_PLUS_DARKER_EXT = 1000148038,
+    VK_BLEND_OP_MINUS_EXT = 1000148039,
+    VK_BLEND_OP_MINUS_CLAMPED_EXT = 1000148040,
+    VK_BLEND_OP_CONTRAST_EXT = 1000148041,
+    VK_BLEND_OP_INVERT_OVG_EXT = 1000148042,
+    VK_BLEND_OP_RED_EXT = 1000148043,
+    VK_BLEND_OP_GREEN_EXT = 1000148044,
+    VK_BLEND_OP_BLUE_EXT = 1000148045,
+    VK_BLEND_OP_BEGIN_RANGE = VK_BLEND_OP_ADD,
+    VK_BLEND_OP_END_RANGE = VK_BLEND_OP_MAX,
+    VK_BLEND_OP_RANGE_SIZE = (VK_BLEND_OP_MAX - VK_BLEND_OP_ADD + 1),
+    VK_BLEND_OP_MAX_ENUM = 0x7FFFFFFF
+} VkBlendOp;
+
+typedef enum VkDynamicState {
+    VK_DYNAMIC_STATE_VIEWPORT = 0,
+    VK_DYNAMIC_STATE_SCISSOR = 1,
+    VK_DYNAMIC_STATE_LINE_WIDTH = 2,
+    VK_DYNAMIC_STATE_DEPTH_BIAS = 3,
+    VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4,
+    VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5,
+    VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6,
+    VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7,
+    VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8,
+    VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1000087000,
+    VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1000099000,
+    VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1000143000,
+    VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV = 1000164004,
+    VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV = 1000164006,
+    VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 1000205001,
+    VK_DYNAMIC_STATE_LINE_STIPPLE_EXT = 1000259000,
+    VK_DYNAMIC_STATE_BEGIN_RANGE = VK_DYNAMIC_STATE_VIEWPORT,
+    VK_DYNAMIC_STATE_END_RANGE = VK_DYNAMIC_STATE_STENCIL_REFERENCE,
+    VK_DYNAMIC_STATE_RANGE_SIZE = (VK_DYNAMIC_STATE_STENCIL_REFERENCE - VK_DYNAMIC_STATE_VIEWPORT + 1),
+    VK_DYNAMIC_STATE_MAX_ENUM = 0x7FFFFFFF
+} VkDynamicState;
+
+typedef enum VkFilter {
+    VK_FILTER_NEAREST = 0,
+    VK_FILTER_LINEAR = 1,
+    VK_FILTER_CUBIC_IMG = 1000015000,
+    VK_FILTER_CUBIC_EXT = VK_FILTER_CUBIC_IMG,
+    VK_FILTER_BEGIN_RANGE = VK_FILTER_NEAREST,
+    VK_FILTER_END_RANGE = VK_FILTER_LINEAR,
+    VK_FILTER_RANGE_SIZE = (VK_FILTER_LINEAR - VK_FILTER_NEAREST + 1),
+    VK_FILTER_MAX_ENUM = 0x7FFFFFFF
+} VkFilter;
+
+typedef enum VkSamplerMipmapMode {
+    VK_SAMPLER_MIPMAP_MODE_NEAREST = 0,
+    VK_SAMPLER_MIPMAP_MODE_LINEAR = 1,
+    VK_SAMPLER_MIPMAP_MODE_BEGIN_RANGE = VK_SAMPLER_MIPMAP_MODE_NEAREST,
+    VK_SAMPLER_MIPMAP_MODE_END_RANGE = VK_SAMPLER_MIPMAP_MODE_LINEAR,
+    VK_SAMPLER_MIPMAP_MODE_RANGE_SIZE = (VK_SAMPLER_MIPMAP_MODE_LINEAR - VK_SAMPLER_MIPMAP_MODE_NEAREST + 1),
+    VK_SAMPLER_MIPMAP_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerMipmapMode;
+
+typedef enum VkSamplerAddressMode {
+    VK_SAMPLER_ADDRESS_MODE_REPEAT = 0,
+    VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1,
+    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2,
+    VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3,
+    VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4,
+    VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE_KHR = VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE,
+    VK_SAMPLER_ADDRESS_MODE_BEGIN_RANGE = VK_SAMPLER_ADDRESS_MODE_REPEAT,
+    VK_SAMPLER_ADDRESS_MODE_END_RANGE = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
+    VK_SAMPLER_ADDRESS_MODE_RANGE_SIZE = (VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER - VK_SAMPLER_ADDRESS_MODE_REPEAT + 1),
+    VK_SAMPLER_ADDRESS_MODE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerAddressMode;
+
+typedef enum VkBorderColor {
+    VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0,
+    VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1,
+    VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2,
+    VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3,
+    VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4,
+    VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5,
+    VK_BORDER_COLOR_BEGIN_RANGE = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
+    VK_BORDER_COLOR_END_RANGE = VK_BORDER_COLOR_INT_OPAQUE_WHITE,
+    VK_BORDER_COLOR_RANGE_SIZE = (VK_BORDER_COLOR_INT_OPAQUE_WHITE - VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK + 1),
+    VK_BORDER_COLOR_MAX_ENUM = 0x7FFFFFFF
+} VkBorderColor;
+
+typedef enum VkDescriptorType {
+    VK_DESCRIPTOR_TYPE_SAMPLER = 0,
+    VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1,
+    VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2,
+    VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3,
+    VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4,
+    VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5,
+    VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6,
+    VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7,
+    VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8,
+    VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9,
+    VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10,
+    VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1000138000,
+    VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000,
+    VK_DESCRIPTOR_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_TYPE_SAMPLER,
+    VK_DESCRIPTOR_TYPE_END_RANGE = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT,
+    VK_DESCRIPTOR_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT - VK_DESCRIPTOR_TYPE_SAMPLER + 1),
+    VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorType;
+
+typedef enum VkAttachmentLoadOp {
+    VK_ATTACHMENT_LOAD_OP_LOAD = 0,
+    VK_ATTACHMENT_LOAD_OP_CLEAR = 1,
+    VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2,
+    VK_ATTACHMENT_LOAD_OP_BEGIN_RANGE = VK_ATTACHMENT_LOAD_OP_LOAD,
+    VK_ATTACHMENT_LOAD_OP_END_RANGE = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+    VK_ATTACHMENT_LOAD_OP_RANGE_SIZE = (VK_ATTACHMENT_LOAD_OP_DONT_CARE - VK_ATTACHMENT_LOAD_OP_LOAD + 1),
+    VK_ATTACHMENT_LOAD_OP_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentLoadOp;
+
+typedef enum VkAttachmentStoreOp {
+    VK_ATTACHMENT_STORE_OP_STORE = 0,
+    VK_ATTACHMENT_STORE_OP_DONT_CARE = 1,
+    VK_ATTACHMENT_STORE_OP_BEGIN_RANGE = VK_ATTACHMENT_STORE_OP_STORE,
+    VK_ATTACHMENT_STORE_OP_END_RANGE = VK_ATTACHMENT_STORE_OP_DONT_CARE,
+    VK_ATTACHMENT_STORE_OP_RANGE_SIZE = (VK_ATTACHMENT_STORE_OP_DONT_CARE - VK_ATTACHMENT_STORE_OP_STORE + 1),
+    VK_ATTACHMENT_STORE_OP_MAX_ENUM = 0x7FFFFFFF
+} VkAttachmentStoreOp;
+
+typedef enum VkPipelineBindPoint {
+    VK_PIPELINE_BIND_POINT_GRAPHICS = 0,
+    VK_PIPELINE_BIND_POINT_COMPUTE = 1,
+    VK_PIPELINE_BIND_POINT_RAY_TRACING_NV = 1000165000,
+    VK_PIPELINE_BIND_POINT_BEGIN_RANGE = VK_PIPELINE_BIND_POINT_GRAPHICS,
+    VK_PIPELINE_BIND_POINT_END_RANGE = VK_PIPELINE_BIND_POINT_COMPUTE,
+    VK_PIPELINE_BIND_POINT_RANGE_SIZE = (VK_PIPELINE_BIND_POINT_COMPUTE - VK_PIPELINE_BIND_POINT_GRAPHICS + 1),
+    VK_PIPELINE_BIND_POINT_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineBindPoint;
+
+typedef enum VkCommandBufferLevel {
+    VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0,
+    VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1,
+    VK_COMMAND_BUFFER_LEVEL_BEGIN_RANGE = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+    VK_COMMAND_BUFFER_LEVEL_END_RANGE = VK_COMMAND_BUFFER_LEVEL_SECONDARY,
+    VK_COMMAND_BUFFER_LEVEL_RANGE_SIZE = (VK_COMMAND_BUFFER_LEVEL_SECONDARY - VK_COMMAND_BUFFER_LEVEL_PRIMARY + 1),
+    VK_COMMAND_BUFFER_LEVEL_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferLevel;
+
+typedef enum VkIndexType {
+    VK_INDEX_TYPE_UINT16 = 0,
+    VK_INDEX_TYPE_UINT32 = 1,
+    VK_INDEX_TYPE_NONE_NV = 1000165000,
+    VK_INDEX_TYPE_UINT8_EXT = 1000265000,
+    VK_INDEX_TYPE_BEGIN_RANGE = VK_INDEX_TYPE_UINT16,
+    VK_INDEX_TYPE_END_RANGE = VK_INDEX_TYPE_UINT32,
+    VK_INDEX_TYPE_RANGE_SIZE = (VK_INDEX_TYPE_UINT32 - VK_INDEX_TYPE_UINT16 + 1),
+    VK_INDEX_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkIndexType;
+
+typedef enum VkSubpassContents {
+    VK_SUBPASS_CONTENTS_INLINE = 0,
+    VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1,
+    VK_SUBPASS_CONTENTS_BEGIN_RANGE = VK_SUBPASS_CONTENTS_INLINE,
+    VK_SUBPASS_CONTENTS_END_RANGE = VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS,
+    VK_SUBPASS_CONTENTS_RANGE_SIZE = (VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS - VK_SUBPASS_CONTENTS_INLINE + 1),
+    VK_SUBPASS_CONTENTS_MAX_ENUM = 0x7FFFFFFF
+} VkSubpassContents;
+
+typedef enum VkObjectType {
+    VK_OBJECT_TYPE_UNKNOWN = 0,
+    VK_OBJECT_TYPE_INSTANCE = 1,
+    VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2,
+    VK_OBJECT_TYPE_DEVICE = 3,
+    VK_OBJECT_TYPE_QUEUE = 4,
+    VK_OBJECT_TYPE_SEMAPHORE = 5,
+    VK_OBJECT_TYPE_COMMAND_BUFFER = 6,
+    VK_OBJECT_TYPE_FENCE = 7,
+    VK_OBJECT_TYPE_DEVICE_MEMORY = 8,
+    VK_OBJECT_TYPE_BUFFER = 9,
+    VK_OBJECT_TYPE_IMAGE = 10,
+    VK_OBJECT_TYPE_EVENT = 11,
+    VK_OBJECT_TYPE_QUERY_POOL = 12,
+    VK_OBJECT_TYPE_BUFFER_VIEW = 13,
+    VK_OBJECT_TYPE_IMAGE_VIEW = 14,
+    VK_OBJECT_TYPE_SHADER_MODULE = 15,
+    VK_OBJECT_TYPE_PIPELINE_CACHE = 16,
+    VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17,
+    VK_OBJECT_TYPE_RENDER_PASS = 18,
+    VK_OBJECT_TYPE_PIPELINE = 19,
+    VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20,
+    VK_OBJECT_TYPE_SAMPLER = 21,
+    VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22,
+    VK_OBJECT_TYPE_DESCRIPTOR_SET = 23,
+    VK_OBJECT_TYPE_FRAMEBUFFER = 24,
+    VK_OBJECT_TYPE_COMMAND_POOL = 25,
+    VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1000156000,
+    VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1000085000,
+    VK_OBJECT_TYPE_SURFACE_KHR = 1000000000,
+    VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1000001000,
+    VK_OBJECT_TYPE_DISPLAY_KHR = 1000002000,
+    VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1000002001,
+    VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1000011000,
+    VK_OBJECT_TYPE_OBJECT_TABLE_NVX = 1000086000,
+    VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX = 1000086001,
+    VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000128000,
+    VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1000160000,
+    VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV = 1000165000,
+    VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL = 1000210000,
+    VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR = VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE,
+    VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR = VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION,
+    VK_OBJECT_TYPE_BEGIN_RANGE = VK_OBJECT_TYPE_UNKNOWN,
+    VK_OBJECT_TYPE_END_RANGE = VK_OBJECT_TYPE_COMMAND_POOL,
+    VK_OBJECT_TYPE_RANGE_SIZE = (VK_OBJECT_TYPE_COMMAND_POOL - VK_OBJECT_TYPE_UNKNOWN + 1),
+    VK_OBJECT_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkObjectType;
+
+typedef enum VkVendorId {
+    VK_VENDOR_ID_VIV = 0x10001,
+    VK_VENDOR_ID_VSI = 0x10002,
+    VK_VENDOR_ID_KAZAN = 0x10003,
+    VK_VENDOR_ID_BEGIN_RANGE = VK_VENDOR_ID_VIV,
+    VK_VENDOR_ID_END_RANGE = VK_VENDOR_ID_KAZAN,
+    VK_VENDOR_ID_RANGE_SIZE = (VK_VENDOR_ID_KAZAN - VK_VENDOR_ID_VIV + 1),
+    VK_VENDOR_ID_MAX_ENUM = 0x7FFFFFFF
+} VkVendorId;
+typedef VkFlags VkInstanceCreateFlags;
+
+typedef enum VkFormatFeatureFlagBits {
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x00000001,
+    VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x00000002,
+    VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x00000004,
+    VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000008,
+    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x00000010,
+    VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x00000020,
+    VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x00000040,
+    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x00000080,
+    VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x00000100,
+    VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000200,
+    VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x00000400,
+    VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x00000800,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x00001000,
+    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT = 0x00004000,
+    VK_FORMAT_FEATURE_TRANSFER_DST_BIT = 0x00008000,
+    VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x00020000,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x00040000,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = 0x00080000,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT = 0x00100000,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT = 0x00200000,
+    VK_FORMAT_FEATURE_DISJOINT_BIT = 0x00400000,
+    VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT = 0x00800000,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT = 0x00010000,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG = 0x00002000,
+    VK_FORMAT_FEATURE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x01000000,
+    VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_SRC_BIT,
+    VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = VK_FORMAT_FEATURE_TRANSFER_DST_BIT,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_MINMAX_BIT,
+    VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_BIT,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT_KHR = VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_CHROMA_RECONSTRUCTION_EXPLICIT_FORCEABLE_BIT,
+    VK_FORMAT_FEATURE_DISJOINT_BIT_KHR = VK_FORMAT_FEATURE_DISJOINT_BIT,
+    VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT_KHR = VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT,
+    VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_EXT = VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG,
+    VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkFormatFeatureFlagBits;
+typedef VkFlags VkFormatFeatureFlags;
+
+typedef enum VkImageUsageFlagBits {
+    VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x00000001,
+    VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x00000002,
+    VK_IMAGE_USAGE_SAMPLED_BIT = 0x00000004,
+    VK_IMAGE_USAGE_STORAGE_BIT = 0x00000008,
+    VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x00000010,
+    VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x00000020,
+    VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x00000040,
+    VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x00000080,
+    VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00000100,
+    VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT = 0x00000200,
+    VK_IMAGE_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkImageUsageFlagBits;
+typedef VkFlags VkImageUsageFlags;
+
+typedef enum VkImageCreateFlagBits {
+    VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x00000001,
+    VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+    VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+    VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x00000008,
+    VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x00000010,
+    VK_IMAGE_CREATE_ALIAS_BIT = 0x00000400,
+    VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x00000040,
+    VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x00000020,
+    VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x00000080,
+    VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x00000100,
+    VK_IMAGE_CREATE_PROTECTED_BIT = 0x00000800,
+    VK_IMAGE_CREATE_DISJOINT_BIT = 0x00000200,
+    VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV = 0x00002000,
+    VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x00001000,
+    VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT = 0x00004000,
+    VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT,
+    VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT,
+    VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT_KHR = VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT,
+    VK_IMAGE_CREATE_EXTENDED_USAGE_BIT_KHR = VK_IMAGE_CREATE_EXTENDED_USAGE_BIT,
+    VK_IMAGE_CREATE_DISJOINT_BIT_KHR = VK_IMAGE_CREATE_DISJOINT_BIT,
+    VK_IMAGE_CREATE_ALIAS_BIT_KHR = VK_IMAGE_CREATE_ALIAS_BIT,
+    VK_IMAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkImageCreateFlagBits;
+typedef VkFlags VkImageCreateFlags;
+
+typedef enum VkSampleCountFlagBits {
+    VK_SAMPLE_COUNT_1_BIT = 0x00000001,
+    VK_SAMPLE_COUNT_2_BIT = 0x00000002,
+    VK_SAMPLE_COUNT_4_BIT = 0x00000004,
+    VK_SAMPLE_COUNT_8_BIT = 0x00000008,
+    VK_SAMPLE_COUNT_16_BIT = 0x00000010,
+    VK_SAMPLE_COUNT_32_BIT = 0x00000020,
+    VK_SAMPLE_COUNT_64_BIT = 0x00000040,
+    VK_SAMPLE_COUNT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSampleCountFlagBits;
+typedef VkFlags VkSampleCountFlags;
+
+typedef enum VkQueueFlagBits {
+    VK_QUEUE_GRAPHICS_BIT = 0x00000001,
+    VK_QUEUE_COMPUTE_BIT = 0x00000002,
+    VK_QUEUE_TRANSFER_BIT = 0x00000004,
+    VK_QUEUE_SPARSE_BINDING_BIT = 0x00000008,
+    VK_QUEUE_PROTECTED_BIT = 0x00000010,
+    VK_QUEUE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueueFlagBits;
+typedef VkFlags VkQueueFlags;
+
+typedef enum VkMemoryPropertyFlagBits {
+    VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x00000001,
+    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x00000002,
+    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x00000004,
+    VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x00000008,
+    VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x00000010,
+    VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x00000020,
+    VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD = 0x00000040,
+    VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD = 0x00000080,
+    VK_MEMORY_PROPERTY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkMemoryPropertyFlagBits;
+typedef VkFlags VkMemoryPropertyFlags;
+
+typedef enum VkMemoryHeapFlagBits {
+    VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x00000001,
+    VK_MEMORY_HEAP_MULTI_INSTANCE_BIT = 0x00000002,
+    VK_MEMORY_HEAP_MULTI_INSTANCE_BIT_KHR = VK_MEMORY_HEAP_MULTI_INSTANCE_BIT,
+    VK_MEMORY_HEAP_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkMemoryHeapFlagBits;
+typedef VkFlags VkMemoryHeapFlags;
+typedef VkFlags VkDeviceCreateFlags;
+
+typedef enum VkDeviceQueueCreateFlagBits {
+    VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x00000001,
+    VK_DEVICE_QUEUE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkDeviceQueueCreateFlagBits;
+typedef VkFlags VkDeviceQueueCreateFlags;
+
+typedef enum VkPipelineStageFlagBits {
+    VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x00000001,
+    VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x00000002,
+    VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x00000004,
+    VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x00000008,
+    VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x00000010,
+    VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x00000020,
+    VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x00000040,
+    VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x00000080,
+    VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x00000100,
+    VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x00000200,
+    VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x00000400,
+    VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x00000800,
+    VK_PIPELINE_STAGE_TRANSFER_BIT = 0x00001000,
+    VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x00002000,
+    VK_PIPELINE_STAGE_HOST_BIT = 0x00004000,
+    VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x00008000,
+    VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x00010000,
+    VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT = 0x01000000,
+    VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00040000,
+    VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX = 0x00020000,
+    VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = 0x00400000,
+    VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV = 0x00200000,
+    VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_NV = 0x02000000,
+    VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x00080000,
+    VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x00100000,
+    VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x00800000,
+    VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineStageFlagBits;
+typedef VkFlags VkPipelineStageFlags;
+typedef VkFlags VkMemoryMapFlags;
+
+typedef enum VkImageAspectFlagBits {
+    VK_IMAGE_ASPECT_COLOR_BIT = 0x00000001,
+    VK_IMAGE_ASPECT_DEPTH_BIT = 0x00000002,
+    VK_IMAGE_ASPECT_STENCIL_BIT = 0x00000004,
+    VK_IMAGE_ASPECT_METADATA_BIT = 0x00000008,
+    VK_IMAGE_ASPECT_PLANE_0_BIT = 0x00000010,
+    VK_IMAGE_ASPECT_PLANE_1_BIT = 0x00000020,
+    VK_IMAGE_ASPECT_PLANE_2_BIT = 0x00000040,
+    VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT = 0x00000080,
+    VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT = 0x00000100,
+    VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT = 0x00000200,
+    VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT = 0x00000400,
+    VK_IMAGE_ASPECT_PLANE_0_BIT_KHR = VK_IMAGE_ASPECT_PLANE_0_BIT,
+    VK_IMAGE_ASPECT_PLANE_1_BIT_KHR = VK_IMAGE_ASPECT_PLANE_1_BIT,
+    VK_IMAGE_ASPECT_PLANE_2_BIT_KHR = VK_IMAGE_ASPECT_PLANE_2_BIT,
+    VK_IMAGE_ASPECT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkImageAspectFlagBits;
+typedef VkFlags VkImageAspectFlags;
+
+typedef enum VkSparseImageFormatFlagBits {
+    VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x00000001,
+    VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x00000002,
+    VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x00000004,
+    VK_SPARSE_IMAGE_FORMAT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSparseImageFormatFlagBits;
+typedef VkFlags VkSparseImageFormatFlags;
+
+typedef enum VkSparseMemoryBindFlagBits {
+    VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x00000001,
+    VK_SPARSE_MEMORY_BIND_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSparseMemoryBindFlagBits;
+typedef VkFlags VkSparseMemoryBindFlags;
+
+typedef enum VkFenceCreateFlagBits {
+    VK_FENCE_CREATE_SIGNALED_BIT = 0x00000001,
+    VK_FENCE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkFenceCreateFlagBits;
+typedef VkFlags VkFenceCreateFlags;
+typedef VkFlags VkSemaphoreCreateFlags;
+typedef VkFlags VkEventCreateFlags;
+typedef VkFlags VkQueryPoolCreateFlags;
+
+typedef enum VkQueryPipelineStatisticFlagBits {
+    VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x00000001,
+    VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x00000002,
+    VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x00000004,
+    VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x00000008,
+    VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x00000010,
+    VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x00000020,
+    VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x00000040,
+    VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x00000080,
+    VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x00000100,
+    VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x00000200,
+    VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x00000400,
+    VK_QUERY_PIPELINE_STATISTIC_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueryPipelineStatisticFlagBits;
+typedef VkFlags VkQueryPipelineStatisticFlags;
+
+typedef enum VkQueryResultFlagBits {
+    VK_QUERY_RESULT_64_BIT = 0x00000001,
+    VK_QUERY_RESULT_WAIT_BIT = 0x00000002,
+    VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x00000004,
+    VK_QUERY_RESULT_PARTIAL_BIT = 0x00000008,
+    VK_QUERY_RESULT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueryResultFlagBits;
+typedef VkFlags VkQueryResultFlags;
+
+typedef enum VkBufferCreateFlagBits {
+    VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x00000001,
+    VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x00000002,
+    VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x00000004,
+    VK_BUFFER_CREATE_PROTECTED_BIT = 0x00000008,
+    VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000010,
+    VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT,
+    VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT,
+    VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkBufferCreateFlagBits;
+typedef VkFlags VkBufferCreateFlags;
+
+typedef enum VkBufferUsageFlagBits {
+    VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x00000001,
+    VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x00000002,
+    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x00000004,
+    VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x00000008,
+    VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x00000010,
+    VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x00000020,
+    VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x00000040,
+    VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x00000080,
+    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x00000100,
+    VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x00020000,
+    VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x00000800,
+    VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x00001000,
+    VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x00000200,
+    VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = 0x00000400,
+    VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
+    VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_KHR = VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
+    VK_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkBufferUsageFlagBits;
+typedef VkFlags VkBufferUsageFlags;
+typedef VkFlags VkBufferViewCreateFlags;
+
+typedef enum VkImageViewCreateFlagBits {
+    VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT = 0x00000001,
+    VK_IMAGE_VIEW_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkImageViewCreateFlagBits;
+typedef VkFlags VkImageViewCreateFlags;
+
+typedef enum VkShaderModuleCreateFlagBits {
+    VK_SHADER_MODULE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkShaderModuleCreateFlagBits;
+typedef VkFlags VkShaderModuleCreateFlags;
+typedef VkFlags VkPipelineCacheCreateFlags;
+
+typedef enum VkPipelineCreateFlagBits {
+    VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x00000001,
+    VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x00000002,
+    VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x00000004,
+    VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x00000008,
+    VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 0x00000010,
+    VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x00000020,
+    VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR = 0x00000040,
+    VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x00000080,
+    VK_PIPELINE_CREATE_DISPATCH_BASE = VK_PIPELINE_CREATE_DISPATCH_BASE_BIT,
+    VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT_KHR = VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT,
+    VK_PIPELINE_CREATE_DISPATCH_BASE_KHR = VK_PIPELINE_CREATE_DISPATCH_BASE,
+    VK_PIPELINE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPipelineCreateFlagBits;
+typedef VkFlags VkPipelineCreateFlags;
+
+typedef enum VkPipelineShaderStageCreateFlagBits {
+    VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT = 0x00000001,
VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT = 0x00000002, + VK_PIPELINE_SHADER_STAGE_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkPipelineShaderStageCreateFlagBits; +typedef VkFlags VkPipelineShaderStageCreateFlags; + +typedef enum VkShaderStageFlagBits { + VK_SHADER_STAGE_VERTEX_BIT = 0x00000001, + VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x00000002, + VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x00000004, + VK_SHADER_STAGE_GEOMETRY_BIT = 0x00000008, + VK_SHADER_STAGE_FRAGMENT_BIT = 0x00000010, + VK_SHADER_STAGE_COMPUTE_BIT = 0x00000020, + VK_SHADER_STAGE_ALL_GRAPHICS = 0x0000001F, + VK_SHADER_STAGE_ALL = 0x7FFFFFFF, + VK_SHADER_STAGE_RAYGEN_BIT_NV = 0x00000100, + VK_SHADER_STAGE_ANY_HIT_BIT_NV = 0x00000200, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV = 0x00000400, + VK_SHADER_STAGE_MISS_BIT_NV = 0x00000800, + VK_SHADER_STAGE_INTERSECTION_BIT_NV = 0x00001000, + VK_SHADER_STAGE_CALLABLE_BIT_NV = 0x00002000, + VK_SHADER_STAGE_TASK_BIT_NV = 0x00000040, + VK_SHADER_STAGE_MESH_BIT_NV = 0x00000080, + VK_SHADER_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkShaderStageFlagBits; +typedef VkFlags VkPipelineVertexInputStateCreateFlags; +typedef VkFlags VkPipelineInputAssemblyStateCreateFlags; +typedef VkFlags VkPipelineTessellationStateCreateFlags; +typedef VkFlags VkPipelineViewportStateCreateFlags; +typedef VkFlags VkPipelineRasterizationStateCreateFlags; + +typedef enum VkCullModeFlagBits { + VK_CULL_MODE_NONE = 0, + VK_CULL_MODE_FRONT_BIT = 0x00000001, + VK_CULL_MODE_BACK_BIT = 0x00000002, + VK_CULL_MODE_FRONT_AND_BACK = 0x00000003, + VK_CULL_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCullModeFlagBits; +typedef VkFlags VkCullModeFlags; +typedef VkFlags VkPipelineMultisampleStateCreateFlags; +typedef VkFlags VkPipelineDepthStencilStateCreateFlags; +typedef VkFlags VkPipelineColorBlendStateCreateFlags; + +typedef enum VkColorComponentFlagBits { + VK_COLOR_COMPONENT_R_BIT = 0x00000001, + VK_COLOR_COMPONENT_G_BIT = 0x00000002, + VK_COLOR_COMPONENT_B_BIT = 0x00000004, + VK_COLOR_COMPONENT_A_BIT = 0x00000008, + VK_COLOR_COMPONENT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkColorComponentFlagBits; +typedef VkFlags VkColorComponentFlags; +typedef VkFlags VkPipelineDynamicStateCreateFlags; +typedef VkFlags VkPipelineLayoutCreateFlags; +typedef VkFlags VkShaderStageFlags; + +typedef enum VkSamplerCreateFlagBits { + VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT = 0x00000001, + VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT = 0x00000002, + VK_SAMPLER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSamplerCreateFlagBits; +typedef VkFlags VkSamplerCreateFlags; + +typedef enum VkDescriptorSetLayoutCreateFlagBits { + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT = 0x00000002, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x00000001, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorSetLayoutCreateFlagBits; +typedef VkFlags VkDescriptorSetLayoutCreateFlags; + +typedef enum VkDescriptorPoolCreateFlagBits { + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x00000001, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT = 0x00000002, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT, + VK_DESCRIPTOR_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorPoolCreateFlagBits; +typedef VkFlags VkDescriptorPoolCreateFlags; +typedef VkFlags VkDescriptorPoolResetFlags; + 
+typedef enum VkFramebufferCreateFlagBits { + VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT = 0x00000001, + VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT_KHR = VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT, + VK_FRAMEBUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkFramebufferCreateFlagBits; +typedef VkFlags VkFramebufferCreateFlags; + +typedef enum VkRenderPassCreateFlagBits { + VK_RENDER_PASS_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkRenderPassCreateFlagBits; +typedef VkFlags VkRenderPassCreateFlags; + +typedef enum VkAttachmentDescriptionFlagBits { + VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x00000001, + VK_ATTACHMENT_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAttachmentDescriptionFlagBits; +typedef VkFlags VkAttachmentDescriptionFlags; + +typedef enum VkSubpassDescriptionFlagBits { + VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x00000001, + VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x00000002, + VK_SUBPASS_DESCRIPTION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSubpassDescriptionFlagBits; +typedef VkFlags VkSubpassDescriptionFlags; + +typedef enum VkAccessFlagBits { + VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x00000001, + VK_ACCESS_INDEX_READ_BIT = 0x00000002, + VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x00000004, + VK_ACCESS_UNIFORM_READ_BIT = 0x00000008, + VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x00000010, + VK_ACCESS_SHADER_READ_BIT = 0x00000020, + VK_ACCESS_SHADER_WRITE_BIT = 0x00000040, + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x00000080, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x00000100, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x00000200, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x00000400, + VK_ACCESS_TRANSFER_READ_BIT = 0x00000800, + VK_ACCESS_TRANSFER_WRITE_BIT = 0x00001000, + VK_ACCESS_HOST_READ_BIT = 0x00002000, + VK_ACCESS_HOST_WRITE_BIT = 0x00004000, + VK_ACCESS_MEMORY_READ_BIT = 0x00008000, + VK_ACCESS_MEMORY_WRITE_BIT = 0x00010000, + VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x02000000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x04000000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x08000000, + VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x00100000, + VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 0x00020000, + VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 0x00040000, + VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x00080000, + VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 0x00800000, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_NV = 0x00200000, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_NV = 0x00400000, + VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x01000000, + VK_ACCESS_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkAccessFlagBits; +typedef VkFlags VkAccessFlags; + +typedef enum VkDependencyFlagBits { + VK_DEPENDENCY_BY_REGION_BIT = 0x00000001, + VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x00000004, + VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x00000002, + VK_DEPENDENCY_VIEW_LOCAL_BIT_KHR = VK_DEPENDENCY_VIEW_LOCAL_BIT, + VK_DEPENDENCY_DEVICE_GROUP_BIT_KHR = VK_DEPENDENCY_DEVICE_GROUP_BIT, + VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDependencyFlagBits; +typedef VkFlags VkDependencyFlags; + +typedef enum VkCommandPoolCreateFlagBits { + VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001, + VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002, + VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004, + VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkCommandPoolCreateFlagBits; +typedef VkFlags VkCommandPoolCreateFlags; + +typedef enum VkCommandPoolResetFlagBits { + VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001, + 
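/* [Editor's sketch -- not part of this patch.] A typical pairing of the stage
 * and access masks above: make color-attachment writes of one subpass visible
 * to input-attachment reads in the next. Variable names are illustrative only. */
VkPipelineStageFlags src_stage  = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
VkAccessFlags        src_access = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
VkPipelineStageFlags dst_stage  = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
VkAccessFlags        dst_access = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;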
+typedef enum VkCommandPoolCreateFlagBits {
+    VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001,
+    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002,
+    VK_COMMAND_POOL_CREATE_PROTECTED_BIT = 0x00000004,
+    VK_COMMAND_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandPoolCreateFlagBits;
+typedef VkFlags VkCommandPoolCreateFlags;
+
+typedef enum VkCommandPoolResetFlagBits {
+    VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001,
+    VK_COMMAND_POOL_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandPoolResetFlagBits;
+typedef VkFlags VkCommandPoolResetFlags;
+
+typedef enum VkCommandBufferUsageFlagBits {
+    VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001,
+    VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002,
+    VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004,
+    VK_COMMAND_BUFFER_USAGE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferUsageFlagBits;
+typedef VkFlags VkCommandBufferUsageFlags;
+
+typedef enum VkQueryControlFlagBits {
+    VK_QUERY_CONTROL_PRECISE_BIT = 0x00000001,
+    VK_QUERY_CONTROL_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkQueryControlFlagBits;
+typedef VkFlags VkQueryControlFlags;
+
+typedef enum VkCommandBufferResetFlagBits {
+    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001,
+    VK_COMMAND_BUFFER_RESET_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkCommandBufferResetFlagBits;
+typedef VkFlags VkCommandBufferResetFlags;
+
+typedef enum VkStencilFaceFlagBits {
+    VK_STENCIL_FACE_FRONT_BIT = 0x00000001,
+    VK_STENCIL_FACE_BACK_BIT = 0x00000002,
+    VK_STENCIL_FACE_FRONT_AND_BACK = 0x00000003,
+    VK_STENCIL_FRONT_AND_BACK = VK_STENCIL_FACE_FRONT_AND_BACK,
+    VK_STENCIL_FACE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkStencilFaceFlagBits;
+typedef VkFlags VkStencilFaceFlags;
+typedef struct VkApplicationInfo {
+    VkStructureType sType;
+    const void* pNext;
+    const char* pApplicationName;
+    uint32_t applicationVersion;
+    const char* pEngineName;
+    uint32_t engineVersion;
+    uint32_t apiVersion;
+} VkApplicationInfo;
+
+typedef struct VkInstanceCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkInstanceCreateFlags flags;
+    const VkApplicationInfo* pApplicationInfo;
+    uint32_t enabledLayerCount;
+    const char* const* ppEnabledLayerNames;
+    uint32_t enabledExtensionCount;
+    const char* const* ppEnabledExtensionNames;
+} VkInstanceCreateInfo;
+
+typedef void* (VKAPI_PTR *PFN_vkAllocationFunction)(
+    void* pUserData,
+    size_t size,
+    size_t alignment,
+    VkSystemAllocationScope allocationScope);
+
+typedef void* (VKAPI_PTR *PFN_vkReallocationFunction)(
+    void* pUserData,
+    void* pOriginal,
+    size_t size,
+    size_t alignment,
+    VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkFreeFunction)(
+    void* pUserData,
+    void* pMemory);
+
+typedef void (VKAPI_PTR *PFN_vkInternalAllocationNotification)(
+    void* pUserData,
+    size_t size,
+    VkInternalAllocationType allocationType,
+    VkSystemAllocationScope allocationScope);
+
+typedef void (VKAPI_PTR *PFN_vkInternalFreeNotification)(
+    void* pUserData,
+    size_t size,
+    VkInternalAllocationType allocationType,
+    VkSystemAllocationScope allocationScope);
+
+typedef struct VkAllocationCallbacks {
+    void* pUserData;
+    PFN_vkAllocationFunction pfnAllocation;
+    PFN_vkReallocationFunction pfnReallocation;
+    PFN_vkFreeFunction pfnFree;
+    PFN_vkInternalAllocationNotification pfnInternalAllocation;
+    PFN_vkInternalFreeNotification pfnInternalFree;
+} VkAllocationCallbacks;
+
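/* [Editor's sketch -- not part of this patch.] Minimal use of the two structs
 * above with vkCreateInstance, which is prototyped near the end of this
 * header. The application name and version are placeholders. */
static VkInstance demo_create_instance(void) {
    VkApplicationInfo app = {0};
    app.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
    app.pApplicationName = "demo";             /* hypothetical */
    app.apiVersion = VK_MAKE_VERSION(1, 1, 0);
    VkInstanceCreateInfo ci = {0};
    ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
    ci.pApplicationInfo = &app;
    VkInstance inst = VK_NULL_HANDLE;
    /* A NULL pAllocator selects the implementation's default allocator
     * instead of the VkAllocationCallbacks hooks declared above. */
    if (vkCreateInstance(&ci, NULL, &inst) != VK_SUCCESS)
        return VK_NULL_HANDLE;
    return inst;
}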
+typedef struct VkPhysicalDeviceFeatures {
+    VkBool32 robustBufferAccess;
+    VkBool32 fullDrawIndexUint32;
+    VkBool32 imageCubeArray;
+    VkBool32 independentBlend;
+    VkBool32 geometryShader;
+    VkBool32 tessellationShader;
+    VkBool32 sampleRateShading;
+    VkBool32 dualSrcBlend;
+    VkBool32 logicOp;
+    VkBool32 multiDrawIndirect;
+    VkBool32 drawIndirectFirstInstance;
+    VkBool32 depthClamp;
+    VkBool32 depthBiasClamp;
+    VkBool32 fillModeNonSolid;
+    VkBool32 depthBounds;
+    VkBool32 wideLines;
+    VkBool32 largePoints;
+    VkBool32 alphaToOne;
+    VkBool32 multiViewport;
+    VkBool32 samplerAnisotropy;
+    VkBool32 textureCompressionETC2;
+    VkBool32 textureCompressionASTC_LDR;
+    VkBool32 textureCompressionBC;
+    VkBool32 occlusionQueryPrecise;
+    VkBool32 pipelineStatisticsQuery;
+    VkBool32 vertexPipelineStoresAndAtomics;
+    VkBool32 fragmentStoresAndAtomics;
+    VkBool32 shaderTessellationAndGeometryPointSize;
+    VkBool32 shaderImageGatherExtended;
+    VkBool32 shaderStorageImageExtendedFormats;
+    VkBool32 shaderStorageImageMultisample;
+    VkBool32 shaderStorageImageReadWithoutFormat;
+    VkBool32 shaderStorageImageWriteWithoutFormat;
+    VkBool32 shaderUniformBufferArrayDynamicIndexing;
+    VkBool32 shaderSampledImageArrayDynamicIndexing;
+    VkBool32 shaderStorageBufferArrayDynamicIndexing;
+    VkBool32 shaderStorageImageArrayDynamicIndexing;
+    VkBool32 shaderClipDistance;
+    VkBool32 shaderCullDistance;
+    VkBool32 shaderFloat64;
+    VkBool32 shaderInt64;
+    VkBool32 shaderInt16;
+    VkBool32 shaderResourceResidency;
+    VkBool32 shaderResourceMinLod;
+    VkBool32 sparseBinding;
+    VkBool32 sparseResidencyBuffer;
+    VkBool32 sparseResidencyImage2D;
+    VkBool32 sparseResidencyImage3D;
+    VkBool32 sparseResidency2Samples;
+    VkBool32 sparseResidency4Samples;
+    VkBool32 sparseResidency8Samples;
+    VkBool32 sparseResidency16Samples;
+    VkBool32 sparseResidencyAliased;
+    VkBool32 variableMultisampleRate;
+    VkBool32 inheritedQueries;
+} VkPhysicalDeviceFeatures;
+
+typedef struct VkFormatProperties {
+    VkFormatFeatureFlags linearTilingFeatures;
+    VkFormatFeatureFlags optimalTilingFeatures;
+    VkFormatFeatureFlags bufferFeatures;
+} VkFormatProperties;
+
+typedef struct VkExtent3D {
+    uint32_t width;
+    uint32_t height;
+    uint32_t depth;
+} VkExtent3D;
+
+typedef struct VkImageFormatProperties {
+    VkExtent3D maxExtent;
+    uint32_t maxMipLevels;
+    uint32_t maxArrayLayers;
+    VkSampleCountFlags sampleCounts;
+    VkDeviceSize maxResourceSize;
+} VkImageFormatProperties;
+
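/* [Editor's sketch -- not part of this patch.] VkPhysicalDeviceFeatures is
 * filled by vkGetPhysicalDeviceFeatures (prototyped below); a feature should
 * be checked here before being re-passed via
 * VkDeviceCreateInfo::pEnabledFeatures. */
static int demo_supports_anisotropy(VkPhysicalDevice gpu) {
    VkPhysicalDeviceFeatures feats;
    vkGetPhysicalDeviceFeatures(gpu, &feats);
    return feats.samplerAnisotropy == VK_TRUE;
}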
+typedef struct VkPhysicalDeviceLimits {
+    uint32_t maxImageDimension1D;
+    uint32_t maxImageDimension2D;
+    uint32_t maxImageDimension3D;
+    uint32_t maxImageDimensionCube;
+    uint32_t maxImageArrayLayers;
+    uint32_t maxTexelBufferElements;
+    uint32_t maxUniformBufferRange;
+    uint32_t maxStorageBufferRange;
+    uint32_t maxPushConstantsSize;
+    uint32_t maxMemoryAllocationCount;
+    uint32_t maxSamplerAllocationCount;
+    VkDeviceSize bufferImageGranularity;
+    VkDeviceSize sparseAddressSpaceSize;
+    uint32_t maxBoundDescriptorSets;
+    uint32_t maxPerStageDescriptorSamplers;
+    uint32_t maxPerStageDescriptorUniformBuffers;
+    uint32_t maxPerStageDescriptorStorageBuffers;
+    uint32_t maxPerStageDescriptorSampledImages;
+    uint32_t maxPerStageDescriptorStorageImages;
+    uint32_t maxPerStageDescriptorInputAttachments;
+    uint32_t maxPerStageResources;
+    uint32_t maxDescriptorSetSamplers;
+    uint32_t maxDescriptorSetUniformBuffers;
+    uint32_t maxDescriptorSetUniformBuffersDynamic;
+    uint32_t maxDescriptorSetStorageBuffers;
+    uint32_t maxDescriptorSetStorageBuffersDynamic;
+    uint32_t maxDescriptorSetSampledImages;
+    uint32_t maxDescriptorSetStorageImages;
+    uint32_t maxDescriptorSetInputAttachments;
+    uint32_t maxVertexInputAttributes;
+    uint32_t maxVertexInputBindings;
+    uint32_t maxVertexInputAttributeOffset;
+    uint32_t maxVertexInputBindingStride;
+    uint32_t maxVertexOutputComponents;
+    uint32_t maxTessellationGenerationLevel;
+    uint32_t maxTessellationPatchSize;
+    uint32_t maxTessellationControlPerVertexInputComponents;
+    uint32_t maxTessellationControlPerVertexOutputComponents;
+    uint32_t maxTessellationControlPerPatchOutputComponents;
+    uint32_t maxTessellationControlTotalOutputComponents;
+    uint32_t maxTessellationEvaluationInputComponents;
+    uint32_t maxTessellationEvaluationOutputComponents;
+    uint32_t maxGeometryShaderInvocations;
+    uint32_t maxGeometryInputComponents;
+    uint32_t maxGeometryOutputComponents;
+    uint32_t maxGeometryOutputVertices;
+    uint32_t maxGeometryTotalOutputComponents;
+    uint32_t maxFragmentInputComponents;
+    uint32_t maxFragmentOutputAttachments;
+    uint32_t maxFragmentDualSrcAttachments;
+    uint32_t maxFragmentCombinedOutputResources;
+    uint32_t maxComputeSharedMemorySize;
+    uint32_t maxComputeWorkGroupCount[3];
+    uint32_t maxComputeWorkGroupInvocations;
+    uint32_t maxComputeWorkGroupSize[3];
+    uint32_t subPixelPrecisionBits;
+    uint32_t subTexelPrecisionBits;
+    uint32_t mipmapPrecisionBits;
+    uint32_t maxDrawIndexedIndexValue;
+    uint32_t maxDrawIndirectCount;
+    float maxSamplerLodBias;
+    float maxSamplerAnisotropy;
+    uint32_t maxViewports;
+    uint32_t maxViewportDimensions[2];
+    float viewportBoundsRange[2];
+    uint32_t viewportSubPixelBits;
+    size_t minMemoryMapAlignment;
+    VkDeviceSize minTexelBufferOffsetAlignment;
+    VkDeviceSize minUniformBufferOffsetAlignment;
+    VkDeviceSize minStorageBufferOffsetAlignment;
+    int32_t minTexelOffset;
+    uint32_t maxTexelOffset;
+    int32_t minTexelGatherOffset;
+    uint32_t maxTexelGatherOffset;
+    float minInterpolationOffset;
+    float maxInterpolationOffset;
+    uint32_t subPixelInterpolationOffsetBits;
+    uint32_t maxFramebufferWidth;
+    uint32_t maxFramebufferHeight;
+    uint32_t maxFramebufferLayers;
+    VkSampleCountFlags framebufferColorSampleCounts;
+    VkSampleCountFlags framebufferDepthSampleCounts;
+    VkSampleCountFlags framebufferStencilSampleCounts;
+    VkSampleCountFlags framebufferNoAttachmentsSampleCounts;
+    uint32_t maxColorAttachments;
+    VkSampleCountFlags sampledImageColorSampleCounts;
+    VkSampleCountFlags sampledImageIntegerSampleCounts;
+    VkSampleCountFlags sampledImageDepthSampleCounts;
+    VkSampleCountFlags sampledImageStencilSampleCounts;
+    VkSampleCountFlags storageImageSampleCounts;
+    uint32_t maxSampleMaskWords;
+    VkBool32 timestampComputeAndGraphics;
+    float timestampPeriod;
+    uint32_t maxClipDistances;
+    uint32_t maxCullDistances;
+    uint32_t maxCombinedClipAndCullDistances;
+    uint32_t discreteQueuePriorities;
+    float pointSizeRange[2];
+    float lineWidthRange[2];
+    float pointSizeGranularity;
+    float lineWidthGranularity;
+    VkBool32 strictLines;
+    VkBool32 standardSampleLocations;
+    VkDeviceSize optimalBufferCopyOffsetAlignment;
+    VkDeviceSize optimalBufferCopyRowPitchAlignment;
+    VkDeviceSize nonCoherentAtomSize;
+} VkPhysicalDeviceLimits;
+
+typedef struct VkPhysicalDeviceSparseProperties {
+    VkBool32 residencyStandard2DBlockShape;
+    VkBool32 residencyStandard2DMultisampleBlockShape;
+    VkBool32 residencyStandard3DBlockShape;
+    VkBool32 residencyAlignedMipSize;
+    VkBool32 residencyNonResidentStrict;
+} VkPhysicalDeviceSparseProperties;
+
+typedef struct VkPhysicalDeviceProperties {
+    uint32_t apiVersion;
+    uint32_t driverVersion;
+    uint32_t vendorID;
+    uint32_t deviceID;
+    VkPhysicalDeviceType deviceType;
+    char deviceName[VK_MAX_PHYSICAL_DEVICE_NAME_SIZE];
+    uint8_t pipelineCacheUUID[VK_UUID_SIZE];
+    VkPhysicalDeviceLimits limits;
+    VkPhysicalDeviceSparseProperties sparseProperties;
+} VkPhysicalDeviceProperties;
+
+typedef struct VkQueueFamilyProperties {
+    VkQueueFlags queueFlags;
+    uint32_t queueCount;
+    uint32_t timestampValidBits;
+    VkExtent3D minImageTransferGranularity;
+} VkQueueFamilyProperties;
+
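/* [Editor's sketch -- not part of this patch.] The usual count-then-fill
 * pattern for vkGetPhysicalDeviceQueueFamilyProperties (prototyped below),
 * picking the first graphics-capable family; the fixed-size array is a
 * simplification for the sketch. */
static uint32_t demo_find_graphics_family(VkPhysicalDevice gpu) {
    uint32_t count = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, NULL); /* query count */
    VkQueueFamilyProperties props[16];
    if (count > 16) count = 16;
    vkGetPhysicalDeviceQueueFamilyProperties(gpu, &count, props);
    for (uint32_t i = 0; i < count; ++i)
        if (props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT)
            return i;
    return UINT32_MAX; /* no graphics queue family found */
}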
+typedef struct VkMemoryType {
+    VkMemoryPropertyFlags propertyFlags;
+    uint32_t heapIndex;
+} VkMemoryType;
+
+typedef struct VkMemoryHeap {
+    VkDeviceSize size;
+    VkMemoryHeapFlags flags;
+} VkMemoryHeap;
+
+typedef struct VkPhysicalDeviceMemoryProperties {
+    uint32_t memoryTypeCount;
+    VkMemoryType memoryTypes[VK_MAX_MEMORY_TYPES];
+    uint32_t memoryHeapCount;
+    VkMemoryHeap memoryHeaps[VK_MAX_MEMORY_HEAPS];
+} VkPhysicalDeviceMemoryProperties;
+
+typedef void (VKAPI_PTR *PFN_vkVoidFunction)(void);
+typedef struct VkDeviceQueueCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceQueueCreateFlags flags;
+    uint32_t queueFamilyIndex;
+    uint32_t queueCount;
+    const float* pQueuePriorities;
+} VkDeviceQueueCreateInfo;
+
+typedef struct VkDeviceCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceCreateFlags flags;
+    uint32_t queueCreateInfoCount;
+    const VkDeviceQueueCreateInfo* pQueueCreateInfos;
+    uint32_t enabledLayerCount;
+    const char* const* ppEnabledLayerNames;
+    uint32_t enabledExtensionCount;
+    const char* const* ppEnabledExtensionNames;
+    const VkPhysicalDeviceFeatures* pEnabledFeatures;
+} VkDeviceCreateInfo;
+
+typedef struct VkExtensionProperties {
+    char extensionName[VK_MAX_EXTENSION_NAME_SIZE];
+    uint32_t specVersion;
+} VkExtensionProperties;
+
+typedef struct VkLayerProperties {
+    char layerName[VK_MAX_EXTENSION_NAME_SIZE];
+    uint32_t specVersion;
+    uint32_t implementationVersion;
+    char description[VK_MAX_DESCRIPTION_SIZE];
+} VkLayerProperties;
+
+typedef struct VkSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t waitSemaphoreCount;
+    const VkSemaphore* pWaitSemaphores;
+    const VkPipelineStageFlags* pWaitDstStageMask;
+    uint32_t commandBufferCount;
+    const VkCommandBuffer* pCommandBuffers;
+    uint32_t signalSemaphoreCount;
+    const VkSemaphore* pSignalSemaphores;
+} VkSubmitInfo;
+
+typedef struct VkMemoryAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceSize allocationSize;
+    uint32_t memoryTypeIndex;
+} VkMemoryAllocateInfo;
+
+typedef struct VkMappedMemoryRange {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceMemory memory;
+    VkDeviceSize offset;
+    VkDeviceSize size;
+} VkMappedMemoryRange;
+
+typedef struct VkMemoryRequirements {
+    VkDeviceSize size;
+    VkDeviceSize alignment;
+    uint32_t memoryTypeBits;
+} VkMemoryRequirements;
+
+typedef struct VkSparseImageFormatProperties {
+    VkImageAspectFlags aspectMask;
+    VkExtent3D imageGranularity;
+    VkSparseImageFormatFlags flags;
+} VkSparseImageFormatProperties;
+
+typedef struct VkSparseImageMemoryRequirements {
+    VkSparseImageFormatProperties formatProperties;
+    uint32_t imageMipTailFirstLod;
+    VkDeviceSize imageMipTailSize;
+    VkDeviceSize imageMipTailOffset;
+    VkDeviceSize imageMipTailStride;
+} VkSparseImageMemoryRequirements;
+
+typedef struct VkSparseMemoryBind {
+    VkDeviceSize resourceOffset;
+    VkDeviceSize size;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+    VkSparseMemoryBindFlags flags;
+} VkSparseMemoryBind;
+
+typedef struct VkSparseBufferMemoryBindInfo {
+    VkBuffer buffer;
+    uint32_t bindCount;
+    const VkSparseMemoryBind* pBinds;
+} VkSparseBufferMemoryBindInfo;
+
+typedef struct VkSparseImageOpaqueMemoryBindInfo {
+    VkImage image;
+    uint32_t bindCount;
+    const VkSparseMemoryBind* pBinds;
+} VkSparseImageOpaqueMemoryBindInfo;
+
+typedef struct VkImageSubresource {
+    VkImageAspectFlags aspectMask;
+    uint32_t mipLevel;
+    uint32_t arrayLayer;
+} VkImageSubresource;
+
+typedef struct VkOffset3D {
+    int32_t x;
+    int32_t y;
+    int32_t z;
+} VkOffset3D;
+
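/* [Editor's sketch -- not part of this patch.] The standard memoryTypeIndex
 * search connecting VkMemoryRequirements::memoryTypeBits to the table in
 * VkPhysicalDeviceMemoryProperties (both declared above) when filling
 * VkMemoryAllocateInfo. */
static uint32_t demo_find_memory_type(const VkPhysicalDeviceMemoryProperties* mem,
                                      uint32_t type_bits,
                                      VkMemoryPropertyFlags wanted) {
    for (uint32_t i = 0; i < mem->memoryTypeCount; ++i)
        if ((type_bits & (1u << i)) &&
            (mem->memoryTypes[i].propertyFlags & wanted) == wanted)
            return i; /* first type that is both allowed and suitable */
    return UINT32_MAX;
}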
+typedef struct VkSparseImageMemoryBind {
+    VkImageSubresource subresource;
+    VkOffset3D offset;
+    VkExtent3D extent;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+    VkSparseMemoryBindFlags flags;
+} VkSparseImageMemoryBind;
+
+typedef struct VkSparseImageMemoryBindInfo {
+    VkImage image;
+    uint32_t bindCount;
+    const VkSparseImageMemoryBind* pBinds;
+} VkSparseImageMemoryBindInfo;
+
+typedef struct VkBindSparseInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t waitSemaphoreCount;
+    const VkSemaphore* pWaitSemaphores;
+    uint32_t bufferBindCount;
+    const VkSparseBufferMemoryBindInfo* pBufferBinds;
+    uint32_t imageOpaqueBindCount;
+    const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
+    uint32_t imageBindCount;
+    const VkSparseImageMemoryBindInfo* pImageBinds;
+    uint32_t signalSemaphoreCount;
+    const VkSemaphore* pSignalSemaphores;
+} VkBindSparseInfo;
+
+typedef struct VkFenceCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkFenceCreateFlags flags;
+} VkFenceCreateInfo;
+
+typedef struct VkSemaphoreCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSemaphoreCreateFlags flags;
+} VkSemaphoreCreateInfo;
+
+typedef struct VkEventCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkEventCreateFlags flags;
+} VkEventCreateInfo;
+
+typedef struct VkQueryPoolCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkQueryPoolCreateFlags flags;
+    VkQueryType queryType;
+    uint32_t queryCount;
+    VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkQueryPoolCreateInfo;
+
+typedef struct VkBufferCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBufferCreateFlags flags;
+    VkDeviceSize size;
+    VkBufferUsageFlags usage;
+    VkSharingMode sharingMode;
+    uint32_t queueFamilyIndexCount;
+    const uint32_t* pQueueFamilyIndices;
+} VkBufferCreateInfo;
+
+typedef struct VkBufferViewCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBufferViewCreateFlags flags;
+    VkBuffer buffer;
+    VkFormat format;
+    VkDeviceSize offset;
+    VkDeviceSize range;
+} VkBufferViewCreateInfo;
+
+typedef struct VkImageCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageCreateFlags flags;
+    VkImageType imageType;
+    VkFormat format;
+    VkExtent3D extent;
+    uint32_t mipLevels;
+    uint32_t arrayLayers;
+    VkSampleCountFlagBits samples;
+    VkImageTiling tiling;
+    VkImageUsageFlags usage;
+    VkSharingMode sharingMode;
+    uint32_t queueFamilyIndexCount;
+    const uint32_t* pQueueFamilyIndices;
+    VkImageLayout initialLayout;
+} VkImageCreateInfo;
+
+typedef struct VkSubresourceLayout {
+    VkDeviceSize offset;
+    VkDeviceSize size;
+    VkDeviceSize rowPitch;
+    VkDeviceSize arrayPitch;
+    VkDeviceSize depthPitch;
+} VkSubresourceLayout;
+
+typedef struct VkComponentMapping {
+    VkComponentSwizzle r;
+    VkComponentSwizzle g;
+    VkComponentSwizzle b;
+    VkComponentSwizzle a;
+} VkComponentMapping;
+
+typedef struct VkImageSubresourceRange {
+    VkImageAspectFlags aspectMask;
+    uint32_t baseMipLevel;
+    uint32_t levelCount;
+    uint32_t baseArrayLayer;
+    uint32_t layerCount;
+} VkImageSubresourceRange;
+
+typedef struct VkImageViewCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageViewCreateFlags flags;
+    VkImage image;
+    VkImageViewType viewType;
+    VkFormat format;
+    VkComponentMapping components;
+    VkImageSubresourceRange subresourceRange;
+} VkImageViewCreateInfo;
+
+typedef struct VkShaderModuleCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkShaderModuleCreateFlags flags;
+    size_t codeSize;
+    const uint32_t* pCode;
+} VkShaderModuleCreateInfo;
+
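/* [Editor's sketch -- not part of this patch.] End-to-end use of
 * VkBufferCreateInfo with the buffer/memory entry points prototyped later in
 * this header; error handling is elided for brevity. */
static VkBuffer demo_make_buffer(VkDevice dev, VkDeviceSize size,
                                 uint32_t memory_type_index,
                                 VkDeviceMemory* out_mem) {
    VkBufferCreateInfo bi = {0};
    bi.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bi.size = size;
    bi.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    bi.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    VkBuffer buf = VK_NULL_HANDLE;
    vkCreateBuffer(dev, &bi, NULL, &buf);
    VkMemoryRequirements req;
    vkGetBufferMemoryRequirements(dev, buf, &req);
    VkMemoryAllocateInfo ai = {0};
    ai.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    ai.allocationSize = req.size;
    ai.memoryTypeIndex = memory_type_index; /* e.g. from demo_find_memory_type() */
    vkAllocateMemory(dev, &ai, NULL, out_mem);
    vkBindBufferMemory(dev, buf, *out_mem, 0);
    return buf;
}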
+typedef struct VkPipelineCacheCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineCacheCreateFlags flags;
+    size_t initialDataSize;
+    const void* pInitialData;
+} VkPipelineCacheCreateInfo;
+
+typedef struct VkSpecializationMapEntry {
+    uint32_t constantID;
+    uint32_t offset;
+    size_t size;
+} VkSpecializationMapEntry;
+
+typedef struct VkSpecializationInfo {
+    uint32_t mapEntryCount;
+    const VkSpecializationMapEntry* pMapEntries;
+    size_t dataSize;
+    const void* pData;
+} VkSpecializationInfo;
+
+typedef struct VkPipelineShaderStageCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineShaderStageCreateFlags flags;
+    VkShaderStageFlagBits stage;
+    VkShaderModule module;
+    const char* pName;
+    const VkSpecializationInfo* pSpecializationInfo;
+} VkPipelineShaderStageCreateInfo;
+
+typedef struct VkVertexInputBindingDescription {
+    uint32_t binding;
+    uint32_t stride;
+    VkVertexInputRate inputRate;
+} VkVertexInputBindingDescription;
+
+typedef struct VkVertexInputAttributeDescription {
+    uint32_t location;
+    uint32_t binding;
+    VkFormat format;
+    uint32_t offset;
+} VkVertexInputAttributeDescription;
+
+typedef struct VkPipelineVertexInputStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineVertexInputStateCreateFlags flags;
+    uint32_t vertexBindingDescriptionCount;
+    const VkVertexInputBindingDescription* pVertexBindingDescriptions;
+    uint32_t vertexAttributeDescriptionCount;
+    const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
+} VkPipelineVertexInputStateCreateInfo;
+
+typedef struct VkPipelineInputAssemblyStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineInputAssemblyStateCreateFlags flags;
+    VkPrimitiveTopology topology;
+    VkBool32 primitiveRestartEnable;
+} VkPipelineInputAssemblyStateCreateInfo;
+
+typedef struct VkPipelineTessellationStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineTessellationStateCreateFlags flags;
+    uint32_t patchControlPoints;
+} VkPipelineTessellationStateCreateInfo;
+
+typedef struct VkViewport {
+    float x;
+    float y;
+    float width;
+    float height;
+    float minDepth;
+    float maxDepth;
+} VkViewport;
+
+typedef struct VkOffset2D {
+    int32_t x;
+    int32_t y;
+} VkOffset2D;
+
+typedef struct VkExtent2D {
+    uint32_t width;
+    uint32_t height;
+} VkExtent2D;
+
+typedef struct VkRect2D {
+    VkOffset2D offset;
+    VkExtent2D extent;
+} VkRect2D;
+
+typedef struct VkPipelineViewportStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineViewportStateCreateFlags flags;
+    uint32_t viewportCount;
+    const VkViewport* pViewports;
+    uint32_t scissorCount;
+    const VkRect2D* pScissors;
+} VkPipelineViewportStateCreateInfo;
+
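/* [Editor's sketch -- not part of this patch.] Describing one interleaved
 * position+uv vertex stream with the two descriptions above; the DemoVertex
 * layout is hypothetical, and offsetof comes from <stddef.h>. */
typedef struct DemoVertex { float pos[3]; float uv[2]; } DemoVertex;
static const VkVertexInputBindingDescription demo_binding =
    { 0, sizeof(DemoVertex), VK_VERTEX_INPUT_RATE_VERTEX };
static const VkVertexInputAttributeDescription demo_attributes[2] = {
    { 0, 0, VK_FORMAT_R32G32B32_SFLOAT, offsetof(DemoVertex, pos) },
    { 1, 0, VK_FORMAT_R32G32_SFLOAT,    offsetof(DemoVertex, uv)  },
};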
+typedef struct VkPipelineRasterizationStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineRasterizationStateCreateFlags flags;
+    VkBool32 depthClampEnable;
+    VkBool32 rasterizerDiscardEnable;
+    VkPolygonMode polygonMode;
+    VkCullModeFlags cullMode;
+    VkFrontFace frontFace;
+    VkBool32 depthBiasEnable;
+    float depthBiasConstantFactor;
+    float depthBiasClamp;
+    float depthBiasSlopeFactor;
+    float lineWidth;
+} VkPipelineRasterizationStateCreateInfo;
+
+typedef struct VkPipelineMultisampleStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineMultisampleStateCreateFlags flags;
+    VkSampleCountFlagBits rasterizationSamples;
+    VkBool32 sampleShadingEnable;
+    float minSampleShading;
+    const VkSampleMask* pSampleMask;
+    VkBool32 alphaToCoverageEnable;
+    VkBool32 alphaToOneEnable;
+} VkPipelineMultisampleStateCreateInfo;
+
+typedef struct VkStencilOpState {
+    VkStencilOp failOp;
+    VkStencilOp passOp;
+    VkStencilOp depthFailOp;
+    VkCompareOp compareOp;
+    uint32_t compareMask;
+    uint32_t writeMask;
+    uint32_t reference;
+} VkStencilOpState;
+
+typedef struct VkPipelineDepthStencilStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineDepthStencilStateCreateFlags flags;
+    VkBool32 depthTestEnable;
+    VkBool32 depthWriteEnable;
+    VkCompareOp depthCompareOp;
+    VkBool32 depthBoundsTestEnable;
+    VkBool32 stencilTestEnable;
+    VkStencilOpState front;
+    VkStencilOpState back;
+    float minDepthBounds;
+    float maxDepthBounds;
+} VkPipelineDepthStencilStateCreateInfo;
+
+typedef struct VkPipelineColorBlendAttachmentState {
+    VkBool32 blendEnable;
+    VkBlendFactor srcColorBlendFactor;
+    VkBlendFactor dstColorBlendFactor;
+    VkBlendOp colorBlendOp;
+    VkBlendFactor srcAlphaBlendFactor;
+    VkBlendFactor dstAlphaBlendFactor;
+    VkBlendOp alphaBlendOp;
+    VkColorComponentFlags colorWriteMask;
+} VkPipelineColorBlendAttachmentState;
+
+typedef struct VkPipelineColorBlendStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineColorBlendStateCreateFlags flags;
+    VkBool32 logicOpEnable;
+    VkLogicOp logicOp;
+    uint32_t attachmentCount;
+    const VkPipelineColorBlendAttachmentState* pAttachments;
+    float blendConstants[4];
+} VkPipelineColorBlendStateCreateInfo;
+
+typedef struct VkPipelineDynamicStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineDynamicStateCreateFlags flags;
+    uint32_t dynamicStateCount;
+    const VkDynamicState* pDynamicStates;
+} VkPipelineDynamicStateCreateInfo;
+
+typedef struct VkGraphicsPipelineCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineCreateFlags flags;
+    uint32_t stageCount;
+    const VkPipelineShaderStageCreateInfo* pStages;
+    const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
+    const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
+    const VkPipelineTessellationStateCreateInfo* pTessellationState;
+    const VkPipelineViewportStateCreateInfo* pViewportState;
+    const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
+    const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
+    const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
+    const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
+    const VkPipelineDynamicStateCreateInfo* pDynamicState;
+    VkPipelineLayout layout;
+    VkRenderPass renderPass;
+    uint32_t subpass;
+    VkPipeline basePipelineHandle;
+    int32_t basePipelineIndex;
+} VkGraphicsPipelineCreateInfo;
+
+typedef struct VkComputePipelineCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineCreateFlags flags;
+    VkPipelineShaderStageCreateInfo stage;
+    VkPipelineLayout layout;
+    VkPipeline basePipelineHandle;
+    int32_t basePipelineIndex;
+} VkComputePipelineCreateInfo;
+
+typedef struct VkPushConstantRange {
+    VkShaderStageFlags stageFlags;
+    uint32_t offset;
+    uint32_t size;
+} VkPushConstantRange;
+
+typedef struct VkPipelineLayoutCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkPipelineLayoutCreateFlags flags;
+    uint32_t setLayoutCount;
+    const VkDescriptorSetLayout* pSetLayouts;
+    uint32_t pushConstantRangeCount;
+    const VkPushConstantRange* pPushConstantRanges;
+} VkPipelineLayoutCreateInfo;
+
+typedef struct VkSamplerCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSamplerCreateFlags flags;
+    VkFilter magFilter;
+    VkFilter minFilter;
+    VkSamplerMipmapMode mipmapMode;
+    VkSamplerAddressMode addressModeU;
+    VkSamplerAddressMode addressModeV;
+    VkSamplerAddressMode addressModeW;
+    float mipLodBias;
+    VkBool32 anisotropyEnable;
+    float maxAnisotropy;
+    VkBool32 compareEnable;
+    VkCompareOp compareOp;
+    float minLod;
+    float maxLod;
+    VkBorderColor borderColor;
+    VkBool32 unnormalizedCoordinates;
+} VkSamplerCreateInfo;
+
+typedef struct VkDescriptorSetLayoutBinding {
+    uint32_t binding;
+    VkDescriptorType descriptorType;
+    uint32_t descriptorCount;
+    VkShaderStageFlags stageFlags;
+    const VkSampler* pImmutableSamplers;
+} VkDescriptorSetLayoutBinding;
+
+typedef struct VkDescriptorSetLayoutCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDescriptorSetLayoutCreateFlags flags;
+    uint32_t bindingCount;
+    const VkDescriptorSetLayoutBinding* pBindings;
+} VkDescriptorSetLayoutCreateInfo;
+
+typedef struct VkDescriptorPoolSize {
+    VkDescriptorType type;
+    uint32_t descriptorCount;
+} VkDescriptorPoolSize;
+
+typedef struct VkDescriptorPoolCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDescriptorPoolCreateFlags flags;
+    uint32_t maxSets;
+    uint32_t poolSizeCount;
+    const VkDescriptorPoolSize* pPoolSizes;
+} VkDescriptorPoolCreateInfo;
+
+typedef struct VkDescriptorSetAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDescriptorPool descriptorPool;
+    uint32_t descriptorSetCount;
+    const VkDescriptorSetLayout* pSetLayouts;
+} VkDescriptorSetAllocateInfo;
+
+typedef struct VkDescriptorImageInfo {
+    VkSampler sampler;
+    VkImageView imageView;
+    VkImageLayout imageLayout;
+} VkDescriptorImageInfo;
+
+typedef struct VkDescriptorBufferInfo {
+    VkBuffer buffer;
+    VkDeviceSize offset;
+    VkDeviceSize range;
+} VkDescriptorBufferInfo;
+
+typedef struct VkWriteDescriptorSet {
+    VkStructureType sType;
+    const void* pNext;
+    VkDescriptorSet dstSet;
+    uint32_t dstBinding;
+    uint32_t dstArrayElement;
+    uint32_t descriptorCount;
+    VkDescriptorType descriptorType;
+    const VkDescriptorImageInfo* pImageInfo;
+    const VkDescriptorBufferInfo* pBufferInfo;
+    const VkBufferView* pTexelBufferView;
+} VkWriteDescriptorSet;
+
+typedef struct VkCopyDescriptorSet {
+    VkStructureType sType;
+    const void* pNext;
+    VkDescriptorSet srcSet;
+    uint32_t srcBinding;
+    uint32_t srcArrayElement;
+    VkDescriptorSet dstSet;
+    uint32_t dstBinding;
+    uint32_t dstArrayElement;
+    uint32_t descriptorCount;
+} VkCopyDescriptorSet;
+
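/* [Editor's sketch -- not part of this patch.] Pointing binding 0 of an
 * already-allocated descriptor set at a uniform buffer via
 * vkUpdateDescriptorSets (prototyped below). */
static void demo_bind_uniform(VkDevice dev, VkDescriptorSet set,
                              VkBuffer buf, VkDeviceSize range) {
    VkDescriptorBufferInfo info = { buf, 0, range };
    VkWriteDescriptorSet write = {0};
    write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
    write.dstSet = set;
    write.dstBinding = 0;
    write.descriptorCount = 1;
    write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    write.pBufferInfo = &info;
    vkUpdateDescriptorSets(dev, 1, &write, 0, NULL); /* one write, no copies */
}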
+typedef struct VkFramebufferCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkFramebufferCreateFlags flags;
+    VkRenderPass renderPass;
+    uint32_t attachmentCount;
+    const VkImageView* pAttachments;
+    uint32_t width;
+    uint32_t height;
+    uint32_t layers;
+} VkFramebufferCreateInfo;
+
+typedef struct VkAttachmentDescription {
+    VkAttachmentDescriptionFlags flags;
+    VkFormat format;
+    VkSampleCountFlagBits samples;
+    VkAttachmentLoadOp loadOp;
+    VkAttachmentStoreOp storeOp;
+    VkAttachmentLoadOp stencilLoadOp;
+    VkAttachmentStoreOp stencilStoreOp;
+    VkImageLayout initialLayout;
+    VkImageLayout finalLayout;
+} VkAttachmentDescription;
+
+typedef struct VkAttachmentReference {
+    uint32_t attachment;
+    VkImageLayout layout;
+} VkAttachmentReference;
+
+typedef struct VkSubpassDescription {
+    VkSubpassDescriptionFlags flags;
+    VkPipelineBindPoint pipelineBindPoint;
+    uint32_t inputAttachmentCount;
+    const VkAttachmentReference* pInputAttachments;
+    uint32_t colorAttachmentCount;
+    const VkAttachmentReference* pColorAttachments;
+    const VkAttachmentReference* pResolveAttachments;
+    const VkAttachmentReference* pDepthStencilAttachment;
+    uint32_t preserveAttachmentCount;
+    const uint32_t* pPreserveAttachments;
+} VkSubpassDescription;
+
+typedef struct VkSubpassDependency {
+    uint32_t srcSubpass;
+    uint32_t dstSubpass;
+    VkPipelineStageFlags srcStageMask;
+    VkPipelineStageFlags dstStageMask;
+    VkAccessFlags srcAccessMask;
+    VkAccessFlags dstAccessMask;
+    VkDependencyFlags dependencyFlags;
+} VkSubpassDependency;
+
+typedef struct VkRenderPassCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkRenderPassCreateFlags flags;
+    uint32_t attachmentCount;
+    const VkAttachmentDescription* pAttachments;
+    uint32_t subpassCount;
+    const VkSubpassDescription* pSubpasses;
+    uint32_t dependencyCount;
+    const VkSubpassDependency* pDependencies;
+} VkRenderPassCreateInfo;
+
+typedef struct VkCommandPoolCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkCommandPoolCreateFlags flags;
+    uint32_t queueFamilyIndex;
+} VkCommandPoolCreateInfo;
+
+typedef struct VkCommandBufferAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkCommandPool commandPool;
+    VkCommandBufferLevel level;
+    uint32_t commandBufferCount;
+} VkCommandBufferAllocateInfo;
+
+typedef struct VkCommandBufferInheritanceInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkRenderPass renderPass;
+    uint32_t subpass;
+    VkFramebuffer framebuffer;
+    VkBool32 occlusionQueryEnable;
+    VkQueryControlFlags queryFlags;
+    VkQueryPipelineStatisticFlags pipelineStatistics;
+} VkCommandBufferInheritanceInfo;
+
+typedef struct VkCommandBufferBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkCommandBufferUsageFlags flags;
+    const VkCommandBufferInheritanceInfo* pInheritanceInfo;
+} VkCommandBufferBeginInfo;
+
+typedef struct VkBufferCopy {
+    VkDeviceSize srcOffset;
+    VkDeviceSize dstOffset;
+    VkDeviceSize size;
+} VkBufferCopy;
+
+typedef struct VkImageSubresourceLayers {
+    VkImageAspectFlags aspectMask;
+    uint32_t mipLevel;
+    uint32_t baseArrayLayer;
+    uint32_t layerCount;
+} VkImageSubresourceLayers;
+
+typedef struct VkImageCopy {
+    VkImageSubresourceLayers srcSubresource;
+    VkOffset3D srcOffset;
+    VkImageSubresourceLayers dstSubresource;
+    VkOffset3D dstOffset;
+    VkExtent3D extent;
+} VkImageCopy;
+
+typedef struct VkImageBlit {
+    VkImageSubresourceLayers srcSubresource;
+    VkOffset3D srcOffsets[2];
+    VkImageSubresourceLayers dstSubresource;
+    VkOffset3D dstOffsets[2];
+} VkImageBlit;
+
+typedef struct VkBufferImageCopy {
+    VkDeviceSize bufferOffset;
+    uint32_t bufferRowLength;
+    uint32_t bufferImageHeight;
+    VkImageSubresourceLayers imageSubresource;
+    VkOffset3D imageOffset;
+    VkExtent3D imageExtent;
+} VkBufferImageCopy;
+
+typedef union VkClearColorValue {
+    float float32[4];
+    int32_t int32[4];
+    uint32_t uint32[4];
+} VkClearColorValue;
+
+typedef struct VkClearDepthStencilValue {
+    float depth;
+    uint32_t stencil;
+} VkClearDepthStencilValue;
+
+typedef union VkClearValue {
+    VkClearColorValue color;
+    VkClearDepthStencilValue depthStencil;
+} VkClearValue;
+
+typedef struct VkClearAttachment {
+    VkImageAspectFlags aspectMask;
+    uint32_t colorAttachment;
+    VkClearValue clearValue;
+} VkClearAttachment;
+
+typedef struct VkClearRect {
+    VkRect2D rect;
+    uint32_t baseArrayLayer;
+    uint32_t layerCount;
+} VkClearRect;
+
+typedef struct VkImageResolve {
+    VkImageSubresourceLayers srcSubresource;
+    VkOffset3D srcOffset;
+    VkImageSubresourceLayers dstSubresource;
+    VkOffset3D dstOffset;
+    VkExtent3D extent;
+} VkImageResolve;
+
+typedef struct VkMemoryBarrier {
+    VkStructureType sType;
+    const void* pNext;
+    VkAccessFlags srcAccessMask;
+    VkAccessFlags dstAccessMask;
+} VkMemoryBarrier;
+
+typedef struct VkBufferMemoryBarrier {
+    VkStructureType sType;
+    const void* pNext;
+    VkAccessFlags srcAccessMask;
+    VkAccessFlags dstAccessMask;
+    uint32_t srcQueueFamilyIndex;
+    uint32_t dstQueueFamilyIndex;
+    VkBuffer buffer;
+    VkDeviceSize offset;
+    VkDeviceSize size;
+} VkBufferMemoryBarrier;
+
+typedef struct VkImageMemoryBarrier {
+    VkStructureType sType;
+    const void* pNext;
+    VkAccessFlags srcAccessMask;
+    VkAccessFlags dstAccessMask;
+    VkImageLayout oldLayout;
+    VkImageLayout newLayout;
+    uint32_t srcQueueFamilyIndex;
+    uint32_t dstQueueFamilyIndex;
+    VkImage image;
+    VkImageSubresourceRange subresourceRange;
+} VkImageMemoryBarrier;
+
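/* [Editor's sketch -- not part of this patch.] The canonical use of
 * VkImageMemoryBarrier: transition an image into TRANSFER_DST layout before
 * copying into it, recorded with vkCmdPipelineBarrier (typedef'd and
 * prototyped below). */
static void demo_to_transfer_dst(VkCommandBuffer cmd, VkImage image) {
    VkImageMemoryBarrier b = {0};
    b.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    b.srcAccessMask = 0;                      /* nothing to wait on */
    b.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    b.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;  /* discard previous contents */
    b.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
    b.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    b.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    b.image = image;
    b.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    b.subresourceRange.levelCount = 1;
    b.subresourceRange.layerCount = 1;
    vkCmdPipelineBarrier(cmd,
                         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                         0, 0, NULL, 0, NULL, 1, &b);
}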
+typedef struct VkRenderPassBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkRenderPass renderPass;
+    VkFramebuffer framebuffer;
+    VkRect2D renderArea;
+    uint32_t clearValueCount;
+    const VkClearValue* pClearValues;
+} VkRenderPassBeginInfo;
+
+typedef struct VkDispatchIndirectCommand {
+    uint32_t x;
+    uint32_t y;
+    uint32_t z;
+} VkDispatchIndirectCommand;
+
+typedef struct VkDrawIndexedIndirectCommand {
+    uint32_t indexCount;
+    uint32_t instanceCount;
+    uint32_t firstIndex;
+    int32_t vertexOffset;
+    uint32_t firstInstance;
+} VkDrawIndexedIndirectCommand;
+
+typedef struct VkDrawIndirectCommand {
+    uint32_t vertexCount;
+    uint32_t instanceCount;
+    uint32_t firstVertex;
+    uint32_t firstInstance;
+} VkDrawIndirectCommand;
+
+typedef struct VkBaseOutStructure {
+    VkStructureType sType;
+    struct VkBaseOutStructure* pNext;
+} VkBaseOutStructure;
+
+typedef struct VkBaseInStructure {
+    VkStructureType sType;
+    const struct VkBaseInStructure* pNext;
+} VkBaseInStructure;
+
+typedef VkResult (VKAPI_PTR *PFN_vkCreateInstance)(const VkInstanceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkInstance* pInstance);
+typedef void (VKAPI_PTR *PFN_vkDestroyInstance)(VkInstance instance, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDevices)(VkInstance instance, uint32_t* pPhysicalDeviceCount, VkPhysicalDevice* pPhysicalDevices);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkImageFormatProperties* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties* pMemoryProperties);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetInstanceProcAddr)(VkInstance instance, const char* pName);
+typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_vkGetDeviceProcAddr)(VkDevice device, const char* pName);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDevice)(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDevice* pDevice);
+typedef void (VKAPI_PTR *PFN_vkDestroyDevice)(VkDevice device, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceExtensionProperties)(const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceExtensionProperties)(VkPhysicalDevice physicalDevice, const char* pLayerName, uint32_t* pPropertyCount, VkExtensionProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceLayerProperties)(uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateDeviceLayerProperties)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkLayerProperties* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue)(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue* pQueue);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueSubmit)(VkQueue queue, uint32_t submitCount, const VkSubmitInfo* pSubmits, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueWaitIdle)(VkQueue queue);
+typedef VkResult (VKAPI_PTR *PFN_vkDeviceWaitIdle)(VkDevice device);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateMemory)(VkDevice device, const VkMemoryAllocateInfo* pAllocateInfo, const VkAllocationCallbacks* pAllocator, VkDeviceMemory* pMemory);
+typedef void (VKAPI_PTR *PFN_vkFreeMemory)(VkDevice device, VkDeviceMemory memory, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkMapMemory)(VkDevice device, VkDeviceMemory memory, VkDeviceSize offset, VkDeviceSize size, VkMemoryMapFlags flags, void** ppData);
+typedef void (VKAPI_PTR *PFN_vkUnmapMemory)(VkDevice device, VkDeviceMemory memory);
+typedef VkResult (VKAPI_PTR *PFN_vkFlushMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);
+typedef VkResult (VKAPI_PTR *PFN_vkInvalidateMappedMemoryRanges)(VkDevice device, uint32_t memoryRangeCount, const VkMappedMemoryRange* pMemoryRanges);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceMemoryCommitment)(VkDevice device, VkDeviceMemory memory, VkDeviceSize* pCommittedMemoryInBytes);
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory)(VkDevice device, VkBuffer buffer, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory)(VkDevice device, VkImage image, VkDeviceMemory memory, VkDeviceSize memoryOffset);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements)(VkDevice device, VkBuffer buffer, VkMemoryRequirements* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements)(VkDevice device, VkImage image, VkMemoryRequirements* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements)(VkDevice device, VkImage image, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements* pSparseMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkSampleCountFlagBits samples, VkImageUsageFlags usage, VkImageTiling tiling, uint32_t* pPropertyCount, VkSparseImageFormatProperties* pProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkQueueBindSparse)(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo* pBindInfo, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateFence)(VkDevice device, const VkFenceCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence);
+typedef void (VKAPI_PTR *PFN_vkDestroyFence)(VkDevice device, VkFence fence, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences);
+typedef VkResult (VKAPI_PTR *PFN_vkGetFenceStatus)(VkDevice device, VkFence fence);
+typedef VkResult (VKAPI_PTR *PFN_vkWaitForFences)(VkDevice device, uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSemaphore)(VkDevice device, const VkSemaphoreCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSemaphore* pSemaphore);
+typedef void (VKAPI_PTR *PFN_vkDestroySemaphore)(VkDevice device, VkSemaphore semaphore, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateEvent)(VkDevice device, const VkEventCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkEvent* pEvent);
+typedef void (VKAPI_PTR *PFN_vkDestroyEvent)(VkDevice device, VkEvent event, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetEventStatus)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkSetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkResetEvent)(VkDevice device, VkEvent event);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateQueryPool)(VkDevice device, const VkQueryPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkQueryPool* pQueryPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyQueryPool)(VkDevice device, VkQueryPool queryPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetQueryPoolResults)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void* pData, VkDeviceSize stride, VkQueryResultFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateBuffer)(VkDevice device, const VkBufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBuffer* pBuffer);
+typedef void (VKAPI_PTR *PFN_vkDestroyBuffer)(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateBufferView)(VkDevice device, const VkBufferViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkBufferView* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyBufferView)(VkDevice device, VkBufferView bufferView, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImage)(VkDevice device, const VkImageCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImage* pImage);
+typedef void (VKAPI_PTR *PFN_vkDestroyImage)(VkDevice device, VkImage image, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetImageSubresourceLayout)(VkDevice device, VkImage image, const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateImageView)(VkDevice device, const VkImageViewCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkImageView* pView);
+typedef void (VKAPI_PTR *PFN_vkDestroyImageView)(VkDevice device, VkImageView imageView, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateShaderModule)(VkDevice device, const VkShaderModuleCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkShaderModule* pShaderModule);
+typedef void (VKAPI_PTR *PFN_vkDestroyShaderModule)(VkDevice device, VkShaderModule shaderModule, const VkAllocationCallbacks* pAllocator);
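/* [Editor's sketch -- not part of this patch.] Typical frame-pacing use of
 * the fence entry points typedef'd above (prototyped in the
 * VK_NO_PROTOTYPES block below): block on the previous submission, then
 * re-arm the fence for reuse. */
static void demo_wait_and_reset(VkDevice dev, VkFence fence) {
    vkWaitForFences(dev, 1, &fence, VK_TRUE, UINT64_MAX); /* block until signaled */
    vkResetFences(dev, 1, &fence);                        /* back to unsignaled */
}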
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineCache)(VkDevice device, const VkPipelineCacheCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineCache* pPipelineCache);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipelineCache)(VkDevice device, VkPipelineCache pipelineCache, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineCacheData)(VkDevice device, VkPipelineCache pipelineCache, size_t* pDataSize, void* pData);
+typedef VkResult (VKAPI_PTR *PFN_vkMergePipelineCaches)(VkDevice device, VkPipelineCache dstCache, uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateGraphicsPipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkGraphicsPipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateComputePipelines)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkComputePipelineCreateInfo* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipeline)(VkDevice device, VkPipeline pipeline, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreatePipelineLayout)(VkDevice device, const VkPipelineLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkPipelineLayout* pPipelineLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyPipelineLayout)(VkDevice device, VkPipelineLayout pipelineLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSampler)(VkDevice device, const VkSamplerCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSampler* pSampler);
+typedef void (VKAPI_PTR *PFN_vkDestroySampler)(VkDevice device, VkSampler sampler, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorSetLayout)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorSetLayout* pSetLayout);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorSetLayout)(VkDevice device, VkDescriptorSetLayout descriptorSetLayout, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorPool)(VkDevice device, const VkDescriptorPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorPool* pDescriptorPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetDescriptorPool)(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorPoolResetFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateDescriptorSets)(VkDevice device, const VkDescriptorSetAllocateInfo* pAllocateInfo, VkDescriptorSet* pDescriptorSets);
+typedef VkResult (VKAPI_PTR *PFN_vkFreeDescriptorSets)(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSets)(VkDevice device, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites, uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateFramebuffer)(VkDevice device, const VkFramebufferCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkFramebuffer* pFramebuffer);
+typedef void (VKAPI_PTR *PFN_vkDestroyFramebuffer)(VkDevice device, VkFramebuffer framebuffer, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass)(VkDevice device, const VkRenderPassCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass);
+typedef void (VKAPI_PTR *PFN_vkDestroyRenderPass)(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkGetRenderAreaGranularity)(VkDevice device, VkRenderPass renderPass, VkExtent2D* pGranularity);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateCommandPool)(VkDevice device, const VkCommandPoolCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkCommandPool* pCommandPool);
+typedef void (VKAPI_PTR *PFN_vkDestroyCommandPool)(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkResetCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags);
+typedef VkResult (VKAPI_PTR *PFN_vkAllocateCommandBuffers)(VkDevice device, const VkCommandBufferAllocateInfo* pAllocateInfo, VkCommandBuffer* pCommandBuffers);
+typedef void (VKAPI_PTR *PFN_vkFreeCommandBuffers)(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
+typedef VkResult (VKAPI_PTR *PFN_vkBeginCommandBuffer)(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo* pBeginInfo);
+typedef VkResult (VKAPI_PTR *PFN_vkEndCommandBuffer)(VkCommandBuffer commandBuffer);
+typedef VkResult (VKAPI_PTR *PFN_vkResetCommandBuffer)(VkCommandBuffer commandBuffer, VkCommandBufferResetFlags flags);
+typedef void (VKAPI_PTR *PFN_vkCmdBindPipeline)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipeline pipeline);
+typedef void (VKAPI_PTR *PFN_vkCmdSetViewport)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewport* pViewports);
+typedef void (VKAPI_PTR *PFN_vkCmdSetScissor)(VkCommandBuffer commandBuffer, uint32_t firstScissor, uint32_t scissorCount, const VkRect2D* pScissors);
+typedef void (VKAPI_PTR *PFN_vkCmdSetLineWidth)(VkCommandBuffer commandBuffer, float lineWidth);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBias)(VkCommandBuffer commandBuffer, float depthBiasConstantFactor, float depthBiasClamp, float depthBiasSlopeFactor);
+typedef void (VKAPI_PTR *PFN_vkCmdSetBlendConstants)(VkCommandBuffer commandBuffer, const float blendConstants[4]);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDepthBounds)(VkCommandBuffer commandBuffer, float minDepthBounds, float maxDepthBounds);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilCompareMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t compareMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilWriteMask)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t writeMask);
+typedef void (VKAPI_PTR *PFN_vkCmdSetStencilReference)(VkCommandBuffer commandBuffer, VkStencilFaceFlags faceMask, uint32_t reference);
+typedef void (VKAPI_PTR *PFN_vkCmdBindDescriptorSets)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t firstSet, uint32_t descriptorSetCount, const VkDescriptorSet* pDescriptorSets, uint32_t dynamicOffsetCount, const uint32_t* pDynamicOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdBindIndexBuffer)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkIndexType indexType);
+typedef void (VKAPI_PTR *PFN_vkCmdBindVertexBuffers)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets);
+typedef void (VKAPI_PTR *PFN_vkCmdDraw)(VkCommandBuffer commandBuffer, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexed)(VkCommandBuffer commandBuffer, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatch)(VkCommandBuffer commandBuffer, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchIndirect)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBuffer)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdBlitImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageBlit* pRegions, VkFilter filter);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyBufferToImage)(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdCopyImageToBuffer)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdUpdateBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData);
+typedef void (VKAPI_PTR *PFN_vkCmdFillBuffer)(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize size, uint32_t data);
+typedef void (VKAPI_PTR *PFN_vkCmdClearColorImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearColorValue* pColor, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI_PTR *PFN_vkCmdClearDepthStencilImage)(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout, const VkClearDepthStencilValue* pDepthStencil, uint32_t rangeCount, const VkImageSubresourceRange* pRanges);
+typedef void (VKAPI_PTR *PFN_vkCmdClearAttachments)(VkCommandBuffer commandBuffer, uint32_t attachmentCount, const VkClearAttachment* pAttachments, uint32_t rectCount, const VkClearRect* pRects);
+typedef void (VKAPI_PTR *PFN_vkCmdResolveImage)(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount, const VkImageResolve* pRegions);
+typedef void (VKAPI_PTR *PFN_vkCmdSetEvent)(VkCommandBuffer commandBuffer, VkEvent event, VkPipelineStageFlags stageMask);
stageMask); +typedef void (VKAPI_PTR *PFN_vkCmdWaitEvents)(VkCommandBuffer commandBuffer, uint32_t eventCount, const VkEvent* pEvents, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdPipelineBarrier)(VkCommandBuffer commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags, uint32_t memoryBarrierCount, const VkMemoryBarrier* pMemoryBarriers, uint32_t bufferMemoryBarrierCount, const VkBufferMemoryBarrier* pBufferMemoryBarriers, uint32_t imageMemoryBarrierCount, const VkImageMemoryBarrier* pImageMemoryBarriers); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdEndQuery)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdResetQueryPool)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef void (VKAPI_PTR *PFN_vkCmdWriteTimestamp)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkQueryPool queryPool, uint32_t query); +typedef void (VKAPI_PTR *PFN_vkCmdCopyQueryPoolResults)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount, VkBuffer dstBuffer, VkDeviceSize dstOffset, VkDeviceSize stride, VkQueryResultFlags flags); +typedef void (VKAPI_PTR *PFN_vkCmdPushConstants)(VkCommandBuffer commandBuffer, VkPipelineLayout layout, VkShaderStageFlags stageFlags, uint32_t offset, uint32_t size, const void* pValues); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass)(VkCommandBuffer commandBuffer, VkSubpassContents contents); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdExecuteCommands)(VkCommandBuffer commandBuffer, uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance( + const VkInstanceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkInstance* pInstance); + +VKAPI_ATTR void VKAPI_CALL vkDestroyInstance( + VkInstance instance, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDevices( + VkInstance instance, + uint32_t* pPhysicalDeviceCount, + VkPhysicalDevice* pPhysicalDevices); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkImageFormatProperties* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties* 
pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties* pMemoryProperties); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr( + VkInstance instance, + const char* pName); + +VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr( + VkDevice device, + const char* pName); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDevice( + VkPhysicalDevice physicalDevice, + const VkDeviceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDevice* pDevice); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDevice( + VkDevice device, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties( + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties( + VkPhysicalDevice physicalDevice, + const char* pLayerName, + uint32_t* pPropertyCount, + VkExtensionProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties( + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkLayerProperties* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue( + VkDevice device, + uint32_t queueFamilyIndex, + uint32_t queueIndex, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSubmit( + VkQueue queue, + uint32_t submitCount, + const VkSubmitInfo* pSubmits, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueWaitIdle( + VkQueue queue); + +VKAPI_ATTR VkResult VKAPI_CALL vkDeviceWaitIdle( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateMemory( + VkDevice device, + const VkMemoryAllocateInfo* pAllocateInfo, + const VkAllocationCallbacks* pAllocator, + VkDeviceMemory* pMemory); + +VKAPI_ATTR void VKAPI_CALL vkFreeMemory( + VkDevice device, + VkDeviceMemory memory, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMapMemory( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize offset, + VkDeviceSize size, + VkMemoryMapFlags flags, + void** ppData); + +VKAPI_ATTR void VKAPI_CALL vkUnmapMemory( + VkDevice device, + VkDeviceMemory memory); + +VKAPI_ATTR VkResult VKAPI_CALL vkFlushMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR VkResult VKAPI_CALL vkInvalidateMappedMemoryRanges( + VkDevice device, + uint32_t memoryRangeCount, + const VkMappedMemoryRange* pMemoryRanges); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceMemoryCommitment( + VkDevice device, + VkDeviceMemory memory, + VkDeviceSize* pCommittedMemoryInBytes); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory( + VkDevice device, + VkBuffer buffer, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory( + VkDevice device, + VkImage image, + VkDeviceMemory memory, + VkDeviceSize memoryOffset); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements( + VkDevice device, + VkBuffer buffer, + VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements( + VkDevice device, + VkImage image, + 
VkMemoryRequirements* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements( + VkDevice device, + VkImage image, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements* pSparseMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkSampleCountFlagBits samples, + VkImageUsageFlags usage, + VkImageTiling tiling, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueBindSparse( + VkQueue queue, + uint32_t bindInfoCount, + const VkBindSparseInfo* pBindInfo, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFence( + VkDevice device, + const VkFenceCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFence( + VkDevice device, + VkFence fence, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceStatus( + VkDevice device, + VkFence fence); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitForFences( + VkDevice device, + uint32_t fenceCount, + const VkFence* pFences, + VkBool32 waitAll, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSemaphore( + VkDevice device, + const VkSemaphoreCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSemaphore* pSemaphore); + +VKAPI_ATTR void VKAPI_CALL vkDestroySemaphore( + VkDevice device, + VkSemaphore semaphore, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateEvent( + VkDevice device, + const VkEventCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkEvent* pEvent); + +VKAPI_ATTR void VKAPI_CALL vkDestroyEvent( + VkDevice device, + VkEvent event, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetEventStatus( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetEvent( + VkDevice device, + VkEvent event); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateQueryPool( + VkDevice device, + const VkQueryPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkQueryPool* pQueryPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyQueryPool( + VkDevice device, + VkQueryPool queryPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetQueryPoolResults( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + size_t dataSize, + void* pData, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBuffer( + VkDevice device, + const VkBufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBuffer* pBuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBuffer( + VkDevice device, + VkBuffer buffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateBufferView( + VkDevice device, + const VkBufferViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkBufferView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyBufferView( + VkDevice device, + VkBufferView bufferView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImage( + VkDevice device, + const VkImageCreateInfo* pCreateInfo, + 
const VkAllocationCallbacks* pAllocator, + VkImage* pImage); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImage( + VkDevice device, + VkImage image, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSubresourceLayout( + VkDevice device, + VkImage image, + const VkImageSubresource* pSubresource, + VkSubresourceLayout* pLayout); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImageView( + VkDevice device, + const VkImageViewCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkImageView* pView); + +VKAPI_ATTR void VKAPI_CALL vkDestroyImageView( + VkDevice device, + VkImageView imageView, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateShaderModule( + VkDevice device, + const VkShaderModuleCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkShaderModule* pShaderModule); + +VKAPI_ATTR void VKAPI_CALL vkDestroyShaderModule( + VkDevice device, + VkShaderModule shaderModule, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineCache( + VkDevice device, + const VkPipelineCacheCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineCache* pPipelineCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineCache( + VkDevice device, + VkPipelineCache pipelineCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineCacheData( + VkDevice device, + VkPipelineCache pipelineCache, + size_t* pDataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergePipelineCaches( + VkDevice device, + VkPipelineCache dstCache, + uint32_t srcCacheCount, + const VkPipelineCache* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateGraphicsPipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkGraphicsPipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateComputePipelines( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkComputePipelineCreateInfo* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipeline( + VkDevice device, + VkPipeline pipeline, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreatePipelineLayout( + VkDevice device, + const VkPipelineLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkPipelineLayout* pPipelineLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyPipelineLayout( + VkDevice device, + VkPipelineLayout pipelineLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSampler( + VkDevice device, + const VkSamplerCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSampler* pSampler); + +VKAPI_ATTR void VKAPI_CALL vkDestroySampler( + VkDevice device, + VkSampler sampler, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorSetLayout( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorSetLayout* pSetLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorSetLayout( + VkDevice device, + VkDescriptorSetLayout descriptorSetLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorPool( + VkDevice device, + const VkDescriptorPoolCreateInfo* pCreateInfo, + const 
VkAllocationCallbacks* pAllocator, + VkDescriptorPool* pDescriptorPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetDescriptorPool( + VkDevice device, + VkDescriptorPool descriptorPool, + VkDescriptorPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateDescriptorSets( + VkDevice device, + const VkDescriptorSetAllocateInfo* pAllocateInfo, + VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR VkResult VKAPI_CALL vkFreeDescriptorSets( + VkDevice device, + VkDescriptorPool descriptorPool, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSets( + VkDevice device, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites, + uint32_t descriptorCopyCount, + const VkCopyDescriptorSet* pDescriptorCopies); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateFramebuffer( + VkDevice device, + const VkFramebufferCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkFramebuffer* pFramebuffer); + +VKAPI_ATTR void VKAPI_CALL vkDestroyFramebuffer( + VkDevice device, + VkFramebuffer framebuffer, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass( + VkDevice device, + const VkRenderPassCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkDestroyRenderPass( + VkDevice device, + VkRenderPass renderPass, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetRenderAreaGranularity( + VkDevice device, + VkRenderPass renderPass, + VkExtent2D* pGranularity); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateCommandPool( + VkDevice device, + const VkCommandPoolCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkCommandPool* pCommandPool); + +VKAPI_ATTR void VKAPI_CALL vkDestroyCommandPool( + VkDevice device, + VkCommandPool commandPool, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolResetFlags flags); + +VKAPI_ATTR VkResult VKAPI_CALL vkAllocateCommandBuffers( + VkDevice device, + const VkCommandBufferAllocateInfo* pAllocateInfo, + VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR void VKAPI_CALL vkFreeCommandBuffers( + VkDevice device, + VkCommandPool commandPool, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); + +VKAPI_ATTR VkResult VKAPI_CALL vkBeginCommandBuffer( + VkCommandBuffer commandBuffer, + const VkCommandBufferBeginInfo* pBeginInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR VkResult VKAPI_CALL vkResetCommandBuffer( + VkCommandBuffer commandBuffer, + VkCommandBufferResetFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindPipeline( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipeline pipeline); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewport( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewport* pViewports); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetScissor( + VkCommandBuffer commandBuffer, + uint32_t firstScissor, + uint32_t scissorCount, + const VkRect2D* pScissors); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineWidth( + VkCommandBuffer commandBuffer, + float lineWidth); + +VKAPI_ATTR void VKAPI_CALL 
vkCmdSetDepthBias( + VkCommandBuffer commandBuffer, + float depthBiasConstantFactor, + float depthBiasClamp, + float depthBiasSlopeFactor); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetBlendConstants( + VkCommandBuffer commandBuffer, + const float blendConstants[4]); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDepthBounds( + VkCommandBuffer commandBuffer, + float minDepthBounds, + float maxDepthBounds); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilCompareMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t compareMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilWriteMask( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t writeMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetStencilReference( + VkCommandBuffer commandBuffer, + VkStencilFaceFlags faceMask, + uint32_t reference); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindDescriptorSets( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t firstSet, + uint32_t descriptorSetCount, + const VkDescriptorSet* pDescriptorSets, + uint32_t dynamicOffsetCount, + const uint32_t* pDynamicOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindIndexBuffer( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkIndexType indexType); + +VKAPI_ATTR void VKAPI_CALL vkCmdBindVertexBuffers( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdDraw( + VkCommandBuffer commandBuffer, + uint32_t vertexCount, + uint32_t instanceCount, + uint32_t firstVertex, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexed( + VkCommandBuffer commandBuffer, + uint32_t indexCount, + uint32_t instanceCount, + uint32_t firstIndex, + int32_t vertexOffset, + uint32_t firstInstance); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatch( + VkCommandBuffer commandBuffer, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchIndirect( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBuffer( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkBuffer dstBuffer, + uint32_t regionCount, + const VkBufferCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdBlitImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageBlit* pRegions, + VkFilter filter); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyBufferToImage( + VkCommandBuffer commandBuffer, + VkBuffer srcBuffer, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyImageToBuffer( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkBuffer dstBuffer, 
+ uint32_t regionCount, + const VkBufferImageCopy* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdUpdateBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize dataSize, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdFillBuffer( + VkCommandBuffer commandBuffer, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize size, + uint32_t data); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearColorImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearColorValue* pColor, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearDepthStencilImage( + VkCommandBuffer commandBuffer, + VkImage image, + VkImageLayout imageLayout, + const VkClearDepthStencilValue* pDepthStencil, + uint32_t rangeCount, + const VkImageSubresourceRange* pRanges); + +VKAPI_ATTR void VKAPI_CALL vkCmdClearAttachments( + VkCommandBuffer commandBuffer, + uint32_t attachmentCount, + const VkClearAttachment* pAttachments, + uint32_t rectCount, + const VkClearRect* pRects); + +VKAPI_ATTR void VKAPI_CALL vkCmdResolveImage( + VkCommandBuffer commandBuffer, + VkImage srcImage, + VkImageLayout srcImageLayout, + VkImage dstImage, + VkImageLayout dstImageLayout, + uint32_t regionCount, + const VkImageResolve* pRegions); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetEvent( + VkCommandBuffer commandBuffer, + VkEvent event, + VkPipelineStageFlags stageMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdWaitEvents( + VkCommandBuffer commandBuffer, + uint32_t eventCount, + const VkEvent* pEvents, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdPipelineBarrier( + VkCommandBuffer commandBuffer, + VkPipelineStageFlags srcStageMask, + VkPipelineStageFlags dstStageMask, + VkDependencyFlags dependencyFlags, + uint32_t memoryBarrierCount, + const VkMemoryBarrier* pMemoryBarriers, + uint32_t bufferMemoryBarrierCount, + const VkBufferMemoryBarrier* pBufferMemoryBarriers, + uint32_t imageMemoryBarrierCount, + const VkImageMemoryBarrier* pImageMemoryBarriers); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQuery( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdResetQueryPool( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteTimestamp( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkQueryPool queryPool, + uint32_t query); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyQueryPoolResults( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + VkDeviceSize stride, + VkQueryResultFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushConstants( + VkCommandBuffer commandBuffer, + VkPipelineLayout layout, + VkShaderStageFlags stageFlags, + uint32_t offset, + uint32_t size, + 
const void* pValues); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + VkSubpassContents contents); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass( + VkCommandBuffer commandBuffer, + VkSubpassContents contents); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdExecuteCommands( + VkCommandBuffer commandBuffer, + uint32_t commandBufferCount, + const VkCommandBuffer* pCommandBuffers); +#endif + + +#define VK_VERSION_1_1 1 +// Vulkan 1.1 version number +#define VK_API_VERSION_1_1 VK_MAKE_VERSION(1, 1, 0)// Patch version should always be set to 0 + +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate) +#define VK_MAX_DEVICE_GROUP_SIZE 32 +#define VK_LUID_SIZE 8 +#define VK_QUEUE_FAMILY_EXTERNAL (~0U-1) + +typedef enum VkPointClippingBehavior { + VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0, + VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1, + VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES, + VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY, + VK_POINT_CLIPPING_BEHAVIOR_BEGIN_RANGE = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES, + VK_POINT_CLIPPING_BEHAVIOR_END_RANGE = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY, + VK_POINT_CLIPPING_BEHAVIOR_RANGE_SIZE = (VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY - VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES + 1), + VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF +} VkPointClippingBehavior; + +typedef enum VkTessellationDomainOrigin { + VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0, + VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1, + VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT, + VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT, + VK_TESSELLATION_DOMAIN_ORIGIN_BEGIN_RANGE = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT, + VK_TESSELLATION_DOMAIN_ORIGIN_END_RANGE = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT, + VK_TESSELLATION_DOMAIN_ORIGIN_RANGE_SIZE = (VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT - VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT + 1), + VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF +} VkTessellationDomainOrigin; + +typedef enum VkSamplerYcbcrModelConversion { + VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_BEGIN_RANGE = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_END_RANGE = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020, + VK_SAMPLER_YCBCR_MODEL_CONVERSION_RANGE_SIZE = (VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 - VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY + 
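+
+/* Editorial sketch, not part of the upstream Khronos header: a minimal
+ * illustration of how the core 1.0 entry points above compose when recording
+ * and submitting work. `device`, `queue` and `queueFamilyIndex` are assumed
+ * to come from vkCreateDevice/vkGetDeviceQueue; error handling is elided.
+ * Guarded with #if 0 so it is never compiled. */
+#if 0
+static void vk10_record_and_submit_sketch(VkDevice device, VkQueue queue, uint32_t queueFamilyIndex)
+{
+    /* A pool owns the memory backing its command buffers. */
+    VkCommandPoolCreateInfo poolInfo = { VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO };
+    poolInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
+    poolInfo.queueFamilyIndex = queueFamilyIndex;
+    VkCommandPool pool;
+    vkCreateCommandPool(device, &poolInfo, NULL, &pool);
+
+    VkCommandBufferAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO };
+    allocInfo.commandPool = pool;
+    allocInfo.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+    allocInfo.commandBufferCount = 1;
+    VkCommandBuffer cmd;
+    vkAllocateCommandBuffers(device, &allocInfo, &cmd);
+
+    VkCommandBufferBeginInfo beginInfo = { VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO };
+    beginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+    vkBeginCommandBuffer(cmd, &beginInfo);
+    /* ... vkCmd* recording calls (binds, draws, barriers) go here ... */
+    vkEndCommandBuffer(cmd);
+
+    VkSubmitInfo submitInfo = { VK_STRUCTURE_TYPE_SUBMIT_INFO };
+    submitInfo.commandBufferCount = 1;
+    submitInfo.pCommandBuffers = &cmd;
+    vkQueueSubmit(queue, 1, &submitInfo, VK_NULL_HANDLE);
+    vkQueueWaitIdle(queue); /* a fence would normally replace this blocking wait */
+
+    vkFreeCommandBuffers(device, pool, 1, &cmd);
+    vkDestroyCommandPool(device, pool, NULL);
+}
+#endif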
+
+
+#define VK_VERSION_1_1 1
+// Vulkan 1.1 version number
+#define VK_API_VERSION_1_1 VK_MAKE_VERSION(1, 1, 0)// Patch version should always be set to 0
+
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSamplerYcbcrConversion)
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDescriptorUpdateTemplate)
+#define VK_MAX_DEVICE_GROUP_SIZE 32
+#define VK_LUID_SIZE 8
+#define VK_QUEUE_FAMILY_EXTERNAL (~0U-1)
+
+typedef enum VkPointClippingBehavior {
+    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES = 0,
+    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY = 1,
+    VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES,
+    VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY_KHR = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY,
+    VK_POINT_CLIPPING_BEHAVIOR_BEGIN_RANGE = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES,
+    VK_POINT_CLIPPING_BEHAVIOR_END_RANGE = VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY,
+    VK_POINT_CLIPPING_BEHAVIOR_RANGE_SIZE = (VK_POINT_CLIPPING_BEHAVIOR_USER_CLIP_PLANES_ONLY - VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES + 1),
+    VK_POINT_CLIPPING_BEHAVIOR_MAX_ENUM = 0x7FFFFFFF
+} VkPointClippingBehavior;
+
+typedef enum VkTessellationDomainOrigin {
+    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT = 0,
+    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT = 1,
+    VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_BEGIN_RANGE = VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_END_RANGE = VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT,
+    VK_TESSELLATION_DOMAIN_ORIGIN_RANGE_SIZE = (VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT - VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT + 1),
+    VK_TESSELLATION_DOMAIN_ORIGIN_MAX_ENUM = 0x7FFFFFFF
+} VkTessellationDomainOrigin;
+
+typedef enum VkSamplerYcbcrModelConversion {
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY = 0,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY = 1,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709 = 2,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601 = 3,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 = 4,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_BEGIN_RANGE = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_END_RANGE = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020,
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_RANGE_SIZE = (VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020 - VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY + 1),
+    VK_SAMPLER_YCBCR_MODEL_CONVERSION_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerYcbcrModelConversion;
+
+typedef enum VkSamplerYcbcrRange {
+    VK_SAMPLER_YCBCR_RANGE_ITU_FULL = 0,
+    VK_SAMPLER_YCBCR_RANGE_ITU_NARROW = 1,
+    VK_SAMPLER_YCBCR_RANGE_ITU_FULL_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_FULL,
+    VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW,
+    VK_SAMPLER_YCBCR_RANGE_BEGIN_RANGE = VK_SAMPLER_YCBCR_RANGE_ITU_FULL,
+    VK_SAMPLER_YCBCR_RANGE_END_RANGE = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW,
+    VK_SAMPLER_YCBCR_RANGE_RANGE_SIZE = (VK_SAMPLER_YCBCR_RANGE_ITU_NARROW - VK_SAMPLER_YCBCR_RANGE_ITU_FULL + 1),
+    VK_SAMPLER_YCBCR_RANGE_MAX_ENUM = 0x7FFFFFFF
+} VkSamplerYcbcrRange;
+
+typedef enum VkChromaLocation {
+    VK_CHROMA_LOCATION_COSITED_EVEN = 0,
+    VK_CHROMA_LOCATION_MIDPOINT = 1,
+    VK_CHROMA_LOCATION_COSITED_EVEN_KHR = VK_CHROMA_LOCATION_COSITED_EVEN,
+    VK_CHROMA_LOCATION_MIDPOINT_KHR = VK_CHROMA_LOCATION_MIDPOINT,
+    VK_CHROMA_LOCATION_BEGIN_RANGE = VK_CHROMA_LOCATION_COSITED_EVEN,
+    VK_CHROMA_LOCATION_END_RANGE = VK_CHROMA_LOCATION_MIDPOINT,
+    VK_CHROMA_LOCATION_RANGE_SIZE = (VK_CHROMA_LOCATION_MIDPOINT - VK_CHROMA_LOCATION_COSITED_EVEN + 1),
+    VK_CHROMA_LOCATION_MAX_ENUM = 0x7FFFFFFF
+} VkChromaLocation;
+
+typedef enum VkDescriptorUpdateTemplateType {
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET = 0,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR = 1,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET_KHR = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_BEGIN_RANGE = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_END_RANGE = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_RANGE_SIZE = (VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET - VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET + 1),
+    VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_MAX_ENUM = 0x7FFFFFFF
+} VkDescriptorUpdateTemplateType;
+
+typedef enum VkSubgroupFeatureFlagBits {
+    VK_SUBGROUP_FEATURE_BASIC_BIT = 0x00000001,
+    VK_SUBGROUP_FEATURE_VOTE_BIT = 0x00000002,
+    VK_SUBGROUP_FEATURE_ARITHMETIC_BIT = 0x00000004,
+    VK_SUBGROUP_FEATURE_BALLOT_BIT = 0x00000008,
+    VK_SUBGROUP_FEATURE_SHUFFLE_BIT = 0x00000010,
+    VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT = 0x00000020,
+    VK_SUBGROUP_FEATURE_CLUSTERED_BIT = 0x00000040,
+    VK_SUBGROUP_FEATURE_QUAD_BIT = 0x00000080,
+    VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV = 0x00000100,
+    VK_SUBGROUP_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSubgroupFeatureFlagBits;
+typedef VkFlags VkSubgroupFeatureFlags;
+
+typedef enum VkPeerMemoryFeatureFlagBits {
+    VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT = 0x00000001,
+    VK_PEER_MEMORY_FEATURE_COPY_DST_BIT = 0x00000002,
+    VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT = 0x00000004,
+    VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT = 0x00000008,
+    VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT,
+    VK_PEER_MEMORY_FEATURE_COPY_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_COPY_DST_BIT,
+    VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT,
+    VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT_KHR = VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT,
+    VK_PEER_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkPeerMemoryFeatureFlagBits;
+typedef VkFlags VkPeerMemoryFeatureFlags;
+
+typedef enum VkMemoryAllocateFlagBits {
+    VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT = 0x00000001,
+    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT = 0x00000002,
+    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT = 0x00000004,
+    VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT,
+    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT,
+    VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT,
+    VK_MEMORY_ALLOCATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkMemoryAllocateFlagBits;
+typedef VkFlags VkMemoryAllocateFlags;
+typedef VkFlags VkCommandPoolTrimFlags;
+typedef VkFlags VkDescriptorUpdateTemplateCreateFlags;
+
+typedef enum VkExternalMemoryHandleTypeFlagBits {
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x00000008,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x00000010,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x00000020,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x00000040,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x00000200,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x00000400,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x00000080,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x00000100,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT_KHR = VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT,
+    VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalMemoryHandleTypeFlagBits;
+typedef VkFlags VkExternalMemoryHandleTypeFlags;
+
+typedef enum VkExternalMemoryFeatureFlagBits {
+    VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT = 0x00000001,
+    VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT = 0x00000002,
+    VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT = 0x00000004,
+    VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT,
+    VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT,
+    VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT,
+    VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalMemoryFeatureFlagBits;
+typedef VkFlags VkExternalMemoryFeatureFlags;
+
+typedef enum VkExternalFenceHandleTypeFlagBits {
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000008,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT,
+    VK_EXTERNAL_FENCE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalFenceHandleTypeFlagBits;
+typedef VkFlags VkExternalFenceHandleTypeFlags;
+
+typedef enum VkExternalFenceFeatureFlagBits {
+    VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT = 0x00000001,
+    VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT = 0x00000002,
+    VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT,
+    VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT,
+    VK_EXTERNAL_FENCE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalFenceFeatureFlagBits;
+typedef VkFlags VkExternalFenceFeatureFlags;
+
+typedef enum VkFenceImportFlagBits {
+    VK_FENCE_IMPORT_TEMPORARY_BIT = 0x00000001,
+    VK_FENCE_IMPORT_TEMPORARY_BIT_KHR = VK_FENCE_IMPORT_TEMPORARY_BIT,
+    VK_FENCE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkFenceImportFlagBits;
+typedef VkFlags VkFenceImportFlags;
+
+typedef enum VkSemaphoreImportFlagBits {
+    VK_SEMAPHORE_IMPORT_TEMPORARY_BIT = 0x00000001,
+    VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT,
+    VK_SEMAPHORE_IMPORT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkSemaphoreImportFlagBits;
+typedef VkFlags VkSemaphoreImportFlags;
+
+typedef enum VkExternalSemaphoreHandleTypeFlagBits {
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT = 0x00000001,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x00000002,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x00000004,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT = 0x00000008,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT = 0x00000010,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+    VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalSemaphoreHandleTypeFlagBits;
+typedef VkFlags VkExternalSemaphoreHandleTypeFlags;
+
+typedef enum VkExternalSemaphoreFeatureFlagBits {
+    VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT = 0x00000001,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT = 0x00000002,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR = VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT,
+    VK_EXTERNAL_SEMAPHORE_FEATURE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VkExternalSemaphoreFeatureFlagBits;
+typedef VkFlags VkExternalSemaphoreFeatureFlags;
+
+typedef struct VkPhysicalDeviceSubgroupProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t subgroupSize;
+    VkShaderStageFlags supportedStages;
+    VkSubgroupFeatureFlags supportedOperations;
+    VkBool32 quadOperationsInAllStages;
+} VkPhysicalDeviceSubgroupProperties;
+
+typedef struct VkBindBufferMemoryInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+} VkBindBufferMemoryInfo;
+
+typedef struct VkBindImageMemoryInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+    VkDeviceMemory memory;
+    VkDeviceSize memoryOffset;
+} VkBindImageMemoryInfo;
+
+typedef struct VkPhysicalDevice16BitStorageFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 storageBuffer16BitAccess;
+    VkBool32 uniformAndStorageBuffer16BitAccess;
+    VkBool32 storagePushConstant16;
+    VkBool32 storageInputOutput16;
+} VkPhysicalDevice16BitStorageFeatures;
+
+typedef struct VkMemoryDedicatedRequirements {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 prefersDedicatedAllocation;
+    VkBool32 requiresDedicatedAllocation;
+} VkMemoryDedicatedRequirements;
+
+typedef struct VkMemoryDedicatedAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+    VkBuffer buffer;
+} VkMemoryDedicatedAllocateInfo;
+
+typedef struct VkMemoryAllocateFlagsInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkMemoryAllocateFlags flags;
+    uint32_t deviceMask;
+} VkMemoryAllocateFlagsInfo;
+
+typedef struct VkDeviceGroupRenderPassBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceMask;
+    uint32_t deviceRenderAreaCount;
+    const VkRect2D* pDeviceRenderAreas;
+} VkDeviceGroupRenderPassBeginInfo;
+
+typedef struct VkDeviceGroupCommandBufferBeginInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceMask;
+} VkDeviceGroupCommandBufferBeginInfo;
+
+typedef struct VkDeviceGroupSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t waitSemaphoreCount;
+    const uint32_t* pWaitSemaphoreDeviceIndices;
+    uint32_t commandBufferCount;
+    const uint32_t* pCommandBufferDeviceMasks;
+    uint32_t signalSemaphoreCount;
+    const uint32_t* pSignalSemaphoreDeviceIndices;
+} VkDeviceGroupSubmitInfo;
+
+typedef struct VkDeviceGroupBindSparseInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t resourceDeviceIndex;
+    uint32_t memoryDeviceIndex;
+} VkDeviceGroupBindSparseInfo;
+
+typedef struct VkBindBufferMemoryDeviceGroupInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceIndexCount;
+    const uint32_t* pDeviceIndices;
+} VkBindBufferMemoryDeviceGroupInfo;
+
+typedef struct VkBindImageMemoryDeviceGroupInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t deviceIndexCount;
+    const uint32_t* pDeviceIndices;
+    uint32_t splitInstanceBindRegionCount;
+    const VkRect2D* pSplitInstanceBindRegions;
+} VkBindImageMemoryDeviceGroupInfo;
+
+typedef struct VkPhysicalDeviceGroupProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t physicalDeviceCount;
+    VkPhysicalDevice physicalDevices[VK_MAX_DEVICE_GROUP_SIZE];
+    VkBool32 subsetAllocation;
+} VkPhysicalDeviceGroupProperties;
+
+typedef struct VkDeviceGroupDeviceCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t physicalDeviceCount;
+    const VkPhysicalDevice* pPhysicalDevices;
+} VkDeviceGroupDeviceCreateInfo;
+
+typedef struct VkBufferMemoryRequirementsInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkBuffer buffer;
+} VkBufferMemoryRequirementsInfo2;
+
+typedef struct VkImageMemoryRequirementsInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+} VkImageMemoryRequirementsInfo2;
+
+typedef struct VkImageSparseMemoryRequirementsInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkImage image;
+} VkImageSparseMemoryRequirementsInfo2;
+
+typedef struct VkMemoryRequirements2 {
+    VkStructureType sType;
+    void* pNext;
+    VkMemoryRequirements memoryRequirements;
+} VkMemoryRequirements2;
+
+typedef VkMemoryRequirements2 VkMemoryRequirements2KHR;
+
+typedef struct VkSparseImageMemoryRequirements2 {
+    VkStructureType sType;
+    void* pNext;
+    VkSparseImageMemoryRequirements memoryRequirements;
+} VkSparseImageMemoryRequirements2;
+
+typedef struct VkPhysicalDeviceFeatures2 {
+    VkStructureType sType;
+    void* pNext;
+    VkPhysicalDeviceFeatures features;
+} VkPhysicalDeviceFeatures2;
+
+typedef struct VkPhysicalDeviceProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkPhysicalDeviceProperties properties;
+} VkPhysicalDeviceProperties2;
+
+typedef struct VkFormatProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkFormatProperties formatProperties;
+} VkFormatProperties2;
+
+typedef struct VkImageFormatProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkImageFormatProperties imageFormatProperties;
+} VkImageFormatProperties2;
+
+typedef struct VkPhysicalDeviceImageFormatInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkFormat format;
+    VkImageType type;
+    VkImageTiling tiling;
+    VkImageUsageFlags usage;
+    VkImageCreateFlags flags;
+} VkPhysicalDeviceImageFormatInfo2;
+
+typedef struct VkQueueFamilyProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkQueueFamilyProperties queueFamilyProperties;
+} VkQueueFamilyProperties2;
+
+typedef struct VkPhysicalDeviceMemoryProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkPhysicalDeviceMemoryProperties memoryProperties;
+} VkPhysicalDeviceMemoryProperties2;
+
+typedef struct VkSparseImageFormatProperties2 {
+    VkStructureType sType;
+    void* pNext;
+    VkSparseImageFormatProperties properties;
+} VkSparseImageFormatProperties2;
+
+typedef struct VkPhysicalDeviceSparseImageFormatInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkFormat format;
+    VkImageType type;
+    VkSampleCountFlagBits samples;
+    VkImageUsageFlags usage;
+    VkImageTiling tiling;
+} VkPhysicalDeviceSparseImageFormatInfo2;
+
+typedef struct VkPhysicalDevicePointClippingProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkPointClippingBehavior pointClippingBehavior;
+} VkPhysicalDevicePointClippingProperties;
+
+typedef struct VkInputAttachmentAspectReference {
+    uint32_t subpass;
+    uint32_t inputAttachmentIndex;
+    VkImageAspectFlags aspectMask;
+} VkInputAttachmentAspectReference;
+
+typedef struct VkRenderPassInputAttachmentAspectCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t aspectReferenceCount;
+    const VkInputAttachmentAspectReference* pAspectReferences;
+} VkRenderPassInputAttachmentAspectCreateInfo;
+
+typedef struct VkImageViewUsageCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageUsageFlags usage;
+} VkImageViewUsageCreateInfo;
+
+typedef struct VkPipelineTessellationDomainOriginStateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkTessellationDomainOrigin domainOrigin;
+} VkPipelineTessellationDomainOriginStateCreateInfo;
+
+typedef struct VkRenderPassMultiviewCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    uint32_t subpassCount;
+    const uint32_t* pViewMasks;
+    uint32_t dependencyCount;
+    const int32_t* pViewOffsets;
+    uint32_t correlationMaskCount;
+    const uint32_t* pCorrelationMasks;
+} VkRenderPassMultiviewCreateInfo;
+
+typedef struct VkPhysicalDeviceMultiviewFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 multiview;
+    VkBool32 multiviewGeometryShader;
+    VkBool32 multiviewTessellationShader;
+} VkPhysicalDeviceMultiviewFeatures;
+
+typedef struct VkPhysicalDeviceMultiviewProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxMultiviewViewCount;
+    uint32_t maxMultiviewInstanceIndex;
+} VkPhysicalDeviceMultiviewProperties;
+
+typedef struct VkPhysicalDeviceVariablePointersFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 variablePointersStorageBuffer;
+    VkBool32 variablePointers;
+} VkPhysicalDeviceVariablePointersFeatures;
+
+typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeatures;
+
+typedef struct VkPhysicalDeviceProtectedMemoryFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 protectedMemory;
+} VkPhysicalDeviceProtectedMemoryFeatures;
+
+typedef struct VkPhysicalDeviceProtectedMemoryProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 protectedNoFault;
+} VkPhysicalDeviceProtectedMemoryProperties;
+
+typedef struct VkDeviceQueueInfo2 {
+    VkStructureType sType;
+    const void* pNext;
+    VkDeviceQueueCreateFlags flags;
+    uint32_t queueFamilyIndex;
+    uint32_t queueIndex;
+} VkDeviceQueueInfo2;
+
+typedef struct VkProtectedSubmitInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBool32 protectedSubmit;
+} VkProtectedSubmitInfo;
+
+typedef struct VkSamplerYcbcrConversionCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkFormat format;
+    VkSamplerYcbcrModelConversion ycbcrModel;
+    VkSamplerYcbcrRange ycbcrRange;
+    VkComponentMapping components;
+    VkChromaLocation xChromaOffset;
+    VkChromaLocation yChromaOffset;
+    VkFilter chromaFilter;
+    VkBool32 forceExplicitReconstruction;
+} VkSamplerYcbcrConversionCreateInfo;
+
+typedef struct VkSamplerYcbcrConversionInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkSamplerYcbcrConversion conversion;
+} VkSamplerYcbcrConversionInfo;
+
+typedef struct VkBindImagePlaneMemoryInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageAspectFlagBits planeAspect;
+} VkBindImagePlaneMemoryInfo;
+
+typedef struct VkImagePlaneMemoryRequirementsInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkImageAspectFlagBits planeAspect;
+} VkImagePlaneMemoryRequirementsInfo;
+
+typedef struct VkPhysicalDeviceSamplerYcbcrConversionFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 samplerYcbcrConversion;
+} VkPhysicalDeviceSamplerYcbcrConversionFeatures;
+
+typedef struct VkSamplerYcbcrConversionImageFormatProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t combinedImageSamplerDescriptorCount;
+} VkSamplerYcbcrConversionImageFormatProperties;
+
+typedef struct VkDescriptorUpdateTemplateEntry {
+    uint32_t dstBinding;
+    uint32_t dstArrayElement;
+    uint32_t descriptorCount;
+    VkDescriptorType descriptorType;
+    size_t offset;
+    size_t stride;
+} VkDescriptorUpdateTemplateEntry;
+
+typedef struct VkDescriptorUpdateTemplateCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkDescriptorUpdateTemplateCreateFlags flags;
+    uint32_t descriptorUpdateEntryCount;
+    const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries;
+    VkDescriptorUpdateTemplateType templateType;
+    VkDescriptorSetLayout descriptorSetLayout;
+    VkPipelineBindPoint pipelineBindPoint;
+    VkPipelineLayout pipelineLayout;
+    uint32_t set;
+} VkDescriptorUpdateTemplateCreateInfo;
+
+typedef struct VkExternalMemoryProperties {
+    VkExternalMemoryFeatureFlags externalMemoryFeatures;
+    VkExternalMemoryHandleTypeFlags exportFromImportedHandleTypes;
+    VkExternalMemoryHandleTypeFlags compatibleHandleTypes;
+} VkExternalMemoryProperties;
+
+typedef struct VkPhysicalDeviceExternalImageFormatInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalImageFormatInfo;
+
+typedef struct VkExternalImageFormatProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalImageFormatProperties;
+
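+/* Editorial sketch, not part of the upstream Khronos header: filling in the
+ * descriptor-update-template structures defined above so one host-side blob
+ * can update a descriptor set in a single call. `MyUniformData` and binding 0
+ * are illustrative assumptions; offsetof comes from <stddef.h>. Guarded with
+ * #if 0 so it is never compiled. */
+#if 0
+typedef struct MyUniformData { VkDescriptorBufferInfo buffer0; } MyUniformData;
+
+static VkDescriptorUpdateTemplate make_template_sketch(VkDevice device, VkDescriptorSetLayout layout)
+{
+    VkDescriptorUpdateTemplateEntry entry;
+    entry.dstBinding = 0;
+    entry.dstArrayElement = 0;
+    entry.descriptorCount = 1;
+    entry.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+    entry.offset = offsetof(MyUniformData, buffer0); /* where to read inside the blob */
+    entry.stride = sizeof(VkDescriptorBufferInfo);
+
+    VkDescriptorUpdateTemplateCreateInfo info = { VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO };
+    info.descriptorUpdateEntryCount = 1;
+    info.pDescriptorUpdateEntries = &entry;
+    info.templateType = VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET;
+    info.descriptorSetLayout = layout;
+
+    VkDescriptorUpdateTemplate tmpl;
+    vkCreateDescriptorUpdateTemplate(device, &info, NULL, &tmpl); /* declared further below */
+    /* later: vkUpdateDescriptorSetWithTemplate(device, set, tmpl, &myData); */
+    return tmpl;
+}
+#endif
+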
+typedef struct VkPhysicalDeviceExternalBufferInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkBufferCreateFlags flags;
+    VkBufferUsageFlags usage;
+    VkExternalMemoryHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalBufferInfo;
+
+typedef struct VkExternalBufferProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalMemoryProperties externalMemoryProperties;
+} VkExternalBufferProperties;
+
+typedef struct VkPhysicalDeviceIDProperties {
+    VkStructureType sType;
+    void* pNext;
+    uint8_t deviceUUID[VK_UUID_SIZE];
+    uint8_t driverUUID[VK_UUID_SIZE];
+    uint8_t deviceLUID[VK_LUID_SIZE];
+    uint32_t deviceNodeMask;
+    VkBool32 deviceLUIDValid;
+} VkPhysicalDeviceIDProperties;
+
+typedef struct VkExternalMemoryImageCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryImageCreateInfo;
+
+typedef struct VkExternalMemoryBufferCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExternalMemoryBufferCreateInfo;
+
+typedef struct VkExportMemoryAllocateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalMemoryHandleTypeFlags handleTypes;
+} VkExportMemoryAllocateInfo;
+
+typedef struct VkPhysicalDeviceExternalFenceInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalFenceHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalFenceInfo;
+
+typedef struct VkExternalFenceProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalFenceHandleTypeFlags exportFromImportedHandleTypes;
+    VkExternalFenceHandleTypeFlags compatibleHandleTypes;
+    VkExternalFenceFeatureFlags externalFenceFeatures;
+} VkExternalFenceProperties;
+
+typedef struct VkExportFenceCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalFenceHandleTypeFlags handleTypes;
+} VkExportFenceCreateInfo;
+
+typedef struct VkExportSemaphoreCreateInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalSemaphoreHandleTypeFlags handleTypes;
+} VkExportSemaphoreCreateInfo;
+
+typedef struct VkPhysicalDeviceExternalSemaphoreInfo {
+    VkStructureType sType;
+    const void* pNext;
+    VkExternalSemaphoreHandleTypeFlagBits handleType;
+} VkPhysicalDeviceExternalSemaphoreInfo;
+
+typedef struct VkExternalSemaphoreProperties {
+    VkStructureType sType;
+    void* pNext;
+    VkExternalSemaphoreHandleTypeFlags exportFromImportedHandleTypes;
+    VkExternalSemaphoreHandleTypeFlags compatibleHandleTypes;
+    VkExternalSemaphoreFeatureFlags externalSemaphoreFeatures;
+} VkExternalSemaphoreProperties;
+
+typedef struct VkPhysicalDeviceMaintenance3Properties {
+    VkStructureType sType;
+    void* pNext;
+    uint32_t maxPerSetDescriptors;
+    VkDeviceSize maxMemoryAllocationSize;
+} VkPhysicalDeviceMaintenance3Properties;
+
+typedef struct VkDescriptorSetLayoutSupport {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 supported;
+} VkDescriptorSetLayoutSupport;
+
+typedef struct VkPhysicalDeviceShaderDrawParametersFeatures {
+    VkStructureType sType;
+    void* pNext;
+    VkBool32 shaderDrawParameters;
+} VkPhysicalDeviceShaderDrawParametersFeatures;
+
+typedef VkPhysicalDeviceShaderDrawParametersFeatures VkPhysicalDeviceShaderDrawParameterFeatures;
+
+typedef VkResult (VKAPI_PTR *PFN_vkEnumerateInstanceVersion)(uint32_t* pApiVersion);
+typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);
+typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeatures)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMask)(VkCommandBuffer commandBuffer, uint32_t deviceMask);
+typedef void (VKAPI_PTR *PFN_vkCmdDispatchBase)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroups)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties);
+typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties);
+typedef void (VKAPI_PTR *PFN_vkTrimCommandPool)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags);
+typedef void (VKAPI_PTR *PFN_vkGetDeviceQueue2)(VkDevice device, const VkDeviceQueueInfo2* pQueueInfo, VkQueue* pQueue);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversion)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion);
+typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversion)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator);
+typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplate)(VkDevice device, const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate);
+typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplate)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator);
+typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplate)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFenceProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties);
+typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphoreProperties)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties);
+typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupport)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceVersion(uint32_t* pApiVersion);
+VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos);
+VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos);
+VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeatures(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures);
+VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask);
+VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBase(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ);
+VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroups(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties);
+VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements);
+VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements);
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures);
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties);
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties);
+VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties);
vkGetPhysicalDeviceQueueFamilyProperties2( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPool( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); + +VKAPI_ATTR void VKAPI_CALL vkGetDeviceQueue2( + VkDevice device, + const VkDeviceQueueInfo2* pQueueInfo, + VkQueue* pQueue); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversion( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversion( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplate( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplate( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplate( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFenceProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphoreProperties( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupport( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + VkDescriptorSetLayoutSupport* pSupport); +#endif + + +#define VK_VERSION_1_2 1 +// Vulkan 1.2 version number +#define VK_API_VERSION_1_2 VK_MAKE_VERSION(1, 2, 0)// Patch version should always be set to 0 + +typedef uint64_t VkDeviceAddress; +#define VK_MAX_DRIVER_NAME_SIZE 256 +#define VK_MAX_DRIVER_INFO_SIZE 256 + +typedef enum VkDriverId { + VK_DRIVER_ID_AMD_PROPRIETARY = 1, + VK_DRIVER_ID_AMD_OPEN_SOURCE = 2, + VK_DRIVER_ID_MESA_RADV = 3, + VK_DRIVER_ID_NVIDIA_PROPRIETARY = 4, + VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS = 5, + VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA = 6, + VK_DRIVER_ID_IMAGINATION_PROPRIETARY = 7, + VK_DRIVER_ID_QUALCOMM_PROPRIETARY = 8, + VK_DRIVER_ID_ARM_PROPRIETARY = 9, + VK_DRIVER_ID_GOOGLE_SWIFTSHADER = 10, + VK_DRIVER_ID_GGP_PROPRIETARY = 11, + VK_DRIVER_ID_BROADCOM_PROPRIETARY = 12, + 
VK_DRIVER_ID_AMD_PROPRIETARY_KHR = VK_DRIVER_ID_AMD_PROPRIETARY, + VK_DRIVER_ID_AMD_OPEN_SOURCE_KHR = VK_DRIVER_ID_AMD_OPEN_SOURCE, + VK_DRIVER_ID_MESA_RADV_KHR = VK_DRIVER_ID_MESA_RADV, + VK_DRIVER_ID_NVIDIA_PROPRIETARY_KHR = VK_DRIVER_ID_NVIDIA_PROPRIETARY, + VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS_KHR = VK_DRIVER_ID_INTEL_PROPRIETARY_WINDOWS, + VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA_KHR = VK_DRIVER_ID_INTEL_OPEN_SOURCE_MESA, + VK_DRIVER_ID_IMAGINATION_PROPRIETARY_KHR = VK_DRIVER_ID_IMAGINATION_PROPRIETARY, + VK_DRIVER_ID_QUALCOMM_PROPRIETARY_KHR = VK_DRIVER_ID_QUALCOMM_PROPRIETARY, + VK_DRIVER_ID_ARM_PROPRIETARY_KHR = VK_DRIVER_ID_ARM_PROPRIETARY, + VK_DRIVER_ID_GOOGLE_SWIFTSHADER_KHR = VK_DRIVER_ID_GOOGLE_SWIFTSHADER, + VK_DRIVER_ID_GGP_PROPRIETARY_KHR = VK_DRIVER_ID_GGP_PROPRIETARY, + VK_DRIVER_ID_BROADCOM_PROPRIETARY_KHR = VK_DRIVER_ID_BROADCOM_PROPRIETARY, + VK_DRIVER_ID_BEGIN_RANGE = VK_DRIVER_ID_AMD_PROPRIETARY, + VK_DRIVER_ID_END_RANGE = VK_DRIVER_ID_BROADCOM_PROPRIETARY, + VK_DRIVER_ID_RANGE_SIZE = (VK_DRIVER_ID_BROADCOM_PROPRIETARY - VK_DRIVER_ID_AMD_PROPRIETARY + 1), + VK_DRIVER_ID_MAX_ENUM = 0x7FFFFFFF +} VkDriverId; + +typedef enum VkShaderFloatControlsIndependence { + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY = 0, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL = 1, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE = 2, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_ALL, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE_KHR = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_BEGIN_RANGE = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_END_RANGE = VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE, + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_RANGE_SIZE = (VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_NONE - VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_32_BIT_ONLY + 1), + VK_SHADER_FLOAT_CONTROLS_INDEPENDENCE_MAX_ENUM = 0x7FFFFFFF +} VkShaderFloatControlsIndependence; + +typedef enum VkSamplerReductionMode { + VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE = 0, + VK_SAMPLER_REDUCTION_MODE_MIN = 1, + VK_SAMPLER_REDUCTION_MODE_MAX = 2, + VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE, + VK_SAMPLER_REDUCTION_MODE_MIN_EXT = VK_SAMPLER_REDUCTION_MODE_MIN, + VK_SAMPLER_REDUCTION_MODE_MAX_EXT = VK_SAMPLER_REDUCTION_MODE_MAX, + VK_SAMPLER_REDUCTION_MODE_BEGIN_RANGE = VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE, + VK_SAMPLER_REDUCTION_MODE_END_RANGE = VK_SAMPLER_REDUCTION_MODE_MAX, + VK_SAMPLER_REDUCTION_MODE_RANGE_SIZE = (VK_SAMPLER_REDUCTION_MODE_MAX - VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE + 1), + VK_SAMPLER_REDUCTION_MODE_MAX_ENUM = 0x7FFFFFFF +} VkSamplerReductionMode; + +typedef enum VkSemaphoreType { + VK_SEMAPHORE_TYPE_BINARY = 0, + VK_SEMAPHORE_TYPE_TIMELINE = 1, + VK_SEMAPHORE_TYPE_BINARY_KHR = VK_SEMAPHORE_TYPE_BINARY, + VK_SEMAPHORE_TYPE_TIMELINE_KHR = VK_SEMAPHORE_TYPE_TIMELINE, + VK_SEMAPHORE_TYPE_BEGIN_RANGE = VK_SEMAPHORE_TYPE_BINARY, + VK_SEMAPHORE_TYPE_END_RANGE = VK_SEMAPHORE_TYPE_TIMELINE, + VK_SEMAPHORE_TYPE_RANGE_SIZE = (VK_SEMAPHORE_TYPE_TIMELINE - VK_SEMAPHORE_TYPE_BINARY + 1), + VK_SEMAPHORE_TYPE_MAX_ENUM = 0x7FFFFFFF +} VkSemaphoreType; + +typedef enum VkResolveModeFlagBits { + VK_RESOLVE_MODE_NONE = 0, + VK_RESOLVE_MODE_SAMPLE_ZERO_BIT = 0x00000001, + VK_RESOLVE_MODE_AVERAGE_BIT = 0x00000002, + VK_RESOLVE_MODE_MIN_BIT = 
0x00000004, + VK_RESOLVE_MODE_MAX_BIT = 0x00000008, + VK_RESOLVE_MODE_NONE_KHR = VK_RESOLVE_MODE_NONE, + VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR = VK_RESOLVE_MODE_SAMPLE_ZERO_BIT, + VK_RESOLVE_MODE_AVERAGE_BIT_KHR = VK_RESOLVE_MODE_AVERAGE_BIT, + VK_RESOLVE_MODE_MIN_BIT_KHR = VK_RESOLVE_MODE_MIN_BIT, + VK_RESOLVE_MODE_MAX_BIT_KHR = VK_RESOLVE_MODE_MAX_BIT, + VK_RESOLVE_MODE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkResolveModeFlagBits; +typedef VkFlags VkResolveModeFlags; + +typedef enum VkDescriptorBindingFlagBits { + VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT = 0x00000001, + VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT = 0x00000002, + VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT = 0x00000004, + VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT = 0x00000008, + VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT, + VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT, + VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT, + VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT, + VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkDescriptorBindingFlagBits; +typedef VkFlags VkDescriptorBindingFlags; + +typedef enum VkSemaphoreWaitFlagBits { + VK_SEMAPHORE_WAIT_ANY_BIT = 0x00000001, + VK_SEMAPHORE_WAIT_ANY_BIT_KHR = VK_SEMAPHORE_WAIT_ANY_BIT, + VK_SEMAPHORE_WAIT_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VkSemaphoreWaitFlagBits; +typedef VkFlags VkSemaphoreWaitFlags; +typedef struct VkPhysicalDeviceVulkan11Features { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer16BitAccess; + VkBool32 uniformAndStorageBuffer16BitAccess; + VkBool32 storagePushConstant16; + VkBool32 storageInputOutput16; + VkBool32 multiview; + VkBool32 multiviewGeometryShader; + VkBool32 multiviewTessellationShader; + VkBool32 variablePointersStorageBuffer; + VkBool32 variablePointers; + VkBool32 protectedMemory; + VkBool32 samplerYcbcrConversion; + VkBool32 shaderDrawParameters; +} VkPhysicalDeviceVulkan11Features; + +typedef struct VkPhysicalDeviceVulkan11Properties { + VkStructureType sType; + void* pNext; + uint8_t deviceUUID[VK_UUID_SIZE]; + uint8_t driverUUID[VK_UUID_SIZE]; + uint8_t deviceLUID[VK_LUID_SIZE]; + uint32_t deviceNodeMask; + VkBool32 deviceLUIDValid; + uint32_t subgroupSize; + VkShaderStageFlags subgroupSupportedStages; + VkSubgroupFeatureFlags subgroupSupportedOperations; + VkBool32 subgroupQuadOperationsInAllStages; + VkPointClippingBehavior pointClippingBehavior; + uint32_t maxMultiviewViewCount; + uint32_t maxMultiviewInstanceIndex; + VkBool32 protectedNoFault; + uint32_t maxPerSetDescriptors; + VkDeviceSize maxMemoryAllocationSize; +} VkPhysicalDeviceVulkan11Properties; + +typedef struct VkPhysicalDeviceVulkan12Features { + VkStructureType sType; + void* pNext; + VkBool32 samplerMirrorClampToEdge; + VkBool32 drawIndirectCount; + VkBool32 storageBuffer8BitAccess; + VkBool32 uniformAndStorageBuffer8BitAccess; + VkBool32 storagePushConstant8; + VkBool32 shaderBufferInt64Atomics; + VkBool32 shaderSharedInt64Atomics; + VkBool32 shaderFloat16; + VkBool32 shaderInt8; + VkBool32 descriptorIndexing; + VkBool32 shaderInputAttachmentArrayDynamicIndexing; + VkBool32 shaderUniformTexelBufferArrayDynamicIndexing; + VkBool32 shaderStorageTexelBufferArrayDynamicIndexing; + VkBool32 shaderUniformBufferArrayNonUniformIndexing; + VkBool32 shaderSampledImageArrayNonUniformIndexing; + VkBool32 
shaderStorageBufferArrayNonUniformIndexing; + VkBool32 shaderStorageImageArrayNonUniformIndexing; + VkBool32 shaderInputAttachmentArrayNonUniformIndexing; + VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing; + VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing; + VkBool32 descriptorBindingUniformBufferUpdateAfterBind; + VkBool32 descriptorBindingSampledImageUpdateAfterBind; + VkBool32 descriptorBindingStorageImageUpdateAfterBind; + VkBool32 descriptorBindingStorageBufferUpdateAfterBind; + VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingUpdateUnusedWhilePending; + VkBool32 descriptorBindingPartiallyBound; + VkBool32 descriptorBindingVariableDescriptorCount; + VkBool32 runtimeDescriptorArray; + VkBool32 samplerFilterMinmax; + VkBool32 scalarBlockLayout; + VkBool32 imagelessFramebuffer; + VkBool32 uniformBufferStandardLayout; + VkBool32 shaderSubgroupExtendedTypes; + VkBool32 separateDepthStencilLayouts; + VkBool32 hostQueryReset; + VkBool32 timelineSemaphore; + VkBool32 bufferDeviceAddress; + VkBool32 bufferDeviceAddressCaptureReplay; + VkBool32 bufferDeviceAddressMultiDevice; + VkBool32 vulkanMemoryModel; + VkBool32 vulkanMemoryModelDeviceScope; + VkBool32 vulkanMemoryModelAvailabilityVisibilityChains; + VkBool32 shaderOutputViewportIndex; + VkBool32 shaderOutputLayer; + VkBool32 subgroupBroadcastDynamicId; +} VkPhysicalDeviceVulkan12Features; + +typedef struct VkConformanceVersion { + uint8_t major; + uint8_t minor; + uint8_t subminor; + uint8_t patch; +} VkConformanceVersion; + +typedef struct VkPhysicalDeviceVulkan12Properties { + VkStructureType sType; + void* pNext; + VkDriverId driverID; + char driverName[VK_MAX_DRIVER_NAME_SIZE]; + char driverInfo[VK_MAX_DRIVER_INFO_SIZE]; + VkConformanceVersion conformanceVersion; + VkShaderFloatControlsIndependence denormBehaviorIndependence; + VkShaderFloatControlsIndependence roundingModeIndependence; + VkBool32 shaderSignedZeroInfNanPreserveFloat16; + VkBool32 shaderSignedZeroInfNanPreserveFloat32; + VkBool32 shaderSignedZeroInfNanPreserveFloat64; + VkBool32 shaderDenormPreserveFloat16; + VkBool32 shaderDenormPreserveFloat32; + VkBool32 shaderDenormPreserveFloat64; + VkBool32 shaderDenormFlushToZeroFloat16; + VkBool32 shaderDenormFlushToZeroFloat32; + VkBool32 shaderDenormFlushToZeroFloat64; + VkBool32 shaderRoundingModeRTEFloat16; + VkBool32 shaderRoundingModeRTEFloat32; + VkBool32 shaderRoundingModeRTEFloat64; + VkBool32 shaderRoundingModeRTZFloat16; + VkBool32 shaderRoundingModeRTZFloat32; + VkBool32 shaderRoundingModeRTZFloat64; + uint32_t maxUpdateAfterBindDescriptorsInAllPools; + VkBool32 shaderUniformBufferArrayNonUniformIndexingNative; + VkBool32 shaderSampledImageArrayNonUniformIndexingNative; + VkBool32 shaderStorageBufferArrayNonUniformIndexingNative; + VkBool32 shaderStorageImageArrayNonUniformIndexingNative; + VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative; + VkBool32 robustBufferAccessUpdateAfterBind; + VkBool32 quadDivergentImplicitLod; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; + uint32_t maxPerStageUpdateAfterBindResources; + uint32_t maxDescriptorSetUpdateAfterBindSamplers; + 
uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; + VkResolveModeFlags supportedDepthResolveModes; + VkResolveModeFlags supportedStencilResolveModes; + VkBool32 independentResolveNone; + VkBool32 independentResolve; + VkBool32 filterMinmaxSingleComponentFormats; + VkBool32 filterMinmaxImageComponentMapping; + uint64_t maxTimelineSemaphoreValueDifference; + VkSampleCountFlags framebufferIntegerColorSampleCounts; +} VkPhysicalDeviceVulkan12Properties; + +typedef struct VkImageFormatListCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t viewFormatCount; + const VkFormat* pViewFormats; +} VkImageFormatListCreateInfo; + +typedef struct VkAttachmentDescription2 { + VkStructureType sType; + const void* pNext; + VkAttachmentDescriptionFlags flags; + VkFormat format; + VkSampleCountFlagBits samples; + VkAttachmentLoadOp loadOp; + VkAttachmentStoreOp storeOp; + VkAttachmentLoadOp stencilLoadOp; + VkAttachmentStoreOp stencilStoreOp; + VkImageLayout initialLayout; + VkImageLayout finalLayout; +} VkAttachmentDescription2; + +typedef struct VkAttachmentReference2 { + VkStructureType sType; + const void* pNext; + uint32_t attachment; + VkImageLayout layout; + VkImageAspectFlags aspectMask; +} VkAttachmentReference2; + +typedef struct VkSubpassDescription2 { + VkStructureType sType; + const void* pNext; + VkSubpassDescriptionFlags flags; + VkPipelineBindPoint pipelineBindPoint; + uint32_t viewMask; + uint32_t inputAttachmentCount; + const VkAttachmentReference2* pInputAttachments; + uint32_t colorAttachmentCount; + const VkAttachmentReference2* pColorAttachments; + const VkAttachmentReference2* pResolveAttachments; + const VkAttachmentReference2* pDepthStencilAttachment; + uint32_t preserveAttachmentCount; + const uint32_t* pPreserveAttachments; +} VkSubpassDescription2; + +typedef struct VkSubpassDependency2 { + VkStructureType sType; + const void* pNext; + uint32_t srcSubpass; + uint32_t dstSubpass; + VkPipelineStageFlags srcStageMask; + VkPipelineStageFlags dstStageMask; + VkAccessFlags srcAccessMask; + VkAccessFlags dstAccessMask; + VkDependencyFlags dependencyFlags; + int32_t viewOffset; +} VkSubpassDependency2; + +typedef struct VkRenderPassCreateInfo2 { + VkStructureType sType; + const void* pNext; + VkRenderPassCreateFlags flags; + uint32_t attachmentCount; + const VkAttachmentDescription2* pAttachments; + uint32_t subpassCount; + const VkSubpassDescription2* pSubpasses; + uint32_t dependencyCount; + const VkSubpassDependency2* pDependencies; + uint32_t correlatedViewMaskCount; + const uint32_t* pCorrelatedViewMasks; +} VkRenderPassCreateInfo2; + +typedef struct VkSubpassBeginInfo { + VkStructureType sType; + const void* pNext; + VkSubpassContents contents; +} VkSubpassBeginInfo; + +typedef struct VkSubpassEndInfo { + VkStructureType sType; + const void* pNext; +} VkSubpassEndInfo; + +typedef struct VkPhysicalDevice8BitStorageFeatures { + VkStructureType sType; + void* pNext; + VkBool32 storageBuffer8BitAccess; + VkBool32 uniformAndStorageBuffer8BitAccess; + VkBool32 storagePushConstant8; +} VkPhysicalDevice8BitStorageFeatures; + +typedef struct VkPhysicalDeviceDriverProperties { + VkStructureType sType; + void* 
pNext; + VkDriverId driverID; + char driverName[VK_MAX_DRIVER_NAME_SIZE]; + char driverInfo[VK_MAX_DRIVER_INFO_SIZE]; + VkConformanceVersion conformanceVersion; +} VkPhysicalDeviceDriverProperties; + +typedef struct VkPhysicalDeviceShaderAtomicInt64Features { + VkStructureType sType; + void* pNext; + VkBool32 shaderBufferInt64Atomics; + VkBool32 shaderSharedInt64Atomics; +} VkPhysicalDeviceShaderAtomicInt64Features; + +typedef struct VkPhysicalDeviceShaderFloat16Int8Features { + VkStructureType sType; + void* pNext; + VkBool32 shaderFloat16; + VkBool32 shaderInt8; +} VkPhysicalDeviceShaderFloat16Int8Features; + +typedef struct VkPhysicalDeviceFloatControlsProperties { + VkStructureType sType; + void* pNext; + VkShaderFloatControlsIndependence denormBehaviorIndependence; + VkShaderFloatControlsIndependence roundingModeIndependence; + VkBool32 shaderSignedZeroInfNanPreserveFloat16; + VkBool32 shaderSignedZeroInfNanPreserveFloat32; + VkBool32 shaderSignedZeroInfNanPreserveFloat64; + VkBool32 shaderDenormPreserveFloat16; + VkBool32 shaderDenormPreserveFloat32; + VkBool32 shaderDenormPreserveFloat64; + VkBool32 shaderDenormFlushToZeroFloat16; + VkBool32 shaderDenormFlushToZeroFloat32; + VkBool32 shaderDenormFlushToZeroFloat64; + VkBool32 shaderRoundingModeRTEFloat16; + VkBool32 shaderRoundingModeRTEFloat32; + VkBool32 shaderRoundingModeRTEFloat64; + VkBool32 shaderRoundingModeRTZFloat16; + VkBool32 shaderRoundingModeRTZFloat32; + VkBool32 shaderRoundingModeRTZFloat64; +} VkPhysicalDeviceFloatControlsProperties; + +typedef struct VkDescriptorSetLayoutBindingFlagsCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t bindingCount; + const VkDescriptorBindingFlags* pBindingFlags; +} VkDescriptorSetLayoutBindingFlagsCreateInfo; + +typedef struct VkPhysicalDeviceDescriptorIndexingFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderInputAttachmentArrayDynamicIndexing; + VkBool32 shaderUniformTexelBufferArrayDynamicIndexing; + VkBool32 shaderStorageTexelBufferArrayDynamicIndexing; + VkBool32 shaderUniformBufferArrayNonUniformIndexing; + VkBool32 shaderSampledImageArrayNonUniformIndexing; + VkBool32 shaderStorageBufferArrayNonUniformIndexing; + VkBool32 shaderStorageImageArrayNonUniformIndexing; + VkBool32 shaderInputAttachmentArrayNonUniformIndexing; + VkBool32 shaderUniformTexelBufferArrayNonUniformIndexing; + VkBool32 shaderStorageTexelBufferArrayNonUniformIndexing; + VkBool32 descriptorBindingUniformBufferUpdateAfterBind; + VkBool32 descriptorBindingSampledImageUpdateAfterBind; + VkBool32 descriptorBindingStorageImageUpdateAfterBind; + VkBool32 descriptorBindingStorageBufferUpdateAfterBind; + VkBool32 descriptorBindingUniformTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingStorageTexelBufferUpdateAfterBind; + VkBool32 descriptorBindingUpdateUnusedWhilePending; + VkBool32 descriptorBindingPartiallyBound; + VkBool32 descriptorBindingVariableDescriptorCount; + VkBool32 runtimeDescriptorArray; +} VkPhysicalDeviceDescriptorIndexingFeatures; + +typedef struct VkPhysicalDeviceDescriptorIndexingProperties { + VkStructureType sType; + void* pNext; + uint32_t maxUpdateAfterBindDescriptorsInAllPools; + VkBool32 shaderUniformBufferArrayNonUniformIndexingNative; + VkBool32 shaderSampledImageArrayNonUniformIndexingNative; + VkBool32 shaderStorageBufferArrayNonUniformIndexingNative; + VkBool32 shaderStorageImageArrayNonUniformIndexingNative; + VkBool32 shaderInputAttachmentArrayNonUniformIndexingNative; + VkBool32 robustBufferAccessUpdateAfterBind; + VkBool32 
quadDivergentImplicitLod; + uint32_t maxPerStageDescriptorUpdateAfterBindSamplers; + uint32_t maxPerStageDescriptorUpdateAfterBindUniformBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageBuffers; + uint32_t maxPerStageDescriptorUpdateAfterBindSampledImages; + uint32_t maxPerStageDescriptorUpdateAfterBindStorageImages; + uint32_t maxPerStageDescriptorUpdateAfterBindInputAttachments; + uint32_t maxPerStageUpdateAfterBindResources; + uint32_t maxDescriptorSetUpdateAfterBindSamplers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffers; + uint32_t maxDescriptorSetUpdateAfterBindUniformBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffers; + uint32_t maxDescriptorSetUpdateAfterBindStorageBuffersDynamic; + uint32_t maxDescriptorSetUpdateAfterBindSampledImages; + uint32_t maxDescriptorSetUpdateAfterBindStorageImages; + uint32_t maxDescriptorSetUpdateAfterBindInputAttachments; +} VkPhysicalDeviceDescriptorIndexingProperties; + +typedef struct VkDescriptorSetVariableDescriptorCountAllocateInfo { + VkStructureType sType; + const void* pNext; + uint32_t descriptorSetCount; + const uint32_t* pDescriptorCounts; +} VkDescriptorSetVariableDescriptorCountAllocateInfo; + +typedef struct VkDescriptorSetVariableDescriptorCountLayoutSupport { + VkStructureType sType; + void* pNext; + uint32_t maxVariableDescriptorCount; +} VkDescriptorSetVariableDescriptorCountLayoutSupport; + +typedef struct VkSubpassDescriptionDepthStencilResolve { + VkStructureType sType; + const void* pNext; + VkResolveModeFlagBits depthResolveMode; + VkResolveModeFlagBits stencilResolveMode; + const VkAttachmentReference2* pDepthStencilResolveAttachment; +} VkSubpassDescriptionDepthStencilResolve; + +typedef struct VkPhysicalDeviceDepthStencilResolveProperties { + VkStructureType sType; + void* pNext; + VkResolveModeFlags supportedDepthResolveModes; + VkResolveModeFlags supportedStencilResolveModes; + VkBool32 independentResolveNone; + VkBool32 independentResolve; +} VkPhysicalDeviceDepthStencilResolveProperties; + +typedef struct VkPhysicalDeviceScalarBlockLayoutFeatures { + VkStructureType sType; + void* pNext; + VkBool32 scalarBlockLayout; +} VkPhysicalDeviceScalarBlockLayoutFeatures; + +typedef struct VkImageStencilUsageCreateInfo { + VkStructureType sType; + const void* pNext; + VkImageUsageFlags stencilUsage; +} VkImageStencilUsageCreateInfo; + +typedef struct VkSamplerReductionModeCreateInfo { + VkStructureType sType; + const void* pNext; + VkSamplerReductionMode reductionMode; +} VkSamplerReductionModeCreateInfo; + +typedef struct VkPhysicalDeviceSamplerFilterMinmaxProperties { + VkStructureType sType; + void* pNext; + VkBool32 filterMinmaxSingleComponentFormats; + VkBool32 filterMinmaxImageComponentMapping; +} VkPhysicalDeviceSamplerFilterMinmaxProperties; + +typedef struct VkPhysicalDeviceVulkanMemoryModelFeatures { + VkStructureType sType; + void* pNext; + VkBool32 vulkanMemoryModel; + VkBool32 vulkanMemoryModelDeviceScope; + VkBool32 vulkanMemoryModelAvailabilityVisibilityChains; +} VkPhysicalDeviceVulkanMemoryModelFeatures; + +typedef struct VkPhysicalDeviceImagelessFramebufferFeatures { + VkStructureType sType; + void* pNext; + VkBool32 imagelessFramebuffer; +} VkPhysicalDeviceImagelessFramebufferFeatures; + +typedef struct VkFramebufferAttachmentImageInfo { + VkStructureType sType; + const void* pNext; + VkImageCreateFlags flags; + VkImageUsageFlags usage; + uint32_t width; + uint32_t height; + uint32_t layerCount; + uint32_t viewFormatCount; + const VkFormat* pViewFormats; +} 
VkFramebufferAttachmentImageInfo; + +typedef struct VkFramebufferAttachmentsCreateInfo { + VkStructureType sType; + const void* pNext; + uint32_t attachmentImageInfoCount; + const VkFramebufferAttachmentImageInfo* pAttachmentImageInfos; +} VkFramebufferAttachmentsCreateInfo; + +typedef struct VkRenderPassAttachmentBeginInfo { + VkStructureType sType; + const void* pNext; + uint32_t attachmentCount; + const VkImageView* pAttachments; +} VkRenderPassAttachmentBeginInfo; + +typedef struct VkPhysicalDeviceUniformBufferStandardLayoutFeatures { + VkStructureType sType; + void* pNext; + VkBool32 uniformBufferStandardLayout; +} VkPhysicalDeviceUniformBufferStandardLayoutFeatures; + +typedef struct VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupExtendedTypes; +} VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures; + +typedef struct VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures { + VkStructureType sType; + void* pNext; + VkBool32 separateDepthStencilLayouts; +} VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures; + +typedef struct VkAttachmentReferenceStencilLayout { + VkStructureType sType; + void* pNext; + VkImageLayout stencilLayout; +} VkAttachmentReferenceStencilLayout; + +typedef struct VkAttachmentDescriptionStencilLayout { + VkStructureType sType; + void* pNext; + VkImageLayout stencilInitialLayout; + VkImageLayout stencilFinalLayout; +} VkAttachmentDescriptionStencilLayout; + +typedef struct VkPhysicalDeviceHostQueryResetFeatures { + VkStructureType sType; + void* pNext; + VkBool32 hostQueryReset; +} VkPhysicalDeviceHostQueryResetFeatures; + +typedef struct VkPhysicalDeviceTimelineSemaphoreFeatures { + VkStructureType sType; + void* pNext; + VkBool32 timelineSemaphore; +} VkPhysicalDeviceTimelineSemaphoreFeatures; + +typedef struct VkPhysicalDeviceTimelineSemaphoreProperties { + VkStructureType sType; + void* pNext; + uint64_t maxTimelineSemaphoreValueDifference; +} VkPhysicalDeviceTimelineSemaphoreProperties; + +typedef struct VkSemaphoreTypeCreateInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreType semaphoreType; + uint64_t initialValue; +} VkSemaphoreTypeCreateInfo; + +typedef struct VkTimelineSemaphoreSubmitInfo { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreValueCount; + const uint64_t* pWaitSemaphoreValues; + uint32_t signalSemaphoreValueCount; + const uint64_t* pSignalSemaphoreValues; +} VkTimelineSemaphoreSubmitInfo; + +typedef struct VkSemaphoreWaitInfo { + VkStructureType sType; + const void* pNext; + VkSemaphoreWaitFlags flags; + uint32_t semaphoreCount; + const VkSemaphore* pSemaphores; + const uint64_t* pValues; +} VkSemaphoreWaitInfo; + +typedef struct VkSemaphoreSignalInfo { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + uint64_t value; +} VkSemaphoreSignalInfo; + +typedef struct VkPhysicalDeviceBufferDeviceAddressFeatures { + VkStructureType sType; + void* pNext; + VkBool32 bufferDeviceAddress; + VkBool32 bufferDeviceAddressCaptureReplay; + VkBool32 bufferDeviceAddressMultiDevice; +} VkPhysicalDeviceBufferDeviceAddressFeatures; + +typedef struct VkBufferDeviceAddressInfo { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; +} VkBufferDeviceAddressInfo; + +typedef struct VkBufferOpaqueCaptureAddressCreateInfo { + VkStructureType sType; + const void* pNext; + uint64_t opaqueCaptureAddress; +} VkBufferOpaqueCaptureAddressCreateInfo; + +typedef struct VkMemoryOpaqueCaptureAddressAllocateInfo { + VkStructureType sType; + 
const void* pNext; + uint64_t opaqueCaptureAddress; +} VkMemoryOpaqueCaptureAddressAllocateInfo; + +typedef struct VkDeviceMemoryOpaqueCaptureAddressInfo { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; +} VkDeviceMemoryOpaqueCaptureAddressInfo; + +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCount)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkResetQueryPool)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValue)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue); +typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphores)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphore)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo); +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddress)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddress)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCount( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCount( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2( + VkDevice device, + const VkRenderPassCreateInfo2* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfo* pSubpassBeginInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2( + VkCommandBuffer commandBuffer, + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2( + VkCommandBuffer commandBuffer, + const VkSubpassEndInfo* pSubpassEndInfo); + +VKAPI_ATTR void VKAPI_CALL vkResetQueryPool( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t 
queryCount); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValue( + VkDevice device, + VkSemaphore semaphore, + uint64_t* pValue); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphores( + VkDevice device, + const VkSemaphoreWaitInfo* pWaitInfo, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphore( + VkDevice device, + const VkSemaphoreSignalInfo* pSignalInfo); + +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddress( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddress( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddress( + VkDevice device, + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); +#endif + + +#define VK_KHR_surface 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSurfaceKHR) +#define VK_KHR_SURFACE_SPEC_VERSION 25 +#define VK_KHR_SURFACE_EXTENSION_NAME "VK_KHR_surface" + +typedef enum VkColorSpaceKHR { + VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0, + VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104001, + VK_COLOR_SPACE_EXTENDED_SRGB_LINEAR_EXT = 1000104002, + VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT = 1000104003, + VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104004, + VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104005, + VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104006, + VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104007, + VK_COLOR_SPACE_HDR10_ST2084_EXT = 1000104008, + VK_COLOR_SPACE_DOLBYVISION_EXT = 1000104009, + VK_COLOR_SPACE_HDR10_HLG_EXT = 1000104010, + VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011, + VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012, + VK_COLOR_SPACE_PASS_THROUGH_EXT = 1000104013, + VK_COLOR_SPACE_EXTENDED_SRGB_NONLINEAR_EXT = 1000104014, + VK_COLOR_SPACE_DISPLAY_NATIVE_AMD = 1000213000, + VK_COLORSPACE_SRGB_NONLINEAR_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT, + VK_COLOR_SPACE_BEGIN_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_END_RANGE_KHR = VK_COLOR_SPACE_SRGB_NONLINEAR_KHR, + VK_COLOR_SPACE_RANGE_SIZE_KHR = (VK_COLOR_SPACE_SRGB_NONLINEAR_KHR - VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + 1), + VK_COLOR_SPACE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkColorSpaceKHR; + +typedef enum VkPresentModeKHR { + VK_PRESENT_MODE_IMMEDIATE_KHR = 0, + VK_PRESENT_MODE_MAILBOX_KHR = 1, + VK_PRESENT_MODE_FIFO_KHR = 2, + VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3, + VK_PRESENT_MODE_SHARED_DEMAND_REFRESH_KHR = 1000111000, + VK_PRESENT_MODE_SHARED_CONTINUOUS_REFRESH_KHR = 1000111001, + VK_PRESENT_MODE_BEGIN_RANGE_KHR = VK_PRESENT_MODE_IMMEDIATE_KHR, + VK_PRESENT_MODE_END_RANGE_KHR = VK_PRESENT_MODE_FIFO_RELAXED_KHR, + VK_PRESENT_MODE_RANGE_SIZE_KHR = (VK_PRESENT_MODE_FIFO_RELAXED_KHR - VK_PRESENT_MODE_IMMEDIATE_KHR + 1), + VK_PRESENT_MODE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPresentModeKHR; + +typedef enum VkSurfaceTransformFlagBitsKHR { + VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001, + VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002, + VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004, + VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040, + VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080, + VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100, + VK_SURFACE_TRANSFORM_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} 
VkSurfaceTransformFlagBitsKHR; +typedef VkFlags VkSurfaceTransformFlagsKHR; + +typedef enum VkCompositeAlphaFlagBitsKHR { + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002, + VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004, + VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008, + VK_COMPOSITE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkCompositeAlphaFlagBitsKHR; +typedef VkFlags VkCompositeAlphaFlagsKHR; +typedef struct VkSurfaceCapabilitiesKHR { + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; +} VkSurfaceCapabilitiesKHR; + +typedef struct VkSurfaceFormatKHR { + VkFormat format; + VkColorSpaceKHR colorSpace; +} VkSurfaceFormatKHR; + +typedef void (VKAPI_PTR *PFN_vkDestroySurfaceKHR)(VkInstance instance, VkSurfaceKHR surface, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, VkSurfaceKHR surface, VkBool32* pSupported); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pSurfaceFormatCount, VkSurfaceFormatKHR* pSurfaceFormats); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkDestroySurfaceKHR( + VkInstance instance, + VkSurfaceKHR surface, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + VkSurfaceKHR surface, + VkBool32* pSupported); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilitiesKHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormatsKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormatKHR* pSurfaceFormats); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); +#endif + + +#define VK_KHR_swapchain 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkSwapchainKHR) +#define VK_KHR_SWAPCHAIN_SPEC_VERSION 70 +#define VK_KHR_SWAPCHAIN_EXTENSION_NAME "VK_KHR_swapchain" + +typedef enum VkSwapchainCreateFlagBitsKHR { + VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x00000001, + VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x00000002, + VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR = 0x00000004, + VK_SWAPCHAIN_CREATE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkSwapchainCreateFlagBitsKHR; +typedef VkFlags VkSwapchainCreateFlagsKHR; + +typedef enum VkDeviceGroupPresentModeFlagBitsKHR { + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_BIT_KHR = 0x00000001, + 
VK_DEVICE_GROUP_PRESENT_MODE_REMOTE_BIT_KHR = 0x00000002, + VK_DEVICE_GROUP_PRESENT_MODE_SUM_BIT_KHR = 0x00000004, + VK_DEVICE_GROUP_PRESENT_MODE_LOCAL_MULTI_DEVICE_BIT_KHR = 0x00000008, + VK_DEVICE_GROUP_PRESENT_MODE_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDeviceGroupPresentModeFlagBitsKHR; +typedef VkFlags VkDeviceGroupPresentModeFlagsKHR; +typedef struct VkSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainCreateFlagsKHR flags; + VkSurfaceKHR surface; + uint32_t minImageCount; + VkFormat imageFormat; + VkColorSpaceKHR imageColorSpace; + VkExtent2D imageExtent; + uint32_t imageArrayLayers; + VkImageUsageFlags imageUsage; + VkSharingMode imageSharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; + VkSurfaceTransformFlagBitsKHR preTransform; + VkCompositeAlphaFlagBitsKHR compositeAlpha; + VkPresentModeKHR presentMode; + VkBool32 clipped; + VkSwapchainKHR oldSwapchain; +} VkSwapchainCreateInfoKHR; + +typedef struct VkPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreCount; + const VkSemaphore* pWaitSemaphores; + uint32_t swapchainCount; + const VkSwapchainKHR* pSwapchains; + const uint32_t* pImageIndices; + VkResult* pResults; +} VkPresentInfoKHR; + +typedef struct VkImageSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; +} VkImageSwapchainCreateInfoKHR; + +typedef struct VkBindImageMemorySwapchainInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint32_t imageIndex; +} VkBindImageMemorySwapchainInfoKHR; + +typedef struct VkAcquireNextImageInfoKHR { + VkStructureType sType; + const void* pNext; + VkSwapchainKHR swapchain; + uint64_t timeout; + VkSemaphore semaphore; + VkFence fence; + uint32_t deviceMask; +} VkAcquireNextImageInfoKHR; + +typedef struct VkDeviceGroupPresentCapabilitiesKHR { + VkStructureType sType; + const void* pNext; + uint32_t presentMask[VK_MAX_DEVICE_GROUP_SIZE]; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupPresentCapabilitiesKHR; + +typedef struct VkDeviceGroupPresentInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const uint32_t* pDeviceMasks; + VkDeviceGroupPresentModeFlagBitsKHR mode; +} VkDeviceGroupPresentInfoKHR; + +typedef struct VkDeviceGroupSwapchainCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceGroupPresentModeFlagsKHR modes; +} VkDeviceGroupSwapchainCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSwapchainKHR)(VkDevice device, const VkSwapchainCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchain); +typedef void (VKAPI_PTR *PFN_vkDestroySwapchainKHR)(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainImagesKHR)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pSwapchainImageCount, VkImage* pSwapchainImages); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImageKHR)(VkDevice device, VkSwapchainKHR swapchain, uint64_t timeout, VkSemaphore semaphore, VkFence fence, uint32_t* pImageIndex); +typedef VkResult (VKAPI_PTR *PFN_vkQueuePresentKHR)(VkQueue queue, const VkPresentInfoKHR* pPresentInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupPresentCapabilitiesKHR)(VkDevice device, VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModesKHR)(VkDevice device, VkSurfaceKHR surface, 
VkDeviceGroupPresentModeFlagsKHR* pModes); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDevicePresentRectanglesKHR)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, uint32_t* pRectCount, VkRect2D* pRects); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireNextImage2KHR)(VkDevice device, const VkAcquireNextImageInfoKHR* pAcquireInfo, uint32_t* pImageIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSwapchainKHR( + VkDevice device, + const VkSwapchainCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchain); + +VKAPI_ATTR void VKAPI_CALL vkDestroySwapchainKHR( + VkDevice device, + VkSwapchainKHR swapchain, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainImagesKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pSwapchainImageCount, + VkImage* pSwapchainImages); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImageKHR( + VkDevice device, + VkSwapchainKHR swapchain, + uint64_t timeout, + VkSemaphore semaphore, + VkFence fence, + uint32_t* pImageIndex); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueuePresentKHR( + VkQueue queue, + const VkPresentInfoKHR* pPresentInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupPresentCapabilitiesKHR( + VkDevice device, + VkDeviceGroupPresentCapabilitiesKHR* pDeviceGroupPresentCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModesKHR( + VkDevice device, + VkSurfaceKHR surface, + VkDeviceGroupPresentModeFlagsKHR* pModes); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDevicePresentRectanglesKHR( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + uint32_t* pRectCount, + VkRect2D* pRects); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireNextImage2KHR( + VkDevice device, + const VkAcquireNextImageInfoKHR* pAcquireInfo, + uint32_t* pImageIndex); +#endif + + +#define VK_KHR_display 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayKHR) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDisplayModeKHR) +#define VK_KHR_DISPLAY_SPEC_VERSION 23 +#define VK_KHR_DISPLAY_EXTENSION_NAME "VK_KHR_display" + +typedef enum VkDisplayPlaneAlphaFlagBitsKHR { + VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008, + VK_DISPLAY_PLANE_ALPHA_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkDisplayPlaneAlphaFlagBitsKHR; +typedef VkFlags VkDisplayPlaneAlphaFlagsKHR; +typedef VkFlags VkDisplayModeCreateFlagsKHR; +typedef VkFlags VkDisplaySurfaceCreateFlagsKHR; +typedef struct VkDisplayPropertiesKHR { + VkDisplayKHR display; + const char* displayName; + VkExtent2D physicalDimensions; + VkExtent2D physicalResolution; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkBool32 planeReorderPossible; + VkBool32 persistentContent; +} VkDisplayPropertiesKHR; + +typedef struct VkDisplayModeParametersKHR { + VkExtent2D visibleRegion; + uint32_t refreshRate; +} VkDisplayModeParametersKHR; + +typedef struct VkDisplayModePropertiesKHR { + VkDisplayModeKHR displayMode; + VkDisplayModeParametersKHR parameters; +} VkDisplayModePropertiesKHR; + +typedef struct VkDisplayModeCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeCreateFlagsKHR flags; + VkDisplayModeParametersKHR parameters; +} VkDisplayModeCreateInfoKHR; + +typedef struct VkDisplayPlaneCapabilitiesKHR { + VkDisplayPlaneAlphaFlagsKHR supportedAlpha; + VkOffset2D minSrcPosition; + VkOffset2D maxSrcPosition; + 
VkExtent2D minSrcExtent; + VkExtent2D maxSrcExtent; + VkOffset2D minDstPosition; + VkOffset2D maxDstPosition; + VkExtent2D minDstExtent; + VkExtent2D maxDstExtent; +} VkDisplayPlaneCapabilitiesKHR; + +typedef struct VkDisplayPlanePropertiesKHR { + VkDisplayKHR currentDisplay; + uint32_t currentStackIndex; +} VkDisplayPlanePropertiesKHR; + +typedef struct VkDisplaySurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkDisplaySurfaceCreateFlagsKHR flags; + VkDisplayModeKHR displayMode; + uint32_t planeIndex; + uint32_t planeStackIndex; + VkSurfaceTransformFlagBitsKHR transform; + float globalAlpha; + VkDisplayPlaneAlphaFlagBitsKHR alphaMode; + VkExtent2D imageExtent; +} VkDisplaySurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlanePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneSupportedDisplaysKHR)(VkPhysicalDevice physicalDevice, uint32_t planeIndex, uint32_t* pDisplayCount, VkDisplayKHR* pDisplays); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModePropertiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayModeKHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, const VkDisplayModeCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDisplayModeKHR* pMode); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilitiesKHR)(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR* pCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDisplayPlaneSurfaceKHR)(VkInstance instance, const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlanePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneSupportedDisplaysKHR( + VkPhysicalDevice physicalDevice, + uint32_t planeIndex, + uint32_t* pDisplayCount, + VkDisplayKHR* pDisplays); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModePropertiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayModeKHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + const VkDisplayModeCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDisplayModeKHR* pMode); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilitiesKHR( + VkPhysicalDevice physicalDevice, + VkDisplayModeKHR mode, + uint32_t planeIndex, + VkDisplayPlaneCapabilitiesKHR* pCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDisplayPlaneSurfaceKHR( + VkInstance instance, + const VkDisplaySurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_KHR_display_swapchain 1 +#define 
VK_KHR_DISPLAY_SWAPCHAIN_SPEC_VERSION 10 +#define VK_KHR_DISPLAY_SWAPCHAIN_EXTENSION_NAME "VK_KHR_display_swapchain" +typedef struct VkDisplayPresentInfoKHR { + VkStructureType sType; + const void* pNext; + VkRect2D srcRect; + VkRect2D dstRect; + VkBool32 persistent; +} VkDisplayPresentInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSharedSwapchainsKHR)(VkDevice device, uint32_t swapchainCount, const VkSwapchainCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkSwapchainKHR* pSwapchains); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSharedSwapchainsKHR( + VkDevice device, + uint32_t swapchainCount, + const VkSwapchainCreateInfoKHR* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkSwapchainKHR* pSwapchains); +#endif + + +#define VK_KHR_sampler_mirror_clamp_to_edge 1 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_SPEC_VERSION 3 +#define VK_KHR_SAMPLER_MIRROR_CLAMP_TO_EDGE_EXTENSION_NAME "VK_KHR_sampler_mirror_clamp_to_edge" + + +#define VK_KHR_multiview 1 +#define VK_KHR_MULTIVIEW_SPEC_VERSION 1 +#define VK_KHR_MULTIVIEW_EXTENSION_NAME "VK_KHR_multiview" +typedef VkRenderPassMultiviewCreateInfo VkRenderPassMultiviewCreateInfoKHR; + +typedef VkPhysicalDeviceMultiviewFeatures VkPhysicalDeviceMultiviewFeaturesKHR; + +typedef VkPhysicalDeviceMultiviewProperties VkPhysicalDeviceMultiviewPropertiesKHR; + + + +#define VK_KHR_get_physical_device_properties2 1 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_SPEC_VERSION 2 +#define VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_physical_device_properties2" +typedef VkPhysicalDeviceFeatures2 VkPhysicalDeviceFeatures2KHR; + +typedef VkPhysicalDeviceProperties2 VkPhysicalDeviceProperties2KHR; + +typedef VkFormatProperties2 VkFormatProperties2KHR; + +typedef VkImageFormatProperties2 VkImageFormatProperties2KHR; + +typedef VkPhysicalDeviceImageFormatInfo2 VkPhysicalDeviceImageFormatInfo2KHR; + +typedef VkQueueFamilyProperties2 VkQueueFamilyProperties2KHR; + +typedef VkPhysicalDeviceMemoryProperties2 VkPhysicalDeviceMemoryProperties2KHR; + +typedef VkSparseImageFormatProperties2 VkSparseImageFormatProperties2KHR; + +typedef VkPhysicalDeviceSparseImageFormatInfo2 VkPhysicalDeviceSparseImageFormatInfo2KHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFeatures2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceFeatures2* pFeatures); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceProperties2* pProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceFormatProperties2KHR)(VkPhysicalDevice physicalDevice, VkFormat format, VkFormatProperties2* pFormatProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, VkImageFormatProperties2* pImageFormatProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pQueueFamilyPropertyCount, VkQueueFamilyProperties2* pQueueFamilyProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMemoryProperties2KHR)(VkPhysicalDevice physicalDevice, VkPhysicalDeviceMemoryProperties2* pMemoryProperties); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, uint32_t* pPropertyCount, VkSparseImageFormatProperties2* pProperties); + +#ifndef VK_NO_PROTOTYPES 
+VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFeatures2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceFeatures2* pFeatures); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceProperties2* pProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkFormatProperties2* pFormatProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo, + VkImageFormatProperties2* pImageFormatProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pQueueFamilyPropertyCount, + VkQueueFamilyProperties2* pQueueFamilyProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMemoryProperties2KHR( + VkPhysicalDevice physicalDevice, + VkPhysicalDeviceMemoryProperties2* pMemoryProperties); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSparseImageFormatInfo2* pFormatInfo, + uint32_t* pPropertyCount, + VkSparseImageFormatProperties2* pProperties); +#endif + + +#define VK_KHR_device_group 1 +#define VK_KHR_DEVICE_GROUP_SPEC_VERSION 4 +#define VK_KHR_DEVICE_GROUP_EXTENSION_NAME "VK_KHR_device_group" +typedef VkPeerMemoryFeatureFlags VkPeerMemoryFeatureFlagsKHR; + +typedef VkPeerMemoryFeatureFlagBits VkPeerMemoryFeatureFlagBitsKHR; + +typedef VkMemoryAllocateFlags VkMemoryAllocateFlagsKHR; + +typedef VkMemoryAllocateFlagBits VkMemoryAllocateFlagBitsKHR; + +typedef VkMemoryAllocateFlagsInfo VkMemoryAllocateFlagsInfoKHR; + +typedef VkDeviceGroupRenderPassBeginInfo VkDeviceGroupRenderPassBeginInfoKHR; + +typedef VkDeviceGroupCommandBufferBeginInfo VkDeviceGroupCommandBufferBeginInfoKHR; + +typedef VkDeviceGroupSubmitInfo VkDeviceGroupSubmitInfoKHR; + +typedef VkDeviceGroupBindSparseInfo VkDeviceGroupBindSparseInfoKHR; + +typedef VkBindBufferMemoryDeviceGroupInfo VkBindBufferMemoryDeviceGroupInfoKHR; + +typedef VkBindImageMemoryDeviceGroupInfo VkBindImageMemoryDeviceGroupInfoKHR; + +typedef void (VKAPI_PTR *PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR)(VkDevice device, uint32_t heapIndex, uint32_t localDeviceIndex, uint32_t remoteDeviceIndex, VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); +typedef void (VKAPI_PTR *PFN_vkCmdSetDeviceMaskKHR)(VkCommandBuffer commandBuffer, uint32_t deviceMask); +typedef void (VKAPI_PTR *PFN_vkCmdDispatchBaseKHR)(VkCommandBuffer commandBuffer, uint32_t baseGroupX, uint32_t baseGroupY, uint32_t baseGroupZ, uint32_t groupCountX, uint32_t groupCountY, uint32_t groupCountZ); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDeviceGroupPeerMemoryFeaturesKHR( + VkDevice device, + uint32_t heapIndex, + uint32_t localDeviceIndex, + uint32_t remoteDeviceIndex, + VkPeerMemoryFeatureFlags* pPeerMemoryFeatures); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetDeviceMaskKHR( + VkCommandBuffer commandBuffer, + uint32_t deviceMask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDispatchBaseKHR( + VkCommandBuffer commandBuffer, + uint32_t baseGroupX, + uint32_t baseGroupY, + uint32_t baseGroupZ, + uint32_t groupCountX, + uint32_t groupCountY, + uint32_t groupCountZ); +#endif + + +#define VK_KHR_shader_draw_parameters 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_SPEC_VERSION 1 +#define VK_KHR_SHADER_DRAW_PARAMETERS_EXTENSION_NAME "VK_KHR_shader_draw_parameters" + + 
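[Editor's note — usage sketch only; not part of the vendored Khronos header or of this patch's hunks.] The `vkGetPhysicalDeviceFeatures2` family declared above (core in Vulkan 1.1, mirrored by the `...2KHR` entry points of VK_KHR_get_physical_device_properties2) is driven entirely through the `pNext` chain. A minimal C sketch, assuming `physicalDevice` is a valid VkPhysicalDevice the caller already obtained from vkEnumeratePhysicalDevices:

/* Chain the Vulkan 1.2 feature struct declared earlier in this hunk
 * behind VkPhysicalDeviceFeatures2, then gate optional functionality
 * on what the implementation reports. */
VkPhysicalDeviceVulkan12Features features12 = {0};
features12.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES;

VkPhysicalDeviceFeatures2 features2 = {0};
features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
features2.pNext = &features12;          /* extend the query via pNext */

vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);

if (features12.timelineSemaphore) {
    /* VK_SEMAPHORE_TYPE_TIMELINE semaphores may be created */
}

Every `...2` query in this header follows the same shape: the caller fills in `sType`, links any extension structs through `pNext`, and the implementation populates the whole chain in a single call.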
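[Editor's note — a second sketch under the same caveats.] When `timelineSemaphore` is supported, the `VkSemaphoreWaitInfo` struct and `vkWaitSemaphores` entry point declared earlier in this hunk permit a host-side wait on a 64-bit counter value. The handles `device` and `timelineSem` are hypothetical; `timelineSem` is assumed to have been created with a `VkSemaphoreTypeCreateInfo` (semaphoreType = VK_SEMAPHORE_TYPE_TIMELINE) chained into its create info:

/* Block until the semaphore's counter reaches waitValue. The final
 * argument is a timeout in nanoseconds; UINT64_MAX waits indefinitely. */
uint64_t waitValue = 1;
VkSemaphoreWaitInfo waitInfo = {0};
waitInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO;
waitInfo.flags = 0;                     /* 0 = wait for ALL listed semaphores */
waitInfo.semaphoreCount = 1;
waitInfo.pSemaphores = &timelineSem;
waitInfo.pValues = &waitValue;
VkResult res = vkWaitSemaphores(device, &waitInfo, UINT64_MAX);

(Setting VK_SEMAPHORE_WAIT_ANY_BIT in `flags` instead returns as soon as any one of the listed semaphores reaches its target value.)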
+#define VK_KHR_maintenance1 1 +#define VK_KHR_MAINTENANCE1_SPEC_VERSION 2 +#define VK_KHR_MAINTENANCE1_EXTENSION_NAME "VK_KHR_maintenance1" +typedef VkCommandPoolTrimFlags VkCommandPoolTrimFlagsKHR; + +typedef void (VKAPI_PTR *PFN_vkTrimCommandPoolKHR)(VkDevice device, VkCommandPool commandPool, VkCommandPoolTrimFlags flags); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkTrimCommandPoolKHR( + VkDevice device, + VkCommandPool commandPool, + VkCommandPoolTrimFlags flags); +#endif + + +#define VK_KHR_device_group_creation 1 +#define VK_KHR_DEVICE_GROUP_CREATION_SPEC_VERSION 1 +#define VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME "VK_KHR_device_group_creation" +#define VK_MAX_DEVICE_GROUP_SIZE_KHR VK_MAX_DEVICE_GROUP_SIZE +typedef VkPhysicalDeviceGroupProperties VkPhysicalDeviceGroupPropertiesKHR; + +typedef VkDeviceGroupDeviceCreateInfo VkDeviceGroupDeviceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceGroupsKHR)(VkInstance instance, uint32_t* pPhysicalDeviceGroupCount, VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceGroupsKHR( + VkInstance instance, + uint32_t* pPhysicalDeviceGroupCount, + VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties); +#endif + + +#define VK_KHR_external_memory_capabilities 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_memory_capabilities" +#define VK_LUID_SIZE_KHR VK_LUID_SIZE +typedef VkExternalMemoryHandleTypeFlags VkExternalMemoryHandleTypeFlagsKHR; + +typedef VkExternalMemoryHandleTypeFlagBits VkExternalMemoryHandleTypeFlagBitsKHR; + +typedef VkExternalMemoryFeatureFlags VkExternalMemoryFeatureFlagsKHR; + +typedef VkExternalMemoryFeatureFlagBits VkExternalMemoryFeatureFlagBitsKHR; + +typedef VkExternalMemoryProperties VkExternalMemoryPropertiesKHR; + +typedef VkPhysicalDeviceExternalImageFormatInfo VkPhysicalDeviceExternalImageFormatInfoKHR; + +typedef VkExternalImageFormatProperties VkExternalImageFormatPropertiesKHR; + +typedef VkPhysicalDeviceExternalBufferInfo VkPhysicalDeviceExternalBufferInfoKHR; + +typedef VkExternalBufferProperties VkExternalBufferPropertiesKHR; + +typedef VkPhysicalDeviceIDProperties VkPhysicalDeviceIDPropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, VkExternalBufferProperties* pExternalBufferProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalBufferPropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalBufferInfo* pExternalBufferInfo, + VkExternalBufferProperties* pExternalBufferProperties); +#endif + + +#define VK_KHR_external_memory 1 +#define VK_KHR_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME "VK_KHR_external_memory" +#define VK_QUEUE_FAMILY_EXTERNAL_KHR VK_QUEUE_FAMILY_EXTERNAL +typedef VkExternalMemoryImageCreateInfo VkExternalMemoryImageCreateInfoKHR; + +typedef VkExternalMemoryBufferCreateInfo VkExternalMemoryBufferCreateInfoKHR; + +typedef VkExportMemoryAllocateInfo VkExportMemoryAllocateInfoKHR; + + + +#define VK_KHR_external_memory_fd 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME "VK_KHR_external_memory_fd" +typedef struct VkImportMemoryFdInfoKHR { + VkStructureType sType; + const void* 
pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + int fd; +} VkImportMemoryFdInfoKHR; + +typedef struct VkMemoryFdPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryFdPropertiesKHR; + +typedef struct VkMemoryGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdKHR)(VkDevice device, const VkMemoryGetFdInfoKHR* pGetFdInfo, int* pFd); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryFdPropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, int fd, VkMemoryFdPropertiesKHR* pMemoryFdProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdKHR( + VkDevice device, + const VkMemoryGetFdInfoKHR* pGetFdInfo, + int* pFd); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryFdPropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + int fd, + VkMemoryFdPropertiesKHR* pMemoryFdProperties); +#endif + + +#define VK_KHR_external_semaphore_capabilities 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_semaphore_capabilities" +typedef VkExternalSemaphoreHandleTypeFlags VkExternalSemaphoreHandleTypeFlagsKHR; + +typedef VkExternalSemaphoreHandleTypeFlagBits VkExternalSemaphoreHandleTypeFlagBitsKHR; + +typedef VkExternalSemaphoreFeatureFlags VkExternalSemaphoreFeatureFlagsKHR; + +typedef VkExternalSemaphoreFeatureFlagBits VkExternalSemaphoreFeatureFlagBitsKHR; + +typedef VkPhysicalDeviceExternalSemaphoreInfo VkPhysicalDeviceExternalSemaphoreInfoKHR; + +typedef VkExternalSemaphoreProperties VkExternalSemaphorePropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, VkExternalSemaphoreProperties* pExternalSemaphoreProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalSemaphorePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalSemaphoreInfo* pExternalSemaphoreInfo, + VkExternalSemaphoreProperties* pExternalSemaphoreProperties); +#endif + + +#define VK_KHR_external_semaphore 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_EXTENSION_NAME "VK_KHR_external_semaphore" +typedef VkSemaphoreImportFlags VkSemaphoreImportFlagsKHR; + +typedef VkSemaphoreImportFlagBits VkSemaphoreImportFlagBitsKHR; + +typedef VkExportSemaphoreCreateInfo VkExportSemaphoreCreateInfoKHR; + + + +#define VK_KHR_external_semaphore_fd 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME "VK_KHR_external_semaphore_fd" +typedef struct VkImportSemaphoreFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + int fd; +} VkImportSemaphoreFdInfoKHR; + +typedef struct VkSemaphoreGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreFdKHR)(VkDevice device, const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreFdKHR)(VkDevice device, const VkSemaphoreGetFdInfoKHR* 
pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreFdKHR( + VkDevice device, + const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreFdKHR( + VkDevice device, + const VkSemaphoreGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + + +#define VK_KHR_push_descriptor 1 +#define VK_KHR_PUSH_DESCRIPTOR_SPEC_VERSION 2 +#define VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME "VK_KHR_push_descriptor" +typedef struct VkPhysicalDevicePushDescriptorPropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t maxPushDescriptors; +} VkPhysicalDevicePushDescriptorPropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetKHR)(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint, VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites); +typedef void (VKAPI_PTR *PFN_vkCmdPushDescriptorSetWithTemplateKHR)(VkCommandBuffer commandBuffer, VkDescriptorUpdateTemplate descriptorUpdateTemplate, VkPipelineLayout layout, uint32_t set, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetKHR( + VkCommandBuffer commandBuffer, + VkPipelineBindPoint pipelineBindPoint, + VkPipelineLayout layout, + uint32_t set, + uint32_t descriptorWriteCount, + const VkWriteDescriptorSet* pDescriptorWrites); + +VKAPI_ATTR void VKAPI_CALL vkCmdPushDescriptorSetWithTemplateKHR( + VkCommandBuffer commandBuffer, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + VkPipelineLayout layout, + uint32_t set, + const void* pData); +#endif + + +#define VK_KHR_shader_float16_int8 1 +#define VK_KHR_SHADER_FLOAT16_INT8_SPEC_VERSION 1 +#define VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME "VK_KHR_shader_float16_int8" +typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceShaderFloat16Int8FeaturesKHR; + +typedef VkPhysicalDeviceShaderFloat16Int8Features VkPhysicalDeviceFloat16Int8FeaturesKHR; + + + +#define VK_KHR_16bit_storage 1 +#define VK_KHR_16BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_16BIT_STORAGE_EXTENSION_NAME "VK_KHR_16bit_storage" +typedef VkPhysicalDevice16BitStorageFeatures VkPhysicalDevice16BitStorageFeaturesKHR; + + + +#define VK_KHR_incremental_present 1 +#define VK_KHR_INCREMENTAL_PRESENT_SPEC_VERSION 1 +#define VK_KHR_INCREMENTAL_PRESENT_EXTENSION_NAME "VK_KHR_incremental_present" +typedef struct VkRectLayerKHR { + VkOffset2D offset; + VkExtent2D extent; + uint32_t layer; +} VkRectLayerKHR; + +typedef struct VkPresentRegionKHR { + uint32_t rectangleCount; + const VkRectLayerKHR* pRectangles; +} VkPresentRegionKHR; + +typedef struct VkPresentRegionsKHR { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentRegionKHR* pRegions; +} VkPresentRegionsKHR; + + + +#define VK_KHR_descriptor_update_template 1 +typedef VkDescriptorUpdateTemplate VkDescriptorUpdateTemplateKHR; + +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_SPEC_VERSION 1 +#define VK_KHR_DESCRIPTOR_UPDATE_TEMPLATE_EXTENSION_NAME "VK_KHR_descriptor_update_template" +typedef VkDescriptorUpdateTemplateType VkDescriptorUpdateTemplateTypeKHR; + +typedef VkDescriptorUpdateTemplateCreateFlags VkDescriptorUpdateTemplateCreateFlagsKHR; + +typedef VkDescriptorUpdateTemplateEntry VkDescriptorUpdateTemplateEntryKHR; + +typedef VkDescriptorUpdateTemplateCreateInfo VkDescriptorUpdateTemplateCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDescriptorUpdateTemplateKHR)(VkDevice device, const 
VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); +typedef void (VKAPI_PTR *PFN_vkDestroyDescriptorUpdateTemplateKHR)(VkDevice device, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkUpdateDescriptorSetWithTemplateKHR)(VkDevice device, VkDescriptorSet descriptorSet, VkDescriptorUpdateTemplate descriptorUpdateTemplate, const void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDescriptorUpdateTemplateKHR( + VkDevice device, + const VkDescriptorUpdateTemplateCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDescriptorUpdateTemplate* pDescriptorUpdateTemplate); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDescriptorUpdateTemplateKHR( + VkDevice device, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkUpdateDescriptorSetWithTemplateKHR( + VkDevice device, + VkDescriptorSet descriptorSet, + VkDescriptorUpdateTemplate descriptorUpdateTemplate, + const void* pData); +#endif + + +#define VK_KHR_imageless_framebuffer 1 +#define VK_KHR_IMAGELESS_FRAMEBUFFER_SPEC_VERSION 1 +#define VK_KHR_IMAGELESS_FRAMEBUFFER_EXTENSION_NAME "VK_KHR_imageless_framebuffer" +typedef VkPhysicalDeviceImagelessFramebufferFeatures VkPhysicalDeviceImagelessFramebufferFeaturesKHR; + +typedef VkFramebufferAttachmentsCreateInfo VkFramebufferAttachmentsCreateInfoKHR; + +typedef VkFramebufferAttachmentImageInfo VkFramebufferAttachmentImageInfoKHR; + +typedef VkRenderPassAttachmentBeginInfo VkRenderPassAttachmentBeginInfoKHR; + + + +#define VK_KHR_create_renderpass2 1 +#define VK_KHR_CREATE_RENDERPASS_2_SPEC_VERSION 1 +#define VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME "VK_KHR_create_renderpass2" +typedef VkRenderPassCreateInfo2 VkRenderPassCreateInfo2KHR; + +typedef VkAttachmentDescription2 VkAttachmentDescription2KHR; + +typedef VkAttachmentReference2 VkAttachmentReference2KHR; + +typedef VkSubpassDescription2 VkSubpassDescription2KHR; + +typedef VkSubpassDependency2 VkSubpassDependency2KHR; + +typedef VkSubpassBeginInfo VkSubpassBeginInfoKHR; + +typedef VkSubpassEndInfo VkSubpassEndInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateRenderPass2KHR)(VkDevice device, const VkRenderPassCreateInfo2* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkRenderPass* pRenderPass); +typedef void (VKAPI_PTR *PFN_vkCmdBeginRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkRenderPassBeginInfo* pRenderPassBegin, const VkSubpassBeginInfo* pSubpassBeginInfo); +typedef void (VKAPI_PTR *PFN_vkCmdNextSubpass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassBeginInfo* pSubpassBeginInfo, const VkSubpassEndInfo* pSubpassEndInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndRenderPass2KHR)(VkCommandBuffer commandBuffer, const VkSubpassEndInfo* pSubpassEndInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRenderPass2KHR( + VkDevice device, + const VkRenderPassCreateInfo2* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkRenderPass* pRenderPass); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkRenderPassBeginInfo* pRenderPassBegin, + const VkSubpassBeginInfo* pSubpassBeginInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdNextSubpass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassBeginInfo* pSubpassBeginInfo, + const VkSubpassEndInfo* pSubpassEndInfo); + 
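+/*
+ * A minimal usage sketch for the VK_KHR_create_renderpass2 entry points
+ * declared in this block, assuming the extension was enabled at device
+ * creation; `device` and the create info below are illustrative
+ * placeholders, not part of this header (a real createInfo must still
+ * describe at least one subpass, omitted here for brevity).
+ *
+ *     PFN_vkCreateRenderPass2KHR pfnCreateRenderPass2 =
+ *         (PFN_vkCreateRenderPass2KHR)vkGetDeviceProcAddr(device, "vkCreateRenderPass2KHR");
+ *     VkRenderPassCreateInfo2 createInfo = { VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2 };
+ *     VkRenderPass renderPass;
+ *     if (pfnCreateRenderPass2 != NULL)
+ *         pfnCreateRenderPass2(device, &createInfo, NULL, &renderPass);
+ */
+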
+VKAPI_ATTR void VKAPI_CALL vkCmdEndRenderPass2KHR( + VkCommandBuffer commandBuffer, + const VkSubpassEndInfo* pSubpassEndInfo); +#endif + + +#define VK_KHR_shared_presentable_image 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_SPEC_VERSION 1 +#define VK_KHR_SHARED_PRESENTABLE_IMAGE_EXTENSION_NAME "VK_KHR_shared_presentable_image" +typedef struct VkSharedPresentSurfaceCapabilitiesKHR { + VkStructureType sType; + void* pNext; + VkImageUsageFlags sharedPresentSupportedUsageFlags; +} VkSharedPresentSurfaceCapabilitiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainStatusKHR)(VkDevice device, VkSwapchainKHR swapchain); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainStatusKHR( + VkDevice device, + VkSwapchainKHR swapchain); +#endif + + +#define VK_KHR_external_fence_capabilities 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_CAPABILITIES_EXTENSION_NAME "VK_KHR_external_fence_capabilities" +typedef VkExternalFenceHandleTypeFlags VkExternalFenceHandleTypeFlagsKHR; + +typedef VkExternalFenceHandleTypeFlagBits VkExternalFenceHandleTypeFlagBitsKHR; + +typedef VkExternalFenceFeatureFlags VkExternalFenceFeatureFlagsKHR; + +typedef VkExternalFenceFeatureFlagBits VkExternalFenceFeatureFlagBitsKHR; + +typedef VkPhysicalDeviceExternalFenceInfo VkPhysicalDeviceExternalFenceInfoKHR; + +typedef VkExternalFenceProperties VkExternalFencePropertiesKHR; + +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, VkExternalFenceProperties* pExternalFenceProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceExternalFencePropertiesKHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceExternalFenceInfo* pExternalFenceInfo, + VkExternalFenceProperties* pExternalFenceProperties); +#endif + + +#define VK_KHR_external_fence 1 +#define VK_KHR_EXTERNAL_FENCE_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_EXTENSION_NAME "VK_KHR_external_fence" +typedef VkFenceImportFlags VkFenceImportFlagsKHR; + +typedef VkFenceImportFlagBits VkFenceImportFlagBitsKHR; + +typedef VkExportFenceCreateInfo VkExportFenceCreateInfoKHR; + + + +#define VK_KHR_external_fence_fd 1 +#define VK_KHR_EXTERNAL_FENCE_FD_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_FD_EXTENSION_NAME "VK_KHR_external_fence_fd" +typedef struct VkImportFenceFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + int fd; +} VkImportFenceFdInfoKHR; + +typedef struct VkFenceGetFdInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetFdInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceFdKHR)(VkDevice device, const VkImportFenceFdInfoKHR* pImportFenceFdInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceFdKHR)(VkDevice device, const VkFenceGetFdInfoKHR* pGetFdInfo, int* pFd); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceFdKHR( + VkDevice device, + const VkImportFenceFdInfoKHR* pImportFenceFdInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceFdKHR( + VkDevice device, + const VkFenceGetFdInfoKHR* pGetFdInfo, + int* pFd); +#endif + + +#define VK_KHR_performance_query 1 +#define VK_KHR_PERFORMANCE_QUERY_SPEC_VERSION 1 +#define VK_KHR_PERFORMANCE_QUERY_EXTENSION_NAME "VK_KHR_performance_query" + +typedef enum VkPerformanceCounterUnitKHR { + 
VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR = 0, + VK_PERFORMANCE_COUNTER_UNIT_PERCENTAGE_KHR = 1, + VK_PERFORMANCE_COUNTER_UNIT_NANOSECONDS_KHR = 2, + VK_PERFORMANCE_COUNTER_UNIT_BYTES_KHR = 3, + VK_PERFORMANCE_COUNTER_UNIT_BYTES_PER_SECOND_KHR = 4, + VK_PERFORMANCE_COUNTER_UNIT_KELVIN_KHR = 5, + VK_PERFORMANCE_COUNTER_UNIT_WATTS_KHR = 6, + VK_PERFORMANCE_COUNTER_UNIT_VOLTS_KHR = 7, + VK_PERFORMANCE_COUNTER_UNIT_AMPS_KHR = 8, + VK_PERFORMANCE_COUNTER_UNIT_HERTZ_KHR = 9, + VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR = 10, + VK_PERFORMANCE_COUNTER_UNIT_BEGIN_RANGE_KHR = VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR, + VK_PERFORMANCE_COUNTER_UNIT_END_RANGE_KHR = VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR, + VK_PERFORMANCE_COUNTER_UNIT_RANGE_SIZE_KHR = (VK_PERFORMANCE_COUNTER_UNIT_CYCLES_KHR - VK_PERFORMANCE_COUNTER_UNIT_GENERIC_KHR + 1), + VK_PERFORMANCE_COUNTER_UNIT_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterUnitKHR; + +typedef enum VkPerformanceCounterScopeKHR { + VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR = 0, + VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR = 1, + VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR = 2, + VK_QUERY_SCOPE_COMMAND_BUFFER_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR, + VK_QUERY_SCOPE_RENDER_PASS_KHR = VK_PERFORMANCE_COUNTER_SCOPE_RENDER_PASS_KHR, + VK_QUERY_SCOPE_COMMAND_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR, + VK_PERFORMANCE_COUNTER_SCOPE_BEGIN_RANGE_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR, + VK_PERFORMANCE_COUNTER_SCOPE_END_RANGE_KHR = VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR, + VK_PERFORMANCE_COUNTER_SCOPE_RANGE_SIZE_KHR = (VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_KHR - VK_PERFORMANCE_COUNTER_SCOPE_COMMAND_BUFFER_KHR + 1), + VK_PERFORMANCE_COUNTER_SCOPE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterScopeKHR; + +typedef enum VkPerformanceCounterStorageKHR { + VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR = 0, + VK_PERFORMANCE_COUNTER_STORAGE_INT64_KHR = 1, + VK_PERFORMANCE_COUNTER_STORAGE_UINT32_KHR = 2, + VK_PERFORMANCE_COUNTER_STORAGE_UINT64_KHR = 3, + VK_PERFORMANCE_COUNTER_STORAGE_FLOAT32_KHR = 4, + VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR = 5, + VK_PERFORMANCE_COUNTER_STORAGE_BEGIN_RANGE_KHR = VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR, + VK_PERFORMANCE_COUNTER_STORAGE_END_RANGE_KHR = VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR, + VK_PERFORMANCE_COUNTER_STORAGE_RANGE_SIZE_KHR = (VK_PERFORMANCE_COUNTER_STORAGE_FLOAT64_KHR - VK_PERFORMANCE_COUNTER_STORAGE_INT32_KHR + 1), + VK_PERFORMANCE_COUNTER_STORAGE_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterStorageKHR; + +typedef enum VkPerformanceCounterDescriptionFlagBitsKHR { + VK_PERFORMANCE_COUNTER_DESCRIPTION_PERFORMANCE_IMPACTING_KHR = 0x00000001, + VK_PERFORMANCE_COUNTER_DESCRIPTION_CONCURRENTLY_IMPACTED_KHR = 0x00000002, + VK_PERFORMANCE_COUNTER_DESCRIPTION_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPerformanceCounterDescriptionFlagBitsKHR; +typedef VkFlags VkPerformanceCounterDescriptionFlagsKHR; + +typedef enum VkAcquireProfilingLockFlagBitsKHR { + VK_ACQUIRE_PROFILING_LOCK_FLAG_BITS_MAX_ENUM_KHR = 0x7FFFFFFF +} VkAcquireProfilingLockFlagBitsKHR; +typedef VkFlags VkAcquireProfilingLockFlagsKHR; +typedef struct VkPhysicalDevicePerformanceQueryFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 performanceCounterQueryPools; + VkBool32 performanceCounterMultipleQueryPools; +} VkPhysicalDevicePerformanceQueryFeaturesKHR; + +typedef struct VkPhysicalDevicePerformanceQueryPropertiesKHR { + VkStructureType sType; + void* pNext; + VkBool32 allowCommandBufferQueryCopies; +} 
VkPhysicalDevicePerformanceQueryPropertiesKHR; + +typedef struct VkPerformanceCounterKHR { + VkStructureType sType; + const void* pNext; + VkPerformanceCounterUnitKHR unit; + VkPerformanceCounterScopeKHR scope; + VkPerformanceCounterStorageKHR storage; + uint8_t uuid[VK_UUID_SIZE]; +} VkPerformanceCounterKHR; + +typedef struct VkPerformanceCounterDescriptionKHR { + VkStructureType sType; + const void* pNext; + VkPerformanceCounterDescriptionFlagsKHR flags; + char name[VK_MAX_DESCRIPTION_SIZE]; + char category[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; +} VkPerformanceCounterDescriptionKHR; + +typedef struct VkQueryPoolPerformanceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t queueFamilyIndex; + uint32_t counterIndexCount; + const uint32_t* pCounterIndices; +} VkQueryPoolPerformanceCreateInfoKHR; + +typedef union VkPerformanceCounterResultKHR { + int32_t int32; + int64_t int64; + uint32_t uint32; + uint64_t uint64; + float float32; + double float64; +} VkPerformanceCounterResultKHR; + +typedef struct VkAcquireProfilingLockInfoKHR { + VkStructureType sType; + const void* pNext; + VkAcquireProfilingLockFlagsKHR flags; + uint64_t timeout; +} VkAcquireProfilingLockInfoKHR; + +typedef struct VkPerformanceQuerySubmitInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t counterPassIndex; +} VkPerformanceQuerySubmitInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, uint32_t* pCounterCount, VkPerformanceCounterKHR* pCounters, VkPerformanceCounterDescriptionKHR* pCounterDescriptions); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR)(VkPhysicalDevice physicalDevice, const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, uint32_t* pNumPasses); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireProfilingLockKHR)(VkDevice device, const VkAcquireProfilingLockInfoKHR* pInfo); +typedef void (VKAPI_PTR *PFN_vkReleaseProfilingLockKHR)(VkDevice device); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + uint32_t* pCounterCount, + VkPerformanceCounterKHR* pCounters, + VkPerformanceCounterDescriptionKHR* pCounterDescriptions); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR( + VkPhysicalDevice physicalDevice, + const VkQueryPoolPerformanceCreateInfoKHR* pPerformanceQueryCreateInfo, + uint32_t* pNumPasses); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquireProfilingLockKHR( + VkDevice device, + const VkAcquireProfilingLockInfoKHR* pInfo); + +VKAPI_ATTR void VKAPI_CALL vkReleaseProfilingLockKHR( + VkDevice device); +#endif + + +#define VK_KHR_maintenance2 1 +#define VK_KHR_MAINTENANCE2_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE2_EXTENSION_NAME "VK_KHR_maintenance2" +typedef VkPointClippingBehavior VkPointClippingBehaviorKHR; + +typedef VkTessellationDomainOrigin VkTessellationDomainOriginKHR; + +typedef VkPhysicalDevicePointClippingProperties VkPhysicalDevicePointClippingPropertiesKHR; + +typedef VkRenderPassInputAttachmentAspectCreateInfo VkRenderPassInputAttachmentAspectCreateInfoKHR; + +typedef VkInputAttachmentAspectReference VkInputAttachmentAspectReferenceKHR; + +typedef VkImageViewUsageCreateInfo VkImageViewUsageCreateInfoKHR; + +typedef VkPipelineTessellationDomainOriginStateCreateInfo 
VkPipelineTessellationDomainOriginStateCreateInfoKHR; + + + +#define VK_KHR_get_surface_capabilities2 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_SURFACE_CAPABILITIES_2_EXTENSION_NAME "VK_KHR_get_surface_capabilities2" +typedef struct VkPhysicalDeviceSurfaceInfo2KHR { + VkStructureType sType; + const void* pNext; + VkSurfaceKHR surface; +} VkPhysicalDeviceSurfaceInfo2KHR; + +typedef struct VkSurfaceCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceCapabilitiesKHR surfaceCapabilities; +} VkSurfaceCapabilities2KHR; + +typedef struct VkSurfaceFormat2KHR { + VkStructureType sType; + void* pNext; + VkSurfaceFormatKHR surfaceFormat; +} VkSurfaceFormat2KHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkSurfaceCapabilities2KHR* pSurfaceCapabilities); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceFormats2KHR)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pSurfaceFormatCount, VkSurfaceFormat2KHR* pSurfaceFormats); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkSurfaceCapabilities2KHR* pSurfaceCapabilities); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceFormats2KHR( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pSurfaceFormatCount, + VkSurfaceFormat2KHR* pSurfaceFormats); +#endif + + +#define VK_KHR_variable_pointers 1 +#define VK_KHR_VARIABLE_POINTERS_SPEC_VERSION 1 +#define VK_KHR_VARIABLE_POINTERS_EXTENSION_NAME "VK_KHR_variable_pointers" +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointerFeaturesKHR; + +typedef VkPhysicalDeviceVariablePointersFeatures VkPhysicalDeviceVariablePointersFeaturesKHR; + + + +#define VK_KHR_get_display_properties2 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_SPEC_VERSION 1 +#define VK_KHR_GET_DISPLAY_PROPERTIES_2_EXTENSION_NAME "VK_KHR_get_display_properties2" +typedef struct VkDisplayProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPropertiesKHR displayProperties; +} VkDisplayProperties2KHR; + +typedef struct VkDisplayPlaneProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlanePropertiesKHR displayPlaneProperties; +} VkDisplayPlaneProperties2KHR; + +typedef struct VkDisplayModeProperties2KHR { + VkStructureType sType; + void* pNext; + VkDisplayModePropertiesKHR displayModeProperties; +} VkDisplayModeProperties2KHR; + +typedef struct VkDisplayPlaneInfo2KHR { + VkStructureType sType; + const void* pNext; + VkDisplayModeKHR mode; + uint32_t planeIndex; +} VkDisplayPlaneInfo2KHR; + +typedef struct VkDisplayPlaneCapabilities2KHR { + VkStructureType sType; + void* pNext; + VkDisplayPlaneCapabilitiesKHR capabilities; +} VkDisplayPlaneCapabilities2KHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPlaneProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayModeProperties2KHR)(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, 
VkDisplayModeProperties2KHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetDisplayPlaneCapabilities2KHR)(VkPhysicalDevice physicalDevice, const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, VkDisplayPlaneCapabilities2KHR* pCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceDisplayPlaneProperties2KHR( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkDisplayPlaneProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayModeProperties2KHR( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display, + uint32_t* pPropertyCount, + VkDisplayModeProperties2KHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDisplayPlaneCapabilities2KHR( + VkPhysicalDevice physicalDevice, + const VkDisplayPlaneInfo2KHR* pDisplayPlaneInfo, + VkDisplayPlaneCapabilities2KHR* pCapabilities); +#endif + + +#define VK_KHR_dedicated_allocation 1 +#define VK_KHR_DEDICATED_ALLOCATION_SPEC_VERSION 3 +#define VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_KHR_dedicated_allocation" +typedef VkMemoryDedicatedRequirements VkMemoryDedicatedRequirementsKHR; + +typedef VkMemoryDedicatedAllocateInfo VkMemoryDedicatedAllocateInfoKHR; + + + +#define VK_KHR_storage_buffer_storage_class 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_SPEC_VERSION 1 +#define VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME "VK_KHR_storage_buffer_storage_class" + + +#define VK_KHR_relaxed_block_layout 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_RELAXED_BLOCK_LAYOUT_EXTENSION_NAME "VK_KHR_relaxed_block_layout" + + +#define VK_KHR_get_memory_requirements2 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_SPEC_VERSION 1 +#define VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME "VK_KHR_get_memory_requirements2" +typedef VkBufferMemoryRequirementsInfo2 VkBufferMemoryRequirementsInfo2KHR; + +typedef VkImageMemoryRequirementsInfo2 VkImageMemoryRequirementsInfo2KHR; + +typedef VkImageSparseMemoryRequirementsInfo2 VkImageSparseMemoryRequirementsInfo2KHR; + +typedef VkSparseImageMemoryRequirements2 VkSparseImageMemoryRequirements2KHR; + +typedef void (VKAPI_PTR *PFN_vkGetImageMemoryRequirements2KHR)(VkDevice device, const VkImageMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetBufferMemoryRequirements2KHR)(VkDevice device, const VkBufferMemoryRequirementsInfo2* pInfo, VkMemoryRequirements2* pMemoryRequirements); +typedef void (VKAPI_PTR *PFN_vkGetImageSparseMemoryRequirements2KHR)(VkDevice device, const VkImageSparseMemoryRequirementsInfo2* pInfo, uint32_t* pSparseMemoryRequirementCount, VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetImageMemoryRequirements2KHR( + VkDevice device, + const VkImageMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetBufferMemoryRequirements2KHR( + VkDevice device, + const VkBufferMemoryRequirementsInfo2* pInfo, + VkMemoryRequirements2* pMemoryRequirements); + +VKAPI_ATTR void VKAPI_CALL vkGetImageSparseMemoryRequirements2KHR( + VkDevice device, + const VkImageSparseMemoryRequirementsInfo2* pInfo, + uint32_t* pSparseMemoryRequirementCount, + VkSparseImageMemoryRequirements2* pSparseMemoryRequirements); +#endif + + +#define VK_KHR_image_format_list 1 
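+/*
+ * A minimal sketch of chaining VkImageFormatListCreateInfo into image
+ * creation, assuming VK_KHR_image_format_list (or Vulkan 1.2) is available;
+ * the formats and the surrounding VkImageCreateInfo fields are illustrative
+ * placeholders only.
+ *
+ *     VkFormat viewFormats[2] = { VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_SRGB };
+ *     VkImageFormatListCreateInfo formatList = { VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO };
+ *     formatList.viewFormatCount = 2;
+ *     formatList.pViewFormats = viewFormats;
+ *     VkImageCreateInfo imageInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+ *     imageInfo.flags = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT; // views may use either listed format
+ *     imageInfo.pNext = &formatList;
+ */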
+#define VK_KHR_IMAGE_FORMAT_LIST_SPEC_VERSION 1 +#define VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME "VK_KHR_image_format_list" +typedef VkImageFormatListCreateInfo VkImageFormatListCreateInfoKHR; + + + +#define VK_KHR_sampler_ycbcr_conversion 1 +typedef VkSamplerYcbcrConversion VkSamplerYcbcrConversionKHR; + +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_SPEC_VERSION 14 +#define VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME "VK_KHR_sampler_ycbcr_conversion" +typedef VkSamplerYcbcrModelConversion VkSamplerYcbcrModelConversionKHR; + +typedef VkSamplerYcbcrRange VkSamplerYcbcrRangeKHR; + +typedef VkChromaLocation VkChromaLocationKHR; + +typedef VkSamplerYcbcrConversionCreateInfo VkSamplerYcbcrConversionCreateInfoKHR; + +typedef VkSamplerYcbcrConversionInfo VkSamplerYcbcrConversionInfoKHR; + +typedef VkBindImagePlaneMemoryInfo VkBindImagePlaneMemoryInfoKHR; + +typedef VkImagePlaneMemoryRequirementsInfo VkImagePlaneMemoryRequirementsInfoKHR; + +typedef VkPhysicalDeviceSamplerYcbcrConversionFeatures VkPhysicalDeviceSamplerYcbcrConversionFeaturesKHR; + +typedef VkSamplerYcbcrConversionImageFormatProperties VkSamplerYcbcrConversionImageFormatPropertiesKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateSamplerYcbcrConversionKHR)(VkDevice device, const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSamplerYcbcrConversion* pYcbcrConversion); +typedef void (VKAPI_PTR *PFN_vkDestroySamplerYcbcrConversionKHR)(VkDevice device, VkSamplerYcbcrConversion ycbcrConversion, const VkAllocationCallbacks* pAllocator); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateSamplerYcbcrConversionKHR( + VkDevice device, + const VkSamplerYcbcrConversionCreateInfo* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSamplerYcbcrConversion* pYcbcrConversion); + +VKAPI_ATTR void VKAPI_CALL vkDestroySamplerYcbcrConversionKHR( + VkDevice device, + VkSamplerYcbcrConversion ycbcrConversion, + const VkAllocationCallbacks* pAllocator); +#endif + + +#define VK_KHR_bind_memory2 1 +#define VK_KHR_BIND_MEMORY_2_SPEC_VERSION 1 +#define VK_KHR_BIND_MEMORY_2_EXTENSION_NAME "VK_KHR_bind_memory2" +typedef VkBindBufferMemoryInfo VkBindBufferMemoryInfoKHR; + +typedef VkBindImageMemoryInfo VkBindImageMemoryInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkBindBufferMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindBufferMemoryInfo* pBindInfos); +typedef VkResult (VKAPI_PTR *PFN_vkBindImageMemory2KHR)(VkDevice device, uint32_t bindInfoCount, const VkBindImageMemoryInfo* pBindInfos); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkBindBufferMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindBufferMemoryInfo* pBindInfos); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindImageMemory2KHR( + VkDevice device, + uint32_t bindInfoCount, + const VkBindImageMemoryInfo* pBindInfos); +#endif + + +#define VK_KHR_maintenance3 1 +#define VK_KHR_MAINTENANCE3_SPEC_VERSION 1 +#define VK_KHR_MAINTENANCE3_EXTENSION_NAME "VK_KHR_maintenance3" +typedef VkPhysicalDeviceMaintenance3Properties VkPhysicalDeviceMaintenance3PropertiesKHR; + +typedef VkDescriptorSetLayoutSupport VkDescriptorSetLayoutSupportKHR; + +typedef void (VKAPI_PTR *PFN_vkGetDescriptorSetLayoutSupportKHR)(VkDevice device, const VkDescriptorSetLayoutCreateInfo* pCreateInfo, VkDescriptorSetLayoutSupport* pSupport); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkGetDescriptorSetLayoutSupportKHR( + VkDevice device, + const VkDescriptorSetLayoutCreateInfo* pCreateInfo, + 
VkDescriptorSetLayoutSupport* pSupport); +#endif + + +#define VK_KHR_draw_indirect_count 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_SPEC_VERSION 1 +#define VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_KHR_draw_indirect_count" +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountKHR)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountKHR( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_KHR_shader_subgroup_extended_types 1 +#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_SPEC_VERSION 1 +#define VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME "VK_KHR_shader_subgroup_extended_types" +typedef VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR; + + + +#define VK_KHR_8bit_storage 1 +#define VK_KHR_8BIT_STORAGE_SPEC_VERSION 1 +#define VK_KHR_8BIT_STORAGE_EXTENSION_NAME "VK_KHR_8bit_storage" +typedef VkPhysicalDevice8BitStorageFeatures VkPhysicalDevice8BitStorageFeaturesKHR; + + + +#define VK_KHR_shader_atomic_int64 1 +#define VK_KHR_SHADER_ATOMIC_INT64_SPEC_VERSION 1 +#define VK_KHR_SHADER_ATOMIC_INT64_EXTENSION_NAME "VK_KHR_shader_atomic_int64" +typedef VkPhysicalDeviceShaderAtomicInt64Features VkPhysicalDeviceShaderAtomicInt64FeaturesKHR; + + + +#define VK_KHR_shader_clock 1 +#define VK_KHR_SHADER_CLOCK_SPEC_VERSION 1 +#define VK_KHR_SHADER_CLOCK_EXTENSION_NAME "VK_KHR_shader_clock" +typedef struct VkPhysicalDeviceShaderClockFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 shaderSubgroupClock; + VkBool32 shaderDeviceClock; +} VkPhysicalDeviceShaderClockFeaturesKHR; + + + +#define VK_KHR_driver_properties 1 +#define VK_KHR_DRIVER_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_DRIVER_PROPERTIES_EXTENSION_NAME "VK_KHR_driver_properties" +#define VK_MAX_DRIVER_NAME_SIZE_KHR VK_MAX_DRIVER_NAME_SIZE +#define VK_MAX_DRIVER_INFO_SIZE_KHR VK_MAX_DRIVER_INFO_SIZE +typedef VkDriverId VkDriverIdKHR; + +typedef VkConformanceVersion VkConformanceVersionKHR; + +typedef VkPhysicalDeviceDriverProperties VkPhysicalDeviceDriverPropertiesKHR; + + + +#define VK_KHR_shader_float_controls 1 +#define VK_KHR_SHADER_FLOAT_CONTROLS_SPEC_VERSION 4 +#define VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME "VK_KHR_shader_float_controls" +typedef VkShaderFloatControlsIndependence VkShaderFloatControlsIndependenceKHR; + +typedef VkPhysicalDeviceFloatControlsProperties VkPhysicalDeviceFloatControlsPropertiesKHR; + + + +#define VK_KHR_depth_stencil_resolve 1 +#define VK_KHR_DEPTH_STENCIL_RESOLVE_SPEC_VERSION 1 +#define VK_KHR_DEPTH_STENCIL_RESOLVE_EXTENSION_NAME "VK_KHR_depth_stencil_resolve" +typedef VkResolveModeFlagBits VkResolveModeFlagBitsKHR; + +typedef VkResolveModeFlags VkResolveModeFlagsKHR; + +typedef VkSubpassDescriptionDepthStencilResolve 
VkSubpassDescriptionDepthStencilResolveKHR; + +typedef VkPhysicalDeviceDepthStencilResolveProperties VkPhysicalDeviceDepthStencilResolvePropertiesKHR; + + + +#define VK_KHR_swapchain_mutable_format 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_SPEC_VERSION 1 +#define VK_KHR_SWAPCHAIN_MUTABLE_FORMAT_EXTENSION_NAME "VK_KHR_swapchain_mutable_format" + + +#define VK_KHR_timeline_semaphore 1 +#define VK_KHR_TIMELINE_SEMAPHORE_SPEC_VERSION 2 +#define VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME "VK_KHR_timeline_semaphore" +typedef VkSemaphoreType VkSemaphoreTypeKHR; + +typedef VkSemaphoreWaitFlagBits VkSemaphoreWaitFlagBitsKHR; + +typedef VkSemaphoreWaitFlags VkSemaphoreWaitFlagsKHR; + +typedef VkPhysicalDeviceTimelineSemaphoreFeatures VkPhysicalDeviceTimelineSemaphoreFeaturesKHR; + +typedef VkPhysicalDeviceTimelineSemaphoreProperties VkPhysicalDeviceTimelineSemaphorePropertiesKHR; + +typedef VkSemaphoreTypeCreateInfo VkSemaphoreTypeCreateInfoKHR; + +typedef VkTimelineSemaphoreSubmitInfo VkTimelineSemaphoreSubmitInfoKHR; + +typedef VkSemaphoreWaitInfo VkSemaphoreWaitInfoKHR; + +typedef VkSemaphoreSignalInfo VkSemaphoreSignalInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreCounterValueKHR)(VkDevice device, VkSemaphore semaphore, uint64_t* pValue); +typedef VkResult (VKAPI_PTR *PFN_vkWaitSemaphoresKHR)(VkDevice device, const VkSemaphoreWaitInfo* pWaitInfo, uint64_t timeout); +typedef VkResult (VKAPI_PTR *PFN_vkSignalSemaphoreKHR)(VkDevice device, const VkSemaphoreSignalInfo* pSignalInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreCounterValueKHR( + VkDevice device, + VkSemaphore semaphore, + uint64_t* pValue); + +VKAPI_ATTR VkResult VKAPI_CALL vkWaitSemaphoresKHR( + VkDevice device, + const VkSemaphoreWaitInfo* pWaitInfo, + uint64_t timeout); + +VKAPI_ATTR VkResult VKAPI_CALL vkSignalSemaphoreKHR( + VkDevice device, + const VkSemaphoreSignalInfo* pSignalInfo); +#endif + + +#define VK_KHR_vulkan_memory_model 1 +#define VK_KHR_VULKAN_MEMORY_MODEL_SPEC_VERSION 3 +#define VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME "VK_KHR_vulkan_memory_model" +typedef VkPhysicalDeviceVulkanMemoryModelFeatures VkPhysicalDeviceVulkanMemoryModelFeaturesKHR; + + + +#define VK_KHR_spirv_1_4 1 +#define VK_KHR_SPIRV_1_4_SPEC_VERSION 1 +#define VK_KHR_SPIRV_1_4_EXTENSION_NAME "VK_KHR_spirv_1_4" + + +#define VK_KHR_surface_protected_capabilities 1 +#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_SPEC_VERSION 1 +#define VK_KHR_SURFACE_PROTECTED_CAPABILITIES_EXTENSION_NAME "VK_KHR_surface_protected_capabilities" +typedef struct VkSurfaceProtectedCapabilitiesKHR { + VkStructureType sType; + const void* pNext; + VkBool32 supportsProtected; +} VkSurfaceProtectedCapabilitiesKHR; + + + +#define VK_KHR_separate_depth_stencil_layouts 1 +#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_SPEC_VERSION 1 +#define VK_KHR_SEPARATE_DEPTH_STENCIL_LAYOUTS_EXTENSION_NAME "VK_KHR_separate_depth_stencil_layouts" +typedef VkPhysicalDeviceSeparateDepthStencilLayoutsFeatures VkPhysicalDeviceSeparateDepthStencilLayoutsFeaturesKHR; + +typedef VkAttachmentReferenceStencilLayout VkAttachmentReferenceStencilLayoutKHR; + +typedef VkAttachmentDescriptionStencilLayout VkAttachmentDescriptionStencilLayoutKHR; + + + +#define VK_KHR_uniform_buffer_standard_layout 1 +#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_SPEC_VERSION 1 +#define VK_KHR_UNIFORM_BUFFER_STANDARD_LAYOUT_EXTENSION_NAME "VK_KHR_uniform_buffer_standard_layout" +typedef VkPhysicalDeviceUniformBufferStandardLayoutFeatures 
VkPhysicalDeviceUniformBufferStandardLayoutFeaturesKHR; + + + +#define VK_KHR_buffer_device_address 1 +#define VK_KHR_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 1 +#define VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_KHR_buffer_device_address" +typedef VkPhysicalDeviceBufferDeviceAddressFeatures VkPhysicalDeviceBufferDeviceAddressFeaturesKHR; + +typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoKHR; + +typedef VkBufferOpaqueCaptureAddressCreateInfo VkBufferOpaqueCaptureAddressCreateInfoKHR; + +typedef VkMemoryOpaqueCaptureAddressAllocateInfo VkMemoryOpaqueCaptureAddressAllocateInfoKHR; + +typedef VkDeviceMemoryOpaqueCaptureAddressInfo VkDeviceMemoryOpaqueCaptureAddressInfoKHR; + +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetBufferOpaqueCaptureAddressKHR)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); +typedef uint64_t (VKAPI_PTR *PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR)(VkDevice device, const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetBufferOpaqueCaptureAddressKHR( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); + +VKAPI_ATTR uint64_t VKAPI_CALL vkGetDeviceMemoryOpaqueCaptureAddressKHR( + VkDevice device, + const VkDeviceMemoryOpaqueCaptureAddressInfo* pInfo); +#endif + + +#define VK_KHR_pipeline_executable_properties 1 +#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_SPEC_VERSION 1 +#define VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME "VK_KHR_pipeline_executable_properties" + +typedef enum VkPipelineExecutableStatisticFormatKHR { + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR = 0, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_INT64_KHR = 1, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_UINT64_KHR = 2, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR = 3, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BEGIN_RANGE_KHR = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_END_RANGE_KHR = VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR, + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_RANGE_SIZE_KHR = (VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_FLOAT64_KHR - VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_BOOL32_KHR + 1), + VK_PIPELINE_EXECUTABLE_STATISTIC_FORMAT_MAX_ENUM_KHR = 0x7FFFFFFF +} VkPipelineExecutableStatisticFormatKHR; +typedef struct VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR { + VkStructureType sType; + void* pNext; + VkBool32 pipelineExecutableInfo; +} VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR; + +typedef struct VkPipelineInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipeline pipeline; +} VkPipelineInfoKHR; + +typedef struct VkPipelineExecutablePropertiesKHR { + VkStructureType sType; + void* pNext; + VkShaderStageFlags stages; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + uint32_t subgroupSize; +} VkPipelineExecutablePropertiesKHR; + +typedef struct VkPipelineExecutableInfoKHR { + VkStructureType sType; + const void* pNext; + VkPipeline pipeline; + uint32_t executableIndex; +} VkPipelineExecutableInfoKHR; + +typedef union VkPipelineExecutableStatisticValueKHR { + VkBool32 b32; + int64_t i64; + uint64_t u64; + double f64; +} VkPipelineExecutableStatisticValueKHR; + +typedef struct VkPipelineExecutableStatisticKHR { + 
VkStructureType sType; + void* pNext; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + VkPipelineExecutableStatisticFormatKHR format; + VkPipelineExecutableStatisticValueKHR value; +} VkPipelineExecutableStatisticKHR; + +typedef struct VkPipelineExecutableInternalRepresentationKHR { + VkStructureType sType; + void* pNext; + char name[VK_MAX_DESCRIPTION_SIZE]; + char description[VK_MAX_DESCRIPTION_SIZE]; + VkBool32 isText; + size_t dataSize; + void* pData; +} VkPipelineExecutableInternalRepresentationKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutablePropertiesKHR)(VkDevice device, const VkPipelineInfoKHR* pPipelineInfo, uint32_t* pExecutableCount, VkPipelineExecutablePropertiesKHR* pProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableStatisticsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pStatisticCount, VkPipelineExecutableStatisticKHR* pStatistics); +typedef VkResult (VKAPI_PTR *PFN_vkGetPipelineExecutableInternalRepresentationsKHR)(VkDevice device, const VkPipelineExecutableInfoKHR* pExecutableInfo, uint32_t* pInternalRepresentationCount, VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutablePropertiesKHR( + VkDevice device, + const VkPipelineInfoKHR* pPipelineInfo, + uint32_t* pExecutableCount, + VkPipelineExecutablePropertiesKHR* pProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableStatisticsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pStatisticCount, + VkPipelineExecutableStatisticKHR* pStatistics); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPipelineExecutableInternalRepresentationsKHR( + VkDevice device, + const VkPipelineExecutableInfoKHR* pExecutableInfo, + uint32_t* pInternalRepresentationCount, + VkPipelineExecutableInternalRepresentationKHR* pInternalRepresentations); +#endif + + +#define VK_EXT_debug_report 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugReportCallbackEXT) +#define VK_EXT_DEBUG_REPORT_SPEC_VERSION 9 +#define VK_EXT_DEBUG_REPORT_EXTENSION_NAME "VK_EXT_debug_report" + +typedef enum VkDebugReportObjectTypeEXT { + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0, + VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1, + VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3, + VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4, + VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6, + VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10, + VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11, + VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14, + VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17, + VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23, + VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24, + 
VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25, + VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26, + VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT = 28, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30, + VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT = 31, + VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT = 32, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1000156000, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1000085000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT = 1000165000, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_BEGIN_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_END_RANGE_EXT = VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT, + VK_DEBUG_REPORT_OBJECT_TYPE_RANGE_SIZE_EXT = (VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT - VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT + 1), + VK_DEBUG_REPORT_OBJECT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportObjectTypeEXT; + +typedef enum VkDebugReportFlagBitsEXT { + VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001, + VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002, + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004, + VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008, + VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010, + VK_DEBUG_REPORT_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugReportFlagBitsEXT; +typedef VkFlags VkDebugReportFlagsEXT; +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugReportCallbackEXT)( + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage, + void* pUserData); + +typedef struct VkDebugReportCallbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportFlagsEXT flags; + PFN_vkDebugReportCallbackEXT pfnCallback; + void* pUserData; +} VkDebugReportCallbackCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugReportCallbackEXT)(VkInstance instance, const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugReportCallbackEXT* pCallback); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugReportCallbackEXT)(VkInstance instance, VkDebugReportCallbackEXT callback, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkDebugReportMessageEXT)(VkInstance instance, VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT objectType, uint64_t object, size_t location, int32_t messageCode, const char* pLayerPrefix, const char* pMessage); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT( + VkInstance instance, + const VkDebugReportCallbackCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugReportCallbackEXT* pCallback); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT( + VkInstance instance, + VkDebugReportCallbackEXT 
callback, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT( + VkInstance instance, + VkDebugReportFlagsEXT flags, + VkDebugReportObjectTypeEXT objectType, + uint64_t object, + size_t location, + int32_t messageCode, + const char* pLayerPrefix, + const char* pMessage); +#endif + + +#define VK_NV_glsl_shader 1 +#define VK_NV_GLSL_SHADER_SPEC_VERSION 1 +#define VK_NV_GLSL_SHADER_EXTENSION_NAME "VK_NV_glsl_shader" + + +#define VK_EXT_depth_range_unrestricted 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_SPEC_VERSION 1 +#define VK_EXT_DEPTH_RANGE_UNRESTRICTED_EXTENSION_NAME "VK_EXT_depth_range_unrestricted" + + +#define VK_IMG_filter_cubic 1 +#define VK_IMG_FILTER_CUBIC_SPEC_VERSION 1 +#define VK_IMG_FILTER_CUBIC_EXTENSION_NAME "VK_IMG_filter_cubic" + + +#define VK_AMD_rasterization_order 1 +#define VK_AMD_RASTERIZATION_ORDER_SPEC_VERSION 1 +#define VK_AMD_RASTERIZATION_ORDER_EXTENSION_NAME "VK_AMD_rasterization_order" + +typedef enum VkRasterizationOrderAMD { + VK_RASTERIZATION_ORDER_STRICT_AMD = 0, + VK_RASTERIZATION_ORDER_RELAXED_AMD = 1, + VK_RASTERIZATION_ORDER_BEGIN_RANGE_AMD = VK_RASTERIZATION_ORDER_STRICT_AMD, + VK_RASTERIZATION_ORDER_END_RANGE_AMD = VK_RASTERIZATION_ORDER_RELAXED_AMD, + VK_RASTERIZATION_ORDER_RANGE_SIZE_AMD = (VK_RASTERIZATION_ORDER_RELAXED_AMD - VK_RASTERIZATION_ORDER_STRICT_AMD + 1), + VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFFFFFF +} VkRasterizationOrderAMD; +typedef struct VkPipelineRasterizationStateRasterizationOrderAMD { + VkStructureType sType; + const void* pNext; + VkRasterizationOrderAMD rasterizationOrder; +} VkPipelineRasterizationStateRasterizationOrderAMD; + + + +#define VK_AMD_shader_trinary_minmax 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_SPEC_VERSION 1 +#define VK_AMD_SHADER_TRINARY_MINMAX_EXTENSION_NAME "VK_AMD_shader_trinary_minmax" + + +#define VK_AMD_shader_explicit_vertex_parameter 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_SPEC_VERSION 1 +#define VK_AMD_SHADER_EXPLICIT_VERTEX_PARAMETER_EXTENSION_NAME "VK_AMD_shader_explicit_vertex_parameter" + + +#define VK_EXT_debug_marker 1 +#define VK_EXT_DEBUG_MARKER_SPEC_VERSION 4 +#define VK_EXT_DEBUG_MARKER_EXTENSION_NAME "VK_EXT_debug_marker" +typedef struct VkDebugMarkerObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + const char* pObjectName; +} VkDebugMarkerObjectNameInfoEXT; + +typedef struct VkDebugMarkerObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugReportObjectTypeEXT objectType; + uint64_t object; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugMarkerObjectTagInfoEXT; + +typedef struct VkDebugMarkerMarkerInfoEXT { + VkStructureType sType; + const void* pNext; + const char* pMarkerName; + float color[4]; +} VkDebugMarkerMarkerInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectTagEXT)(VkDevice device, const VkDebugMarkerObjectTagInfoEXT* pTagInfo); +typedef VkResult (VKAPI_PTR *PFN_vkDebugMarkerSetObjectNameEXT)(VkDevice device, const VkDebugMarkerObjectNameInfoEXT* pNameInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerBeginEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerEndEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdDebugMarkerInsertEXT)(VkCommandBuffer commandBuffer, const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL 
vkDebugMarkerSetObjectTagEXT( + VkDevice device, + const VkDebugMarkerObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkDebugMarkerSetObjectNameEXT( + VkDevice device, + const VkDebugMarkerObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerBeginEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerEndEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdDebugMarkerInsertEXT( + VkCommandBuffer commandBuffer, + const VkDebugMarkerMarkerInfoEXT* pMarkerInfo); +#endif + + +#define VK_AMD_gcn_shader 1 +#define VK_AMD_GCN_SHADER_SPEC_VERSION 1 +#define VK_AMD_GCN_SHADER_EXTENSION_NAME "VK_AMD_gcn_shader" + + +#define VK_NV_dedicated_allocation 1 +#define VK_NV_DEDICATED_ALLOCATION_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_EXTENSION_NAME "VK_NV_dedicated_allocation" +typedef struct VkDedicatedAllocationImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationImageCreateInfoNV; + +typedef struct VkDedicatedAllocationBufferCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 dedicatedAllocation; +} VkDedicatedAllocationBufferCreateInfoNV; + +typedef struct VkDedicatedAllocationMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkImage image; + VkBuffer buffer; +} VkDedicatedAllocationMemoryAllocateInfoNV; + + + +#define VK_EXT_transform_feedback 1 +#define VK_EXT_TRANSFORM_FEEDBACK_SPEC_VERSION 1 +#define VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME "VK_EXT_transform_feedback" +typedef VkFlags VkPipelineRasterizationStateStreamCreateFlagsEXT; +typedef struct VkPhysicalDeviceTransformFeedbackFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 transformFeedback; + VkBool32 geometryStreams; +} VkPhysicalDeviceTransformFeedbackFeaturesEXT; + +typedef struct VkPhysicalDeviceTransformFeedbackPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxTransformFeedbackStreams; + uint32_t maxTransformFeedbackBuffers; + VkDeviceSize maxTransformFeedbackBufferSize; + uint32_t maxTransformFeedbackStreamDataSize; + uint32_t maxTransformFeedbackBufferDataSize; + uint32_t maxTransformFeedbackBufferDataStride; + VkBool32 transformFeedbackQueries; + VkBool32 transformFeedbackStreamsLinesTriangles; + VkBool32 transformFeedbackRasterizationStreamSelect; + VkBool32 transformFeedbackDraw; +} VkPhysicalDeviceTransformFeedbackPropertiesEXT; + +typedef struct VkPipelineRasterizationStateStreamCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationStateStreamCreateFlagsEXT flags; + uint32_t rasterizationStream; +} VkPipelineRasterizationStateStreamCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBindTransformFeedbackBuffersEXT)(VkCommandBuffer commandBuffer, uint32_t firstBinding, uint32_t bindingCount, const VkBuffer* pBuffers, const VkDeviceSize* pOffsets, const VkDeviceSize* pSizes); +typedef void (VKAPI_PTR *PFN_vkCmdBeginTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdEndTransformFeedbackEXT)(VkCommandBuffer commandBuffer, uint32_t firstCounterBuffer, uint32_t counterBufferCount, const VkBuffer* pCounterBuffers, const VkDeviceSize* pCounterBufferOffsets); +typedef void (VKAPI_PTR *PFN_vkCmdBeginQueryIndexedEXT)(VkCommandBuffer commandBuffer, 
VkQueryPool queryPool, uint32_t query, VkQueryControlFlags flags, uint32_t index); +typedef void (VKAPI_PTR *PFN_vkCmdEndQueryIndexedEXT)(VkCommandBuffer commandBuffer, VkQueryPool queryPool, uint32_t query, uint32_t index); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectByteCountEXT)(VkCommandBuffer commandBuffer, uint32_t instanceCount, uint32_t firstInstance, VkBuffer counterBuffer, VkDeviceSize counterBufferOffset, uint32_t counterOffset, uint32_t vertexStride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindTransformFeedbackBuffersEXT( + VkCommandBuffer commandBuffer, + uint32_t firstBinding, + uint32_t bindingCount, + const VkBuffer* pBuffers, + const VkDeviceSize* pOffsets, + const VkDeviceSize* pSizes); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndTransformFeedbackEXT( + VkCommandBuffer commandBuffer, + uint32_t firstCounterBuffer, + uint32_t counterBufferCount, + const VkBuffer* pCounterBuffers, + const VkDeviceSize* pCounterBufferOffsets); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + VkQueryControlFlags flags, + uint32_t index); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndQueryIndexedEXT( + VkCommandBuffer commandBuffer, + VkQueryPool queryPool, + uint32_t query, + uint32_t index); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectByteCountEXT( + VkCommandBuffer commandBuffer, + uint32_t instanceCount, + uint32_t firstInstance, + VkBuffer counterBuffer, + VkDeviceSize counterBufferOffset, + uint32_t counterOffset, + uint32_t vertexStride); +#endif + + +#define VK_NVX_image_view_handle 1 +#define VK_NVX_IMAGE_VIEW_HANDLE_SPEC_VERSION 1 +#define VK_NVX_IMAGE_VIEW_HANDLE_EXTENSION_NAME "VK_NVX_image_view_handle" +typedef struct VkImageViewHandleInfoNVX { + VkStructureType sType; + const void* pNext; + VkImageView imageView; + VkDescriptorType descriptorType; + VkSampler sampler; +} VkImageViewHandleInfoNVX; + +typedef uint32_t (VKAPI_PTR *PFN_vkGetImageViewHandleNVX)(VkDevice device, const VkImageViewHandleInfoNVX* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR uint32_t VKAPI_CALL vkGetImageViewHandleNVX( + VkDevice device, + const VkImageViewHandleInfoNVX* pInfo); +#endif + + +#define VK_AMD_draw_indirect_count 1 +#define VK_AMD_DRAW_INDIRECT_COUNT_SPEC_VERSION 2 +#define VK_AMD_DRAW_INDIRECT_COUNT_EXTENSION_NAME "VK_AMD_draw_indirect_count" +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawIndexedIndirectCountAMD)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawIndexedIndirectCountAMD( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + 
uint32_t stride); +#endif + + +#define VK_AMD_negative_viewport_height 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_SPEC_VERSION 1 +#define VK_AMD_NEGATIVE_VIEWPORT_HEIGHT_EXTENSION_NAME "VK_AMD_negative_viewport_height" + + +#define VK_AMD_gpu_shader_half_float 1 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_SPEC_VERSION 2 +#define VK_AMD_GPU_SHADER_HALF_FLOAT_EXTENSION_NAME "VK_AMD_gpu_shader_half_float" + + +#define VK_AMD_shader_ballot 1 +#define VK_AMD_SHADER_BALLOT_SPEC_VERSION 1 +#define VK_AMD_SHADER_BALLOT_EXTENSION_NAME "VK_AMD_shader_ballot" + + +#define VK_AMD_texture_gather_bias_lod 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_SPEC_VERSION 1 +#define VK_AMD_TEXTURE_GATHER_BIAS_LOD_EXTENSION_NAME "VK_AMD_texture_gather_bias_lod" +typedef struct VkTextureLODGatherFormatPropertiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 supportsTextureGatherLODBiasAMD; +} VkTextureLODGatherFormatPropertiesAMD; + + + +#define VK_AMD_shader_info 1 +#define VK_AMD_SHADER_INFO_SPEC_VERSION 1 +#define VK_AMD_SHADER_INFO_EXTENSION_NAME "VK_AMD_shader_info" + +typedef enum VkShaderInfoTypeAMD { + VK_SHADER_INFO_TYPE_STATISTICS_AMD = 0, + VK_SHADER_INFO_TYPE_BINARY_AMD = 1, + VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD = 2, + VK_SHADER_INFO_TYPE_BEGIN_RANGE_AMD = VK_SHADER_INFO_TYPE_STATISTICS_AMD, + VK_SHADER_INFO_TYPE_END_RANGE_AMD = VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, + VK_SHADER_INFO_TYPE_RANGE_SIZE_AMD = (VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD - VK_SHADER_INFO_TYPE_STATISTICS_AMD + 1), + VK_SHADER_INFO_TYPE_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderInfoTypeAMD; +typedef struct VkShaderResourceUsageAMD { + uint32_t numUsedVgprs; + uint32_t numUsedSgprs; + uint32_t ldsSizePerLocalWorkGroup; + size_t ldsUsageSizeInBytes; + size_t scratchMemUsageInBytes; +} VkShaderResourceUsageAMD; + +typedef struct VkShaderStatisticsInfoAMD { + VkShaderStageFlags shaderStageMask; + VkShaderResourceUsageAMD resourceUsage; + uint32_t numPhysicalVgprs; + uint32_t numPhysicalSgprs; + uint32_t numAvailableVgprs; + uint32_t numAvailableSgprs; + uint32_t computeWorkGroupSize[3]; +} VkShaderStatisticsInfoAMD; + +typedef VkResult (VKAPI_PTR *PFN_vkGetShaderInfoAMD)(VkDevice device, VkPipeline pipeline, VkShaderStageFlagBits shaderStage, VkShaderInfoTypeAMD infoType, size_t* pInfoSize, void* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetShaderInfoAMD( + VkDevice device, + VkPipeline pipeline, + VkShaderStageFlagBits shaderStage, + VkShaderInfoTypeAMD infoType, + size_t* pInfoSize, + void* pInfo); +#endif + + +#define VK_AMD_shader_image_load_store_lod 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_SPEC_VERSION 1 +#define VK_AMD_SHADER_IMAGE_LOAD_STORE_LOD_EXTENSION_NAME "VK_AMD_shader_image_load_store_lod" + + +#define VK_NV_corner_sampled_image 1 +#define VK_NV_CORNER_SAMPLED_IMAGE_SPEC_VERSION 2 +#define VK_NV_CORNER_SAMPLED_IMAGE_EXTENSION_NAME "VK_NV_corner_sampled_image" +typedef struct VkPhysicalDeviceCornerSampledImageFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 cornerSampledImage; +} VkPhysicalDeviceCornerSampledImageFeaturesNV; + + + +#define VK_IMG_format_pvrtc 1 +#define VK_IMG_FORMAT_PVRTC_SPEC_VERSION 1 +#define VK_IMG_FORMAT_PVRTC_EXTENSION_NAME "VK_IMG_format_pvrtc" + + +#define VK_NV_external_memory_capabilities 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME "VK_NV_external_memory_capabilities" + +typedef enum VkExternalMemoryHandleTypeFlagBitsNV { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT_NV = 
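+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header:
+ * vkGetShaderInfoAMD above follows the usual Vulkan size-query idiom. For
+ * the fixed-size statistics query a single call with a preset size suffices;
+ * the helper name is hypothetical.
+ */
+static VkResult query_fragment_stats(VkDevice device, VkPipeline pipeline,
+                                     VkShaderStatisticsInfoAMD* pStats)
+{
+    size_t infoSize = sizeof(*pStats);
+    return vkGetShaderInfoAMD(device, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
+                              VK_SHADER_INFO_TYPE_STATISTICS_AMD,
+                              &infoSize, pStats);
+}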
0x00000001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_IMAGE_KMT_BIT_NV = 0x00000008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryHandleTypeFlagBitsNV; +typedef VkFlags VkExternalMemoryHandleTypeFlagsNV; + +typedef enum VkExternalMemoryFeatureFlagBitsNV { + VK_EXTERNAL_MEMORY_FEATURE_DEDICATED_ONLY_BIT_NV = 0x00000001, + VK_EXTERNAL_MEMORY_FEATURE_EXPORTABLE_BIT_NV = 0x00000002, + VK_EXTERNAL_MEMORY_FEATURE_IMPORTABLE_BIT_NV = 0x00000004, + VK_EXTERNAL_MEMORY_FEATURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkExternalMemoryFeatureFlagBitsNV; +typedef VkFlags VkExternalMemoryFeatureFlagsNV; +typedef struct VkExternalImageFormatPropertiesNV { + VkImageFormatProperties imageFormatProperties; + VkExternalMemoryFeatureFlagsNV externalMemoryFeatures; + VkExternalMemoryHandleTypeFlagsNV exportFromImportedHandleTypes; + VkExternalMemoryHandleTypeFlagsNV compatibleHandleTypes; +} VkExternalImageFormatPropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV)(VkPhysicalDevice physicalDevice, VkFormat format, VkImageType type, VkImageTiling tiling, VkImageUsageFlags usage, VkImageCreateFlags flags, VkExternalMemoryHandleTypeFlagsNV externalHandleType, VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceExternalImageFormatPropertiesNV( + VkPhysicalDevice physicalDevice, + VkFormat format, + VkImageType type, + VkImageTiling tiling, + VkImageUsageFlags usage, + VkImageCreateFlags flags, + VkExternalMemoryHandleTypeFlagsNV externalHandleType, + VkExternalImageFormatPropertiesNV* pExternalImageFormatProperties); +#endif + + +#define VK_NV_external_memory 1 +#define VK_NV_EXTERNAL_MEMORY_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_EXTENSION_NAME "VK_NV_external_memory" +typedef struct VkExternalMemoryImageCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExternalMemoryImageCreateInfoNV; + +typedef struct VkExportMemoryAllocateInfoNV { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleTypes; +} VkExportMemoryAllocateInfoNV; + + + +#define VK_EXT_validation_flags 1 +#define VK_EXT_VALIDATION_FLAGS_SPEC_VERSION 2 +#define VK_EXT_VALIDATION_FLAGS_EXTENSION_NAME "VK_EXT_validation_flags" + +typedef enum VkValidationCheckEXT { + VK_VALIDATION_CHECK_ALL_EXT = 0, + VK_VALIDATION_CHECK_SHADERS_EXT = 1, + VK_VALIDATION_CHECK_BEGIN_RANGE_EXT = VK_VALIDATION_CHECK_ALL_EXT, + VK_VALIDATION_CHECK_END_RANGE_EXT = VK_VALIDATION_CHECK_SHADERS_EXT, + VK_VALIDATION_CHECK_RANGE_SIZE_EXT = (VK_VALIDATION_CHECK_SHADERS_EXT - VK_VALIDATION_CHECK_ALL_EXT + 1), + VK_VALIDATION_CHECK_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCheckEXT; +typedef struct VkValidationFlagsEXT { + VkStructureType sType; + const void* pNext; + uint32_t disabledValidationCheckCount; + const VkValidationCheckEXT* pDisabledValidationChecks; +} VkValidationFlagsEXT; + + + +#define VK_EXT_shader_subgroup_ballot 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_BALLOT_EXTENSION_NAME "VK_EXT_shader_subgroup_ballot" + + +#define VK_EXT_shader_subgroup_vote 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_SPEC_VERSION 1 +#define VK_EXT_SHADER_SUBGROUP_VOTE_EXTENSION_NAME "VK_EXT_shader_subgroup_vote" + + +#define 
VK_EXT_texture_compression_astc_hdr 1 +#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_SPEC_VERSION 1 +#define VK_EXT_TEXTURE_COMPRESSION_ASTC_HDR_EXTENSION_NAME "VK_EXT_texture_compression_astc_hdr" +typedef struct VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 textureCompressionASTC_HDR; +} VkPhysicalDeviceTextureCompressionASTCHDRFeaturesEXT; + + + +#define VK_EXT_astc_decode_mode 1 +#define VK_EXT_ASTC_DECODE_MODE_SPEC_VERSION 1 +#define VK_EXT_ASTC_DECODE_MODE_EXTENSION_NAME "VK_EXT_astc_decode_mode" +typedef struct VkImageViewASTCDecodeModeEXT { + VkStructureType sType; + const void* pNext; + VkFormat decodeMode; +} VkImageViewASTCDecodeModeEXT; + +typedef struct VkPhysicalDeviceASTCDecodeFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 decodeModeSharedExponent; +} VkPhysicalDeviceASTCDecodeFeaturesEXT; + + + +#define VK_EXT_conditional_rendering 1 +#define VK_EXT_CONDITIONAL_RENDERING_SPEC_VERSION 2 +#define VK_EXT_CONDITIONAL_RENDERING_EXTENSION_NAME "VK_EXT_conditional_rendering" + +typedef enum VkConditionalRenderingFlagBitsEXT { + VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT = 0x00000001, + VK_CONDITIONAL_RENDERING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConditionalRenderingFlagBitsEXT; +typedef VkFlags VkConditionalRenderingFlagsEXT; +typedef struct VkConditionalRenderingBeginInfoEXT { + VkStructureType sType; + const void* pNext; + VkBuffer buffer; + VkDeviceSize offset; + VkConditionalRenderingFlagsEXT flags; +} VkConditionalRenderingBeginInfoEXT; + +typedef struct VkPhysicalDeviceConditionalRenderingFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 conditionalRendering; + VkBool32 inheritedConditionalRendering; +} VkPhysicalDeviceConditionalRenderingFeaturesEXT; + +typedef struct VkCommandBufferInheritanceConditionalRenderingInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 conditionalRenderingEnable; +} VkCommandBufferInheritanceConditionalRenderingInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdBeginConditionalRenderingEXT)(VkCommandBuffer commandBuffer, const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); +typedef void (VKAPI_PTR *PFN_vkCmdEndConditionalRenderingEXT)(VkCommandBuffer commandBuffer); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBeginConditionalRenderingEXT( + VkCommandBuffer commandBuffer, + const VkConditionalRenderingBeginInfoEXT* pConditionalRenderingBegin); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndConditionalRenderingEXT( + VkCommandBuffer commandBuffer); +#endif + + +#define VK_NVX_device_generated_commands 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkObjectTableNVX) +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkIndirectCommandsLayoutNVX) +#define VK_NVX_DEVICE_GENERATED_COMMANDS_SPEC_VERSION 3 +#define VK_NVX_DEVICE_GENERATED_COMMANDS_EXTENSION_NAME "VK_NVX_device_generated_commands" + +typedef enum VkIndirectCommandsTokenTypeNVX { + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX = 0, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DESCRIPTOR_SET_NVX = 1, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_INDEX_BUFFER_NVX = 2, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_VERTEX_BUFFER_NVX = 3, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_PUSH_CONSTANT_NVX = 4, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_INDEXED_NVX = 5, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NVX = 6, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX = 7, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_BEGIN_RANGE_NVX = VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_END_RANGE_NVX = 
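+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header: a
+ * draw gated on a 32-bit predicate using VK_EXT_conditional_rendering as
+ * declared above. Handles and the helper name are hypothetical.
+ */
+static void record_conditional_draw(VkCommandBuffer cmd, VkBuffer predicate,
+                                    VkDeviceSize predicateOffset)
+{
+    VkConditionalRenderingBeginInfoEXT begin = {
+        .sType  = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT,
+        .buffer = predicate,   /* 32-bit value; zero means "skip" */
+        .offset = predicateOffset,
+        .flags  = 0,           /* or VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT */
+    };
+    vkCmdBeginConditionalRenderingEXT(cmd, &begin);
+    vkCmdDraw(cmd, 3, 1, 0, 0); /* executed only if the predicate is nonzero */
+    vkCmdEndConditionalRenderingEXT(cmd);
+}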
VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX, + VK_INDIRECT_COMMANDS_TOKEN_TYPE_RANGE_SIZE_NVX = (VK_INDIRECT_COMMANDS_TOKEN_TYPE_DISPATCH_NVX - VK_INDIRECT_COMMANDS_TOKEN_TYPE_PIPELINE_NVX + 1), + VK_INDIRECT_COMMANDS_TOKEN_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF +} VkIndirectCommandsTokenTypeNVX; + +typedef enum VkObjectEntryTypeNVX { + VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX = 0, + VK_OBJECT_ENTRY_TYPE_PIPELINE_NVX = 1, + VK_OBJECT_ENTRY_TYPE_INDEX_BUFFER_NVX = 2, + VK_OBJECT_ENTRY_TYPE_VERTEX_BUFFER_NVX = 3, + VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX = 4, + VK_OBJECT_ENTRY_TYPE_BEGIN_RANGE_NVX = VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX, + VK_OBJECT_ENTRY_TYPE_END_RANGE_NVX = VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX, + VK_OBJECT_ENTRY_TYPE_RANGE_SIZE_NVX = (VK_OBJECT_ENTRY_TYPE_PUSH_CONSTANT_NVX - VK_OBJECT_ENTRY_TYPE_DESCRIPTOR_SET_NVX + 1), + VK_OBJECT_ENTRY_TYPE_MAX_ENUM_NVX = 0x7FFFFFFF +} VkObjectEntryTypeNVX; + +typedef enum VkIndirectCommandsLayoutUsageFlagBitsNVX { + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_UNORDERED_SEQUENCES_BIT_NVX = 0x00000001, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_SPARSE_SEQUENCES_BIT_NVX = 0x00000002, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_EMPTY_EXECUTIONS_BIT_NVX = 0x00000004, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_INDEXED_SEQUENCES_BIT_NVX = 0x00000008, + VK_INDIRECT_COMMANDS_LAYOUT_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF +} VkIndirectCommandsLayoutUsageFlagBitsNVX; +typedef VkFlags VkIndirectCommandsLayoutUsageFlagsNVX; + +typedef enum VkObjectEntryUsageFlagBitsNVX { + VK_OBJECT_ENTRY_USAGE_GRAPHICS_BIT_NVX = 0x00000001, + VK_OBJECT_ENTRY_USAGE_COMPUTE_BIT_NVX = 0x00000002, + VK_OBJECT_ENTRY_USAGE_FLAG_BITS_MAX_ENUM_NVX = 0x7FFFFFFF +} VkObjectEntryUsageFlagBitsNVX; +typedef VkFlags VkObjectEntryUsageFlagsNVX; +typedef struct VkDeviceGeneratedCommandsFeaturesNVX { + VkStructureType sType; + const void* pNext; + VkBool32 computeBindingPointSupport; +} VkDeviceGeneratedCommandsFeaturesNVX; + +typedef struct VkDeviceGeneratedCommandsLimitsNVX { + VkStructureType sType; + const void* pNext; + uint32_t maxIndirectCommandsLayoutTokenCount; + uint32_t maxObjectEntryCounts; + uint32_t minSequenceCountBufferOffsetAlignment; + uint32_t minSequenceIndexBufferOffsetAlignment; + uint32_t minCommandsTokenBufferOffsetAlignment; +} VkDeviceGeneratedCommandsLimitsNVX; + +typedef struct VkIndirectCommandsTokenNVX { + VkIndirectCommandsTokenTypeNVX tokenType; + VkBuffer buffer; + VkDeviceSize offset; +} VkIndirectCommandsTokenNVX; + +typedef struct VkIndirectCommandsLayoutTokenNVX { + VkIndirectCommandsTokenTypeNVX tokenType; + uint32_t bindingUnit; + uint32_t dynamicCount; + uint32_t divisor; +} VkIndirectCommandsLayoutTokenNVX; + +typedef struct VkIndirectCommandsLayoutCreateInfoNVX { + VkStructureType sType; + const void* pNext; + VkPipelineBindPoint pipelineBindPoint; + VkIndirectCommandsLayoutUsageFlagsNVX flags; + uint32_t tokenCount; + const VkIndirectCommandsLayoutTokenNVX* pTokens; +} VkIndirectCommandsLayoutCreateInfoNVX; + +typedef struct VkCmdProcessCommandsInfoNVX { + VkStructureType sType; + const void* pNext; + VkObjectTableNVX objectTable; + VkIndirectCommandsLayoutNVX indirectCommandsLayout; + uint32_t indirectCommandsTokenCount; + const VkIndirectCommandsTokenNVX* pIndirectCommandsTokens; + uint32_t maxSequencesCount; + VkCommandBuffer targetCommandBuffer; + VkBuffer sequencesCountBuffer; + VkDeviceSize sequencesCountOffset; + VkBuffer sequencesIndexBuffer; + VkDeviceSize sequencesIndexOffset; +} VkCmdProcessCommandsInfoNVX; + +typedef struct VkCmdReserveSpaceForCommandsInfoNVX { 
+ VkStructureType sType; + const void* pNext; + VkObjectTableNVX objectTable; + VkIndirectCommandsLayoutNVX indirectCommandsLayout; + uint32_t maxSequencesCount; +} VkCmdReserveSpaceForCommandsInfoNVX; + +typedef struct VkObjectTableCreateInfoNVX { + VkStructureType sType; + const void* pNext; + uint32_t objectCount; + const VkObjectEntryTypeNVX* pObjectEntryTypes; + const uint32_t* pObjectEntryCounts; + const VkObjectEntryUsageFlagsNVX* pObjectEntryUsageFlags; + uint32_t maxUniformBuffersPerDescriptor; + uint32_t maxStorageBuffersPerDescriptor; + uint32_t maxStorageImagesPerDescriptor; + uint32_t maxSampledImagesPerDescriptor; + uint32_t maxPipelineLayouts; +} VkObjectTableCreateInfoNVX; + +typedef struct VkObjectTableEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; +} VkObjectTableEntryNVX; + +typedef struct VkObjectTablePipelineEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkPipeline pipeline; +} VkObjectTablePipelineEntryNVX; + +typedef struct VkObjectTableDescriptorSetEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkPipelineLayout pipelineLayout; + VkDescriptorSet descriptorSet; +} VkObjectTableDescriptorSetEntryNVX; + +typedef struct VkObjectTableVertexBufferEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkBuffer buffer; +} VkObjectTableVertexBufferEntryNVX; + +typedef struct VkObjectTableIndexBufferEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkBuffer buffer; + VkIndexType indexType; +} VkObjectTableIndexBufferEntryNVX; + +typedef struct VkObjectTablePushConstantEntryNVX { + VkObjectEntryTypeNVX type; + VkObjectEntryUsageFlagsNVX flags; + VkPipelineLayout pipelineLayout; + VkShaderStageFlags stageFlags; +} VkObjectTablePushConstantEntryNVX; + +typedef void (VKAPI_PTR *PFN_vkCmdProcessCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo); +typedef void (VKAPI_PTR *PFN_vkCmdReserveSpaceForCommandsNVX)(VkCommandBuffer commandBuffer, const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateIndirectCommandsLayoutNVX)(VkDevice device, const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout); +typedef void (VKAPI_PTR *PFN_vkDestroyIndirectCommandsLayoutNVX)(VkDevice device, VkIndirectCommandsLayoutNVX indirectCommandsLayout, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkCreateObjectTableNVX)(VkDevice device, const VkObjectTableCreateInfoNVX* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkObjectTableNVX* pObjectTable); +typedef void (VKAPI_PTR *PFN_vkDestroyObjectTableNVX)(VkDevice device, VkObjectTableNVX objectTable, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectTableEntryNVX* const* ppObjectTableEntries, const uint32_t* pObjectIndices); +typedef VkResult (VKAPI_PTR *PFN_vkUnregisterObjectsNVX)(VkDevice device, VkObjectTableNVX objectTable, uint32_t objectCount, const VkObjectEntryTypeNVX* pObjectEntryTypes, const uint32_t* pObjectIndices); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX)(VkPhysicalDevice physicalDevice, VkDeviceGeneratedCommandsFeaturesNVX* pFeatures, VkDeviceGeneratedCommandsLimitsNVX* pLimits); + +#ifndef VK_NO_PROTOTYPES 
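+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header:
+ * creating a minimal VK_NVX_device_generated_commands layout whose token
+ * stream issues plain draws. The helper name is hypothetical, and the
+ * VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX value is
+ * assumed to be defined earlier in this header.
+ */
+static VkResult create_draw_token_layout(VkDevice device,
+                                         VkIndirectCommandsLayoutNVX* pLayout)
+{
+    const VkIndirectCommandsLayoutTokenNVX token = {
+        .tokenType    = VK_INDIRECT_COMMANDS_TOKEN_TYPE_DRAW_NVX,
+        .bindingUnit  = 0,
+        .dynamicCount = 0,
+        .divisor      = 1,
+    };
+    const VkIndirectCommandsLayoutCreateInfoNVX info = {
+        .sType             = VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NVX,
+        .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
+        .tokenCount        = 1,
+        .pTokens           = &token,
+    };
+    return vkCreateIndirectCommandsLayoutNVX(device, &info, NULL, pLayout);
+}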
+VKAPI_ATTR void VKAPI_CALL vkCmdProcessCommandsNVX( + VkCommandBuffer commandBuffer, + const VkCmdProcessCommandsInfoNVX* pProcessCommandsInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdReserveSpaceForCommandsNVX( + VkCommandBuffer commandBuffer, + const VkCmdReserveSpaceForCommandsInfoNVX* pReserveSpaceInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIndirectCommandsLayoutNVX( + VkDevice device, + const VkIndirectCommandsLayoutCreateInfoNVX* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkIndirectCommandsLayoutNVX* pIndirectCommandsLayout); + +VKAPI_ATTR void VKAPI_CALL vkDestroyIndirectCommandsLayoutNVX( + VkDevice device, + VkIndirectCommandsLayoutNVX indirectCommandsLayout, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateObjectTableNVX( + VkDevice device, + const VkObjectTableCreateInfoNVX* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkObjectTableNVX* pObjectTable); + +VKAPI_ATTR void VKAPI_CALL vkDestroyObjectTableNVX( + VkDevice device, + VkObjectTableNVX objectTable, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterObjectsNVX( + VkDevice device, + VkObjectTableNVX objectTable, + uint32_t objectCount, + const VkObjectTableEntryNVX* const* ppObjectTableEntries, + const uint32_t* pObjectIndices); + +VKAPI_ATTR VkResult VKAPI_CALL vkUnregisterObjectsNVX( + VkDevice device, + VkObjectTableNVX objectTable, + uint32_t objectCount, + const VkObjectEntryTypeNVX* pObjectEntryTypes, + const uint32_t* pObjectIndices); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX( + VkPhysicalDevice physicalDevice, + VkDeviceGeneratedCommandsFeaturesNVX* pFeatures, + VkDeviceGeneratedCommandsLimitsNVX* pLimits); +#endif + + +#define VK_NV_clip_space_w_scaling 1 +#define VK_NV_CLIP_SPACE_W_SCALING_SPEC_VERSION 1 +#define VK_NV_CLIP_SPACE_W_SCALING_EXTENSION_NAME "VK_NV_clip_space_w_scaling" +typedef struct VkViewportWScalingNV { + float xcoeff; + float ycoeff; +} VkViewportWScalingNV; + +typedef struct VkPipelineViewportWScalingStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 viewportWScalingEnable; + uint32_t viewportCount; + const VkViewportWScalingNV* pViewportWScalings; +} VkPipelineViewportWScalingStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportWScalingNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkViewportWScalingNV* pViewportWScalings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportWScalingNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkViewportWScalingNV* pViewportWScalings); +#endif + + +#define VK_EXT_direct_mode_display 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_SPEC_VERSION 1 +#define VK_EXT_DIRECT_MODE_DISPLAY_EXTENSION_NAME "VK_EXT_direct_mode_display" +typedef VkResult (VKAPI_PTR *PFN_vkReleaseDisplayEXT)(VkPhysicalDevice physicalDevice, VkDisplayKHR display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseDisplayEXT( + VkPhysicalDevice physicalDevice, + VkDisplayKHR display); +#endif + + +#define VK_EXT_display_surface_counter 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_SURFACE_COUNTER_EXTENSION_NAME "VK_EXT_display_surface_counter" + +typedef enum VkSurfaceCounterFlagBitsEXT { + VK_SURFACE_COUNTER_VBLANK_EXT = 0x00000001, + VK_SURFACE_COUNTER_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkSurfaceCounterFlagBitsEXT; +typedef VkFlags 
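+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header:
+ * updating per-viewport W scaling dynamically. This assumes the pipeline was
+ * built with viewportWScalingEnable set and
+ * VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV enabled; the helper name and the
+ * coefficients are hypothetical.
+ */
+static void set_w_scaling(VkCommandBuffer cmd)
+{
+    const VkViewportWScalingNV scaling = { .xcoeff = 1.0f, .ycoeff = 1.0f };
+    vkCmdSetViewportWScalingNV(cmd, 0, 1, &scaling);
+}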
VkSurfaceCounterFlagsEXT; +typedef struct VkSurfaceCapabilities2EXT { + VkStructureType sType; + void* pNext; + uint32_t minImageCount; + uint32_t maxImageCount; + VkExtent2D currentExtent; + VkExtent2D minImageExtent; + VkExtent2D maxImageExtent; + uint32_t maxImageArrayLayers; + VkSurfaceTransformFlagsKHR supportedTransforms; + VkSurfaceTransformFlagBitsKHR currentTransform; + VkCompositeAlphaFlagsKHR supportedCompositeAlpha; + VkImageUsageFlags supportedUsageFlags; + VkSurfaceCounterFlagsEXT supportedSurfaceCounters; +} VkSurfaceCapabilities2EXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT)(VkPhysicalDevice physicalDevice, VkSurfaceKHR surface, VkSurfaceCapabilities2EXT* pSurfaceCapabilities); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfaceCapabilities2EXT( + VkPhysicalDevice physicalDevice, + VkSurfaceKHR surface, + VkSurfaceCapabilities2EXT* pSurfaceCapabilities); +#endif + + +#define VK_EXT_display_control 1 +#define VK_EXT_DISPLAY_CONTROL_SPEC_VERSION 1 +#define VK_EXT_DISPLAY_CONTROL_EXTENSION_NAME "VK_EXT_display_control" + +typedef enum VkDisplayPowerStateEXT { + VK_DISPLAY_POWER_STATE_OFF_EXT = 0, + VK_DISPLAY_POWER_STATE_SUSPEND_EXT = 1, + VK_DISPLAY_POWER_STATE_ON_EXT = 2, + VK_DISPLAY_POWER_STATE_BEGIN_RANGE_EXT = VK_DISPLAY_POWER_STATE_OFF_EXT, + VK_DISPLAY_POWER_STATE_END_RANGE_EXT = VK_DISPLAY_POWER_STATE_ON_EXT, + VK_DISPLAY_POWER_STATE_RANGE_SIZE_EXT = (VK_DISPLAY_POWER_STATE_ON_EXT - VK_DISPLAY_POWER_STATE_OFF_EXT + 1), + VK_DISPLAY_POWER_STATE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayPowerStateEXT; + +typedef enum VkDeviceEventTypeEXT { + VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT = 0, + VK_DEVICE_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT, + VK_DEVICE_EVENT_TYPE_END_RANGE_EXT = VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT, + VK_DEVICE_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT - VK_DEVICE_EVENT_TYPE_DISPLAY_HOTPLUG_EXT + 1), + VK_DEVICE_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDeviceEventTypeEXT; + +typedef enum VkDisplayEventTypeEXT { + VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT = 0, + VK_DISPLAY_EVENT_TYPE_BEGIN_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT, + VK_DISPLAY_EVENT_TYPE_END_RANGE_EXT = VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT, + VK_DISPLAY_EVENT_TYPE_RANGE_SIZE_EXT = (VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT - VK_DISPLAY_EVENT_TYPE_FIRST_PIXEL_OUT_EXT + 1), + VK_DISPLAY_EVENT_TYPE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDisplayEventTypeEXT; +typedef struct VkDisplayPowerInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayPowerStateEXT powerState; +} VkDisplayPowerInfoEXT; + +typedef struct VkDeviceEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceEventTypeEXT deviceEvent; +} VkDeviceEventInfoEXT; + +typedef struct VkDisplayEventInfoEXT { + VkStructureType sType; + const void* pNext; + VkDisplayEventTypeEXT displayEvent; +} VkDisplayEventInfoEXT; + +typedef struct VkSwapchainCounterCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkSurfaceCounterFlagsEXT surfaceCounters; +} VkSwapchainCounterCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkDisplayPowerControlEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayPowerInfoEXT* pDisplayPowerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkRegisterDeviceEventEXT)(VkDevice device, const VkDeviceEventInfoEXT* pDeviceEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR 
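+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header:
+ * checking whether a surface supports the VBLANK counter before chaining a
+ * VkSwapchainCounterCreateInfoEXT into swapchain creation. The helper name
+ * is hypothetical.
+ */
+static VkBool32 supports_vblank_counter(VkPhysicalDevice gpu, VkSurfaceKHR surface)
+{
+    VkSurfaceCapabilities2EXT caps = {
+        .sType = VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT,
+    };
+    if (vkGetPhysicalDeviceSurfaceCapabilities2EXT(gpu, surface, &caps) != VK_SUCCESS)
+        return VK_FALSE;
+    return (caps.supportedSurfaceCounters & VK_SURFACE_COUNTER_VBLANK_EXT) != 0;
+}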
*PFN_vkRegisterDisplayEventEXT)(VkDevice device, VkDisplayKHR display, const VkDisplayEventInfoEXT* pDisplayEventInfo, const VkAllocationCallbacks* pAllocator, VkFence* pFence); +typedef VkResult (VKAPI_PTR *PFN_vkGetSwapchainCounterEXT)(VkDevice device, VkSwapchainKHR swapchain, VkSurfaceCounterFlagBitsEXT counter, uint64_t* pCounterValue); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkDisplayPowerControlEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayPowerInfoEXT* pDisplayPowerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDeviceEventEXT( + VkDevice device, + const VkDeviceEventInfoEXT* pDeviceEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkRegisterDisplayEventEXT( + VkDevice device, + VkDisplayKHR display, + const VkDisplayEventInfoEXT* pDisplayEventInfo, + const VkAllocationCallbacks* pAllocator, + VkFence* pFence); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSwapchainCounterEXT( + VkDevice device, + VkSwapchainKHR swapchain, + VkSurfaceCounterFlagBitsEXT counter, + uint64_t* pCounterValue); +#endif + + +#define VK_GOOGLE_display_timing 1 +#define VK_GOOGLE_DISPLAY_TIMING_SPEC_VERSION 1 +#define VK_GOOGLE_DISPLAY_TIMING_EXTENSION_NAME "VK_GOOGLE_display_timing" +typedef struct VkRefreshCycleDurationGOOGLE { + uint64_t refreshDuration; +} VkRefreshCycleDurationGOOGLE; + +typedef struct VkPastPresentationTimingGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; + uint64_t actualPresentTime; + uint64_t earliestPresentTime; + uint64_t presentMargin; +} VkPastPresentationTimingGOOGLE; + +typedef struct VkPresentTimeGOOGLE { + uint32_t presentID; + uint64_t desiredPresentTime; +} VkPresentTimeGOOGLE; + +typedef struct VkPresentTimesInfoGOOGLE { + VkStructureType sType; + const void* pNext; + uint32_t swapchainCount; + const VkPresentTimeGOOGLE* pTimes; +} VkPresentTimesInfoGOOGLE; + +typedef VkResult (VKAPI_PTR *PFN_vkGetRefreshCycleDurationGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); +typedef VkResult (VKAPI_PTR *PFN_vkGetPastPresentationTimingGOOGLE)(VkDevice device, VkSwapchainKHR swapchain, uint32_t* pPresentationTimingCount, VkPastPresentationTimingGOOGLE* pPresentationTimings); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetRefreshCycleDurationGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + VkRefreshCycleDurationGOOGLE* pDisplayTimingProperties); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPastPresentationTimingGOOGLE( + VkDevice device, + VkSwapchainKHR swapchain, + uint32_t* pPresentationTimingCount, + VkPastPresentationTimingGOOGLE* pPresentationTimings); +#endif + + +#define VK_NV_sample_mask_override_coverage 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_SPEC_VERSION 1 +#define VK_NV_SAMPLE_MASK_OVERRIDE_COVERAGE_EXTENSION_NAME "VK_NV_sample_mask_override_coverage" + + +#define VK_NV_geometry_shader_passthrough 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_SPEC_VERSION 1 +#define VK_NV_GEOMETRY_SHADER_PASSTHROUGH_EXTENSION_NAME "VK_NV_geometry_shader_passthrough" + + +#define VK_NV_viewport_array2 1 +#define VK_NV_VIEWPORT_ARRAY2_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_ARRAY2_EXTENSION_NAME "VK_NV_viewport_array2" + + +#define VK_NVX_multiview_per_view_attributes 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_SPEC_VERSION 1 +#define VK_NVX_MULTIVIEW_PER_VIEW_ATTRIBUTES_EXTENSION_NAME "VK_NVX_multiview_per_view_attributes" +typedef struct VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX { 
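+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header:
+ * requesting a target present time by chaining VkPresentTimesInfoGOOGLE into
+ * vkQueuePresentKHR. The helper name is hypothetical; desiredPresentTime is
+ * in nanoseconds on the clock used by vkGetRefreshCycleDurationGOOGLE.
+ */
+static void present_with_timing(VkQueue queue, VkSwapchainKHR swapchain,
+                                uint32_t imageIndex, uint32_t presentId,
+                                uint64_t desiredPresentTime)
+{
+    const VkPresentTimeGOOGLE time = {
+        .presentID          = presentId,
+        .desiredPresentTime = desiredPresentTime,
+    };
+    const VkPresentTimesInfoGOOGLE times = {
+        .sType          = VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE,
+        .swapchainCount = 1,
+        .pTimes         = &time,
+    };
+    const VkPresentInfoKHR present = {
+        .sType          = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+        .pNext          = &times,       /* timing request rides the pNext chain */
+        .swapchainCount = 1,
+        .pSwapchains    = &swapchain,
+        .pImageIndices  = &imageIndex,
+    };
+    (void)vkQueuePresentKHR(queue, &present);
+}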
+ VkStructureType sType; + void* pNext; + VkBool32 perViewPositionAllComponents; +} VkPhysicalDeviceMultiviewPerViewAttributesPropertiesNVX; + + + +#define VK_NV_viewport_swizzle 1 +#define VK_NV_VIEWPORT_SWIZZLE_SPEC_VERSION 1 +#define VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME "VK_NV_viewport_swizzle" + +typedef enum VkViewportCoordinateSwizzleNV { + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV = 0, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_X_NV = 1, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV = 2, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Y_NV = 3, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV = 4, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_Z_NV = 5, + VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV = 6, + VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV = 7, + VK_VIEWPORT_COORDINATE_SWIZZLE_BEGIN_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV, + VK_VIEWPORT_COORDINATE_SWIZZLE_END_RANGE_NV = VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV, + VK_VIEWPORT_COORDINATE_SWIZZLE_RANGE_SIZE_NV = (VK_VIEWPORT_COORDINATE_SWIZZLE_NEGATIVE_W_NV - VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV + 1), + VK_VIEWPORT_COORDINATE_SWIZZLE_MAX_ENUM_NV = 0x7FFFFFFF +} VkViewportCoordinateSwizzleNV; +typedef VkFlags VkPipelineViewportSwizzleStateCreateFlagsNV; +typedef struct VkViewportSwizzleNV { + VkViewportCoordinateSwizzleNV x; + VkViewportCoordinateSwizzleNV y; + VkViewportCoordinateSwizzleNV z; + VkViewportCoordinateSwizzleNV w; +} VkViewportSwizzleNV; + +typedef struct VkPipelineViewportSwizzleStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineViewportSwizzleStateCreateFlagsNV flags; + uint32_t viewportCount; + const VkViewportSwizzleNV* pViewportSwizzles; +} VkPipelineViewportSwizzleStateCreateInfoNV; + + + +#define VK_EXT_discard_rectangles 1 +#define VK_EXT_DISCARD_RECTANGLES_SPEC_VERSION 1 +#define VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME "VK_EXT_discard_rectangles" + +typedef enum VkDiscardRectangleModeEXT { + VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT = 0, + VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT = 1, + VK_DISCARD_RECTANGLE_MODE_BEGIN_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT, + VK_DISCARD_RECTANGLE_MODE_END_RANGE_EXT = VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT, + VK_DISCARD_RECTANGLE_MODE_RANGE_SIZE_EXT = (VK_DISCARD_RECTANGLE_MODE_EXCLUSIVE_EXT - VK_DISCARD_RECTANGLE_MODE_INCLUSIVE_EXT + 1), + VK_DISCARD_RECTANGLE_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDiscardRectangleModeEXT; +typedef VkFlags VkPipelineDiscardRectangleStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceDiscardRectanglePropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxDiscardRectangles; +} VkPhysicalDeviceDiscardRectanglePropertiesEXT; + +typedef struct VkPipelineDiscardRectangleStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineDiscardRectangleStateCreateFlagsEXT flags; + VkDiscardRectangleModeEXT discardRectangleMode; + uint32_t discardRectangleCount; + const VkRect2D* pDiscardRectangles; +} VkPipelineDiscardRectangleStateCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetDiscardRectangleEXT)(VkCommandBuffer commandBuffer, uint32_t firstDiscardRectangle, uint32_t discardRectangleCount, const VkRect2D* pDiscardRectangles); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetDiscardRectangleEXT( + VkCommandBuffer commandBuffer, + uint32_t firstDiscardRectangle, + uint32_t discardRectangleCount, + const VkRect2D* pDiscardRectangles); +#endif + + +#define VK_EXT_conservative_rasterization 1 +#define 
VK_EXT_CONSERVATIVE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME "VK_EXT_conservative_rasterization" + +typedef enum VkConservativeRasterizationModeEXT { + VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT = 0, + VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT = 1, + VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT = 2, + VK_CONSERVATIVE_RASTERIZATION_MODE_BEGIN_RANGE_EXT = VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT, + VK_CONSERVATIVE_RASTERIZATION_MODE_END_RANGE_EXT = VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT, + VK_CONSERVATIVE_RASTERIZATION_MODE_RANGE_SIZE_EXT = (VK_CONSERVATIVE_RASTERIZATION_MODE_UNDERESTIMATE_EXT - VK_CONSERVATIVE_RASTERIZATION_MODE_DISABLED_EXT + 1), + VK_CONSERVATIVE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkConservativeRasterizationModeEXT; +typedef VkFlags VkPipelineRasterizationConservativeStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceConservativeRasterizationPropertiesEXT { + VkStructureType sType; + void* pNext; + float primitiveOverestimationSize; + float maxExtraPrimitiveOverestimationSize; + float extraPrimitiveOverestimationSizeGranularity; + VkBool32 primitiveUnderestimation; + VkBool32 conservativePointAndLineRasterization; + VkBool32 degenerateTrianglesRasterized; + VkBool32 degenerateLinesRasterized; + VkBool32 fullyCoveredFragmentShaderInputVariable; + VkBool32 conservativeRasterizationPostDepthCoverage; +} VkPhysicalDeviceConservativeRasterizationPropertiesEXT; + +typedef struct VkPipelineRasterizationConservativeStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationConservativeStateCreateFlagsEXT flags; + VkConservativeRasterizationModeEXT conservativeRasterizationMode; + float extraPrimitiveOverestimationSize; +} VkPipelineRasterizationConservativeStateCreateInfoEXT; + + + +#define VK_EXT_depth_clip_enable 1 +#define VK_EXT_DEPTH_CLIP_ENABLE_SPEC_VERSION 1 +#define VK_EXT_DEPTH_CLIP_ENABLE_EXTENSION_NAME "VK_EXT_depth_clip_enable" +typedef VkFlags VkPipelineRasterizationDepthClipStateCreateFlagsEXT; +typedef struct VkPhysicalDeviceDepthClipEnableFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 depthClipEnable; +} VkPhysicalDeviceDepthClipEnableFeaturesEXT; + +typedef struct VkPipelineRasterizationDepthClipStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineRasterizationDepthClipStateCreateFlagsEXT flags; + VkBool32 depthClipEnable; +} VkPipelineRasterizationDepthClipStateCreateInfoEXT; + + + +#define VK_EXT_swapchain_colorspace 1 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_SPEC_VERSION 4 +#define VK_EXT_SWAPCHAIN_COLOR_SPACE_EXTENSION_NAME "VK_EXT_swapchain_colorspace" + + +#define VK_EXT_hdr_metadata 1 +#define VK_EXT_HDR_METADATA_SPEC_VERSION 2 +#define VK_EXT_HDR_METADATA_EXTENSION_NAME "VK_EXT_hdr_metadata" +typedef struct VkXYColorEXT { + float x; + float y; +} VkXYColorEXT; + +typedef struct VkHdrMetadataEXT { + VkStructureType sType; + const void* pNext; + VkXYColorEXT displayPrimaryRed; + VkXYColorEXT displayPrimaryGreen; + VkXYColorEXT displayPrimaryBlue; + VkXYColorEXT whitePoint; + float maxLuminance; + float minLuminance; + float maxContentLightLevel; + float maxFrameAverageLightLevel; +} VkHdrMetadataEXT; + +typedef void (VKAPI_PTR *PFN_vkSetHdrMetadataEXT)(VkDevice device, uint32_t swapchainCount, const VkSwapchainKHR* pSwapchains, const VkHdrMetadataEXT* pMetadata); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetHdrMetadataEXT( + VkDevice device, + uint32_t 
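+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header: the
+ * conservative-rasterization state above is enabled by chaining it into
+ * VkPipelineRasterizationStateCreateInfo::pNext at pipeline build time. The
+ * helper name is hypothetical.
+ */
+static VkPipelineRasterizationConservativeStateCreateInfoEXT
+conservative_overestimate_state(void)
+{
+    const VkPipelineRasterizationConservativeStateCreateInfoEXT info = {
+        .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT,
+        .conservativeRasterizationMode = VK_CONSERVATIVE_RASTERIZATION_MODE_OVERESTIMATE_EXT,
+        .extraPrimitiveOverestimationSize = 0.0f, /* stay within the queried limit */
+    };
+    return info;
+}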
swapchainCount, + const VkSwapchainKHR* pSwapchains, + const VkHdrMetadataEXT* pMetadata); +#endif + + +#define VK_EXT_external_memory_dma_buf 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_DMA_BUF_EXTENSION_NAME "VK_EXT_external_memory_dma_buf" + + +#define VK_EXT_queue_family_foreign 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_SPEC_VERSION 1 +#define VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME "VK_EXT_queue_family_foreign" +#define VK_QUEUE_FAMILY_FOREIGN_EXT (~0U-2) + + +#define VK_EXT_debug_utils 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDebugUtilsMessengerEXT) +#define VK_EXT_DEBUG_UTILS_SPEC_VERSION 1 +#define VK_EXT_DEBUG_UTILS_EXTENSION_NAME "VK_EXT_debug_utils" +typedef VkFlags VkDebugUtilsMessengerCallbackDataFlagsEXT; +typedef VkFlags VkDebugUtilsMessengerCreateFlagsEXT; + +typedef enum VkDebugUtilsMessageSeverityFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageSeverityFlagBitsEXT; +typedef VkFlags VkDebugUtilsMessageSeverityFlagsEXT; + +typedef enum VkDebugUtilsMessageTypeFlagBitsEXT { + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002, + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004, + VK_DEBUG_UTILS_MESSAGE_TYPE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkDebugUtilsMessageTypeFlagBitsEXT; +typedef VkFlags VkDebugUtilsMessageTypeFlagsEXT; +typedef struct VkDebugUtilsObjectNameInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + const char* pObjectName; +} VkDebugUtilsObjectNameInfoEXT; + +typedef struct VkDebugUtilsObjectTagInfoEXT { + VkStructureType sType; + const void* pNext; + VkObjectType objectType; + uint64_t objectHandle; + uint64_t tagName; + size_t tagSize; + const void* pTag; +} VkDebugUtilsObjectTagInfoEXT; + +typedef struct VkDebugUtilsLabelEXT { + VkStructureType sType; + const void* pNext; + const char* pLabelName; + float color[4]; +} VkDebugUtilsLabelEXT; + +typedef struct VkDebugUtilsMessengerCallbackDataEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCallbackDataFlagsEXT flags; + const char* pMessageIdName; + int32_t messageIdNumber; + const char* pMessage; + uint32_t queueLabelCount; + const VkDebugUtilsLabelEXT* pQueueLabels; + uint32_t cmdBufLabelCount; + const VkDebugUtilsLabelEXT* pCmdBufLabels; + uint32_t objectCount; + const VkDebugUtilsObjectNameInfoEXT* pObjects; +} VkDebugUtilsMessengerCallbackDataEXT; + +typedef VkBool32 (VKAPI_PTR *PFN_vkDebugUtilsMessengerCallbackEXT)( + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* pUserData); + +typedef struct VkDebugUtilsMessengerCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDebugUtilsMessengerCreateFlagsEXT flags; + VkDebugUtilsMessageSeverityFlagsEXT messageSeverity; + VkDebugUtilsMessageTypeFlagsEXT messageType; + PFN_vkDebugUtilsMessengerCallbackEXT pfnUserCallback; + void* pUserData; +} VkDebugUtilsMessengerCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectNameEXT)(VkDevice device, const VkDebugUtilsObjectNameInfoEXT* pNameInfo); +typedef 
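+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header:
+ * publishing mastering metadata for an HDR swapchain. The BT.2020 primaries
+ * and D65 white point are standard chromaticities; the luminance values are
+ * hypothetical examples in nits.
+ */
+static void set_bt2020_hdr_metadata(VkDevice device, VkSwapchainKHR swapchain)
+{
+    const VkHdrMetadataEXT md = {
+        .sType                     = VK_STRUCTURE_TYPE_HDR_METADATA_EXT,
+        .displayPrimaryRed         = { 0.708f,  0.292f  },
+        .displayPrimaryGreen       = { 0.170f,  0.797f  },
+        .displayPrimaryBlue        = { 0.131f,  0.046f  },
+        .whitePoint                = { 0.3127f, 0.3290f },
+        .maxLuminance              = 1000.0f,
+        .minLuminance              = 0.001f,
+        .maxContentLightLevel      = 1000.0f,
+        .maxFrameAverageLightLevel = 400.0f,
+    };
+    vkSetHdrMetadataEXT(device, 1, &swapchain, &md);
+}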
VkResult (VKAPI_PTR *PFN_vkSetDebugUtilsObjectTagEXT)(VkDevice device, const VkDebugUtilsObjectTagInfoEXT* pTagInfo); +typedef void (VKAPI_PTR *PFN_vkQueueBeginDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkQueueEndDebugUtilsLabelEXT)(VkQueue queue); +typedef void (VKAPI_PTR *PFN_vkQueueInsertDebugUtilsLabelEXT)(VkQueue queue, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdBeginDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef void (VKAPI_PTR *PFN_vkCmdEndDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer); +typedef void (VKAPI_PTR *PFN_vkCmdInsertDebugUtilsLabelEXT)(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT* pLabelInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCreateDebugUtilsMessengerEXT)(VkInstance instance, const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkDebugUtilsMessengerEXT* pMessenger); +typedef void (VKAPI_PTR *PFN_vkDestroyDebugUtilsMessengerEXT)(VkInstance instance, VkDebugUtilsMessengerEXT messenger, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkSubmitDebugUtilsMessageEXT)(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageTypes, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectNameEXT( + VkDevice device, + const VkDebugUtilsObjectNameInfoEXT* pNameInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkSetDebugUtilsObjectTagEXT( + VkDevice device, + const VkDebugUtilsObjectTagInfoEXT* pTagInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueBeginDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkQueueEndDebugUtilsLabelEXT( + VkQueue queue); + +VKAPI_ATTR void VKAPI_CALL vkQueueInsertDebugUtilsLabelEXT( + VkQueue queue, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdBeginDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR void VKAPI_CALL vkCmdEndDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer); + +VKAPI_ATTR void VKAPI_CALL vkCmdInsertDebugUtilsLabelEXT( + VkCommandBuffer commandBuffer, + const VkDebugUtilsLabelEXT* pLabelInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugUtilsMessengerEXT( + VkInstance instance, + const VkDebugUtilsMessengerCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkDebugUtilsMessengerEXT* pMessenger); + +VKAPI_ATTR void VKAPI_CALL vkDestroyDebugUtilsMessengerEXT( + VkInstance instance, + VkDebugUtilsMessengerEXT messenger, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkSubmitDebugUtilsMessageEXT( + VkInstance instance, + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT messageTypes, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData); +#endif + + +#define VK_EXT_sampler_filter_minmax 1 +#define VK_EXT_SAMPLER_FILTER_MINMAX_SPEC_VERSION 2 +#define VK_EXT_SAMPLER_FILTER_MINMAX_EXTENSION_NAME "VK_EXT_sampler_filter_minmax" +typedef VkSamplerReductionMode VkSamplerReductionModeEXT; + +typedef VkSamplerReductionModeCreateInfo VkSamplerReductionModeCreateInfoEXT; + +typedef VkPhysicalDeviceSamplerFilterMinmaxProperties VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT; + + + +#define VK_AMD_gpu_shader_int16 1 +#define 
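+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header:
+ * installing a VK_EXT_debug_utils messenger. The callback and helper names
+ * are hypothetical. In a real application vkCreateDebugUtilsMessengerEXT is
+ * an extension entry point and must be fetched with vkGetInstanceProcAddr.
+ */
+static VKAPI_ATTR VkBool32 VKAPI_CALL
+example_debug_callback(VkDebugUtilsMessageSeverityFlagBitsEXT severity,
+                       VkDebugUtilsMessageTypeFlagsEXT types,
+                       const VkDebugUtilsMessengerCallbackDataEXT* pData,
+                       void* pUserData)
+{
+    (void)severity; (void)types; (void)pUserData;
+    /* Forward pData->pMessage to the application's logger here. */
+    return VK_FALSE; /* never abort the call that triggered the message */
+}
+
+static VkResult install_messenger(VkInstance instance,
+                                  VkDebugUtilsMessengerEXT* pMessenger)
+{
+    const VkDebugUtilsMessengerCreateInfoEXT info = {
+        .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
+        .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT |
+                           VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
+        .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT |
+                       VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT |
+                       VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT,
+        .pfnUserCallback = example_debug_callback,
+    };
+    return vkCreateDebugUtilsMessengerEXT(instance, &info, NULL, pMessenger);
+}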
VK_AMD_GPU_SHADER_INT16_SPEC_VERSION 2 +#define VK_AMD_GPU_SHADER_INT16_EXTENSION_NAME "VK_AMD_gpu_shader_int16" + + +#define VK_AMD_mixed_attachment_samples 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_SPEC_VERSION 1 +#define VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME "VK_AMD_mixed_attachment_samples" + + +#define VK_AMD_shader_fragment_mask 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_SPEC_VERSION 1 +#define VK_AMD_SHADER_FRAGMENT_MASK_EXTENSION_NAME "VK_AMD_shader_fragment_mask" + + +#define VK_EXT_inline_uniform_block 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_SPEC_VERSION 1 +#define VK_EXT_INLINE_UNIFORM_BLOCK_EXTENSION_NAME "VK_EXT_inline_uniform_block" +typedef struct VkPhysicalDeviceInlineUniformBlockFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 inlineUniformBlock; + VkBool32 descriptorBindingInlineUniformBlockUpdateAfterBind; +} VkPhysicalDeviceInlineUniformBlockFeaturesEXT; + +typedef struct VkPhysicalDeviceInlineUniformBlockPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxInlineUniformBlockSize; + uint32_t maxPerStageDescriptorInlineUniformBlocks; + uint32_t maxPerStageDescriptorUpdateAfterBindInlineUniformBlocks; + uint32_t maxDescriptorSetInlineUniformBlocks; + uint32_t maxDescriptorSetUpdateAfterBindInlineUniformBlocks; +} VkPhysicalDeviceInlineUniformBlockPropertiesEXT; + +typedef struct VkWriteDescriptorSetInlineUniformBlockEXT { + VkStructureType sType; + const void* pNext; + uint32_t dataSize; + const void* pData; +} VkWriteDescriptorSetInlineUniformBlockEXT; + +typedef struct VkDescriptorPoolInlineUniformBlockCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t maxInlineUniformBlockBindings; +} VkDescriptorPoolInlineUniformBlockCreateInfoEXT; + + + +#define VK_EXT_shader_stencil_export 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_SPEC_VERSION 1 +#define VK_EXT_SHADER_STENCIL_EXPORT_EXTENSION_NAME "VK_EXT_shader_stencil_export" + + +#define VK_EXT_sample_locations 1 +#define VK_EXT_SAMPLE_LOCATIONS_SPEC_VERSION 1 +#define VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME "VK_EXT_sample_locations" +typedef struct VkSampleLocationEXT { + float x; + float y; +} VkSampleLocationEXT; + +typedef struct VkSampleLocationsInfoEXT { + VkStructureType sType; + const void* pNext; + VkSampleCountFlagBits sampleLocationsPerPixel; + VkExtent2D sampleLocationGridSize; + uint32_t sampleLocationsCount; + const VkSampleLocationEXT* pSampleLocations; +} VkSampleLocationsInfoEXT; + +typedef struct VkAttachmentSampleLocationsEXT { + uint32_t attachmentIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkAttachmentSampleLocationsEXT; + +typedef struct VkSubpassSampleLocationsEXT { + uint32_t subpassIndex; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkSubpassSampleLocationsEXT; + +typedef struct VkRenderPassSampleLocationsBeginInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t attachmentInitialSampleLocationsCount; + const VkAttachmentSampleLocationsEXT* pAttachmentInitialSampleLocations; + uint32_t postSubpassSampleLocationsCount; + const VkSubpassSampleLocationsEXT* pPostSubpassSampleLocations; +} VkRenderPassSampleLocationsBeginInfoEXT; + +typedef struct VkPipelineSampleLocationsStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 sampleLocationsEnable; + VkSampleLocationsInfoEXT sampleLocationsInfo; +} VkPipelineSampleLocationsStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceSampleLocationsPropertiesEXT { + VkStructureType sType; + void* pNext; + VkSampleCountFlags sampleLocationSampleCounts; + 
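+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header:
+ * writing raw bytes into an inline uniform block binding. Note that for this
+ * descriptor type VkWriteDescriptorSet::descriptorCount is the byte count.
+ * The helper name and binding layout are hypothetical.
+ */
+static void write_inline_uniform_block(VkDevice device, VkDescriptorSet set,
+                                       const float color[4])
+{
+    const VkWriteDescriptorSetInlineUniformBlockEXT payload = {
+        .sType    = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT,
+        .dataSize = 4 * sizeof(float),
+        .pData    = color,
+    };
+    const VkWriteDescriptorSet write = {
+        .sType           = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+        .pNext           = &payload,          /* data travels in the pNext chain */
+        .dstSet          = set,
+        .dstBinding      = 0,
+        .descriptorCount = 4 * sizeof(float), /* bytes, not descriptors */
+        .descriptorType  = VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT,
+    };
+    vkUpdateDescriptorSets(device, 1, &write, 0, NULL);
+}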
VkExtent2D maxSampleLocationGridSize; + float sampleLocationCoordinateRange[2]; + uint32_t sampleLocationSubPixelBits; + VkBool32 variableSampleLocations; +} VkPhysicalDeviceSampleLocationsPropertiesEXT; + +typedef struct VkMultisamplePropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D maxSampleLocationGridSize; +} VkMultisamplePropertiesEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetSampleLocationsEXT)(VkCommandBuffer commandBuffer, const VkSampleLocationsInfoEXT* pSampleLocationsInfo); +typedef void (VKAPI_PTR *PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)(VkPhysicalDevice physicalDevice, VkSampleCountFlagBits samples, VkMultisamplePropertiesEXT* pMultisampleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetSampleLocationsEXT( + VkCommandBuffer commandBuffer, + const VkSampleLocationsInfoEXT* pSampleLocationsInfo); + +VKAPI_ATTR void VKAPI_CALL vkGetPhysicalDeviceMultisamplePropertiesEXT( + VkPhysicalDevice physicalDevice, + VkSampleCountFlagBits samples, + VkMultisamplePropertiesEXT* pMultisampleProperties); +#endif + + +#define VK_EXT_blend_operation_advanced 1 +#define VK_EXT_BLEND_OPERATION_ADVANCED_SPEC_VERSION 2 +#define VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME "VK_EXT_blend_operation_advanced" + +typedef enum VkBlendOverlapEXT { + VK_BLEND_OVERLAP_UNCORRELATED_EXT = 0, + VK_BLEND_OVERLAP_DISJOINT_EXT = 1, + VK_BLEND_OVERLAP_CONJOINT_EXT = 2, + VK_BLEND_OVERLAP_BEGIN_RANGE_EXT = VK_BLEND_OVERLAP_UNCORRELATED_EXT, + VK_BLEND_OVERLAP_END_RANGE_EXT = VK_BLEND_OVERLAP_CONJOINT_EXT, + VK_BLEND_OVERLAP_RANGE_SIZE_EXT = (VK_BLEND_OVERLAP_CONJOINT_EXT - VK_BLEND_OVERLAP_UNCORRELATED_EXT + 1), + VK_BLEND_OVERLAP_MAX_ENUM_EXT = 0x7FFFFFFF +} VkBlendOverlapEXT; +typedef struct VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 advancedBlendCoherentOperations; +} VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT; + +typedef struct VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t advancedBlendMaxColorAttachments; + VkBool32 advancedBlendIndependentBlend; + VkBool32 advancedBlendNonPremultipliedSrcColor; + VkBool32 advancedBlendNonPremultipliedDstColor; + VkBool32 advancedBlendCorrelatedOverlap; + VkBool32 advancedBlendAllOperations; +} VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT; + +typedef struct VkPipelineColorBlendAdvancedStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkBool32 srcPremultiplied; + VkBool32 dstPremultiplied; + VkBlendOverlapEXT blendOverlap; +} VkPipelineColorBlendAdvancedStateCreateInfoEXT; + + + +#define VK_NV_fragment_coverage_to_color 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME "VK_NV_fragment_coverage_to_color" +typedef VkFlags VkPipelineCoverageToColorStateCreateFlagsNV; +typedef struct VkPipelineCoverageToColorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageToColorStateCreateFlagsNV flags; + VkBool32 coverageToColorEnable; + uint32_t coverageToColorLocation; +} VkPipelineCoverageToColorStateCreateInfoNV; + + + +#define VK_NV_framebuffer_mixed_samples 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_SPEC_VERSION 1 +#define VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME "VK_NV_framebuffer_mixed_samples" + +typedef enum VkCoverageModulationModeNV { + VK_COVERAGE_MODULATION_MODE_NONE_NV = 0, + VK_COVERAGE_MODULATION_MODE_RGB_NV = 1, + VK_COVERAGE_MODULATION_MODE_ALPHA_NV = 2, + 
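+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header:
+ * overriding the 4x MSAA sample positions dynamically. Assumes the pipeline
+ * enabled VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT; the positions are
+ * hypothetical values in the per-pixel [0,1) space.
+ */
+static void set_custom_sample_locations(VkCommandBuffer cmd)
+{
+    static const VkSampleLocationEXT locs[4] = {
+        { 0.375f, 0.125f }, { 0.875f, 0.375f },
+        { 0.125f, 0.625f }, { 0.625f, 0.875f },
+    };
+    const VkSampleLocationsInfoEXT info = {
+        .sType                   = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT,
+        .sampleLocationsPerPixel = VK_SAMPLE_COUNT_4_BIT,
+        .sampleLocationGridSize  = { 1, 1 },
+        .sampleLocationsCount    = 4,
+        .pSampleLocations        = locs,
+    };
+    vkCmdSetSampleLocationsEXT(cmd, &info);
+}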
VK_COVERAGE_MODULATION_MODE_RGBA_NV = 3, + VK_COVERAGE_MODULATION_MODE_BEGIN_RANGE_NV = VK_COVERAGE_MODULATION_MODE_NONE_NV, + VK_COVERAGE_MODULATION_MODE_END_RANGE_NV = VK_COVERAGE_MODULATION_MODE_RGBA_NV, + VK_COVERAGE_MODULATION_MODE_RANGE_SIZE_NV = (VK_COVERAGE_MODULATION_MODE_RGBA_NV - VK_COVERAGE_MODULATION_MODE_NONE_NV + 1), + VK_COVERAGE_MODULATION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageModulationModeNV; +typedef VkFlags VkPipelineCoverageModulationStateCreateFlagsNV; +typedef struct VkPipelineCoverageModulationStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageModulationStateCreateFlagsNV flags; + VkCoverageModulationModeNV coverageModulationMode; + VkBool32 coverageModulationTableEnable; + uint32_t coverageModulationTableCount; + const float* pCoverageModulationTable; +} VkPipelineCoverageModulationStateCreateInfoNV; + + + +#define VK_NV_fill_rectangle 1 +#define VK_NV_FILL_RECTANGLE_SPEC_VERSION 1 +#define VK_NV_FILL_RECTANGLE_EXTENSION_NAME "VK_NV_fill_rectangle" + + +#define VK_NV_shader_sm_builtins 1 +#define VK_NV_SHADER_SM_BUILTINS_SPEC_VERSION 1 +#define VK_NV_SHADER_SM_BUILTINS_EXTENSION_NAME "VK_NV_shader_sm_builtins" +typedef struct VkPhysicalDeviceShaderSMBuiltinsPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderSMCount; + uint32_t shaderWarpsPerSM; +} VkPhysicalDeviceShaderSMBuiltinsPropertiesNV; + +typedef struct VkPhysicalDeviceShaderSMBuiltinsFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shaderSMBuiltins; +} VkPhysicalDeviceShaderSMBuiltinsFeaturesNV; + + + +#define VK_EXT_post_depth_coverage 1 +#define VK_EXT_POST_DEPTH_COVERAGE_SPEC_VERSION 1 +#define VK_EXT_POST_DEPTH_COVERAGE_EXTENSION_NAME "VK_EXT_post_depth_coverage" + + +#define VK_EXT_image_drm_format_modifier 1 +#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_SPEC_VERSION 1 +#define VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME "VK_EXT_image_drm_format_modifier" +typedef struct VkDrmFormatModifierPropertiesEXT { + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + VkFormatFeatureFlags drmFormatModifierTilingFeatures; +} VkDrmFormatModifierPropertiesEXT; + +typedef struct VkDrmFormatModifierPropertiesListEXT { + VkStructureType sType; + void* pNext; + uint32_t drmFormatModifierCount; + VkDrmFormatModifierPropertiesEXT* pDrmFormatModifierProperties; +} VkDrmFormatModifierPropertiesListEXT; + +typedef struct VkPhysicalDeviceImageDrmFormatModifierInfoEXT { + VkStructureType sType; + const void* pNext; + uint64_t drmFormatModifier; + VkSharingMode sharingMode; + uint32_t queueFamilyIndexCount; + const uint32_t* pQueueFamilyIndices; +} VkPhysicalDeviceImageDrmFormatModifierInfoEXT; + +typedef struct VkImageDrmFormatModifierListCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t drmFormatModifierCount; + const uint64_t* pDrmFormatModifiers; +} VkImageDrmFormatModifierListCreateInfoEXT; + +typedef struct VkImageDrmFormatModifierExplicitCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint64_t drmFormatModifier; + uint32_t drmFormatModifierPlaneCount; + const VkSubresourceLayout* pPlaneLayouts; +} VkImageDrmFormatModifierExplicitCreateInfoEXT; + +typedef struct VkImageDrmFormatModifierPropertiesEXT { + VkStructureType sType; + void* pNext; + uint64_t drmFormatModifier; +} VkImageDrmFormatModifierPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetImageDrmFormatModifierPropertiesEXT)(VkDevice device, VkImage image, VkImageDrmFormatModifierPropertiesEXT* pProperties); + +#ifndef 
VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetImageDrmFormatModifierPropertiesEXT( + VkDevice device, + VkImage image, + VkImageDrmFormatModifierPropertiesEXT* pProperties); +#endif + + +#define VK_EXT_validation_cache 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkValidationCacheEXT) +#define VK_EXT_VALIDATION_CACHE_SPEC_VERSION 1 +#define VK_EXT_VALIDATION_CACHE_EXTENSION_NAME "VK_EXT_validation_cache" + +typedef enum VkValidationCacheHeaderVersionEXT { + VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT = 1, + VK_VALIDATION_CACHE_HEADER_VERSION_BEGIN_RANGE_EXT = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT, + VK_VALIDATION_CACHE_HEADER_VERSION_END_RANGE_EXT = VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT, + VK_VALIDATION_CACHE_HEADER_VERSION_RANGE_SIZE_EXT = (VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT - VK_VALIDATION_CACHE_HEADER_VERSION_ONE_EXT + 1), + VK_VALIDATION_CACHE_HEADER_VERSION_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationCacheHeaderVersionEXT; +typedef VkFlags VkValidationCacheCreateFlagsEXT; +typedef struct VkValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheCreateFlagsEXT flags; + size_t initialDataSize; + const void* pInitialData; +} VkValidationCacheCreateInfoEXT; + +typedef struct VkShaderModuleValidationCacheCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkValidationCacheEXT validationCache; +} VkShaderModuleValidationCacheCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateValidationCacheEXT)(VkDevice device, const VkValidationCacheCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkValidationCacheEXT* pValidationCache); +typedef void (VKAPI_PTR *PFN_vkDestroyValidationCacheEXT)(VkDevice device, VkValidationCacheEXT validationCache, const VkAllocationCallbacks* pAllocator); +typedef VkResult (VKAPI_PTR *PFN_vkMergeValidationCachesEXT)(VkDevice device, VkValidationCacheEXT dstCache, uint32_t srcCacheCount, const VkValidationCacheEXT* pSrcCaches); +typedef VkResult (VKAPI_PTR *PFN_vkGetValidationCacheDataEXT)(VkDevice device, VkValidationCacheEXT validationCache, size_t* pDataSize, void* pData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateValidationCacheEXT( + VkDevice device, + const VkValidationCacheCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkValidationCacheEXT* pValidationCache); + +VKAPI_ATTR void VKAPI_CALL vkDestroyValidationCacheEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR VkResult VKAPI_CALL vkMergeValidationCachesEXT( + VkDevice device, + VkValidationCacheEXT dstCache, + uint32_t srcCacheCount, + const VkValidationCacheEXT* pSrcCaches); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetValidationCacheDataEXT( + VkDevice device, + VkValidationCacheEXT validationCache, + size_t* pDataSize, + void* pData); +#endif + + +#define VK_EXT_descriptor_indexing 1 +#define VK_EXT_DESCRIPTOR_INDEXING_SPEC_VERSION 2 +#define VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME "VK_EXT_descriptor_indexing" +typedef VkDescriptorBindingFlagBits VkDescriptorBindingFlagBitsEXT; + +typedef VkDescriptorBindingFlags VkDescriptorBindingFlagsEXT; + +typedef VkDescriptorSetLayoutBindingFlagsCreateInfo VkDescriptorSetLayoutBindingFlagsCreateInfoEXT; + +typedef VkPhysicalDeviceDescriptorIndexingFeatures VkPhysicalDeviceDescriptorIndexingFeaturesEXT; + +typedef VkPhysicalDeviceDescriptorIndexingProperties VkPhysicalDeviceDescriptorIndexingPropertiesEXT; + +typedef 
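+/*
+ * Editor's note -- illustrative sketch, not part of the Khronos header: the
+ * two-call idiom for vkGetValidationCacheDataEXT, writing into a
+ * caller-provided buffer so the blob can be persisted between runs. The
+ * helper name is hypothetical.
+ */
+static VkResult get_validation_cache_blob(VkDevice device,
+                                          VkValidationCacheEXT cache,
+                                          size_t capacity, void* buffer,
+                                          size_t* pSize)
+{
+    VkResult res = vkGetValidationCacheDataEXT(device, cache, pSize, NULL);
+    if (res != VK_SUCCESS)
+        return res;
+    if (*pSize > capacity)
+        return VK_INCOMPLETE;
+    return vkGetValidationCacheDataEXT(device, cache, pSize, buffer);
+}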
VkDescriptorSetVariableDescriptorCountAllocateInfo VkDescriptorSetVariableDescriptorCountAllocateInfoEXT; + +typedef VkDescriptorSetVariableDescriptorCountLayoutSupport VkDescriptorSetVariableDescriptorCountLayoutSupportEXT; + + + +#define VK_EXT_shader_viewport_index_layer 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_SPEC_VERSION 1 +#define VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME "VK_EXT_shader_viewport_index_layer" + + +#define VK_NV_shading_rate_image 1 +#define VK_NV_SHADING_RATE_IMAGE_SPEC_VERSION 3 +#define VK_NV_SHADING_RATE_IMAGE_EXTENSION_NAME "VK_NV_shading_rate_image" + +typedef enum VkShadingRatePaletteEntryNV { + VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV = 0, + VK_SHADING_RATE_PALETTE_ENTRY_16_INVOCATIONS_PER_PIXEL_NV = 1, + VK_SHADING_RATE_PALETTE_ENTRY_8_INVOCATIONS_PER_PIXEL_NV = 2, + VK_SHADING_RATE_PALETTE_ENTRY_4_INVOCATIONS_PER_PIXEL_NV = 3, + VK_SHADING_RATE_PALETTE_ENTRY_2_INVOCATIONS_PER_PIXEL_NV = 4, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_PIXEL_NV = 5, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X1_PIXELS_NV = 6, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_1X2_PIXELS_NV = 7, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X2_PIXELS_NV = 8, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X2_PIXELS_NV = 9, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_2X4_PIXELS_NV = 10, + VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV = 11, + VK_SHADING_RATE_PALETTE_ENTRY_BEGIN_RANGE_NV = VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV, + VK_SHADING_RATE_PALETTE_ENTRY_END_RANGE_NV = VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV, + VK_SHADING_RATE_PALETTE_ENTRY_RANGE_SIZE_NV = (VK_SHADING_RATE_PALETTE_ENTRY_1_INVOCATION_PER_4X4_PIXELS_NV - VK_SHADING_RATE_PALETTE_ENTRY_NO_INVOCATIONS_NV + 1), + VK_SHADING_RATE_PALETTE_ENTRY_MAX_ENUM_NV = 0x7FFFFFFF +} VkShadingRatePaletteEntryNV; + +typedef enum VkCoarseSampleOrderTypeNV { + VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV = 0, + VK_COARSE_SAMPLE_ORDER_TYPE_CUSTOM_NV = 1, + VK_COARSE_SAMPLE_ORDER_TYPE_PIXEL_MAJOR_NV = 2, + VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV = 3, + VK_COARSE_SAMPLE_ORDER_TYPE_BEGIN_RANGE_NV = VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV, + VK_COARSE_SAMPLE_ORDER_TYPE_END_RANGE_NV = VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV, + VK_COARSE_SAMPLE_ORDER_TYPE_RANGE_SIZE_NV = (VK_COARSE_SAMPLE_ORDER_TYPE_SAMPLE_MAJOR_NV - VK_COARSE_SAMPLE_ORDER_TYPE_DEFAULT_NV + 1), + VK_COARSE_SAMPLE_ORDER_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoarseSampleOrderTypeNV; +typedef struct VkShadingRatePaletteNV { + uint32_t shadingRatePaletteEntryCount; + const VkShadingRatePaletteEntryNV* pShadingRatePaletteEntries; +} VkShadingRatePaletteNV; + +typedef struct VkPipelineViewportShadingRateImageStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 shadingRateImageEnable; + uint32_t viewportCount; + const VkShadingRatePaletteNV* pShadingRatePalettes; +} VkPipelineViewportShadingRateImageStateCreateInfoNV; + +typedef struct VkPhysicalDeviceShadingRateImageFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 shadingRateImage; + VkBool32 shadingRateCoarseSampleOrder; +} VkPhysicalDeviceShadingRateImageFeaturesNV; + +typedef struct VkPhysicalDeviceShadingRateImagePropertiesNV { + VkStructureType sType; + void* pNext; + VkExtent2D shadingRateTexelSize; + uint32_t shadingRatePaletteSize; + uint32_t shadingRateMaxCoarseSamples; +} VkPhysicalDeviceShadingRateImagePropertiesNV; + +typedef struct VkCoarseSampleLocationNV { + uint32_t pixelX; + uint32_t 
pixelY; + uint32_t sample; +} VkCoarseSampleLocationNV; + +typedef struct VkCoarseSampleOrderCustomNV { + VkShadingRatePaletteEntryNV shadingRate; + uint32_t sampleCount; + uint32_t sampleLocationCount; + const VkCoarseSampleLocationNV* pSampleLocations; +} VkCoarseSampleOrderCustomNV; + +typedef struct VkPipelineViewportCoarseSampleOrderStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkCoarseSampleOrderTypeNV sampleOrderType; + uint32_t customSampleOrderCount; + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders; +} VkPipelineViewportCoarseSampleOrderStateCreateInfoNV; + +typedef void (VKAPI_PTR *PFN_vkCmdBindShadingRateImageNV)(VkCommandBuffer commandBuffer, VkImageView imageView, VkImageLayout imageLayout); +typedef void (VKAPI_PTR *PFN_vkCmdSetViewportShadingRatePaletteNV)(VkCommandBuffer commandBuffer, uint32_t firstViewport, uint32_t viewportCount, const VkShadingRatePaletteNV* pShadingRatePalettes); +typedef void (VKAPI_PTR *PFN_vkCmdSetCoarseSampleOrderNV)(VkCommandBuffer commandBuffer, VkCoarseSampleOrderTypeNV sampleOrderType, uint32_t customSampleOrderCount, const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdBindShadingRateImageNV( + VkCommandBuffer commandBuffer, + VkImageView imageView, + VkImageLayout imageLayout); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetViewportShadingRatePaletteNV( + VkCommandBuffer commandBuffer, + uint32_t firstViewport, + uint32_t viewportCount, + const VkShadingRatePaletteNV* pShadingRatePalettes); + +VKAPI_ATTR void VKAPI_CALL vkCmdSetCoarseSampleOrderNV( + VkCommandBuffer commandBuffer, + VkCoarseSampleOrderTypeNV sampleOrderType, + uint32_t customSampleOrderCount, + const VkCoarseSampleOrderCustomNV* pCustomSampleOrders); +#endif + + +#define VK_NV_ray_tracing 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkAccelerationStructureNV) +#define VK_NV_RAY_TRACING_SPEC_VERSION 3 +#define VK_NV_RAY_TRACING_EXTENSION_NAME "VK_NV_ray_tracing" +#define VK_SHADER_UNUSED_NV (~0U) + +typedef enum VkAccelerationStructureTypeNV { + VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV = 0, + VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV = 1, + VK_ACCELERATION_STRUCTURE_TYPE_BEGIN_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV, + VK_ACCELERATION_STRUCTURE_TYPE_END_RANGE_NV = VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV, + VK_ACCELERATION_STRUCTURE_TYPE_RANGE_SIZE_NV = (VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_NV - VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_NV + 1), + VK_ACCELERATION_STRUCTURE_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkAccelerationStructureTypeNV; + +typedef enum VkRayTracingShaderGroupTypeNV { + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV = 0, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV = 1, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV = 2, + VK_RAY_TRACING_SHADER_GROUP_TYPE_BEGIN_RANGE_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV, + VK_RAY_TRACING_SHADER_GROUP_TYPE_END_RANGE_NV = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV, + VK_RAY_TRACING_SHADER_GROUP_TYPE_RANGE_SIZE_NV = (VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV - VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV + 1), + VK_RAY_TRACING_SHADER_GROUP_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkRayTracingShaderGroupTypeNV; + +typedef enum VkGeometryTypeNV { + VK_GEOMETRY_TYPE_TRIANGLES_NV = 0, + VK_GEOMETRY_TYPE_AABBS_NV = 1, + VK_GEOMETRY_TYPE_BEGIN_RANGE_NV = VK_GEOMETRY_TYPE_TRIANGLES_NV, + VK_GEOMETRY_TYPE_END_RANGE_NV = VK_GEOMETRY_TYPE_AABBS_NV, + 
VK_GEOMETRY_TYPE_RANGE_SIZE_NV = (VK_GEOMETRY_TYPE_AABBS_NV - VK_GEOMETRY_TYPE_TRIANGLES_NV + 1), + VK_GEOMETRY_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkGeometryTypeNV; + +typedef enum VkCopyAccelerationStructureModeNV { + VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV = 0, + VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV = 1, + VK_COPY_ACCELERATION_STRUCTURE_MODE_BEGIN_RANGE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV, + VK_COPY_ACCELERATION_STRUCTURE_MODE_END_RANGE_NV = VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV, + VK_COPY_ACCELERATION_STRUCTURE_MODE_RANGE_SIZE_NV = (VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_NV - VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_NV + 1), + VK_COPY_ACCELERATION_STRUCTURE_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCopyAccelerationStructureModeNV; + +typedef enum VkAccelerationStructureMemoryRequirementsTypeNV { + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV = 0, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_NV = 1, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV = 2, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BEGIN_RANGE_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_END_RANGE_NV = VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV, + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_RANGE_SIZE_NV = (VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_NV - VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_NV + 1), + VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkAccelerationStructureMemoryRequirementsTypeNV; + +typedef enum VkGeometryFlagBitsNV { + VK_GEOMETRY_OPAQUE_BIT_NV = 0x00000001, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_NV = 0x00000002, + VK_GEOMETRY_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkGeometryFlagBitsNV; +typedef VkFlags VkGeometryFlagsNV; + +typedef enum VkGeometryInstanceFlagBitsNV { + VK_GEOMETRY_INSTANCE_TRIANGLE_CULL_DISABLE_BIT_NV = 0x00000001, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_NV = 0x00000002, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_NV = 0x00000004, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_NV = 0x00000008, + VK_GEOMETRY_INSTANCE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkGeometryInstanceFlagBitsNV; +typedef VkFlags VkGeometryInstanceFlagsNV; + +typedef enum VkBuildAccelerationStructureFlagBitsNV { + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_NV = 0x00000001, + VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_NV = 0x00000002, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_NV = 0x00000004, + VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_NV = 0x00000008, + VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_NV = 0x00000010, + VK_BUILD_ACCELERATION_STRUCTURE_FLAG_BITS_MAX_ENUM_NV = 0x7FFFFFFF +} VkBuildAccelerationStructureFlagBitsNV; +typedef VkFlags VkBuildAccelerationStructureFlagsNV; +typedef struct VkRayTracingShaderGroupCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkRayTracingShaderGroupTypeNV type; + uint32_t generalShader; + uint32_t closestHitShader; + uint32_t anyHitShader; + uint32_t intersectionShader; +} VkRayTracingShaderGroupCreateInfoNV; + +typedef struct VkRayTracingPipelineCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCreateFlags flags; + uint32_t stageCount; + const VkPipelineShaderStageCreateInfo* pStages; + uint32_t groupCount; + const VkRayTracingShaderGroupCreateInfoNV* pGroups; + uint32_t maxRecursionDepth; + 
VkPipelineLayout layout; + VkPipeline basePipelineHandle; + int32_t basePipelineIndex; +} VkRayTracingPipelineCreateInfoNV; + +typedef struct VkGeometryTrianglesNV { + VkStructureType sType; + const void* pNext; + VkBuffer vertexData; + VkDeviceSize vertexOffset; + uint32_t vertexCount; + VkDeviceSize vertexStride; + VkFormat vertexFormat; + VkBuffer indexData; + VkDeviceSize indexOffset; + uint32_t indexCount; + VkIndexType indexType; + VkBuffer transformData; + VkDeviceSize transformOffset; +} VkGeometryTrianglesNV; + +typedef struct VkGeometryAABBNV { + VkStructureType sType; + const void* pNext; + VkBuffer aabbData; + uint32_t numAABBs; + uint32_t stride; + VkDeviceSize offset; +} VkGeometryAABBNV; + +typedef struct VkGeometryDataNV { + VkGeometryTrianglesNV triangles; + VkGeometryAABBNV aabbs; +} VkGeometryDataNV; + +typedef struct VkGeometryNV { + VkStructureType sType; + const void* pNext; + VkGeometryTypeNV geometryType; + VkGeometryDataNV geometry; + VkGeometryFlagsNV flags; +} VkGeometryNV; + +typedef struct VkAccelerationStructureInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureTypeNV type; + VkBuildAccelerationStructureFlagsNV flags; + uint32_t instanceCount; + uint32_t geometryCount; + const VkGeometryNV* pGeometries; +} VkAccelerationStructureInfoNV; + +typedef struct VkAccelerationStructureCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkDeviceSize compactedSize; + VkAccelerationStructureInfoNV info; +} VkAccelerationStructureCreateInfoNV; + +typedef struct VkBindAccelerationStructureMemoryInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureNV accelerationStructure; + VkDeviceMemory memory; + VkDeviceSize memoryOffset; + uint32_t deviceIndexCount; + const uint32_t* pDeviceIndices; +} VkBindAccelerationStructureMemoryInfoNV; + +typedef struct VkWriteDescriptorSetAccelerationStructureNV { + VkStructureType sType; + const void* pNext; + uint32_t accelerationStructureCount; + const VkAccelerationStructureNV* pAccelerationStructures; +} VkWriteDescriptorSetAccelerationStructureNV; + +typedef struct VkAccelerationStructureMemoryRequirementsInfoNV { + VkStructureType sType; + const void* pNext; + VkAccelerationStructureMemoryRequirementsTypeNV type; + VkAccelerationStructureNV accelerationStructure; +} VkAccelerationStructureMemoryRequirementsInfoNV; + +typedef struct VkPhysicalDeviceRayTracingPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t shaderGroupHandleSize; + uint32_t maxRecursionDepth; + uint32_t maxShaderGroupStride; + uint32_t shaderGroupBaseAlignment; + uint64_t maxGeometryCount; + uint64_t maxInstanceCount; + uint64_t maxTriangleCount; + uint32_t maxDescriptorSetAccelerationStructures; +} VkPhysicalDeviceRayTracingPropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureNV)(VkDevice device, const VkAccelerationStructureCreateInfoNV* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureNV* pAccelerationStructure); +typedef void (VKAPI_PTR *PFN_vkDestroyAccelerationStructureNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, const VkAllocationCallbacks* pAllocator); +typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsNV)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, VkMemoryRequirements2KHR* pMemoryRequirements); +typedef VkResult (VKAPI_PTR *PFN_vkBindAccelerationStructureMemoryNV)(VkDevice device, uint32_t bindInfoCount, const 
VkBindAccelerationStructureMemoryInfoNV* pBindInfos); +typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureNV)(VkCommandBuffer commandBuffer, const VkAccelerationStructureInfoNV* pInfo, VkBuffer instanceData, VkDeviceSize instanceOffset, VkBool32 update, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkBuffer scratch, VkDeviceSize scratchOffset); +typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureNV)(VkCommandBuffer commandBuffer, VkAccelerationStructureNV dst, VkAccelerationStructureNV src, VkCopyAccelerationStructureModeNV mode); +typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysNV)(VkCommandBuffer commandBuffer, VkBuffer raygenShaderBindingTableBuffer, VkDeviceSize raygenShaderBindingOffset, VkBuffer missShaderBindingTableBuffer, VkDeviceSize missShaderBindingOffset, VkDeviceSize missShaderBindingStride, VkBuffer hitShaderBindingTableBuffer, VkDeviceSize hitShaderBindingOffset, VkDeviceSize hitShaderBindingStride, VkBuffer callableShaderBindingTableBuffer, VkDeviceSize callableShaderBindingOffset, VkDeviceSize callableShaderBindingStride, uint32_t width, uint32_t height, uint32_t depth); +typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesNV)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoNV* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines); +typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingShaderGroupHandlesNV)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData); +typedef VkResult (VKAPI_PTR *PFN_vkGetAccelerationStructureHandleNV)(VkDevice device, VkAccelerationStructureNV accelerationStructure, size_t dataSize, void* pData); +typedef void (VKAPI_PTR *PFN_vkCmdWriteAccelerationStructuresPropertiesNV)(VkCommandBuffer commandBuffer, uint32_t accelerationStructureCount, const VkAccelerationStructureNV* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery); +typedef VkResult (VKAPI_PTR *PFN_vkCompileDeferredNV)(VkDevice device, VkPipeline pipeline, uint32_t shader); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureNV( + VkDevice device, + const VkAccelerationStructureCreateInfoNV* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkAccelerationStructureNV* pAccelerationStructure); + +VKAPI_ATTR void VKAPI_CALL vkDestroyAccelerationStructureNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + const VkAllocationCallbacks* pAllocator); + +VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsNV( + VkDevice device, + const VkAccelerationStructureMemoryRequirementsInfoNV* pInfo, + VkMemoryRequirements2KHR* pMemoryRequirements); + +VKAPI_ATTR VkResult VKAPI_CALL vkBindAccelerationStructureMemoryNV( + VkDevice device, + uint32_t bindInfoCount, + const VkBindAccelerationStructureMemoryInfoNV* pBindInfos); + +VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureNV( + VkCommandBuffer commandBuffer, + const VkAccelerationStructureInfoNV* pInfo, + VkBuffer instanceData, + VkDeviceSize instanceOffset, + VkBool32 update, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkBuffer scratch, + VkDeviceSize scratchOffset); + +VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureNV( + VkCommandBuffer commandBuffer, + VkAccelerationStructureNV dst, + VkAccelerationStructureNV src, + VkCopyAccelerationStructureModeNV mode); + +VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysNV( + 
VkCommandBuffer commandBuffer, + VkBuffer raygenShaderBindingTableBuffer, + VkDeviceSize raygenShaderBindingOffset, + VkBuffer missShaderBindingTableBuffer, + VkDeviceSize missShaderBindingOffset, + VkDeviceSize missShaderBindingStride, + VkBuffer hitShaderBindingTableBuffer, + VkDeviceSize hitShaderBindingOffset, + VkDeviceSize hitShaderBindingStride, + VkBuffer callableShaderBindingTableBuffer, + VkDeviceSize callableShaderBindingOffset, + VkDeviceSize callableShaderBindingStride, + uint32_t width, + uint32_t height, + uint32_t depth); + +VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesNV( + VkDevice device, + VkPipelineCache pipelineCache, + uint32_t createInfoCount, + const VkRayTracingPipelineCreateInfoNV* pCreateInfos, + const VkAllocationCallbacks* pAllocator, + VkPipeline* pPipelines); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingShaderGroupHandlesNV( + VkDevice device, + VkPipeline pipeline, + uint32_t firstGroup, + uint32_t groupCount, + size_t dataSize, + void* pData); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetAccelerationStructureHandleNV( + VkDevice device, + VkAccelerationStructureNV accelerationStructure, + size_t dataSize, + void* pData); + +VKAPI_ATTR void VKAPI_CALL vkCmdWriteAccelerationStructuresPropertiesNV( + VkCommandBuffer commandBuffer, + uint32_t accelerationStructureCount, + const VkAccelerationStructureNV* pAccelerationStructures, + VkQueryType queryType, + VkQueryPool queryPool, + uint32_t firstQuery); + +VKAPI_ATTR VkResult VKAPI_CALL vkCompileDeferredNV( + VkDevice device, + VkPipeline pipeline, + uint32_t shader); +#endif + + +#define VK_NV_representative_fragment_test 1 +#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_SPEC_VERSION 2 +#define VK_NV_REPRESENTATIVE_FRAGMENT_TEST_EXTENSION_NAME "VK_NV_representative_fragment_test" +typedef struct VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 representativeFragmentTest; +} VkPhysicalDeviceRepresentativeFragmentTestFeaturesNV; + +typedef struct VkPipelineRepresentativeFragmentTestStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkBool32 representativeFragmentTestEnable; +} VkPipelineRepresentativeFragmentTestStateCreateInfoNV; + + + +#define VK_EXT_filter_cubic 1 +#define VK_EXT_FILTER_CUBIC_SPEC_VERSION 3 +#define VK_EXT_FILTER_CUBIC_EXTENSION_NAME "VK_EXT_filter_cubic" +typedef struct VkPhysicalDeviceImageViewImageFormatInfoEXT { + VkStructureType sType; + void* pNext; + VkImageViewType imageViewType; +} VkPhysicalDeviceImageViewImageFormatInfoEXT; + +typedef struct VkFilterCubicImageViewImageFormatPropertiesEXT { + VkStructureType sType; + void* pNext; + VkBool32 filterCubic; + VkBool32 filterCubicMinmax; +} VkFilterCubicImageViewImageFormatPropertiesEXT; + + + +#define VK_EXT_global_priority 1 +#define VK_EXT_GLOBAL_PRIORITY_SPEC_VERSION 2 +#define VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME "VK_EXT_global_priority" + +typedef enum VkQueueGlobalPriorityEXT { + VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT = 128, + VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT = 256, + VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT = 512, + VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT = 1024, + VK_QUEUE_GLOBAL_PRIORITY_BEGIN_RANGE_EXT = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT, + VK_QUEUE_GLOBAL_PRIORITY_END_RANGE_EXT = VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT, + VK_QUEUE_GLOBAL_PRIORITY_RANGE_SIZE_EXT = (VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT - VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT + 1), + VK_QUEUE_GLOBAL_PRIORITY_MAX_ENUM_EXT = 0x7FFFFFFF +} VkQueueGlobalPriorityEXT; +typedef struct 
VkDeviceQueueGlobalPriorityCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkQueueGlobalPriorityEXT globalPriority; +} VkDeviceQueueGlobalPriorityCreateInfoEXT; + + + +#define VK_EXT_external_memory_host 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_SPEC_VERSION 1 +#define VK_EXT_EXTERNAL_MEMORY_HOST_EXTENSION_NAME "VK_EXT_external_memory_host" +typedef struct VkImportMemoryHostPointerInfoEXT { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + void* pHostPointer; +} VkImportMemoryHostPointerInfoEXT; + +typedef struct VkMemoryHostPointerPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryHostPointerPropertiesEXT; + +typedef struct VkPhysicalDeviceExternalMemoryHostPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize minImportedHostPointerAlignment; +} VkPhysicalDeviceExternalMemoryHostPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryHostPointerPropertiesEXT)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, const void* pHostPointer, VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryHostPointerPropertiesEXT( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + const void* pHostPointer, + VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties); +#endif + + +#define VK_AMD_buffer_marker 1 +#define VK_AMD_BUFFER_MARKER_SPEC_VERSION 1 +#define VK_AMD_BUFFER_MARKER_EXTENSION_NAME "VK_AMD_buffer_marker" +typedef void (VKAPI_PTR *PFN_vkCmdWriteBufferMarkerAMD)(VkCommandBuffer commandBuffer, VkPipelineStageFlagBits pipelineStage, VkBuffer dstBuffer, VkDeviceSize dstOffset, uint32_t marker); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdWriteBufferMarkerAMD( + VkCommandBuffer commandBuffer, + VkPipelineStageFlagBits pipelineStage, + VkBuffer dstBuffer, + VkDeviceSize dstOffset, + uint32_t marker); +#endif + + +#define VK_AMD_pipeline_compiler_control 1 +#define VK_AMD_PIPELINE_COMPILER_CONTROL_SPEC_VERSION 1 +#define VK_AMD_PIPELINE_COMPILER_CONTROL_EXTENSION_NAME "VK_AMD_pipeline_compiler_control" + +typedef enum VkPipelineCompilerControlFlagBitsAMD { + VK_PIPELINE_COMPILER_CONTROL_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF +} VkPipelineCompilerControlFlagBitsAMD; +typedef VkFlags VkPipelineCompilerControlFlagsAMD; +typedef struct VkPipelineCompilerControlCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkPipelineCompilerControlFlagsAMD compilerControlFlags; +} VkPipelineCompilerControlCreateInfoAMD; + + + +#define VK_EXT_calibrated_timestamps 1 +#define VK_EXT_CALIBRATED_TIMESTAMPS_SPEC_VERSION 1 +#define VK_EXT_CALIBRATED_TIMESTAMPS_EXTENSION_NAME "VK_EXT_calibrated_timestamps" + +typedef enum VkTimeDomainEXT { + VK_TIME_DOMAIN_DEVICE_EXT = 0, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT = 1, + VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT = 2, + VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT = 3, + VK_TIME_DOMAIN_BEGIN_RANGE_EXT = VK_TIME_DOMAIN_DEVICE_EXT, + VK_TIME_DOMAIN_END_RANGE_EXT = VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT, + VK_TIME_DOMAIN_RANGE_SIZE_EXT = (VK_TIME_DOMAIN_QUERY_PERFORMANCE_COUNTER_EXT - VK_TIME_DOMAIN_DEVICE_EXT + 1), + VK_TIME_DOMAIN_MAX_ENUM_EXT = 0x7FFFFFFF +} VkTimeDomainEXT; +typedef struct VkCalibratedTimestampInfoEXT { + VkStructureType sType; + const void* pNext; + VkTimeDomainEXT timeDomain; +} VkCalibratedTimestampInfoEXT; + +typedef VkResult (VKAPI_PTR 
*PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT)(VkPhysicalDevice physicalDevice, uint32_t* pTimeDomainCount, VkTimeDomainEXT* pTimeDomains); +typedef VkResult (VKAPI_PTR *PFN_vkGetCalibratedTimestampsEXT)(VkDevice device, uint32_t timestampCount, const VkCalibratedTimestampInfoEXT* pTimestampInfos, uint64_t* pTimestamps, uint64_t* pMaxDeviation); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCalibrateableTimeDomainsEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pTimeDomainCount, + VkTimeDomainEXT* pTimeDomains); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetCalibratedTimestampsEXT( + VkDevice device, + uint32_t timestampCount, + const VkCalibratedTimestampInfoEXT* pTimestampInfos, + uint64_t* pTimestamps, + uint64_t* pMaxDeviation); +#endif + + +#define VK_AMD_shader_core_properties 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_SPEC_VERSION 2 +#define VK_AMD_SHADER_CORE_PROPERTIES_EXTENSION_NAME "VK_AMD_shader_core_properties" +typedef struct VkPhysicalDeviceShaderCorePropertiesAMD { + VkStructureType sType; + void* pNext; + uint32_t shaderEngineCount; + uint32_t shaderArraysPerEngineCount; + uint32_t computeUnitsPerShaderArray; + uint32_t simdPerComputeUnit; + uint32_t wavefrontsPerSimd; + uint32_t wavefrontSize; + uint32_t sgprsPerSimd; + uint32_t minSgprAllocation; + uint32_t maxSgprAllocation; + uint32_t sgprAllocationGranularity; + uint32_t vgprsPerSimd; + uint32_t minVgprAllocation; + uint32_t maxVgprAllocation; + uint32_t vgprAllocationGranularity; +} VkPhysicalDeviceShaderCorePropertiesAMD; + + + +#define VK_AMD_memory_overallocation_behavior 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_SPEC_VERSION 1 +#define VK_AMD_MEMORY_OVERALLOCATION_BEHAVIOR_EXTENSION_NAME "VK_AMD_memory_overallocation_behavior" + +typedef enum VkMemoryOverallocationBehaviorAMD { + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD = 0, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_ALLOWED_AMD = 1, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD = 2, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_BEGIN_RANGE_AMD = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_END_RANGE_AMD = VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD, + VK_MEMORY_OVERALLOCATION_BEHAVIOR_RANGE_SIZE_AMD = (VK_MEMORY_OVERALLOCATION_BEHAVIOR_DISALLOWED_AMD - VK_MEMORY_OVERALLOCATION_BEHAVIOR_DEFAULT_AMD + 1), + VK_MEMORY_OVERALLOCATION_BEHAVIOR_MAX_ENUM_AMD = 0x7FFFFFFF +} VkMemoryOverallocationBehaviorAMD; +typedef struct VkDeviceMemoryOverallocationCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkMemoryOverallocationBehaviorAMD overallocationBehavior; +} VkDeviceMemoryOverallocationCreateInfoAMD; + + + +#define VK_EXT_vertex_attribute_divisor 1 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_SPEC_VERSION 3 +#define VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME "VK_EXT_vertex_attribute_divisor" +typedef struct VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t maxVertexAttribDivisor; +} VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT; + +typedef struct VkVertexInputBindingDivisorDescriptionEXT { + uint32_t binding; + uint32_t divisor; +} VkVertexInputBindingDivisorDescriptionEXT; + +typedef struct VkPipelineVertexInputDivisorStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + uint32_t vertexBindingDivisorCount; + const VkVertexInputBindingDivisorDescriptionEXT* pVertexBindingDivisors; +} VkPipelineVertexInputDivisorStateCreateInfoEXT; + +typedef struct VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT { + 
VkStructureType sType; + void* pNext; + VkBool32 vertexAttributeInstanceRateDivisor; + VkBool32 vertexAttributeInstanceRateZeroDivisor; +} VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT; + + + +#define VK_EXT_pipeline_creation_feedback 1 +#define VK_EXT_PIPELINE_CREATION_FEEDBACK_SPEC_VERSION 1 +#define VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME "VK_EXT_pipeline_creation_feedback" + +typedef enum VkPipelineCreationFeedbackFlagBitsEXT { + VK_PIPELINE_CREATION_FEEDBACK_VALID_BIT_EXT = 0x00000001, + VK_PIPELINE_CREATION_FEEDBACK_APPLICATION_PIPELINE_CACHE_HIT_BIT_EXT = 0x00000002, + VK_PIPELINE_CREATION_FEEDBACK_BASE_PIPELINE_ACCELERATION_BIT_EXT = 0x00000004, + VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkPipelineCreationFeedbackFlagBitsEXT; +typedef VkFlags VkPipelineCreationFeedbackFlagsEXT; +typedef struct VkPipelineCreationFeedbackEXT { + VkPipelineCreationFeedbackFlagsEXT flags; + uint64_t duration; +} VkPipelineCreationFeedbackEXT; + +typedef struct VkPipelineCreationFeedbackCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkPipelineCreationFeedbackEXT* pPipelineCreationFeedback; + uint32_t pipelineStageCreationFeedbackCount; + VkPipelineCreationFeedbackEXT* pPipelineStageCreationFeedbacks; +} VkPipelineCreationFeedbackCreateInfoEXT; + + + +#define VK_NV_shader_subgroup_partitioned 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_SPEC_VERSION 1 +#define VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME "VK_NV_shader_subgroup_partitioned" + + +#define VK_NV_compute_shader_derivatives 1 +#define VK_NV_COMPUTE_SHADER_DERIVATIVES_SPEC_VERSION 1 +#define VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME "VK_NV_compute_shader_derivatives" +typedef struct VkPhysicalDeviceComputeShaderDerivativesFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 computeDerivativeGroupQuads; + VkBool32 computeDerivativeGroupLinear; +} VkPhysicalDeviceComputeShaderDerivativesFeaturesNV; + + + +#define VK_NV_mesh_shader 1 +#define VK_NV_MESH_SHADER_SPEC_VERSION 1 +#define VK_NV_MESH_SHADER_EXTENSION_NAME "VK_NV_mesh_shader" +typedef struct VkPhysicalDeviceMeshShaderFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 taskShader; + VkBool32 meshShader; +} VkPhysicalDeviceMeshShaderFeaturesNV; + +typedef struct VkPhysicalDeviceMeshShaderPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t maxDrawMeshTasksCount; + uint32_t maxTaskWorkGroupInvocations; + uint32_t maxTaskWorkGroupSize[3]; + uint32_t maxTaskTotalMemorySize; + uint32_t maxTaskOutputCount; + uint32_t maxMeshWorkGroupInvocations; + uint32_t maxMeshWorkGroupSize[3]; + uint32_t maxMeshTotalMemorySize; + uint32_t maxMeshOutputVertices; + uint32_t maxMeshOutputPrimitives; + uint32_t maxMeshMultiviewViewCount; + uint32_t meshOutputPerVertexGranularity; + uint32_t meshOutputPerPrimitiveGranularity; +} VkPhysicalDeviceMeshShaderPropertiesNV; + +typedef struct VkDrawMeshTasksIndirectCommandNV { + uint32_t taskCount; + uint32_t firstTask; +} VkDrawMeshTasksIndirectCommandNV; + +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksNV)(VkCommandBuffer commandBuffer, uint32_t taskCount, uint32_t firstTask); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, uint32_t drawCount, uint32_t stride); +typedef void (VKAPI_PTR *PFN_vkCmdDrawMeshTasksIndirectCountNV)(VkCommandBuffer commandBuffer, VkBuffer buffer, VkDeviceSize offset, VkBuffer countBuffer, VkDeviceSize countBufferOffset, uint32_t maxDrawCount, uint32_t 
stride); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksNV( + VkCommandBuffer commandBuffer, + uint32_t taskCount, + uint32_t firstTask); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + uint32_t drawCount, + uint32_t stride); + +VKAPI_ATTR void VKAPI_CALL vkCmdDrawMeshTasksIndirectCountNV( + VkCommandBuffer commandBuffer, + VkBuffer buffer, + VkDeviceSize offset, + VkBuffer countBuffer, + VkDeviceSize countBufferOffset, + uint32_t maxDrawCount, + uint32_t stride); +#endif + + +#define VK_NV_fragment_shader_barycentric 1 +#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_SPEC_VERSION 1 +#define VK_NV_FRAGMENT_SHADER_BARYCENTRIC_EXTENSION_NAME "VK_NV_fragment_shader_barycentric" +typedef struct VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShaderBarycentric; +} VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV; + + + +#define VK_NV_shader_image_footprint 1 +#define VK_NV_SHADER_IMAGE_FOOTPRINT_SPEC_VERSION 2 +#define VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME "VK_NV_shader_image_footprint" +typedef struct VkPhysicalDeviceShaderImageFootprintFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 imageFootprint; +} VkPhysicalDeviceShaderImageFootprintFeaturesNV; + + + +#define VK_NV_scissor_exclusive 1 +#define VK_NV_SCISSOR_EXCLUSIVE_SPEC_VERSION 1 +#define VK_NV_SCISSOR_EXCLUSIVE_EXTENSION_NAME "VK_NV_scissor_exclusive" +typedef struct VkPipelineViewportExclusiveScissorStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t exclusiveScissorCount; + const VkRect2D* pExclusiveScissors; +} VkPipelineViewportExclusiveScissorStateCreateInfoNV; + +typedef struct VkPhysicalDeviceExclusiveScissorFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 exclusiveScissor; +} VkPhysicalDeviceExclusiveScissorFeaturesNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetExclusiveScissorNV)(VkCommandBuffer commandBuffer, uint32_t firstExclusiveScissor, uint32_t exclusiveScissorCount, const VkRect2D* pExclusiveScissors); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetExclusiveScissorNV( + VkCommandBuffer commandBuffer, + uint32_t firstExclusiveScissor, + uint32_t exclusiveScissorCount, + const VkRect2D* pExclusiveScissors); +#endif + + +#define VK_NV_device_diagnostic_checkpoints 1 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_SPEC_VERSION 2 +#define VK_NV_DEVICE_DIAGNOSTIC_CHECKPOINTS_EXTENSION_NAME "VK_NV_device_diagnostic_checkpoints" +typedef struct VkQueueFamilyCheckpointPropertiesNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlags checkpointExecutionStageMask; +} VkQueueFamilyCheckpointPropertiesNV; + +typedef struct VkCheckpointDataNV { + VkStructureType sType; + void* pNext; + VkPipelineStageFlagBits stage; + void* pCheckpointMarker; +} VkCheckpointDataNV; + +typedef void (VKAPI_PTR *PFN_vkCmdSetCheckpointNV)(VkCommandBuffer commandBuffer, const void* pCheckpointMarker); +typedef void (VKAPI_PTR *PFN_vkGetQueueCheckpointDataNV)(VkQueue queue, uint32_t* pCheckpointDataCount, VkCheckpointDataNV* pCheckpointData); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetCheckpointNV( + VkCommandBuffer commandBuffer, + const void* pCheckpointMarker); + +VKAPI_ATTR void VKAPI_CALL vkGetQueueCheckpointDataNV( + VkQueue queue, + uint32_t* pCheckpointDataCount, + VkCheckpointDataNV* pCheckpointData); +#endif + + +#define VK_INTEL_shader_integer_functions2 1 +#define 
VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_SPEC_VERSION 1 +#define VK_INTEL_SHADER_INTEGER_FUNCTIONS_2_EXTENSION_NAME "VK_INTEL_shader_integer_functions2" +typedef struct VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL { + VkStructureType sType; + void* pNext; + VkBool32 shaderIntegerFunctions2; +} VkPhysicalDeviceShaderIntegerFunctions2FeaturesINTEL; + + + +#define VK_INTEL_performance_query 1 +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkPerformanceConfigurationINTEL) +#define VK_INTEL_PERFORMANCE_QUERY_SPEC_VERSION 1 +#define VK_INTEL_PERFORMANCE_QUERY_EXTENSION_NAME "VK_INTEL_performance_query" + +typedef enum VkPerformanceConfigurationTypeINTEL { + VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL = 0, + VK_PERFORMANCE_CONFIGURATION_TYPE_BEGIN_RANGE_INTEL = VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL, + VK_PERFORMANCE_CONFIGURATION_TYPE_END_RANGE_INTEL = VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL, + VK_PERFORMANCE_CONFIGURATION_TYPE_RANGE_SIZE_INTEL = (VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL - VK_PERFORMANCE_CONFIGURATION_TYPE_COMMAND_QUEUE_METRICS_DISCOVERY_ACTIVATED_INTEL + 1), + VK_PERFORMANCE_CONFIGURATION_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceConfigurationTypeINTEL; + +typedef enum VkQueryPoolSamplingModeINTEL { + VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL = 0, + VK_QUERY_POOL_SAMPLING_MODE_BEGIN_RANGE_INTEL = VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL, + VK_QUERY_POOL_SAMPLING_MODE_END_RANGE_INTEL = VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL, + VK_QUERY_POOL_SAMPLING_MODE_RANGE_SIZE_INTEL = (VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL - VK_QUERY_POOL_SAMPLING_MODE_MANUAL_INTEL + 1), + VK_QUERY_POOL_SAMPLING_MODE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkQueryPoolSamplingModeINTEL; + +typedef enum VkPerformanceOverrideTypeINTEL { + VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL = 0, + VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL = 1, + VK_PERFORMANCE_OVERRIDE_TYPE_BEGIN_RANGE_INTEL = VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL, + VK_PERFORMANCE_OVERRIDE_TYPE_END_RANGE_INTEL = VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL, + VK_PERFORMANCE_OVERRIDE_TYPE_RANGE_SIZE_INTEL = (VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL - VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL + 1), + VK_PERFORMANCE_OVERRIDE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceOverrideTypeINTEL; + +typedef enum VkPerformanceParameterTypeINTEL { + VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL = 0, + VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL = 1, + VK_PERFORMANCE_PARAMETER_TYPE_BEGIN_RANGE_INTEL = VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL, + VK_PERFORMANCE_PARAMETER_TYPE_END_RANGE_INTEL = VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL, + VK_PERFORMANCE_PARAMETER_TYPE_RANGE_SIZE_INTEL = (VK_PERFORMANCE_PARAMETER_TYPE_STREAM_MARKER_VALID_BITS_INTEL - VK_PERFORMANCE_PARAMETER_TYPE_HW_COUNTERS_SUPPORTED_INTEL + 1), + VK_PERFORMANCE_PARAMETER_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceParameterTypeINTEL; + +typedef enum VkPerformanceValueTypeINTEL { + VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL = 0, + VK_PERFORMANCE_VALUE_TYPE_UINT64_INTEL = 1, + VK_PERFORMANCE_VALUE_TYPE_FLOAT_INTEL = 2, + VK_PERFORMANCE_VALUE_TYPE_BOOL_INTEL = 3, + VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL = 4, + VK_PERFORMANCE_VALUE_TYPE_BEGIN_RANGE_INTEL = VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL, + 
VK_PERFORMANCE_VALUE_TYPE_END_RANGE_INTEL = VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL, + VK_PERFORMANCE_VALUE_TYPE_RANGE_SIZE_INTEL = (VK_PERFORMANCE_VALUE_TYPE_STRING_INTEL - VK_PERFORMANCE_VALUE_TYPE_UINT32_INTEL + 1), + VK_PERFORMANCE_VALUE_TYPE_MAX_ENUM_INTEL = 0x7FFFFFFF +} VkPerformanceValueTypeINTEL; +typedef union VkPerformanceValueDataINTEL { + uint32_t value32; + uint64_t value64; + float valueFloat; + VkBool32 valueBool; + const char* valueString; +} VkPerformanceValueDataINTEL; + +typedef struct VkPerformanceValueINTEL { + VkPerformanceValueTypeINTEL type; + VkPerformanceValueDataINTEL data; +} VkPerformanceValueINTEL; + +typedef struct VkInitializePerformanceApiInfoINTEL { + VkStructureType sType; + const void* pNext; + void* pUserData; +} VkInitializePerformanceApiInfoINTEL; + +typedef struct VkQueryPoolCreateInfoINTEL { + VkStructureType sType; + const void* pNext; + VkQueryPoolSamplingModeINTEL performanceCountersSampling; +} VkQueryPoolCreateInfoINTEL; + +typedef struct VkPerformanceMarkerInfoINTEL { + VkStructureType sType; + const void* pNext; + uint64_t marker; +} VkPerformanceMarkerInfoINTEL; + +typedef struct VkPerformanceStreamMarkerInfoINTEL { + VkStructureType sType; + const void* pNext; + uint32_t marker; +} VkPerformanceStreamMarkerInfoINTEL; + +typedef struct VkPerformanceOverrideInfoINTEL { + VkStructureType sType; + const void* pNext; + VkPerformanceOverrideTypeINTEL type; + VkBool32 enable; + uint64_t parameter; +} VkPerformanceOverrideInfoINTEL; + +typedef struct VkPerformanceConfigurationAcquireInfoINTEL { + VkStructureType sType; + const void* pNext; + VkPerformanceConfigurationTypeINTEL type; +} VkPerformanceConfigurationAcquireInfoINTEL; + +typedef VkResult (VKAPI_PTR *PFN_vkInitializePerformanceApiINTEL)(VkDevice device, const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); +typedef void (VKAPI_PTR *PFN_vkUninitializePerformanceApiINTEL)(VkDevice device); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceMarkerInfoINTEL* pMarkerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceStreamMarkerINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); +typedef VkResult (VKAPI_PTR *PFN_vkCmdSetPerformanceOverrideINTEL)(VkCommandBuffer commandBuffer, const VkPerformanceOverrideInfoINTEL* pOverrideInfo); +typedef VkResult (VKAPI_PTR *PFN_vkAcquirePerformanceConfigurationINTEL)(VkDevice device, const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, VkPerformanceConfigurationINTEL* pConfiguration); +typedef VkResult (VKAPI_PTR *PFN_vkReleasePerformanceConfigurationINTEL)(VkDevice device, VkPerformanceConfigurationINTEL configuration); +typedef VkResult (VKAPI_PTR *PFN_vkQueueSetPerformanceConfigurationINTEL)(VkQueue queue, VkPerformanceConfigurationINTEL configuration); +typedef VkResult (VKAPI_PTR *PFN_vkGetPerformanceParameterINTEL)(VkDevice device, VkPerformanceParameterTypeINTEL parameter, VkPerformanceValueINTEL* pValue); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkInitializePerformanceApiINTEL( + VkDevice device, + const VkInitializePerformanceApiInfoINTEL* pInitializeInfo); + +VKAPI_ATTR void VKAPI_CALL vkUninitializePerformanceApiINTEL( + VkDevice device); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceMarkerINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceMarkerInfoINTEL* pMarkerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceStreamMarkerINTEL( + VkCommandBuffer commandBuffer, + const 
VkPerformanceStreamMarkerInfoINTEL* pMarkerInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkCmdSetPerformanceOverrideINTEL( + VkCommandBuffer commandBuffer, + const VkPerformanceOverrideInfoINTEL* pOverrideInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkAcquirePerformanceConfigurationINTEL( + VkDevice device, + const VkPerformanceConfigurationAcquireInfoINTEL* pAcquireInfo, + VkPerformanceConfigurationINTEL* pConfiguration); + +VKAPI_ATTR VkResult VKAPI_CALL vkReleasePerformanceConfigurationINTEL( + VkDevice device, + VkPerformanceConfigurationINTEL configuration); + +VKAPI_ATTR VkResult VKAPI_CALL vkQueueSetPerformanceConfigurationINTEL( + VkQueue queue, + VkPerformanceConfigurationINTEL configuration); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetPerformanceParameterINTEL( + VkDevice device, + VkPerformanceParameterTypeINTEL parameter, + VkPerformanceValueINTEL* pValue); +#endif + + +#define VK_EXT_pci_bus_info 1 +#define VK_EXT_PCI_BUS_INFO_SPEC_VERSION 2 +#define VK_EXT_PCI_BUS_INFO_EXTENSION_NAME "VK_EXT_pci_bus_info" +typedef struct VkPhysicalDevicePCIBusInfoPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t pciDomain; + uint32_t pciBus; + uint32_t pciDevice; + uint32_t pciFunction; +} VkPhysicalDevicePCIBusInfoPropertiesEXT; + + + +#define VK_AMD_display_native_hdr 1 +#define VK_AMD_DISPLAY_NATIVE_HDR_SPEC_VERSION 1 +#define VK_AMD_DISPLAY_NATIVE_HDR_EXTENSION_NAME "VK_AMD_display_native_hdr" +typedef struct VkDisplayNativeHdrSurfaceCapabilitiesAMD { + VkStructureType sType; + void* pNext; + VkBool32 localDimmingSupport; +} VkDisplayNativeHdrSurfaceCapabilitiesAMD; + +typedef struct VkSwapchainDisplayNativeHdrCreateInfoAMD { + VkStructureType sType; + const void* pNext; + VkBool32 localDimmingEnable; +} VkSwapchainDisplayNativeHdrCreateInfoAMD; + +typedef void (VKAPI_PTR *PFN_vkSetLocalDimmingAMD)(VkDevice device, VkSwapchainKHR swapChain, VkBool32 localDimmingEnable); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkSetLocalDimmingAMD( + VkDevice device, + VkSwapchainKHR swapChain, + VkBool32 localDimmingEnable); +#endif + + +#define VK_EXT_fragment_density_map 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_DENSITY_MAP_EXTENSION_NAME "VK_EXT_fragment_density_map" +typedef struct VkPhysicalDeviceFragmentDensityMapFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentDensityMap; + VkBool32 fragmentDensityMapDynamic; + VkBool32 fragmentDensityMapNonSubsampledImages; +} VkPhysicalDeviceFragmentDensityMapFeaturesEXT; + +typedef struct VkPhysicalDeviceFragmentDensityMapPropertiesEXT { + VkStructureType sType; + void* pNext; + VkExtent2D minFragmentDensityTexelSize; + VkExtent2D maxFragmentDensityTexelSize; + VkBool32 fragmentDensityInvocations; +} VkPhysicalDeviceFragmentDensityMapPropertiesEXT; + +typedef struct VkRenderPassFragmentDensityMapCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkAttachmentReference fragmentDensityMapAttachment; +} VkRenderPassFragmentDensityMapCreateInfoEXT; + + + +#define VK_EXT_scalar_block_layout 1 +#define VK_EXT_SCALAR_BLOCK_LAYOUT_SPEC_VERSION 1 +#define VK_EXT_SCALAR_BLOCK_LAYOUT_EXTENSION_NAME "VK_EXT_scalar_block_layout" +typedef VkPhysicalDeviceScalarBlockLayoutFeatures VkPhysicalDeviceScalarBlockLayoutFeaturesEXT; + + + +#define VK_GOOGLE_hlsl_functionality1 1 +#define VK_GOOGLE_HLSL_FUNCTIONALITY1_SPEC_VERSION 1 +#define VK_GOOGLE_HLSL_FUNCTIONALITY1_EXTENSION_NAME "VK_GOOGLE_hlsl_functionality1" + + +#define VK_GOOGLE_decorate_string 1 +#define 
VK_GOOGLE_DECORATE_STRING_SPEC_VERSION 1 +#define VK_GOOGLE_DECORATE_STRING_EXTENSION_NAME "VK_GOOGLE_decorate_string" + + +#define VK_EXT_subgroup_size_control 1 +#define VK_EXT_SUBGROUP_SIZE_CONTROL_SPEC_VERSION 2 +#define VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME "VK_EXT_subgroup_size_control" +typedef struct VkPhysicalDeviceSubgroupSizeControlFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 subgroupSizeControl; + VkBool32 computeFullSubgroups; +} VkPhysicalDeviceSubgroupSizeControlFeaturesEXT; + +typedef struct VkPhysicalDeviceSubgroupSizeControlPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t minSubgroupSize; + uint32_t maxSubgroupSize; + uint32_t maxComputeWorkgroupSubgroups; + VkShaderStageFlags requiredSubgroupSizeStages; +} VkPhysicalDeviceSubgroupSizeControlPropertiesEXT; + +typedef struct VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT { + VkStructureType sType; + void* pNext; + uint32_t requiredSubgroupSize; +} VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT; + + + +#define VK_AMD_shader_core_properties2 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_2_SPEC_VERSION 1 +#define VK_AMD_SHADER_CORE_PROPERTIES_2_EXTENSION_NAME "VK_AMD_shader_core_properties2" + +typedef enum VkShaderCorePropertiesFlagBitsAMD { + VK_SHADER_CORE_PROPERTIES_FLAG_BITS_MAX_ENUM_AMD = 0x7FFFFFFF +} VkShaderCorePropertiesFlagBitsAMD; +typedef VkFlags VkShaderCorePropertiesFlagsAMD; +typedef struct VkPhysicalDeviceShaderCoreProperties2AMD { + VkStructureType sType; + void* pNext; + VkShaderCorePropertiesFlagsAMD shaderCoreFeatures; + uint32_t activeComputeUnitCount; +} VkPhysicalDeviceShaderCoreProperties2AMD; + + + +#define VK_AMD_device_coherent_memory 1 +#define VK_AMD_DEVICE_COHERENT_MEMORY_SPEC_VERSION 1 +#define VK_AMD_DEVICE_COHERENT_MEMORY_EXTENSION_NAME "VK_AMD_device_coherent_memory" +typedef struct VkPhysicalDeviceCoherentMemoryFeaturesAMD { + VkStructureType sType; + void* pNext; + VkBool32 deviceCoherentMemory; +} VkPhysicalDeviceCoherentMemoryFeaturesAMD; + + + +#define VK_EXT_memory_budget 1 +#define VK_EXT_MEMORY_BUDGET_SPEC_VERSION 1 +#define VK_EXT_MEMORY_BUDGET_EXTENSION_NAME "VK_EXT_memory_budget" +typedef struct VkPhysicalDeviceMemoryBudgetPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize heapBudget[VK_MAX_MEMORY_HEAPS]; + VkDeviceSize heapUsage[VK_MAX_MEMORY_HEAPS]; +} VkPhysicalDeviceMemoryBudgetPropertiesEXT; + + + +#define VK_EXT_memory_priority 1 +#define VK_EXT_MEMORY_PRIORITY_SPEC_VERSION 1 +#define VK_EXT_MEMORY_PRIORITY_EXTENSION_NAME "VK_EXT_memory_priority" +typedef struct VkPhysicalDeviceMemoryPriorityFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 memoryPriority; +} VkPhysicalDeviceMemoryPriorityFeaturesEXT; + +typedef struct VkMemoryPriorityAllocateInfoEXT { + VkStructureType sType; + const void* pNext; + float priority; +} VkMemoryPriorityAllocateInfoEXT; + + + +#define VK_NV_dedicated_allocation_image_aliasing 1 +#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_SPEC_VERSION 1 +#define VK_NV_DEDICATED_ALLOCATION_IMAGE_ALIASING_EXTENSION_NAME "VK_NV_dedicated_allocation_image_aliasing" +typedef struct VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 dedicatedAllocationImageAliasing; +} VkPhysicalDeviceDedicatedAllocationImageAliasingFeaturesNV; + + + +#define VK_EXT_buffer_device_address 1 +#define VK_EXT_BUFFER_DEVICE_ADDRESS_SPEC_VERSION 2 +#define VK_EXT_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME "VK_EXT_buffer_device_address" 
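+/*
+** Editorial usage sketch (not part of the Khronos-generated header): with
+** VK_EXT_buffer_device_address enabled on the device, a buffer created with
+** VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT can be queried for its GPU
+** virtual address through the entry point declared below. `device` and
+** `buffer` are assumed to be a valid VkDevice and VkBuffer:
+**
+**     VkBufferDeviceAddressInfoEXT info = {
+**         VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO_EXT,  /* sType  */
+**         NULL,                                              /* pNext  */
+**         buffer                                             /* buffer */
+**     };
+**     VkDeviceAddress addr = vkGetBufferDeviceAddressEXT(device, &info);
+*/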
+typedef struct VkPhysicalDeviceBufferDeviceAddressFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 bufferDeviceAddress; + VkBool32 bufferDeviceAddressCaptureReplay; + VkBool32 bufferDeviceAddressMultiDevice; +} VkPhysicalDeviceBufferDeviceAddressFeaturesEXT; + +typedef VkPhysicalDeviceBufferDeviceAddressFeaturesEXT VkPhysicalDeviceBufferAddressFeaturesEXT; + +typedef VkBufferDeviceAddressInfo VkBufferDeviceAddressInfoEXT; + +typedef struct VkBufferDeviceAddressCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkDeviceAddress deviceAddress; +} VkBufferDeviceAddressCreateInfoEXT; + +typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetBufferDeviceAddressEXT)(VkDevice device, const VkBufferDeviceAddressInfo* pInfo); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetBufferDeviceAddressEXT( + VkDevice device, + const VkBufferDeviceAddressInfo* pInfo); +#endif + + +#define VK_EXT_tooling_info 1 +#define VK_EXT_TOOLING_INFO_SPEC_VERSION 1 +#define VK_EXT_TOOLING_INFO_EXTENSION_NAME "VK_EXT_tooling_info" + +typedef enum VkToolPurposeFlagBitsEXT { + VK_TOOL_PURPOSE_VALIDATION_BIT_EXT = 0x00000001, + VK_TOOL_PURPOSE_PROFILING_BIT_EXT = 0x00000002, + VK_TOOL_PURPOSE_TRACING_BIT_EXT = 0x00000004, + VK_TOOL_PURPOSE_ADDITIONAL_FEATURES_BIT_EXT = 0x00000008, + VK_TOOL_PURPOSE_MODIFYING_FEATURES_BIT_EXT = 0x00000010, + VK_TOOL_PURPOSE_DEBUG_REPORTING_BIT_EXT = 0x00000020, + VK_TOOL_PURPOSE_DEBUG_MARKERS_BIT_EXT = 0x00000040, + VK_TOOL_PURPOSE_FLAG_BITS_MAX_ENUM_EXT = 0x7FFFFFFF +} VkToolPurposeFlagBitsEXT; +typedef VkFlags VkToolPurposeFlagsEXT; +typedef struct VkPhysicalDeviceToolPropertiesEXT { + VkStructureType sType; + void* pNext; + char name[VK_MAX_EXTENSION_NAME_SIZE]; + char version[VK_MAX_EXTENSION_NAME_SIZE]; + VkToolPurposeFlagsEXT purposes; + char description[VK_MAX_DESCRIPTION_SIZE]; + char layer[VK_MAX_EXTENSION_NAME_SIZE]; +} VkPhysicalDeviceToolPropertiesEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceToolPropertiesEXT)(VkPhysicalDevice physicalDevice, uint32_t* pToolCount, VkPhysicalDeviceToolPropertiesEXT* pToolProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceToolPropertiesEXT( + VkPhysicalDevice physicalDevice, + uint32_t* pToolCount, + VkPhysicalDeviceToolPropertiesEXT* pToolProperties); +#endif + + +#define VK_EXT_separate_stencil_usage 1 +#define VK_EXT_SEPARATE_STENCIL_USAGE_SPEC_VERSION 1 +#define VK_EXT_SEPARATE_STENCIL_USAGE_EXTENSION_NAME "VK_EXT_separate_stencil_usage" +typedef VkImageStencilUsageCreateInfo VkImageStencilUsageCreateInfoEXT; + + + +#define VK_EXT_validation_features 1 +#define VK_EXT_VALIDATION_FEATURES_SPEC_VERSION 2 +#define VK_EXT_VALIDATION_FEATURES_EXTENSION_NAME "VK_EXT_validation_features" + +typedef enum VkValidationFeatureEnableEXT { + VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT = 0, + VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_RESERVE_BINDING_SLOT_EXT = 1, + VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT = 2, + VK_VALIDATION_FEATURE_ENABLE_BEGIN_RANGE_EXT = VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT, + VK_VALIDATION_FEATURE_ENABLE_END_RANGE_EXT = VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT, + VK_VALIDATION_FEATURE_ENABLE_RANGE_SIZE_EXT = (VK_VALIDATION_FEATURE_ENABLE_BEST_PRACTICES_EXT - VK_VALIDATION_FEATURE_ENABLE_GPU_ASSISTED_EXT + 1), + VK_VALIDATION_FEATURE_ENABLE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationFeatureEnableEXT; + +typedef enum VkValidationFeatureDisableEXT { + VK_VALIDATION_FEATURE_DISABLE_ALL_EXT = 0, + 
VK_VALIDATION_FEATURE_DISABLE_SHADERS_EXT = 1, + VK_VALIDATION_FEATURE_DISABLE_THREAD_SAFETY_EXT = 2, + VK_VALIDATION_FEATURE_DISABLE_API_PARAMETERS_EXT = 3, + VK_VALIDATION_FEATURE_DISABLE_OBJECT_LIFETIMES_EXT = 4, + VK_VALIDATION_FEATURE_DISABLE_CORE_CHECKS_EXT = 5, + VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT = 6, + VK_VALIDATION_FEATURE_DISABLE_BEGIN_RANGE_EXT = VK_VALIDATION_FEATURE_DISABLE_ALL_EXT, + VK_VALIDATION_FEATURE_DISABLE_END_RANGE_EXT = VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT, + VK_VALIDATION_FEATURE_DISABLE_RANGE_SIZE_EXT = (VK_VALIDATION_FEATURE_DISABLE_UNIQUE_HANDLES_EXT - VK_VALIDATION_FEATURE_DISABLE_ALL_EXT + 1), + VK_VALIDATION_FEATURE_DISABLE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkValidationFeatureDisableEXT; +typedef struct VkValidationFeaturesEXT { + VkStructureType sType; + const void* pNext; + uint32_t enabledValidationFeatureCount; + const VkValidationFeatureEnableEXT* pEnabledValidationFeatures; + uint32_t disabledValidationFeatureCount; + const VkValidationFeatureDisableEXT* pDisabledValidationFeatures; +} VkValidationFeaturesEXT; + + + +#define VK_NV_cooperative_matrix 1 +#define VK_NV_COOPERATIVE_MATRIX_SPEC_VERSION 1 +#define VK_NV_COOPERATIVE_MATRIX_EXTENSION_NAME "VK_NV_cooperative_matrix" + +typedef enum VkComponentTypeNV { + VK_COMPONENT_TYPE_FLOAT16_NV = 0, + VK_COMPONENT_TYPE_FLOAT32_NV = 1, + VK_COMPONENT_TYPE_FLOAT64_NV = 2, + VK_COMPONENT_TYPE_SINT8_NV = 3, + VK_COMPONENT_TYPE_SINT16_NV = 4, + VK_COMPONENT_TYPE_SINT32_NV = 5, + VK_COMPONENT_TYPE_SINT64_NV = 6, + VK_COMPONENT_TYPE_UINT8_NV = 7, + VK_COMPONENT_TYPE_UINT16_NV = 8, + VK_COMPONENT_TYPE_UINT32_NV = 9, + VK_COMPONENT_TYPE_UINT64_NV = 10, + VK_COMPONENT_TYPE_BEGIN_RANGE_NV = VK_COMPONENT_TYPE_FLOAT16_NV, + VK_COMPONENT_TYPE_END_RANGE_NV = VK_COMPONENT_TYPE_UINT64_NV, + VK_COMPONENT_TYPE_RANGE_SIZE_NV = (VK_COMPONENT_TYPE_UINT64_NV - VK_COMPONENT_TYPE_FLOAT16_NV + 1), + VK_COMPONENT_TYPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkComponentTypeNV; + +typedef enum VkScopeNV { + VK_SCOPE_DEVICE_NV = 1, + VK_SCOPE_WORKGROUP_NV = 2, + VK_SCOPE_SUBGROUP_NV = 3, + VK_SCOPE_QUEUE_FAMILY_NV = 5, + VK_SCOPE_BEGIN_RANGE_NV = VK_SCOPE_DEVICE_NV, + VK_SCOPE_END_RANGE_NV = VK_SCOPE_QUEUE_FAMILY_NV, + VK_SCOPE_RANGE_SIZE_NV = (VK_SCOPE_QUEUE_FAMILY_NV - VK_SCOPE_DEVICE_NV + 1), + VK_SCOPE_MAX_ENUM_NV = 0x7FFFFFFF +} VkScopeNV; +typedef struct VkCooperativeMatrixPropertiesNV { + VkStructureType sType; + void* pNext; + uint32_t MSize; + uint32_t NSize; + uint32_t KSize; + VkComponentTypeNV AType; + VkComponentTypeNV BType; + VkComponentTypeNV CType; + VkComponentTypeNV DType; + VkScopeNV scope; +} VkCooperativeMatrixPropertiesNV; + +typedef struct VkPhysicalDeviceCooperativeMatrixFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 cooperativeMatrix; + VkBool32 cooperativeMatrixRobustBufferAccess; +} VkPhysicalDeviceCooperativeMatrixFeaturesNV; + +typedef struct VkPhysicalDeviceCooperativeMatrixPropertiesNV { + VkStructureType sType; + void* pNext; + VkShaderStageFlags cooperativeMatrixSupportedStages; +} VkPhysicalDeviceCooperativeMatrixPropertiesNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV)(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkCooperativeMatrixPropertiesNV* pProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceCooperativeMatrixPropertiesNV( + VkPhysicalDevice physicalDevice, + uint32_t* pPropertyCount, + VkCooperativeMatrixPropertiesNV* pProperties); +#endif + + +#define 
VK_NV_coverage_reduction_mode 1 +#define VK_NV_COVERAGE_REDUCTION_MODE_SPEC_VERSION 1 +#define VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME "VK_NV_coverage_reduction_mode" + +typedef enum VkCoverageReductionModeNV { + VK_COVERAGE_REDUCTION_MODE_MERGE_NV = 0, + VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV = 1, + VK_COVERAGE_REDUCTION_MODE_BEGIN_RANGE_NV = VK_COVERAGE_REDUCTION_MODE_MERGE_NV, + VK_COVERAGE_REDUCTION_MODE_END_RANGE_NV = VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV, + VK_COVERAGE_REDUCTION_MODE_RANGE_SIZE_NV = (VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV - VK_COVERAGE_REDUCTION_MODE_MERGE_NV + 1), + VK_COVERAGE_REDUCTION_MODE_MAX_ENUM_NV = 0x7FFFFFFF +} VkCoverageReductionModeNV; +typedef VkFlags VkPipelineCoverageReductionStateCreateFlagsNV; +typedef struct VkPhysicalDeviceCoverageReductionModeFeaturesNV { + VkStructureType sType; + void* pNext; + VkBool32 coverageReductionMode; +} VkPhysicalDeviceCoverageReductionModeFeaturesNV; + +typedef struct VkPipelineCoverageReductionStateCreateInfoNV { + VkStructureType sType; + const void* pNext; + VkPipelineCoverageReductionStateCreateFlagsNV flags; + VkCoverageReductionModeNV coverageReductionMode; +} VkPipelineCoverageReductionStateCreateInfoNV; + +typedef struct VkFramebufferMixedSamplesCombinationNV { + VkStructureType sType; + void* pNext; + VkCoverageReductionModeNV coverageReductionMode; + VkSampleCountFlagBits rasterizationSamples; + VkSampleCountFlags depthStencilSamples; + VkSampleCountFlags colorSamples; +} VkFramebufferMixedSamplesCombinationNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)(VkPhysicalDevice physicalDevice, uint32_t* pCombinationCount, VkFramebufferMixedSamplesCombinationNV* pCombinations); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV( + VkPhysicalDevice physicalDevice, + uint32_t* pCombinationCount, + VkFramebufferMixedSamplesCombinationNV* pCombinations); +#endif + + +#define VK_EXT_fragment_shader_interlock 1 +#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_SPEC_VERSION 1 +#define VK_EXT_FRAGMENT_SHADER_INTERLOCK_EXTENSION_NAME "VK_EXT_fragment_shader_interlock" +typedef struct VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 fragmentShaderSampleInterlock; + VkBool32 fragmentShaderPixelInterlock; + VkBool32 fragmentShaderShadingRateInterlock; +} VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT; + + + +#define VK_EXT_ycbcr_image_arrays 1 +#define VK_EXT_YCBCR_IMAGE_ARRAYS_SPEC_VERSION 1 +#define VK_EXT_YCBCR_IMAGE_ARRAYS_EXTENSION_NAME "VK_EXT_ycbcr_image_arrays" +typedef struct VkPhysicalDeviceYcbcrImageArraysFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 ycbcrImageArrays; +} VkPhysicalDeviceYcbcrImageArraysFeaturesEXT; + + + +#define VK_EXT_headless_surface 1 +#define VK_EXT_HEADLESS_SURFACE_SPEC_VERSION 1 +#define VK_EXT_HEADLESS_SURFACE_EXTENSION_NAME "VK_EXT_headless_surface" +typedef VkFlags VkHeadlessSurfaceCreateFlagsEXT; +typedef struct VkHeadlessSurfaceCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkHeadlessSurfaceCreateFlagsEXT flags; +} VkHeadlessSurfaceCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateHeadlessSurfaceEXT)(VkInstance instance, const VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateHeadlessSurfaceEXT( + VkInstance instance, + const 
VkHeadlessSurfaceCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_EXT_line_rasterization 1 +#define VK_EXT_LINE_RASTERIZATION_SPEC_VERSION 1 +#define VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME "VK_EXT_line_rasterization" + +typedef enum VkLineRasterizationModeEXT { + VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT = 0, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT = 1, + VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT = 2, + VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT = 3, + VK_LINE_RASTERIZATION_MODE_BEGIN_RANGE_EXT = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT, + VK_LINE_RASTERIZATION_MODE_END_RANGE_EXT = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT, + VK_LINE_RASTERIZATION_MODE_RANGE_SIZE_EXT = (VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT - VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT + 1), + VK_LINE_RASTERIZATION_MODE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkLineRasterizationModeEXT; +typedef struct VkPhysicalDeviceLineRasterizationFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 rectangularLines; + VkBool32 bresenhamLines; + VkBool32 smoothLines; + VkBool32 stippledRectangularLines; + VkBool32 stippledBresenhamLines; + VkBool32 stippledSmoothLines; +} VkPhysicalDeviceLineRasterizationFeaturesEXT; + +typedef struct VkPhysicalDeviceLineRasterizationPropertiesEXT { + VkStructureType sType; + void* pNext; + uint32_t lineSubPixelPrecisionBits; +} VkPhysicalDeviceLineRasterizationPropertiesEXT; + +typedef struct VkPipelineRasterizationLineStateCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkLineRasterizationModeEXT lineRasterizationMode; + VkBool32 stippledLineEnable; + uint32_t lineStippleFactor; + uint16_t lineStipplePattern; +} VkPipelineRasterizationLineStateCreateInfoEXT; + +typedef void (VKAPI_PTR *PFN_vkCmdSetLineStippleEXT)(VkCommandBuffer commandBuffer, uint32_t lineStippleFactor, uint16_t lineStipplePattern); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkCmdSetLineStippleEXT( + VkCommandBuffer commandBuffer, + uint32_t lineStippleFactor, + uint16_t lineStipplePattern); +#endif + + +#define VK_EXT_host_query_reset 1 +#define VK_EXT_HOST_QUERY_RESET_SPEC_VERSION 1 +#define VK_EXT_HOST_QUERY_RESET_EXTENSION_NAME "VK_EXT_host_query_reset" +typedef VkPhysicalDeviceHostQueryResetFeatures VkPhysicalDeviceHostQueryResetFeaturesEXT; + +typedef void (VKAPI_PTR *PFN_vkResetQueryPoolEXT)(VkDevice device, VkQueryPool queryPool, uint32_t firstQuery, uint32_t queryCount); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR void VKAPI_CALL vkResetQueryPoolEXT( + VkDevice device, + VkQueryPool queryPool, + uint32_t firstQuery, + uint32_t queryCount); +#endif + + +#define VK_EXT_index_type_uint8 1 +#define VK_EXT_INDEX_TYPE_UINT8_SPEC_VERSION 1 +#define VK_EXT_INDEX_TYPE_UINT8_EXTENSION_NAME "VK_EXT_index_type_uint8" +typedef struct VkPhysicalDeviceIndexTypeUint8FeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 indexTypeUint8; +} VkPhysicalDeviceIndexTypeUint8FeaturesEXT; + + + +#define VK_EXT_shader_demote_to_helper_invocation 1 +#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_SPEC_VERSION 1 +#define VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME "VK_EXT_shader_demote_to_helper_invocation" +typedef struct VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 shaderDemoteToHelperInvocation; +} VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT; + + + +#define VK_EXT_texel_buffer_alignment 1 +#define 
VK_EXT_TEXEL_BUFFER_ALIGNMENT_SPEC_VERSION 1 +#define VK_EXT_TEXEL_BUFFER_ALIGNMENT_EXTENSION_NAME "VK_EXT_texel_buffer_alignment" +typedef struct VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT { + VkStructureType sType; + void* pNext; + VkBool32 texelBufferAlignment; +} VkPhysicalDeviceTexelBufferAlignmentFeaturesEXT; + +typedef struct VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT { + VkStructureType sType; + void* pNext; + VkDeviceSize storageTexelBufferOffsetAlignmentBytes; + VkBool32 storageTexelBufferOffsetSingleTexelAlignment; + VkDeviceSize uniformTexelBufferOffsetAlignmentBytes; + VkBool32 uniformTexelBufferOffsetSingleTexelAlignment; +} VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT; + + + +#define VK_GOOGLE_user_type 1 +#define VK_GOOGLE_USER_TYPE_SPEC_VERSION 1 +#define VK_GOOGLE_USER_TYPE_EXTENSION_NAME "VK_GOOGLE_user_type" + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_fuchsia.h b/vma-rs/vulkan/vulkan_fuchsia.h new file mode 100644 index 0000000..81ebe55 --- /dev/null +++ b/vma-rs/vulkan/vulkan_fuchsia.h @@ -0,0 +1,57 @@ +#ifndef VULKAN_FUCHSIA_H_ +#define VULKAN_FUCHSIA_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_FUCHSIA_imagepipe_surface 1 +#define VK_FUCHSIA_IMAGEPIPE_SURFACE_SPEC_VERSION 1 +#define VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME "VK_FUCHSIA_imagepipe_surface" +typedef VkFlags VkImagePipeSurfaceCreateFlagsFUCHSIA; +typedef struct VkImagePipeSurfaceCreateInfoFUCHSIA { + VkStructureType sType; + const void* pNext; + VkImagePipeSurfaceCreateFlagsFUCHSIA flags; + zx_handle_t imagePipeHandle; +} VkImagePipeSurfaceCreateInfoFUCHSIA; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateImagePipeSurfaceFUCHSIA)(VkInstance instance, const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateImagePipeSurfaceFUCHSIA( + VkInstance instance, + const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_ggp.h b/vma-rs/vulkan/vulkan_ggp.h new file mode 100644 index 0000000..fd30613 --- /dev/null +++ b/vma-rs/vulkan/vulkan_ggp.h @@ -0,0 +1,68 @@ +#ifndef VULKAN_GGP_H_ +#define VULKAN_GGP_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. 
+** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_GGP_stream_descriptor_surface 1 +#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_SPEC_VERSION 1 +#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME "VK_GGP_stream_descriptor_surface" +typedef VkFlags VkStreamDescriptorSurfaceCreateFlagsGGP; +typedef struct VkStreamDescriptorSurfaceCreateInfoGGP { + VkStructureType sType; + const void* pNext; + VkStreamDescriptorSurfaceCreateFlagsGGP flags; + GgpStreamDescriptor streamDescriptor; +} VkStreamDescriptorSurfaceCreateInfoGGP; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateStreamDescriptorSurfaceGGP)(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateStreamDescriptorSurfaceGGP( + VkInstance instance, + const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + + +#define VK_GGP_frame_token 1 +#define VK_GGP_FRAME_TOKEN_SPEC_VERSION 1 +#define VK_GGP_FRAME_TOKEN_EXTENSION_NAME "VK_GGP_frame_token" +typedef struct VkPresentFrameTokenGGP { + VkStructureType sType; + const void* pNext; + GgpFrameToken frameToken; +} VkPresentFrameTokenGGP; + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_ios.h b/vma-rs/vulkan/vulkan_ios.h new file mode 100644 index 0000000..72ef1a8 --- /dev/null +++ b/vma-rs/vulkan/vulkan_ios.h @@ -0,0 +1,57 @@ +#ifndef VULKAN_IOS_H_ +#define VULKAN_IOS_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_MVK_ios_surface 1 +#define VK_MVK_IOS_SURFACE_SPEC_VERSION 2 +#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface" +typedef VkFlags VkIOSSurfaceCreateFlagsMVK; +typedef struct VkIOSSurfaceCreateInfoMVK { + VkStructureType sType; + const void* pNext; + VkIOSSurfaceCreateFlagsMVK flags; + const void* pView; +} VkIOSSurfaceCreateInfoMVK; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK( + VkInstance instance, + const VkIOSSurfaceCreateInfoMVK* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_macos.h b/vma-rs/vulkan/vulkan_macos.h new file mode 100644 index 0000000..e6e5dea --- /dev/null +++ b/vma-rs/vulkan/vulkan_macos.h @@ -0,0 +1,57 @@ +#ifndef VULKAN_MACOS_H_ +#define VULKAN_MACOS_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_MVK_macos_surface 1 +#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 2 +#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface" +typedef VkFlags VkMacOSSurfaceCreateFlagsMVK; +typedef struct VkMacOSSurfaceCreateInfoMVK { + VkStructureType sType; + const void* pNext; + VkMacOSSurfaceCreateFlagsMVK flags; + const void* pView; +} VkMacOSSurfaceCreateInfoMVK; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK( + VkInstance instance, + const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_metal.h b/vma-rs/vulkan/vulkan_metal.h new file mode 100644 index 0000000..3dec68c --- /dev/null +++ b/vma-rs/vulkan/vulkan_metal.h @@ -0,0 +1,64 @@ +#ifndef VULKAN_METAL_H_ +#define VULKAN_METAL_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_EXT_metal_surface 1 + +#ifdef __OBJC__ +@class CAMetalLayer; +#else +typedef void CAMetalLayer; +#endif + +#define VK_EXT_METAL_SURFACE_SPEC_VERSION 1 +#define VK_EXT_METAL_SURFACE_EXTENSION_NAME "VK_EXT_metal_surface" +typedef VkFlags VkMetalSurfaceCreateFlagsEXT; +typedef struct VkMetalSurfaceCreateInfoEXT { + VkStructureType sType; + const void* pNext; + VkMetalSurfaceCreateFlagsEXT flags; + const CAMetalLayer* pLayer; +} VkMetalSurfaceCreateInfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateMetalSurfaceEXT)(VkInstance instance, const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT( + VkInstance instance, + const VkMetalSurfaceCreateInfoEXT* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_vi.h b/vma-rs/vulkan/vulkan_vi.h new file mode 100644 index 0000000..6fb66f9 --- /dev/null +++ b/vma-rs/vulkan/vulkan_vi.h @@ -0,0 +1,57 @@ +#ifndef VULKAN_VI_H_ +#define VULKAN_VI_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_NN_vi_surface 1 +#define VK_NN_VI_SURFACE_SPEC_VERSION 1 +#define VK_NN_VI_SURFACE_EXTENSION_NAME "VK_NN_vi_surface" +typedef VkFlags VkViSurfaceCreateFlagsNN; +typedef struct VkViSurfaceCreateInfoNN { + VkStructureType sType; + const void* pNext; + VkViSurfaceCreateFlagsNN flags; + void* window; +} VkViSurfaceCreateInfoNN; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateViSurfaceNN)(VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN( + VkInstance instance, + const VkViSurfaceCreateInfoNN* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_wayland.h b/vma-rs/vulkan/vulkan_wayland.h new file mode 100644 index 0000000..599d05b --- /dev/null +++ b/vma-rs/vulkan/vulkan_wayland.h @@ -0,0 +1,64 @@ +#ifndef VULKAN_WAYLAND_H_ +#define VULKAN_WAYLAND_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. 
+** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_wayland_surface 1 +#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6 +#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface" +typedef VkFlags VkWaylandSurfaceCreateFlagsKHR; +typedef struct VkWaylandSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkWaylandSurfaceCreateFlagsKHR flags; + struct wl_display* display; + struct wl_surface* surface; +} VkWaylandSurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR( + VkInstance instance, + const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + struct wl_display* display); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_win32.h b/vma-rs/vulkan/vulkan_win32.h new file mode 100644 index 0000000..20a1dc0 --- /dev/null +++ b/vma-rs/vulkan/vulkan_win32.h @@ -0,0 +1,328 @@ +#ifndef VULKAN_WIN32_H_ +#define VULKAN_WIN32_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_win32_surface 1 +#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6 +#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface" +typedef VkFlags VkWin32SurfaceCreateFlagsKHR; +typedef struct VkWin32SurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkWin32SurfaceCreateFlagsKHR flags; + HINSTANCE hinstance; + HWND hwnd; +} VkWin32SurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR( + VkInstance instance, + const VkWin32SurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex); +#endif + + +#define VK_KHR_external_memory_win32 1 +#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32" +typedef struct VkImportMemoryWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportMemoryWin32HandleInfoKHR; + +typedef struct VkExportMemoryWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportMemoryWin32HandleInfoKHR; + +typedef struct VkMemoryWin32HandlePropertiesKHR { + VkStructureType sType; + void* pNext; + uint32_t memoryTypeBits; +} VkMemoryWin32HandlePropertiesKHR; + +typedef struct VkMemoryGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkDeviceMemory memory; + VkExternalMemoryHandleTypeFlagBits handleType; +} VkMemoryGetWin32HandleInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR( + VkDevice device, + const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR( + VkDevice device, + VkExternalMemoryHandleTypeFlagBits handleType, + HANDLE handle, + VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties); +#endif + + +#define VK_KHR_win32_keyed_mutex 1 +#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1 +#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex" +typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t acquireCount; + const VkDeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeouts; + uint32_t releaseCount; + const VkDeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; +} VkWin32KeyedMutexAcquireReleaseInfoKHR; + + + +#define VK_KHR_external_semaphore_win32 1 +#define 
VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32" +typedef struct VkImportSemaphoreWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkSemaphoreImportFlags flags; + VkExternalSemaphoreHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportSemaphoreWin32HandleInfoKHR; + +typedef struct VkExportSemaphoreWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportSemaphoreWin32HandleInfoKHR; + +typedef struct VkD3D12FenceSubmitInfoKHR { + VkStructureType sType; + const void* pNext; + uint32_t waitSemaphoreValuesCount; + const uint64_t* pWaitSemaphoreValues; + uint32_t signalSemaphoreValuesCount; + const uint64_t* pSignalSemaphoreValues; +} VkD3D12FenceSubmitInfoKHR; + +typedef struct VkSemaphoreGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkSemaphore semaphore; + VkExternalSemaphoreHandleTypeFlagBits handleType; +} VkSemaphoreGetWin32HandleInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR( + VkDevice device, + const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR( + VkDevice device, + const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); +#endif + + +#define VK_KHR_external_fence_win32 1 +#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1 +#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32" +typedef struct VkImportFenceWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkFenceImportFlags flags; + VkExternalFenceHandleTypeFlagBits handleType; + HANDLE handle; + LPCWSTR name; +} VkImportFenceWin32HandleInfoKHR; + +typedef struct VkExportFenceWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; + LPCWSTR name; +} VkExportFenceWin32HandleInfoKHR; + +typedef struct VkFenceGetWin32HandleInfoKHR { + VkStructureType sType; + const void* pNext; + VkFence fence; + VkExternalFenceHandleTypeFlagBits handleType; +} VkFenceGetWin32HandleInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo); +typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR( + VkDevice device, + const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR( + VkDevice device, + const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, + HANDLE* pHandle); +#endif + + +#define VK_NV_external_memory_win32 1 +#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1 +#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32" +typedef struct VkImportMemoryWin32HandleInfoNV { + VkStructureType 
sType; + const void* pNext; + VkExternalMemoryHandleTypeFlagsNV handleType; + HANDLE handle; +} VkImportMemoryWin32HandleInfoNV; + +typedef struct VkExportMemoryWin32HandleInfoNV { + VkStructureType sType; + const void* pNext; + const SECURITY_ATTRIBUTES* pAttributes; + DWORD dwAccess; +} VkExportMemoryWin32HandleInfoNV; + +typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV( + VkDevice device, + VkDeviceMemory memory, + VkExternalMemoryHandleTypeFlagsNV handleType, + HANDLE* pHandle); +#endif + + +#define VK_NV_win32_keyed_mutex 1 +#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 2 +#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex" +typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV { + VkStructureType sType; + const void* pNext; + uint32_t acquireCount; + const VkDeviceMemory* pAcquireSyncs; + const uint64_t* pAcquireKeys; + const uint32_t* pAcquireTimeoutMilliseconds; + uint32_t releaseCount; + const VkDeviceMemory* pReleaseSyncs; + const uint64_t* pReleaseKeys; +} VkWin32KeyedMutexAcquireReleaseInfoNV; + + + +#define VK_EXT_full_screen_exclusive 1 +#define VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION 4 +#define VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME "VK_EXT_full_screen_exclusive" + +typedef enum VkFullScreenExclusiveEXT { + VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT = 0, + VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT = 1, + VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT = 2, + VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT = 3, + VK_FULL_SCREEN_EXCLUSIVE_BEGIN_RANGE_EXT = VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT, + VK_FULL_SCREEN_EXCLUSIVE_END_RANGE_EXT = VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT, + VK_FULL_SCREEN_EXCLUSIVE_RANGE_SIZE_EXT = (VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT - VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT + 1), + VK_FULL_SCREEN_EXCLUSIVE_MAX_ENUM_EXT = 0x7FFFFFFF +} VkFullScreenExclusiveEXT; +typedef struct VkSurfaceFullScreenExclusiveInfoEXT { + VkStructureType sType; + void* pNext; + VkFullScreenExclusiveEXT fullScreenExclusive; +} VkSurfaceFullScreenExclusiveInfoEXT; + +typedef struct VkSurfaceCapabilitiesFullScreenExclusiveEXT { + VkStructureType sType; + void* pNext; + VkBool32 fullScreenExclusiveSupported; +} VkSurfaceCapabilitiesFullScreenExclusiveEXT; + +typedef struct VkSurfaceFullScreenExclusiveWin32InfoEXT { + VkStructureType sType; + const void* pNext; + HMONITOR hmonitor; +} VkSurfaceFullScreenExclusiveWin32InfoEXT; + +typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes); +typedef VkResult (VKAPI_PTR *PFN_vkAcquireFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain); +typedef VkResult (VKAPI_PTR *PFN_vkReleaseFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain); +typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModes2EXT)(VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModes2EXT( + VkPhysicalDevice physicalDevice, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + uint32_t* pPresentModeCount, + VkPresentModeKHR* pPresentModes); + +VKAPI_ATTR VkResult VKAPI_CALL 
vkAcquireFullScreenExclusiveModeEXT( + VkDevice device, + VkSwapchainKHR swapchain); + +VKAPI_ATTR VkResult VKAPI_CALL vkReleaseFullScreenExclusiveModeEXT( + VkDevice device, + VkSwapchainKHR swapchain); + +VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModes2EXT( + VkDevice device, + const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, + VkDeviceGroupPresentModeFlagsKHR* pModes); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_xcb.h b/vma-rs/vulkan/vulkan_xcb.h new file mode 100644 index 0000000..4cc0bc0 --- /dev/null +++ b/vma-rs/vulkan/vulkan_xcb.h @@ -0,0 +1,65 @@ +#ifndef VULKAN_XCB_H_ +#define VULKAN_XCB_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_xcb_surface 1 +#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6 +#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface" +typedef VkFlags VkXcbSurfaceCreateFlagsKHR; +typedef struct VkXcbSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkXcbSurfaceCreateFlagsKHR flags; + xcb_connection_t* connection; + xcb_window_t window; +} VkXcbSurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR( + VkInstance instance, + const VkXcbSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + xcb_connection_t* connection, + xcb_visualid_t visual_id); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_xlib.h b/vma-rs/vulkan/vulkan_xlib.h new file mode 100644 index 0000000..ee2b48a --- /dev/null +++ b/vma-rs/vulkan/vulkan_xlib.h @@ -0,0 +1,65 @@ +#ifndef VULKAN_XLIB_H_ +#define VULKAN_XLIB_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. 
+*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. +** +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define VK_KHR_xlib_surface 1 +#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6 +#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface" +typedef VkFlags VkXlibSurfaceCreateFlagsKHR; +typedef struct VkXlibSurfaceCreateInfoKHR { + VkStructureType sType; + const void* pNext; + VkXlibSurfaceCreateFlagsKHR flags; + Display* dpy; + Window window; +} VkXlibSurfaceCreateInfoKHR; + +typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface); +typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID); + +#ifndef VK_NO_PROTOTYPES +VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR( + VkInstance instance, + const VkXlibSurfaceCreateInfoKHR* pCreateInfo, + const VkAllocationCallbacks* pAllocator, + VkSurfaceKHR* pSurface); + +VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR( + VkPhysicalDevice physicalDevice, + uint32_t queueFamilyIndex, + Display* dpy, + VisualID visualID); +#endif + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/vma-rs/vulkan/vulkan_xlib_xrandr.h b/vma-rs/vulkan/vulkan_xlib_xrandr.h new file mode 100644 index 0000000..08c4fd7 --- /dev/null +++ b/vma-rs/vulkan/vulkan_xlib_xrandr.h @@ -0,0 +1,55 @@ +#ifndef VULKAN_XLIB_XRANDR_H_ +#define VULKAN_XLIB_XRANDR_H_ 1 + +/* +** Copyright (c) 2015-2019 The Khronos Group Inc. +** +** Licensed under the Apache License, Version 2.0 (the "License"); +** you may not use this file except in compliance with the License. +** You may obtain a copy of the License at +** +** http://www.apache.org/licenses/LICENSE-2.0 +** +** Unless required by applicable law or agreed to in writing, software +** distributed under the License is distributed on an "AS IS" BASIS, +** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +** See the License for the specific language governing permissions and +** limitations under the License. +*/ + +/* +** This header is generated from the Khronos Vulkan XML API Registry. 
+**
+*/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+#define VK_EXT_acquire_xlib_display 1
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION 1
+#define VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_xlib_display"
+typedef VkResult (VKAPI_PTR *PFN_vkAcquireXlibDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display);
+typedef VkResult (VKAPI_PTR *PFN_vkGetRandROutputDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay);
+
+#ifndef VK_NO_PROTOTYPES
+VKAPI_ATTR VkResult VKAPI_CALL vkAcquireXlibDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    Display* dpy,
+    VkDisplayKHR display);
+
+VKAPI_ATTR VkResult VKAPI_CALL vkGetRandROutputDisplayEXT(
+    VkPhysicalDevice physicalDevice,
+    Display* dpy,
+    RROutput rrOutput,
+    VkDisplayKHR* pDisplay);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/vulkan-rs/.vscode/settings.json b/vulkan-rs/.vscode/settings.json
new file mode 100644
index 0000000..772d8cc
--- /dev/null
+++ b/vulkan-rs/.vscode/settings.json
@@ -0,0 +1,7 @@
+{
+    "workbench.colorCustomizations": {
+        "activityBar.background": "#3F214A",
+        "titleBar.activeBackground": "#592F68",
+        "titleBar.activeForeground": "#FDFCFE"
+    }
+}
\ No newline at end of file
diff --git a/vulkan-rs/Cargo.toml b/vulkan-rs/Cargo.toml
new file mode 100644
index 0000000..98bb031
--- /dev/null
+++ b/vulkan-rs/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "vulkan-rs"
+version = "0.1.0"
+authors = ["hodasemi "]
+edition = "2021"
+
+[dependencies]
+image = "0.24.5"
+vulkan-sys = { path = "../vulkan-sys" }
+vma-rs = { path = "../vma-rs" }
+anyhow = { version = "1.0.68", features = ["backtrace"] }
+cgmath = "0.18.0"
+assetpath = { path = "../assetpath" }
diff --git a/vulkan-rs/src/acceleration_structure.rs b/vulkan-rs/src/acceleration_structure.rs
new file mode 100644
index 0000000..890971b
--- /dev/null
+++ b/vulkan-rs/src/acceleration_structure.rs
@@ -0,0 +1,451 @@
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use cgmath::{Matrix, Matrix4, One};
+
+use core::slice;
+use std::{
+    mem,
+    sync::{Arc, Mutex},
+};
+
+enum AccelerationStructureBuilderData {
+    TopLevel(Vec<VkAccelerationStructureInstanceKHR>),
+    BottomLevel(Vec<VkAccelerationStructureGeometryKHR>, Vec<u32>),
+}
+
+pub struct AccelerationStructureBuilder {
+    flags: VkAccelerationStructureCreateFlagBitsKHR,
+    capture_replay_address: Option<VkDeviceAddress>,
+
+    data: AccelerationStructureBuilderData,
+}
+
+impl AccelerationStructureBuilder {
+    pub fn add_instance(
+        mut self,
+        blas: &Arc<AccelerationStructure>,
+        transform: Option<Matrix4<f32>>,
+        instance_flags: impl Into<VkGeometryInstanceFlagBitsKHR>,
+    ) -> Self {
+        match &mut self.data {
+            AccelerationStructureBuilderData::TopLevel(instances) => {
+                let transposed: [[f32; 4]; 4] = match transform {
+                    Some(transform) => transform.transpose(),
+                    None => Matrix4::one(),
+                }
+                .into();
+
+                let instance = VkAccelerationStructureInstanceKHR::new(
+                    VkTransformMatrixKHR::from(transposed),
+                    instances.len() as u32,
+                    0xFF,
+                    0,
+                    instance_flags,
+                    blas.address(),
+                );
+
+                instances.push(instance);
+            }
+            AccelerationStructureBuilderData::BottomLevel(_, _) => {
+                panic!("can not add acceleration structures to bottom level as")
+            }
+        }
+
+        self
+    }
+
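    // Editor's note: a minimal usage sketch for the instance path above, not
    // part of the patch. It assumes a previously built `blas` and an open
    // `CommandBufferRecorder`; the identity transform (`None`) and the `0`
    // instance flags are placeholder choices, not API requirements:
    //
    //     let tlas = AccelerationStructure::top_level()
    //         .add_instance(&blas, None, 0)
    //         .build(device.clone(), &mut recorder)?;
    //
    // Each call appends one VkAccelerationStructureInstanceKHR; the running
    // instance count appears to be used as the per-instance custom index.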
+    pub fn add_vertices<T>(
+        mut self,
+        vertex_buffer: &Arc<Buffer<T>>,
+        transform: Option<Arc<Buffer<VkTransformMatrixKHR>>>,
+        flags: impl Into<VkGeometryFlagBitsKHR>,
+    ) -> Self {
+        match &mut self.data {
+            AccelerationStructureBuilderData::TopLevel(_) => {
+                panic!("can not add buffers to top level as")
+            }
+            AccelerationStructureBuilderData::BottomLevel(geometries, primitive_counts) => {
+                let geometry = VkAccelerationStructureGeometryKHR::new(
+                    VK_GEOMETRY_TYPE_TRIANGLES_KHR,
+                    VkAccelerationStructureGeometryDataKHR::from(
+                        VkAccelerationStructureGeometryTrianglesDataKHR::new(
+                            VK_FORMAT_R32G32B32_SFLOAT,
+                            vertex_buffer.device_address().into(),
+                            mem::size_of::<T>() as VkDeviceSize,
+                            vertex_buffer.size() as u32,
+                            VK_INDEX_TYPE_NONE_KHR,
+                            VkDeviceOrHostAddressConstKHR::null(),
+                            match &transform {
+                                Some(transform_buffer) => transform_buffer.device_address().into(),
+                                None => VkDeviceOrHostAddressConstKHR::null(),
+                            },
+                        ),
+                    ),
+                    flags,
+                );
+
+                let primitive_count = (vertex_buffer.size() / 3) as u32;
+
+                geometries.push(geometry);
+                primitive_counts.push(primitive_count);
+            }
+        }
+
+        self
+    }
+
+    pub fn set_flags(mut self, flags: impl Into<VkAccelerationStructureCreateFlagBitsKHR>) -> Self {
+        self.flags = flags.into();
+
+        self
+    }
+
+    pub fn set_replay_address(mut self, capture_replay_address: VkDeviceAddress) -> Self {
+        self.capture_replay_address = Some(capture_replay_address);
+
+        self
+    }
+
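    // Editor's note: `build` below follows the standard two-phase Vulkan
    // acceleration structure pattern: first query conservative sizes for the
    // chosen geometry, then allocate the backing buffer and record the build.
    // Sketch of the sizing step, mirroring the calls `build` itself makes:
    //
    //     let sizes = device.get_acceleration_structure_build_sizes(
    //         VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR,
    //         &geometry_info,
    //         &max_primitive_counts,
    //     );
    //     // the result buffer must hold sizes.accelerationStructureSize
    //     // bytes; scratch needs sizes.buildScratchSize / updateScratchSize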
+    pub fn build(
+        self,
+        device: Arc<Device>,
+        recorder: &mut CommandBufferRecorder<'_>,
+    ) -> Result<Arc<AccelerationStructure>> {
+        let build_flags = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR
+            | VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR;
+
+        let (acceleration_structure, generation_data, result_buffer, build_sizes_info) =
+            match self.data {
+                AccelerationStructureBuilderData::TopLevel(instances) => {
+                    let instances_buffer = Buffer::builder()
+                        .set_data(&instances)
+                        .set_usage(
+                            VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR
+                                | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
+                        )
+                        .set_memory_usage(MemoryUsage::CpuToGpu)
+                        .build(device.clone())?;
+
+                    let device_address: VkDeviceOrHostAddressConstKHR =
+                        instances_buffer.device_address().into();
+
+                    let geometry = VkAccelerationStructureGeometryKHR::new(
+                        VK_GEOMETRY_TYPE_INSTANCES_KHR,
+                        VkAccelerationStructureGeometryDataKHR::from(
+                            VkAccelerationStructureGeometryInstancesDataKHR::from(device_address),
+                        ),
+                        VK_GEOMETRY_OPAQUE_BIT_KHR,
+                    );
+
+                    let mut geometry_info = VkAccelerationStructureBuildGeometryInfoKHR::minimal(
+                        VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR,
+                        build_flags,
+                        VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR,
+                    );
+
+                    geometry_info.set_geometry(slice::from_ref(&geometry));
+
+                    let max_primitive_counts = 1;
+
+                    let build_sizes_info = device.get_acceleration_structure_build_sizes(
+                        VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR,
+                        &geometry_info,
+                        &max_primitive_counts,
+                    );
+
+                    let result_buffer = Buffer::builder()
+                        .set_usage(VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR)
+                        .set_memory_usage(MemoryUsage::GpuOnly)
+                        .set_size(build_sizes_info.accelerationStructureSize)
+                        .build(device.clone())?;
+
+                    let as_ci = VkAccelerationStructureCreateInfoKHR::new(
+                        self.flags,
+                        result_buffer.vk_handle(),
+                        0,
+                        build_sizes_info.accelerationStructureSize,
+                        VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR,
+                        self.capture_replay_address.unwrap_or(0),
+                    );
+
+                    (
+                        device.create_acceleration_structure(&as_ci, None)?,
+                        AccelerationStructureGenerationData::from((
+                            instances,
+                            geometry,
+                            instances_buffer,
+                        )),
+                        result_buffer,
+                        build_sizes_info,
+                    )
+                }
+                AccelerationStructureBuilderData::BottomLevel(geometries, primitive_counts) => {
+                    let mut geometry_info = VkAccelerationStructureBuildGeometryInfoKHR::minimal(
+                        VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR,
+                        build_flags,
+                        VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR,
+                    );
+
+                    geometry_info.set_geometry(geometries.as_slice());
+
+                    let max_primitive_counts = *primitive_counts
+                        .iter()
+                        .max()
+                        .expect("empty primitive counts");
+
+                    let build_sizes_info = device.get_acceleration_structure_build_sizes(
+                        VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR,
+                        &geometry_info,
+                        &max_primitive_counts,
+                    );
+
+                    let result_buffer = Buffer::builder()
+                        .set_usage(VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR)
+                        .set_memory_usage(MemoryUsage::GpuOnly)
+                        .set_size(build_sizes_info.accelerationStructureSize)
+                        .build(device.clone())?;
+
+                    let as_ci = VkAccelerationStructureCreateInfoKHR::new(
+                        self.flags,
+                        result_buffer.vk_handle(),
+                        0,
+                        build_sizes_info.accelerationStructureSize,
+                        VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR,
+                        self.capture_replay_address.unwrap_or(0),
+                    );
+
+                    (
+                        device.create_acceleration_structure(&as_ci, None)?,
+                        AccelerationStructureGenerationData::from((geometries, primitive_counts)),
+                        result_buffer,
+                        build_sizes_info,
+                    )
+                }
+            };
+
+        let acceleration_structure = Arc::new(AccelerationStructure {
+            device: device.clone(),
+
+            acceleration_structure,
+
+            result_buffer,
+            scratch_buffer: Mutex::new(AccelerationStructure::create_scratch_buffer(
+                &device,
+                build_sizes_info.buildScratchSize,
+                device
+                    .physical_device()
+                    .acceleration_structure_properties()
+                    .minAccelerationStructureScratchOffsetAlignment as VkDeviceSize,
+            )?),
+
+            update_scratch_buffer_size: build_sizes_info.updateScratchSize,
+
+            generation_data,
+            build_flags,
+        });
+
+        acceleration_structure.generate(
+            recorder,
+            VkAccelerationStructureKHR::NULL_HANDLE,
+            VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR,
+        )?;
+
+        Ok(acceleration_structure)
+    }
+}
+
+pub struct AccelerationStructure {
+    device: Arc<Device>,
+
+    acceleration_structure: VkAccelerationStructureKHR,
+
+    result_buffer: Arc<Buffer<u8>>,
+    scratch_buffer: Mutex<Arc<Buffer<u8>>>,
+
+    update_scratch_buffer_size: VkDeviceSize,
+
+    generation_data: AccelerationStructureGenerationData,
+    build_flags: VkBuildAccelerationStructureFlagBitsKHR,
+}
+
+impl AccelerationStructure {
+    pub fn bottom_level() -> AccelerationStructureBuilder {
+        AccelerationStructureBuilder {
+            flags: 0.into(),
+            capture_replay_address: None,
+
+            data: AccelerationStructureBuilderData::BottomLevel(Vec::new(), Vec::new()),
+        }
+    }
+
+    pub fn top_level() -> AccelerationStructureBuilder {
+        AccelerationStructureBuilder {
+            flags: 0.into(),
+            capture_replay_address: None,
+
+            data: AccelerationStructureBuilderData::TopLevel(Vec::new()),
+        }
+    }
+
+    pub fn result_buffer(&self) -> &Arc<Buffer<u8>> {
+        &self.result_buffer
+    }
+
+    fn address(&self) -> VkDeviceAddress {
+        self.device.get_acceleration_structure_device_address(
+            &VkAccelerationStructureDeviceAddressInfoKHR::new(self.acceleration_structure),
+        )
+    }
+
+    pub fn update(&self, buffer_recorder: &mut CommandBufferRecorder<'_>) -> Result<()> {
+        *self.scratch_buffer.lock().unwrap() = Self::create_scratch_buffer(
+            &self.device,
+            self.update_scratch_buffer_size,
+            self.device
+                .physical_device()
+                .acceleration_structure_properties()
+                .minAccelerationStructureScratchOffsetAlignment as VkDeviceSize,
+        )?;
+
+        self.generate(
+            buffer_recorder,
+            self.acceleration_structure,
+            VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,
+        )
+    }
+
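    // Editor's note: `update` above performs a refit rather than a rebuild,
    // which is only valid because `build` always sets
    // VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR. A per-frame
    // caller might look like this (hypothetical `recorder` from an active
    // command buffer):
    //
    //     // after moving instances / rewriting the instance buffer:
    //     tlas.update(&mut recorder)?; // reuses the structure as its own src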
+    fn generate(
+        &self,
+        buffer_recorder: &mut CommandBufferRecorder<'_>,
+        src: VkAccelerationStructureKHR,
+        mode: VkBuildAccelerationStructureModeKHR,
+    ) -> Result<()> {
+        match &self.generation_data {
+            AccelerationStructureGenerationData::TopLevel(instances, geometry, _buffer) => {
+                let mut build_info = VkAccelerationStructureBuildGeometryInfoKHR::new(
+                    VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR,
+                    self.build_flags,
+                    mode,
+                    src,
+                    self.acceleration_structure,
+                    self.scratch_buffer.lock().unwrap().device_address().into(),
+                );
+
+                let geometry_slice: &[&VkAccelerationStructureGeometryKHR] = &[geometry];
+
+                build_info.set_geometry(geometry_slice);
+
+                buffer_recorder.build_acceleration_structures(
+                    &[build_info],
+                    &[&[VkAccelerationStructureBuildRangeInfoKHR::new(
+                        instances.len() as u32,
+                        0,
+                        0,
+                        0,
+                    )]],
+                );
+            }
+            AccelerationStructureGenerationData::BottomLevel(geometries, range_infos) => {
+                let mut build_info = VkAccelerationStructureBuildGeometryInfoKHR::new(
+                    VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR,
+                    self.build_flags,
+                    mode,
+                    src,
+                    self.acceleration_structure,
+                    self.scratch_buffer.lock().unwrap().device_address().into(),
+                );
+
+                build_info.set_geometry(geometries.as_slice());
+
+                buffer_recorder
+                    .build_acceleration_structures(&[build_info], &[range_infos.as_slice()]);
+            }
+        };
+
+        buffer_recorder.memory_barrier(
+            VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR,
+            VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
+            VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR,
+            VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
+        );
+
+        Ok(())
+    }
+
+    #[inline]
+    fn create_scratch_buffer(
+        device: &Arc<Device>,
+        size: VkDeviceSize,
+        alignment: VkDeviceSize,
+    ) -> Result<Arc<Buffer<u8>>> {
+        Buffer::builder()
+            .set_usage(
+                VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
+            )
+            .set_memory_usage(MemoryUsage::GpuOnly)
+            .set_size(size)
+            .force_alignment(alignment)
+            .build(device.clone())
+    }
+}
+
+impl Drop for AccelerationStructure {
+    fn drop(&mut self) {
+        self.device
+            .destroy_acceleration_structure(self.acceleration_structure, None);
+    }
+}
+
+impl_vk_handle!(
+    AccelerationStructure,
+    VkAccelerationStructureKHR,
+    acceleration_structure
+);
+
+enum AccelerationStructureGenerationData {
+    TopLevel(
+        Vec<VkAccelerationStructureInstanceKHR>,
+        VkAccelerationStructureGeometryKHR,
+        Arc<Buffer<VkAccelerationStructureInstanceKHR>>,
+    ),
+    BottomLevel(
+        Vec<VkAccelerationStructureGeometryKHR>,
+        Vec<VkAccelerationStructureBuildRangeInfoKHR>,
+    ),
+}
+
+impl
+    From<(
+        Vec<VkAccelerationStructureInstanceKHR>,
+        VkAccelerationStructureGeometryKHR,
+        Arc<Buffer<VkAccelerationStructureInstanceKHR>>,
+    )> for AccelerationStructureGenerationData
+{
+    fn from(
+        (instances, geometry, buffer): (
+            Vec<VkAccelerationStructureInstanceKHR>,
+            VkAccelerationStructureGeometryKHR,
+            Arc<Buffer<VkAccelerationStructureInstanceKHR>>,
+        ),
+    ) -> Self {
+        Self::TopLevel(instances, geometry, buffer)
+    }
+}
+
+impl From<(Vec<VkAccelerationStructureGeometryKHR>, Vec<u32>)>
+    for AccelerationStructureGenerationData
+{
+    fn from(
+        (geometries, primitive_counts): (Vec<VkAccelerationStructureGeometryKHR>, Vec<u32>),
+    ) -> Self {
+        Self::BottomLevel(
+            geometries,
+            primitive_counts
+                .iter()
+                .map(|&count| VkAccelerationStructureBuildRangeInfoKHR::new(count, 0, 0, 0))
+                .collect(),
+        )
+    }
+}
diff --git a/vulkan-rs/src/address.rs b/vulkan-rs/src/address.rs
new file mode 100644
index 0000000..faae62f
--- /dev/null
+++ b/vulkan-rs/src/address.rs
@@ -0,0 +1,157 @@
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use std::fmt;
+
+#[derive(Clone)]
+pub enum Address {
+    DeviceAddress(VkDeviceAddress),
+    HostAddressMut(VkDeviceOrHostAddressKHR),
+    HostAddressConst(VkDeviceOrHostAddressConstKHR),
+}
+
+impl Address {
+    #[inline]
+    pub fn device_address_mut(&self) -> Result<VkDeviceOrHostAddressKHR> {
+        match self {
+            Self::DeviceAddress(address) => Ok(VkDeviceOrHostAddressKHR::from(*address)),
+            Self::HostAddressMut(_) => Err(anyhow::Error::msg(
+                "Wrong device address format. Expected: DeviceAddress found: HostAddressMut",
+            )),
+            Self::HostAddressConst(_) => Err(anyhow::Error::msg(
+                "Wrong device address format. Expected: DeviceAddress found: HostAddressConst",
+            )),
+        }
+    }
+
+    #[inline]
+    pub fn device_address_const(&self) -> Result<VkDeviceOrHostAddressConstKHR> {
+        match self {
+            Self::DeviceAddress(address) => Ok(VkDeviceOrHostAddressConstKHR::from(*address)),
+            Self::HostAddressMut(_) => Err(anyhow::Error::msg(
+                "Wrong device address format. Expected: DeviceAddress found: HostAddressMut",
+            )),
+            Self::HostAddressConst(_) => Err(anyhow::Error::msg(
+                "Wrong device address format. Expected: DeviceAddress found: HostAddressConst",
+            )),
+        }
+    }
+
+    #[inline]
+    pub fn host_address_mut(&self) -> Result<VkDeviceOrHostAddressKHR> {
+        match self {
+            Self::DeviceAddress(_) => Err(anyhow::Error::msg(
+                "Wrong device address format. Expected: HostAddressMut found: DeviceAddress",
+            )),
+            Self::HostAddressMut(address) => Ok(*address),
+            Self::HostAddressConst(_) => Err(anyhow::Error::msg(
+                "Wrong device address format. Expected: HostAddressMut found: HostAddressConst",
+            )),
+        }
+    }
+
+    #[inline]
+    pub fn host_address_const(&self) -> Result<VkDeviceOrHostAddressConstKHR> {
+        match self {
+            Self::DeviceAddress(_) => Err(anyhow::Error::msg(
+                "Wrong device address format. Expected: HostAddressConst found: DeviceAddress",
+            )),
+            Self::HostAddressMut(_) => Err(anyhow::Error::msg(
+                "Wrong device address format. Expected: HostAddressConst found: HostAddressMut",
+            )),
+            Self::HostAddressConst(address) => Ok(*address),
+        }
+    }
+
+    #[inline]
+    pub fn is_device_address(&self) -> bool {
+        match self {
+            Self::DeviceAddress(_) => true,
+            _ => false,
+        }
+    }
+
+    #[inline]
+    pub fn is_host_address(&self) -> bool {
+        match self {
+            Self::DeviceAddress(_) => false,
+            Self::HostAddressConst(_) => true,
+            Self::HostAddressMut(_) => true,
+        }
+    }
+
+    #[inline]
+    pub fn is_host_address_mut(&self) -> bool {
+        match self {
+            Self::DeviceAddress(_) => false,
+            Self::HostAddressConst(_) => false,
+            Self::HostAddressMut(_) => true,
+        }
+    }
+
+    #[inline]
+    pub fn is_host_address_const(&self) -> bool {
+        match self {
+            Self::DeviceAddress(_) => false,
+            Self::HostAddressConst(_) => true,
+            Self::HostAddressMut(_) => false,
+        }
+    }
+}
+
+impl<'a, T> From<&'a mut T> for Address {
+    fn from(reference: &'a mut T) -> Self {
+        Self::HostAddressMut(VkDeviceOrHostAddressKHR::from(reference))
+    }
+}
+
+impl<'a, T> From<&'a T> for Address {
+    fn from(reference: &'a T) -> Self {
+        Self::HostAddressConst(VkDeviceOrHostAddressConstKHR::from(reference))
+    }
+}
+
+impl From<VkDeviceAddress> for Address {
+    fn from(device_address: VkDeviceAddress) -> Self {
+        Self::DeviceAddress(device_address)
+    }
+}
+
+impl Into<VkDeviceOrHostAddressKHR> for Address {
+    fn into(self) -> VkDeviceOrHostAddressKHR {
+        match self {
+            Self::DeviceAddress(address) => VkDeviceOrHostAddressKHR::from(address),
+            Self::HostAddressMut(address) => address,
+            Self::HostAddressConst(_) => {
+                panic!("Called Into<VkDeviceOrHostAddressKHR> on HostAddressConst")
+            }
+        }
+    }
+}
+
+impl Into<VkDeviceOrHostAddressConstKHR> for Address {
+    fn into(self) -> VkDeviceOrHostAddressConstKHR {
+        match self {
+            Self::DeviceAddress(address) => VkDeviceOrHostAddressConstKHR::from(address),
+            Self::HostAddressMut(address) => address.into(),
+            Self::HostAddressConst(address) => address,
+        }
+    }
+}
+
+impl Into<VkDeviceAddress> for Address {
+    fn into(self) -> VkDeviceAddress {
+        match self {
+            Self::DeviceAddress(address) => address,
+            Self::HostAddressMut(address) => address.device_address(),
+            Self::HostAddressConst(address) => address.device_address(),
+        }
+    }
+}
+
+impl fmt::Debug for Address {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Address").finish()
+    }
+}
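Editor's note: `Address` unifies the three addressing variants that the KHR acceleration structure API accepts. A minimal sketch of how the conversions compose, assuming the crate prelude is in scope (values are hypothetical):

    fn address_examples(buffer_address: VkDeviceAddress, host_value: &u32) {
        // device-side address, e.g. as returned by Buffer::device_address()
        let device: Address = Address::from(buffer_address);
        assert!(device.is_device_address());

        // host-side const address from any shared reference
        let host: Address = Address::from(host_value);
        assert!(host.is_host_address_const());

        // convert back for FFI structs taking VkDeviceOrHostAddressConstKHR
        let _raw: VkDeviceOrHostAddressConstKHR = device.into();
    }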
diff --git a/vulkan-rs/src/buffer.rs b/vulkan-rs/src/buffer.rs
new file mode 100644
index 0000000..000daf3
--- /dev/null
+++ b/vulkan-rs/src/buffer.rs
@@ -0,0 +1,288 @@
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use std;
+use std::mem;
+use std::sync::Arc;
+
+pub struct BufferBuilder<'a, T> {
+    flags: VkBufferCreateFlagBits,
+    usage: VkBufferUsageFlagBits,
+    memory_usage: Option<MemoryUsage>,
+    sharing_mode: VkSharingMode,
+    data: Option<&'a [T]>,
+    size: VkDeviceSize,
+
+    alignment: Option<VkDeviceSize>,
+}
+
+impl<'a, T> BufferBuilder<'a, T> {
+    pub fn set_memory_usage(mut self, usage: MemoryUsage) -> Self {
+        self.memory_usage = Some(usage);
+
+        self
+    }
+
+    pub fn set_usage(mut self, usage: impl Into<VkBufferUsageFlagBits>) -> Self {
+        self.usage = usage.into();
+
+        self
+    }
+
+    pub fn set_data(mut self, data: &'a [T]) -> Self {
+        self.data = Some(data);
+
+        self
+    }
+
+    pub fn set_size(mut self, size: VkDeviceSize) -> Self {
+        self.size = size;
+
+        self
+    }
+
+    pub fn set_sharing_mode(mut self, sharing_mode: VkSharingMode) -> Self {
+        self.sharing_mode = sharing_mode;
+
+        self
+    }
+
+    pub fn set_flags(mut self, flags: impl Into<VkBufferCreateFlagBits>) -> Self {
+        self.flags = flags.into();
+
+        self
+    }
+
+    pub fn force_alignment(mut self, alignment: VkDeviceSize) -> Self {
+        self.alignment = Some(alignment);
+
+        self
+    }
+}
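// Editor's note: a minimal sketch of the builder above, assuming a
// `device: Arc<Device>` is in scope. `set_data` takes precedence over
// `set_size` (see `build` below), and a zero-sized buffer panics:
//
//     let vertex_data = [0.0f32; 9];
//     let buffer = Buffer::builder()
//         .set_data(&vertex_data)
//         .set_usage(VK_BUFFER_USAGE_VERTEX_BUFFER_BIT)
//         .set_memory_usage(MemoryUsage::CpuToGpu)
//         .build(device.clone())?;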
+impl<'a, T: Clone + Send + Sync + 'static> BufferBuilder<'a, T> {
+    pub fn build(self, device: Arc<Device>) -> Result<Arc<Buffer<T>>> {
+        let size = match self.data {
+            Some(data) => data.len() as VkDeviceSize,
+            None => self.size,
+        };
+
+        if size == 0 {
+            panic!("Vulkan buffer size must not be zero");
+        }
+
+        // create buffer
+        let buffer_ci = VkBufferCreateInfo::new(
+            self.flags,
+            size * mem::size_of::<T>() as VkDeviceSize,
+            self.usage,
+            self.sharing_mode,
+            &[],
+        );
+
+        let buffer = device.create_buffer(&buffer_ci)?;
+
+        // create memory
+        let memory = match self.alignment {
+            Some(alignment) => {
+                let mut memory_requirements = device.buffer_memory_requirements(buffer);
+                memory_requirements.alignment = alignment;
+
+                Memory::forced_requirements(
+                    &device,
+                    memory_requirements,
+                    buffer,
+                    MemoryUsage::into_vma(self.memory_usage),
+                )?
+            }
+            None => {
+                Memory::buffer_memory(&device, buffer, MemoryUsage::into_vma(self.memory_usage))?
+            }
+        };
+
+        let buffer = Arc::new(Buffer {
+            device,
+            buffer,
+            memory,
+
+            _usage: self.usage,
+
+            _sharing_mode: self.sharing_mode,
+
+            size,
+        });
+
+        if let Some(data) = self.data {
+            buffer.fill(data)?;
+        }
+
+        Ok(buffer)
+    }
+}
+
+#[derive(Debug)]
+pub struct Buffer<T> {
+    device: Arc<Device>,
+    buffer: VkBuffer,
+
+    memory: Arc<Memory<T>>,
+
+    _usage: VkBufferUsageFlagBits,
+
+    _sharing_mode: VkSharingMode,
+    size: VkDeviceSize,
+}
+
+impl<T: Clone + Send + Sync + 'static> Buffer<T> {
+    pub fn fill(&self, data: &[T]) -> Result<()> {
+        let mut buffer_map = self.map(data.len() as VkDeviceSize)?;
+
+        buffer_map.copy(data);
+
+        Ok(())
+    }
+
+    pub fn map(&self, length: VkDeviceSize) -> Result<VkMappedMemory<'_, T>> {
+        self.memory.map(length)
+    }
+
+    pub fn map_complete(&self) -> Result<VkMappedMemory<'_, T>> {
+        self.memory.map(self.size)
+    }
+
+    pub fn into_device_local(
+        self: &Arc<Buffer<T>>,
+        buffer_recorder: &mut CommandBufferRecorder<'_>,
+        access_mask: impl Into<VkAccessFlagBits>,
+        stage: impl Into<VkPipelineStageFlagBits>,
+        usage: impl Into<VkBufferUsageFlagBits>,
+    ) -> Result<Arc<Buffer<T>>> {
+        let new_usage = usage.into() | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+        let device_local_buffer = Buffer::builder()
+            .set_memory_usage(MemoryUsage::GpuOnly)
+            .set_usage(new_usage)
+            .set_size(self.size)
+            .build(self.device.clone())?;
+
+        // copy complete buffer
+        buffer_recorder.copy_buffer(
+            self,
+            &device_local_buffer,
+            &[VkBufferCopy {
+                srcOffset: 0,
+                dstOffset: 0,
+                size: self.byte_size(),
+            }],
+        );
+
+        // make sure buffer is copied before using it
+        buffer_recorder.buffer_barrier(
+            &device_local_buffer,
+            VK_ACCESS_TRANSFER_WRITE_BIT,
+            VK_PIPELINE_STAGE_TRANSFER_BIT,
+            access_mask,
+            stage,
+        );
+
+        Ok(device_local_buffer)
+    }
+}
+
+impl<T> Buffer<T> {
+    pub fn builder<'a>() -> BufferBuilder<'a, T> {
+        BufferBuilder {
+            flags: 0u32.into(),
+            usage: 0u32.into(),
+            memory_usage: None,
+            sharing_mode: VK_SHARING_MODE_EXCLUSIVE,
+            data: None,
+            size: 0,
+
+            alignment: None,
+        }
+    }
+
+    pub fn byte_size(&self) -> VkDeviceSize {
+        self.size * mem::size_of::<T>() as VkDeviceSize
+    }
+
+    pub fn size(&self) -> VkDeviceSize {
+        self.size
+    }
+
+    pub fn device_address(&self) -> Address {
+        self.device.get_buffer_device_address(self.buffer)
+    }
+}
+
+impl<T> VulkanDevice for Buffer<T> {
+    fn device(&self) -> &Arc<Device> {
+        &self.device
+    }
+}
+
+impl_vk_handle_t!(Buffer, VkBuffer, buffer);
+
+impl<T> VkHandle<VkDeviceMemory> for Buffer<T> {
+    fn vk_handle(&self) -> VkDeviceMemory {
+        self.memory.vk_handle()
+    }
+}
+
+impl<'a, T> VkHandle<VkDeviceMemory> for &'a Buffer<T> {
+    fn vk_handle(&self) -> VkDeviceMemory {
+        self.memory.vk_handle()
+    }
+}
+
+impl<T> VkHandle<VkDeviceMemory> for Arc<Buffer<T>> {
+    fn vk_handle(&self) -> VkDeviceMemory {
+        self.memory.vk_handle()
+    }
+}
+
+impl<'a, T> VkHandle<VkDeviceMemory> for &'a Arc<Buffer<T>> {
+    fn vk_handle(&self) -> VkDeviceMemory {
+        self.memory.vk_handle()
+    }
+}
+
+impl<T> Drop for Buffer<T> {
+    fn drop(&mut self) {
+        self.device.destroy_buffer(self.buffer);
+    }
+}
+
+// use crate::{ffi::*, handle_ffi_result};
+
+impl<T> FFIBufferTrait for Buffer<T> {
+    fn byte_size(&self) -> VkDeviceSize {
+        self.byte_size()
+    }
+}
+
+pub trait FFIBufferTrait {
+    fn byte_size(&self) -> VkDeviceSize;
+}
+
+pub struct FFIBuffer {
+    trait_obj: Box<dyn FFIBufferTrait>,
+}
+
+impl FFIBuffer {
+    fn byte_size(&self) -> VkDeviceSize {
+        self.trait_obj.byte_size()
+    }
+}
+
+#[no_mangle]
+pub extern "C" fn create_buffer(_device: *const Device) -> *const FFIBuffer {
+    todo!()
+}
+
+#[no_mangle]
+pub extern "C" fn byte_size(buffer: *const FFIBuffer) -> VkDeviceSize {
+    unsafe { &*buffer }.byte_size()
+}
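Editor's note: `into_device_local` implements the usual staging pattern: record a full-buffer copy into a freshly allocated GPU-only buffer, then a barrier handing the data off to its first consumer. A hedged sketch of a caller (the helper name and flag choices are illustrative; constant names assumed from the vulkan-sys prelude):

    // hypothetical helper showing the staging flow with the API above
    fn upload_vertices(
        device: Arc<Device>,
        recorder: &mut CommandBufferRecorder<'_>,
        data: &[f32],
    ) -> Result<Arc<Buffer<f32>>> {
        let staging = Buffer::builder()
            .set_data(data)
            .set_usage(VK_BUFFER_USAGE_TRANSFER_SRC_BIT)
            .set_memory_usage(MemoryUsage::CpuToGpu)
            .build(device)?;

        // records a copy plus a barrier to vertex-attribute reads
        staging.into_device_local(
            recorder,
            VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
            VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
            VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
        )
    }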
diff --git a/vulkan-rs/src/commandbuffer.rs b/vulkan-rs/src/commandbuffer.rs
new file mode 100644
index 0000000..1582b40
--- /dev/null
+++ b/vulkan-rs/src/commandbuffer.rs
@@ -0,0 +1,1090 @@
+use super::{
+    commandpool::{CommandPool, CommandPoolBuilder},
+    pipeline::PipelineType,
+};
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use std::any::Any;
+use std::sync::{
+    atomic::{AtomicUsize, Ordering::SeqCst},
+    Arc, Mutex, MutexGuard,
+};
+
+pub struct QueryEnable {
+    pub query_flags: VkQueryControlFlagBits,
+    pub pipeline_statistics: VkQueryPipelineStatisticFlagBits,
+}
+
+pub struct CommandBufferBuilder {
+    buffer_level: VkCommandBufferLevel,
+    pool_builder: CommandPoolBuilder,
+}
+
+impl CommandBufferBuilder {
+    pub fn set_flags(mut self, flags: impl Into<VkCommandPoolCreateFlagBits>) -> Self {
+        self.pool_builder = self.pool_builder.set_flags(flags);
+
+        self
+    }
+
+    pub fn build(
+        self,
+        device: Arc<Device>,
+        queue: Arc<Mutex<Queue>>,
+    ) -> Result<Arc<CommandBuffer>> {
+        let command_pool = self
+            .pool_builder
+            .set_queue_family_index(
+                queue
+                    .lock()
+                    .map_err(|_| anyhow::Error::msg("Failed locking vulkan queue"))?
+                    .family_index(),
+            )
+            .build(device.clone())?;
+
+        let command_buffer_ci =
+            VkCommandBufferAllocateInfo::new(command_pool.vk_handle(), self.buffer_level, 1);
+
+        let command_buffer = device.allocate_command_buffers(&command_buffer_ci)?[0];
+
+        Ok(Arc::new(CommandBuffer {
+            device,
+            pool: command_pool,
+
+            buffer: command_buffer,
+
+            calls: Arc::new(AtomicUsize::new(0)),
+            stored_handles: Mutex::new(Vec::new()),
+        }))
+    }
+}
+
+#[derive(Debug)]
+pub struct CommandBuffer {
+    device: Arc<Device>,
+    pool: Arc<CommandPool>,
+
+    buffer: VkCommandBuffer,
+
+    calls: Arc<AtomicUsize>,
+    stored_handles: Mutex<Vec<Arc<dyn Any + Send + Sync>>>,
+}
+
+#[derive(Debug)]
+pub struct CommandBufferRecorder<'a> {
+    device: Arc<Device>,
+    pipeline: Option<Arc<Pipeline>>,
+
+    calls: Arc<AtomicUsize>,
+    buffer: VkCommandBuffer,
+    handles_lock: MutexGuard<'a, Vec<Arc<dyn Any + Send + Sync>>>,
+}
+
+impl_vk_handle!(CommandBuffer, VkCommandBuffer, buffer);
+
+impl CommandBuffer {
+    pub fn new_primary() -> CommandBufferBuilder {
+        CommandBufferBuilder {
+            buffer_level: VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+            pool_builder: CommandPool::builder(),
+        }
+    }
+
+    pub fn new_secondary() -> CommandBufferBuilder {
+        CommandBufferBuilder {
+            buffer_level: VK_COMMAND_BUFFER_LEVEL_SECONDARY,
+            pool_builder: CommandPool::builder(),
+        }
+    }
+
+    pub fn reset(&self, flags: impl Into<VkCommandBufferResetFlagBits>) -> Result<()> {
+        self.device.reset_command_buffer(self.buffer, flags)
+    }
+
+    pub fn calls(&self) -> usize {
+        self.calls.load(SeqCst)
+    }
+
+    pub fn begin(&self, begin_info: VkCommandBufferBeginInfo) -> Result<CommandBufferRecorder<'_>> {
+        self.device.begin_command_buffer(self.buffer, &begin_info)?;
+
+        let mut handles_lock = self.stored_handles.lock().unwrap();
+        handles_lock.clear();
+
+        self.calls.store(0, SeqCst);
+
+        Ok(CommandBufferRecorder {
+            device: self.device.clone(),
+            pipeline: None,
+
+            calls: self.calls.clone(),
+            buffer: self.buffer,
+            handles_lock,
+        })
+    }
+
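    // Editor's note: the recorder returned by `begin` holds the stored-handle
    // lock for as long as it lives, so typical usage keeps it in a narrow
    // scope (sketch; the `VkCommandBufferBeginInfo` constructor shape is an
    // assumption about vulkan-sys, not shown in this patch):
    //
    //     {
    //         let mut recorder = command_buffer.begin(begin_info)?;
    //         recorder.bind_pipeline(&pipeline)?;
    //         // ... draw / dispatch / barrier calls ...
    //     } // recorder drops here, releasing the lock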
if access_mask == VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT { + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT + } else if access_mask + == VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT + | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT + { + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT + } else { + unimplemented!("access mask not supported {:?}", access_mask) + } + } + + pub fn inheritance_info( + render_pass: Option<&Arc>, + sub_pass: Option, + framebuffer: Option<&Arc>, + query_enable: Option, + ) -> VkCommandBufferInheritanceInfo { + let mut info = VkCommandBufferInheritanceInfo::new( + match render_pass { + Some(render_pass) => render_pass.vk_handle(), + None => VkRenderPass::NULL_HANDLE, + }, + sub_pass.unwrap_or(0), + match framebuffer { + Some(framebuffer) => framebuffer.vk_handle(), + None => VkFramebuffer::NULL_HANDLE, + }, + ); + + if let Some(query) = query_enable { + info.set_query(true, query.query_flags, query.pipeline_statistics); + } + + info + } +} + +impl<'a> CommandBufferRecorder<'a> { + pub fn pipeline_barrier( + &self, + src_stage_mask: impl Into, + dst_stage_mask: impl Into, + dependency_flags: impl Into, + memory_barriers: &[VkMemoryBarrier], + buffer_memory_barriers: &[VkBufferMemoryBarrier], + image_memory_barriers: &[VkImageMemoryBarrier], + ) { + self.calls.fetch_add(1, SeqCst); + + self.device.cmd_pipeline_barrier( + self.buffer, + src_stage_mask, + dst_stage_mask, + dependency_flags, + memory_barriers, + buffer_memory_barriers, + image_memory_barriers, + ) + } + + pub fn memory_barrier( + &self, + src_access_mask: impl Into, + src_stage: VkPipelineStageFlags, + dst_access_mask: impl Into, + dst_stage: VkPipelineStageFlags, + ) { + self.pipeline_barrier( + src_stage, + dst_stage, + 0, + &[VkMemoryBarrier::new(src_access_mask, dst_access_mask)], + &[], + &[], + ); + } + + pub fn buffer_barrier( + &mut self, + buffer: &Arc>, + src_access_mask: impl Into, + src_stage: impl Into, + dst_access_mask: impl Into, + dst_stage: impl Into, + ) { + self.handles_lock.push(buffer.clone()); + + self.pipeline_barrier( + src_stage, + dst_stage, + 0, + &[], + &[VkBufferMemoryBarrier::new( + src_access_mask, + dst_access_mask, + VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, + buffer.vk_handle(), + 0, + buffer.byte_size(), + )], + &[], + ); + } + + pub fn image_barrier( + &mut self, + image: &Arc, + old_image_layout: VkImageLayout, + src_stage: impl Into, + new_image_layout: VkImageLayout, + dst_stage: impl Into, + ) { + let src_access_mask = Image::src_layout_to_access(old_image_layout); + let dst_access_mask = Image::dst_layout_to_access(new_image_layout); + + self.handles_lock.push(image.clone()); + + self.pipeline_barrier( + src_stage, + dst_stage, + 0, + &[], + &[], + &[VkImageMemoryBarrier::new( + src_access_mask, + dst_access_mask, + old_image_layout, + new_image_layout, + VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, + image.vk_handle(), + image.full_resource_range(), + )], + ); + + image.set_image_layout(new_image_layout); + } + + pub fn image_barrier_auto_stage( + &mut self, + image: &Arc, + old_image_layout: VkImageLayout, + new_image_layout: VkImageLayout, + ) { + let src_access_mask = Image::src_layout_to_access(old_image_layout); + let dst_access_mask = Image::dst_layout_to_access(new_image_layout); + + self.handles_lock.push(image.clone()); + + self.pipeline_barrier( + CommandBuffer::access_to_stage(src_access_mask), + CommandBuffer::access_to_stage(dst_access_mask), + 0, + &[], + &[], + &[VkImageMemoryBarrier::new( + src_access_mask, + dst_access_mask, + 
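+                // In this *_auto_stage variant both access masks and stages are
+                // derived from the old/new layouts (via Image::{src,dst}_layout_to_access
+                // and access_to_stage above), so a caller only supplies the layouts.
+                // A sketch with a hypothetical `image: Arc<Image>`:
+                //
+                //     recorder.image_barrier_auto_stage(
+                //         &image,
+                //         VK_IMAGE_LAYOUT_UNDEFINED,
+                //         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                //     );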
old_image_layout, + new_image_layout, + VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, + image.vk_handle(), + image.full_resource_range(), + )], + ); + + image.set_image_layout(new_image_layout); + } + + pub fn begin_render_pass( + &self, + renderpass_begin_info: VkRenderPassBeginInfo, + subpass_contents: VkSubpassContents, + ) { + self.device + .cmd_begin_render_pass(self.buffer, &renderpass_begin_info, subpass_contents); + } + + pub fn begin_render_pass_full( + &mut self, + render_pass: &Arc, + framebuffer: &Arc, + clear_values: &[VkClearValue], + subpass_contents: VkSubpassContents, + ) { + self.handles_lock.push(render_pass.clone()); + self.handles_lock.push(framebuffer.clone()); + + let render_pass_begin_info = VkRenderPassBeginInfo::new( + render_pass.vk_handle(), + framebuffer.vk_handle(), + VkRect2D { + offset: VkOffset2D { x: 0, y: 0 }, + extent: VkExtent2D { + width: framebuffer.width(), + height: framebuffer.height(), + }, + }, + clear_values, + ); + + self.device + .cmd_begin_render_pass(self.buffer, &render_pass_begin_info, subpass_contents); + } + + pub fn next_subpass(&self, subpass_contents: VkSubpassContents) { + self.device.cmd_next_subpass(self.buffer, subpass_contents); + } + + pub fn end_render_pass(&self) { + self.device.cmd_end_render_pass(self.buffer); + } + + pub fn bind_pipeline(&mut self, pipeline: &Arc) -> Result<()> { + self.handles_lock.push(pipeline.clone()); + + match pipeline.pipeline_type() { + PipelineType::Graphics => self.device.cmd_bind_pipeline( + self.buffer, + VK_PIPELINE_BIND_POINT_GRAPHICS, + pipeline.vk_handle(), + ), + PipelineType::Compute => self.device.cmd_bind_pipeline( + self.buffer, + VK_PIPELINE_BIND_POINT_COMPUTE, + pipeline.vk_handle(), + ), + PipelineType::RayTracing => self.device.cmd_bind_pipeline( + self.buffer, + VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, + pipeline.vk_handle(), + ), + } + + self.pipeline = Some(pipeline.clone()); + + Ok(()) + } + + pub fn execute_commands(&self, command_buffers: &[&impl VkHandle]) { + self.calls.fetch_add(1, SeqCst); + + let buffers: Vec = + command_buffers.iter().map(|cb| cb.vk_handle()).collect(); + + self.device + .cmd_execute_commands(self.buffer, buffers.as_slice()); + } + + pub fn bind_descriptor_sets_minimal(&mut self, descriptor_sets: &[&Arc]) { + self.calls.fetch_add(1, SeqCst); + + let (pipeline_bind_point, vk_layout) = { + let pipeline = match &self.pipeline { + Some(pipeline) => pipeline, + None => panic!("no pipeline in command buffer"), + }; + + let pipe_type = match pipeline.pipeline_type() { + PipelineType::Graphics => VK_PIPELINE_BIND_POINT_GRAPHICS, + PipelineType::Compute => VK_PIPELINE_BIND_POINT_COMPUTE, + PipelineType::RayTracing => VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, + }; + + (pipe_type, pipeline.pipeline_layout().vk_handle()) + }; + + let vk_descriptor_sets: Vec = descriptor_sets + .iter() + .map(|ds: &&Arc| { + self.handles_lock.push((*ds).clone()); + + ds.vk_handle() + }) + .collect(); + + self.device.cmd_bind_descriptor_sets( + self.buffer, + pipeline_bind_point, + vk_layout, + 0, + vk_descriptor_sets.as_slice(), + &[], + ); + } + + pub fn bind_vertex_buffer(&mut self, buffer: &Arc>) { + self.calls.fetch_add(1, SeqCst); + + self.handles_lock.push(buffer.clone()); + + self.device + .cmd_bind_vertex_buffers(self.buffer, 0, &[buffer.vk_handle()], &[0]); + } + + pub fn bind_vertex_buffers_minimal( + &mut self, + buffers: &[&Arc>], + ) { + self.calls.fetch_add(1, SeqCst); + + let vk_buffers: Vec = buffers + .iter() + .map(|b: &&Arc>| { + 
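+                // Each bound buffer is also pushed into `handles_lock`, so the
+                // recorder keeps an Arc to every resource referenced by recorded
+                // commands until the next begin() clears the list; the GPU can
+                // therefore never observe a handle that was already dropped on the
+                // CPU side. The same pattern appears in all bind_*/copy_* methods.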
self.handles_lock.push((*b).clone()); + + b.vk_handle() + }) + .collect(); + + let offsets = vec![0; vk_buffers.len()]; + + self.device.cmd_bind_vertex_buffers( + self.buffer, + 0, + vk_buffers.as_slice(), + offsets.as_slice(), + ); + } + + pub fn bind_index_buffer( + &mut self, + buffer: &Arc>, + offset: VkDeviceSize, + index_type: VkIndexType, + ) { + self.calls.fetch_add(1, SeqCst); + + self.handles_lock.push(buffer.clone()); + + self.device + .cmd_bind_index_buffer(self.buffer, buffer.vk_handle(), offset, index_type); + } + + pub fn set_viewport(&self, viewports: &[VkViewport]) { + self.device.cmd_set_viewport(self.buffer, 0, viewports); + } + + pub fn set_scissor(&self, scissors: &[VkRect2D]) { + self.device.cmd_set_scissor(self.buffer, 0, scissors); + } + + pub fn draw( + &self, + vertex_count: u32, + instance_count: u32, + first_vertex: u32, + first_instance: u32, + ) { + self.calls.fetch_add(1, SeqCst); + + self.device.cmd_draw( + self.buffer, + vertex_count, + instance_count, + first_vertex, + first_instance, + ); + } + + pub fn draw_complete_single_instance(&self, vertex_count: u32) { + self.calls.fetch_add(1, SeqCst); + + self.device.cmd_draw(self.buffer, vertex_count, 1, 0, 0); + } + + pub fn draw_indexed( + &self, + index_count: u32, + instance_count: u32, + first_index: u32, + vertex_offset: i32, + first_instance: u32, + ) { + self.calls.fetch_add(1, SeqCst); + + self.device.cmd_draw_indexed( + self.buffer, + index_count, + instance_count, + first_index, + vertex_offset, + first_instance, + ); + } + + pub fn draw_indexed_complete_single_instance(&self, index_count: u32) { + self.calls.fetch_add(1, SeqCst); + + self.device + .cmd_draw_indexed(self.buffer, index_count, 1, 0, 0, 0); + } + + pub fn push_constants(&self, stage_flags: impl Into, data: &U) { + self.calls.fetch_add(1, SeqCst); + + let pipeline = match &self.pipeline { + Some(pipeline) => pipeline, + None => panic!("no pipeline in command buffer"), + }; + + let layout = pipeline.pipeline_layout(); + + self.device + .cmd_push_constants(self.buffer, layout.vk_handle(), stage_flags, 0, data); + } + + pub fn set_image_layout( + &mut self, + image: &Arc, + new_image_layout: VkImageLayout, + subresource_range: VkImageSubresourceRange, + ) { + let src_access = Image::src_layout_to_access(image.image_layout()); + let dst_access = Image::dst_layout_to_access(new_image_layout); + + self.handles_lock.push(image.clone()); + + self.pipeline_barrier( + CommandBuffer::access_to_stage(src_access), + CommandBuffer::access_to_stage(dst_access), + 0, + &[], + &[], + &[VkImageMemoryBarrier::new( + src_access, + dst_access, + image.image_layout(), + new_image_layout, + VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, + image.vk_handle(), + subresource_range, + )], + ); + + image.set_image_layout(new_image_layout); + } + + pub fn set_full_image_layout(&mut self, image: &Arc, new_image_layout: VkImageLayout) { + let src_access = Image::src_layout_to_access(image.image_layout()); + let dst_access = Image::dst_layout_to_access(new_image_layout); + + self.handles_lock.push(image.clone()); + + self.pipeline_barrier( + CommandBuffer::access_to_stage(src_access), + CommandBuffer::access_to_stage(dst_access), + 0, + &[], + &[], + &[VkImageMemoryBarrier::new( + src_access, + dst_access, + image.image_layout(), + new_image_layout, + VK_QUEUE_FAMILY_IGNORED, + VK_QUEUE_FAMILY_IGNORED, + image.vk_handle(), + image.full_resource_range(), + )], + ); + + image.set_image_layout(new_image_layout); + } + + // TODO: + pub fn set_line_width(&self) { + 
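+        // The setters below are placeholders; once implemented they would forward
+        // to the matching vkCmdSet* entry points, mirroring how set_viewport and
+        // set_scissor above wrap cmd_set_viewport/cmd_set_scissor.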
unimplemented!(); + } + + pub fn set_depth_bias(&self) { + unimplemented!(); + } + + pub fn set_blend_constants(&self) { + unimplemented!(); + } + + pub fn set_depth_bounds(&self) { + unimplemented!(); + } + + pub fn set_stencil_compare_mask(&self) { + unimplemented!(); + } + + pub fn set_stencil_write_mask(&self) { + unimplemented!(); + } + + pub fn set_stencil_reference(&self) { + unimplemented!(); + } + + pub fn draw_indirect(&self) { + unimplemented!(); + } + + pub fn draw_indexed_indirect(&self) { + unimplemented!(); + } + + pub fn dispatch(&self, x: u32, y: u32, z: u32) { + self.calls.fetch_add(1, SeqCst); + + self.device.cmd_dispatch(self.buffer, x, y, z); + } + + pub fn dispatch_indirect(&self) { + unimplemented!(); + } + + pub fn copy_buffer( + &mut self, + src_buffer: &Arc>, + dst_buffer: &Arc>, + regions: &[VkBufferCopy], + ) { + self.calls.fetch_add(1, SeqCst); + + self.handles_lock.push(src_buffer.clone()); + self.handles_lock.push(dst_buffer.clone()); + + self.device.cmd_copy_buffer( + self.buffer, + src_buffer.vk_handle(), + dst_buffer.vk_handle(), + regions, + ); + } + + pub fn copy_image( + &mut self, + src_image: &Arc, + dst_image: &Arc, + src_layout: VkImageLayout, + dst_layout: VkImageLayout, + regions: &[VkImageCopy], + ) { + self.calls.fetch_add(1, SeqCst); + + self.handles_lock.push(src_image.clone()); + self.handles_lock.push(dst_image.clone()); + + self.device.cmd_copy_image( + self.buffer, + src_image.vk_handle(), + src_layout, + dst_image.vk_handle(), + dst_layout, + regions, + ); + } + + pub fn blit_complete( + &mut self, + src_image: &Arc, + dst_image: &Arc, + filter: VkFilter, + ) { + self.handles_lock.push(src_image.clone()); + self.handles_lock.push(dst_image.clone()); + + let image_blit = VkImageBlit { + srcSubresource: src_image.full_resource_layers(), + srcOffsets: [ + VkOffset3D { x: 0, y: 0, z: 0 }, + VkOffset3D { + x: src_image.width() as i32, + y: src_image.height() as i32, + z: 1, + }, + ], + dstSubresource: dst_image.full_resource_layers(), + dstOffsets: [ + VkOffset3D { x: 0, y: 0, z: 0 }, + VkOffset3D { + x: dst_image.width() as i32, + y: dst_image.height() as i32, + z: 1, + }, + ], + }; + + self.blit_image( + src_image, + dst_image, + src_image.image_layout(), + dst_image.image_layout(), + &[image_blit], + filter, + ); + } + + pub fn blit_image( + &mut self, + src_image: &Arc, + dst_image: &Arc, + src_layout: VkImageLayout, + dst_layout: VkImageLayout, + regions: &[VkImageBlit], + filter: VkFilter, + ) { + self.calls.fetch_add(1, SeqCst); + + self.handles_lock.push(src_image.clone()); + self.handles_lock.push(dst_image.clone()); + + self.device.cmd_blit_image( + self.buffer, + src_image.vk_handle(), + src_layout, + dst_image.vk_handle(), + dst_layout, + regions, + filter, + ); + } + + pub fn copy_buffer_to_image( + &mut self, + src_buffer: &Arc>, + dst_image: &Arc, + image_layout: VkImageLayout, + regions: &[VkBufferImageCopy], + ) { + self.calls.fetch_add(1, SeqCst); + + self.handles_lock.push(src_buffer.clone()); + self.handles_lock.push(dst_image.clone()); + + self.device.cmd_copy_buffer_to_image( + self.buffer, + src_buffer.vk_handle(), + dst_image.vk_handle(), + image_layout, + regions, + ); + } + + pub fn copy_image_to_buffer( + &mut self, + src_image: &Arc, + image_layout: VkImageLayout, + dst_buffer: &Arc>, + regions: &[VkBufferImageCopy], + ) { + self.calls.fetch_add(1, SeqCst); + + self.handles_lock.push(src_image.clone()); + self.handles_lock.push(dst_buffer.clone()); + + self.device.cmd_copy_image_to_buffer( + self.buffer, + 
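+            // Typical readback sketch, assuming a hypothetical `image`, a
+            // host-visible `staging_buffer`, and a prepared VkBufferImageCopy
+            // `region` (none of these names are part of this patch):
+            //
+            //     recorder.copy_image_to_buffer(
+            //         &image,
+            //         VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+            //         &staging_buffer,
+            //         &[region],
+            //     );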
src_image.vk_handle(), + image_layout, + dst_buffer.vk_handle(), + regions, + ) + } + + pub fn update_buffer(&self) { + unimplemented!(); + } + + pub fn fill_buffer(&self) { + unimplemented!(); + } + + pub fn clear_color_image(&mut self, image: &Arc, clear_color: VkClearColorValue) { + self.calls.fetch_add(1, SeqCst); + + self.handles_lock.push(image.clone()); + + self.device.cmd_clear_color_image( + self.buffer, + image.vk_handle(), + image.image_layout(), + clear_color, + &[image.full_resource_range()], + ); + } + + pub fn clear_depth_stencil_image(&self) { + unimplemented!(); + } + + pub fn clear_attachments(&self) { + unimplemented!(); + } + + pub fn resolve_image( + &mut self, + src_image: &Arc, + dst_image: &Arc, + regions: &[VkImageResolve], + ) { + self.calls.fetch_add(1, SeqCst); + + self.handles_lock.push(src_image.clone()); + self.handles_lock.push(dst_image.clone()); + + self.device.cmd_resolve_image( + self.buffer, + src_image.vk_handle(), + src_image.image_layout(), + dst_image.vk_handle(), + dst_image.image_layout(), + regions, + ); + } + + pub fn set_event(&self) { + unimplemented!(); + } + + pub fn reset_event(&self) { + unimplemented!(); + } + + pub fn wait_events(&self) { + unimplemented!(); + } + + pub fn begin_query(&self) { + unimplemented!(); + } + + pub fn end_query(&self) { + unimplemented!(); + } + + pub fn reset_query_pool(&self) { + unimplemented!(); + } + + pub fn write_timestamp( + &mut self, + query_pool: &Arc, + query: u32, + pipeline_stage: impl Into, + ) { + self.calls.fetch_add(1, SeqCst); + + self.handles_lock.push(query_pool.clone()); + + self.device + .cmd_write_timestamp(self.buffer, pipeline_stage, query_pool.vk_handle(), query); + } + + pub fn copy_query_pool_results(&self) { + unimplemented!(); + } +} + +impl<'a> CommandBufferRecorder<'a> { + pub fn build_acceleration_structure_indirect( + &mut self, + infos: &[VkAccelerationStructureBuildGeometryInfoKHR], + indirect_buffers: &[Arc>], + indirect_strides: &[u32], + max_primitive_counts: &[&u32], + ) { + let mut device_addresses: Vec = Vec::with_capacity(indirect_buffers.len()); + + for indirect_buffer in indirect_buffers.iter() { + self.handles_lock.push(indirect_buffer.clone()); + device_addresses.push(indirect_buffer.device_address().into()); + } + + self.device.cmd_build_acceleration_structure_indirect( + self.buffer, + infos, + &device_addresses, + indirect_strides, + max_primitive_counts, + ); + } + + pub fn build_acceleration_structures( + &self, + infos: &[VkAccelerationStructureBuildGeometryInfoKHR], + range_infos: &[&[VkAccelerationStructureBuildRangeInfoKHR]], + ) { + self.device + .cmd_build_acceleration_structures(self.buffer, infos, range_infos); + } + + pub fn copy_acceleration_structure( + &mut self, + src: &Arc, + dst: &Arc, + mode: VkCopyAccelerationStructureModeKHR, + ) { + self.handles_lock.push(src.clone()); + self.handles_lock.push(dst.clone()); + + let info = VkCopyAccelerationStructureInfoKHR::new(src.vk_handle(), dst.vk_handle(), mode); + + self.device + .cmd_copy_acceleration_structure(self.buffer, &info); + } + + pub fn copy_acceleration_structure_to_memory( + &mut self, + src: &Arc, + dst: VkDeviceOrHostAddressKHR, + mode: VkCopyAccelerationStructureModeKHR, + ) { + self.handles_lock.push(src.clone()); + + let info = VkCopyAccelerationStructureToMemoryInfoKHR::new(src.vk_handle(), dst, mode); + + self.device + .cmd_copy_acceleration_structure_to_memory(self.buffer, &info); + } + + pub fn copy_memory_to_acceleration_structure( + &mut self, + src: 
VkDeviceOrHostAddressConstKHR, + dst: &Arc, + mode: VkCopyAccelerationStructureModeKHR, + ) { + self.handles_lock.push(dst.clone()); + + let info = VkCopyMemoryToAccelerationStructureInfoKHR::new(src, dst.vk_handle(), mode); + + self.device + .cmd_copy_memory_to_acceleration_structure(self.buffer, &info); + } + + pub fn trace_rays_indirect( + &mut self, + sbt: ShaderBindingTable, + buffer: Arc>, + ) { + self.handles_lock.push(buffer.clone()); + + self.device.cmd_trace_rays_indirect( + self.buffer, + sbt.raygen_shader_binding_table(), + sbt.miss_shader_binding_table(), + sbt.hit_shader_binding_table(), + sbt.callable_shader_binding_table(), + buffer.device_address().into(), + ) + } + + pub fn trace_rays(&self, sbt: &ShaderBindingTable, width: u32, height: u32, depth: u32) { + self.device.cmd_trace_rays( + self.buffer, + sbt.raygen_shader_binding_table(), + sbt.miss_shader_binding_table(), + sbt.hit_shader_binding_table(), + sbt.callable_shader_binding_table(), + width, + height, + depth, + ) + } + + pub fn write_acceleration_structure_properties( + &mut self, + acceleration_structures: &[&Arc], + query_type: VkQueryType, + query_pool: &Arc, + first_query: u32, + ) { + self.handles_lock.push(query_pool.clone()); + + let as_handles: Vec = acceleration_structures + .iter() + .map(|a| { + self.handles_lock.push((*a).clone()); + a.vk_handle() + }) + .collect(); + + self.device.cmd_write_acceleration_structure_properties( + self.buffer, + &as_handles, + query_type, + query_pool.vk_handle(), + first_query, + ) + } +} + +impl VulkanDevice for CommandBuffer { + fn device(&self) -> &Arc { + &self.device + } +} + +impl Drop for CommandBuffer { + fn drop(&mut self) { + self.device + .free_command_buffers(self.pool.vk_handle(), &[self.buffer]); + } +} + +impl<'a> Drop for CommandBufferRecorder<'a> { + fn drop(&mut self) { + self.device.end_command_buffer(self.buffer).unwrap() + } +} + +// ========================================================================================== +// ======================================== FFI ============================================= +// ========================================================================================== + +// use crate::{ffi::*, handle_ffi_result}; + +// #[no_mangle] +// pub extern "C" fn allocate_primary_buffer( +// flags: VkCommandPoolCreateFlagBits, +// device: *const Device, +// queue: *const Queue, +// ) -> *const CommandBuffer { +// handle_ffi_result!(CommandBuffer::new_primary() +// .set_flags(flags) +// .build(unsafe { Arc::from_raw(device) }, unsafe { +// Arc::from_raw(queue) +// })) +// } + +// #[no_mangle] +// pub extern "C" fn allocate_secondary_buffer( +// flags: VkCommandPoolCreateFlagBits, +// device: *const Device, +// queue: *const Queue, +// ) -> *const CommandBuffer { +// handle_ffi_result!(CommandBuffer::new_secondary() +// .set_flags(flags) +// .build(unsafe { Arc::from_raw(device) }, unsafe { +// Arc::from_raw(queue) +// })) +// } diff --git a/vulkan-rs/src/commandpool.rs b/vulkan-rs/src/commandpool.rs new file mode 100644 index 0000000..cb01f64 --- /dev/null +++ b/vulkan-rs/src/commandpool.rs @@ -0,0 +1,88 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::sync::Arc; + +pub(crate) struct CommandPoolBuilder { + flags: VkCommandPoolCreateFlagBits, + queue_family_index: u32, +} + +impl CommandPoolBuilder { + pub(crate) fn set_flags(mut self, flags: impl Into) -> Self { + self.flags = flags.into(); + + self + } + + pub(crate) fn set_queue_family_index(mut self, queue_family_index: u32) -> Self { + 
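+        // Rarely called by user code: CommandBufferBuilder::build (commandbuffer.rs)
+        // fills the queue family index in from the queue it receives, and
+        // CommandPool::builder below seeds default creation flags.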
+        self.queue_family_index = queue_family_index;
+
+        self
+    }
+
+    pub(crate) fn build(self, device: Arc<Device>) -> Result<Arc<CommandPool>> {
+        let command_pool_ci = VkCommandPoolCreateInfo::new(self.flags, self.queue_family_index);
+
+        let command_pool = device.create_command_pool(&command_pool_ci)?;
+
+        Ok(Arc::new(CommandPool {
+            device,
+            command_pool,
+        }))
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct CommandPool {
+    device: Arc<Device>,
+    command_pool: VkCommandPool,
+}
+
+impl CommandPool {
+    pub(crate) fn builder() -> CommandPoolBuilder {
+        CommandPoolBuilder {
+            flags: VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT
+                | VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,
+            queue_family_index: 0,
+        }
+    }
+}
+
+impl VulkanDevice for CommandPool {
+    fn device(&self) -> &Arc<Device> {
+        &self.device
+    }
+}
+
+impl_vk_handle!(CommandPool, VkCommandPool, command_pool);
+
+impl Drop for CommandPool {
+    fn drop(&mut self) {
+        self.device.destroy_command_pool(self.command_pool);
+    }
+}
+
+// use crate::{ffi::*, handle_ffi_result};
+
+// #[no_mangle]
+// pub extern "C" fn create_command_pool(
+//     flags: VkCommandPoolCreateFlagBits,
+//     queue_family_index: u32,
+//     device: *const Device,
+// ) -> *const CommandPool {
+//     let device = unsafe { Arc::from_raw(device) };
+
+//     let pool_res = CommandPool::builder()
+//         .set_flags(flags)
+//         .set_queue_family_index(queue_family_index)
+//         .build(device);
+
+//     handle_ffi_result!(pool_res)
+// }
+
+// #[no_mangle]
+// pub extern "C" fn destroy_command_pool(command_pool: *const CommandPool) {
+//     let _pool = unsafe { Arc::from_raw(command_pool) };
+// }
diff --git a/vulkan-rs/src/deferred_operation.rs b/vulkan-rs/src/deferred_operation.rs
new file mode 100644
index 0000000..0d36cfe
--- /dev/null
+++ b/vulkan-rs/src/deferred_operation.rs
@@ -0,0 +1,62 @@
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use std::sync::Arc;
+
+pub enum DeferredOperationResult {
+    Success,
+    Pending,
+    OperationResult(VkResult),
+}
+
+pub struct DeferredOperation {
+    device: Arc<Device>,
+
+    deferred_operation: VkDeferredOperationKHR,
+}
+
+impl DeferredOperation {
+    pub fn new(device: Arc<Device>) -> Result<Arc<DeferredOperation>> {
+        let deferred_operation = device.create_deferred_operation(None)?;
+
+        Ok(Arc::new(DeferredOperation {
+            device,
+            deferred_operation,
+        }))
+    }
+
+    pub fn max_concurrency(&self) -> u32 {
+        self.device
+            .get_deferred_operation_max_concurrency(self.deferred_operation)
+    }
+
+    pub fn result(&self) -> DeferredOperationResult {
+        let result = self
+            .device
+            .get_deferred_operation_result(self.deferred_operation);
+
+        match result {
+            VK_SUCCESS => DeferredOperationResult::Success,
+            VK_NOT_READY => DeferredOperationResult::Pending,
+            _ => DeferredOperationResult::OperationResult(result),
+        }
+    }
+
+    pub fn join(&self) -> DeferredOperationResult {
+        let result = self.device.deferred_operation_join(self.deferred_operation);
+
+        match result {
+            VK_SUCCESS => DeferredOperationResult::Success,
+            VK_THREAD_IDLE_KHR | VK_THREAD_DONE_KHR => DeferredOperationResult::Pending,
+            _ => DeferredOperationResult::OperationResult(result),
+        }
+    }
+}
+
+impl Drop for DeferredOperation {
+    fn drop(&mut self) {
+        self.device
+            .destroy_deferred_operation(self.deferred_operation, None);
+    }
+}
diff --git a/vulkan-rs/src/descriptorpool.rs b/vulkan-rs/src/descriptorpool.rs
new file mode 100644
index 0000000..5227115
--- /dev/null
+++ b/vulkan-rs/src/descriptorpool.rs
@@ -0,0 +1,160 @@
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use std::sync::Arc;
+
+pub struct DescriptorPoolBuilder {
+    layout: Option<Arc<DescriptorSetLayout>>,
+    descriptor_count: u32,
+    flags:
VkDescriptorPoolCreateFlagBits, +} + +impl DescriptorPoolBuilder { + pub fn set_flags(mut self, flags: impl Into) -> Self { + self.flags |= flags.into(); + + self + } + + pub fn set_descriptor_set_count(mut self, count: u32) -> Self { + self.descriptor_count = count; + + self + } + + pub fn set_layout(mut self, layout: Arc) -> Self { + self.layout = Some(layout); + + self + } + + pub fn build(self, device: Arc) -> Result> { + if cfg!(debug_assertions) { + if self.layout.is_none() { + panic!("no layout set!"); + } + + if self.descriptor_count == 0 { + panic!("descriptor count must be greater than 0"); + } + } + + let layout = self.layout.expect("descriptor set layout was not set!"); + + let descriptor_pool_ci = + VkDescriptorPoolCreateInfo::new(self.flags, self.descriptor_count, layout.pool_sizes()); + + let descriptor_pool = device.create_descriptor_pool(&descriptor_pool_ci)?; + + Ok(Arc::new(DescriptorPool { + device, + descriptor_pool, + descriptor_set_layout: layout, + })) + } +} + +#[derive(Debug)] +pub struct DescriptorPool { + device: Arc, + descriptor_pool: VkDescriptorPool, + descriptor_set_layout: Arc, +} + +impl DescriptorPool { + pub fn builder() -> DescriptorPoolBuilder { + DescriptorPoolBuilder { + layout: None, + descriptor_count: 1, + flags: VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.into(), + } + } + + pub fn reset(&self) -> Result<()> { + self.device + .reset_descriptor_pool(self.descriptor_pool, VK_DESCRIPTOR_POOL_RESET_NULL_BIT) + } + + pub fn prepare_set(self: &Arc) -> DescriptorSetBuilder { + DescriptorSet::builder(self.device.clone(), self.clone()) + } +} + +impl VulkanDevice for DescriptorPool { + fn device(&self) -> &Arc { + &self.device + } +} + +impl_vk_handle!(DescriptorPool, VkDescriptorPool, descriptor_pool); + +impl VkHandle for DescriptorPool { + fn vk_handle(&self) -> VkDescriptorSetLayout { + self.descriptor_set_layout.vk_handle() + } +} + +impl<'a> VkHandle for &'a DescriptorPool { + fn vk_handle(&self) -> VkDescriptorSetLayout { + self.descriptor_set_layout.vk_handle() + } +} + +impl VkHandle for Arc { + fn vk_handle(&self) -> VkDescriptorSetLayout { + self.descriptor_set_layout.vk_handle() + } +} + +impl<'a> VkHandle for &'a Arc { + fn vk_handle(&self) -> VkDescriptorSetLayout { + self.descriptor_set_layout.vk_handle() + } +} + +impl Drop for DescriptorPool { + fn drop(&mut self) { + self.device.destroy_descriptor_pool(self.descriptor_pool); + } +} + +use crate::{ffi::*, handle_ffi_result}; + +#[no_mangle] +pub extern "C" fn create_descriptor_pool( + flags: VkDescriptorPoolCreateFlagBits, + descriptor_count: u32, + descriptor_set_layout: *const DescriptorSetLayout, + device: *const Device, +) -> *const DescriptorPool { + let device = unsafe { Arc::from_raw(device) }; + let layout = unsafe { Arc::from_raw(descriptor_set_layout) }; + + let pool_res = DescriptorPool::builder() + .set_flags(flags) + .set_descriptor_set_count(descriptor_count) + .set_layout(layout) + .build(device); + + handle_ffi_result!(pool_res) +} + +#[no_mangle] +pub extern "C" fn reset_descriptor_pool(descriptor_pool: *const DescriptorPool) -> bool { + let pool = unsafe { Arc::from_raw(descriptor_pool) }; + + match pool.reset() { + Ok(_) => true, + Err(err) => { + update_last_error(err); + + false + } + } +} + +#[no_mangle] +pub extern "C" fn destroy_descriptor_pool(descriptor_pool: *const DescriptorPool) { + let _pool = unsafe { Arc::from_raw(descriptor_pool) }; +} diff --git a/vulkan-rs/src/descriptorset.rs b/vulkan-rs/src/descriptorset.rs new file mode 100644 index 
0000000..9d80531 --- /dev/null +++ b/vulkan-rs/src/descriptorset.rs @@ -0,0 +1,319 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::any::Any; +use std::collections::HashMap; +use std::slice; +use std::sync::{Arc, Mutex}; + +#[derive(Debug)] +pub struct DescriptorWrite { + binding: u32, + descriptor_type: VkDescriptorType, + inner: InnerWrite, + handles: Vec>, +} + +#[derive(Debug)] +enum InnerWrite { + Buffers(Vec), + Images(Vec), + AS( + ( + VkWriteDescriptorSetAccelerationStructureKHR, + Vec, + ), + ), +} + +impl DescriptorWrite { + pub fn uniform_buffers( + binding: u32, + buffers: &[&Arc>], + ) -> Self { + DescriptorWrite { + binding, + descriptor_type: VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, + inner: InnerWrite::Buffers( + buffers + .iter() + .map(|buffer| VkDescriptorBufferInfo { + buffer: buffer.vk_handle(), + offset: 0, + range: buffer.byte_size(), + }) + .collect(), + ), + handles: buffers + .iter() + .map(|b| (*b).clone() as Arc) + .collect(), + } + } + + pub fn storage_buffers( + binding: u32, + buffers: &[&Arc>], + ) -> Self { + DescriptorWrite { + binding, + descriptor_type: VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, + inner: InnerWrite::Buffers( + buffers + .iter() + .map(|buffer| VkDescriptorBufferInfo { + buffer: buffer.vk_handle(), + offset: 0, + range: buffer.byte_size(), + }) + .collect(), + ), + handles: buffers + .iter() + .map(|b| (*b).clone() as Arc) + .collect(), + } + } + + pub fn combined_samplers(binding: u32, images: &[&Arc]) -> Self { + DescriptorWrite { + binding, + descriptor_type: VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, + inner: InnerWrite::Images( + images + .iter() + .map(|image| VkDescriptorImageInfo { + sampler: image + .sampler() + .as_ref() + .expect("image has no sampler attached") + .vk_handle(), + imageView: image.vk_handle(), + imageLayout: image.image_layout(), + }) + .collect(), + ), + handles: images + .iter() + .map(|i| (*i).clone() as Arc) + .collect(), + } + } + + pub fn storage_images(binding: u32, images: &[&Arc]) -> Self { + DescriptorWrite { + binding, + descriptor_type: VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, + inner: InnerWrite::Images( + images + .iter() + .map(|image| VkDescriptorImageInfo { + sampler: VkSampler::NULL_HANDLE, + imageView: image.vk_handle(), + imageLayout: image.image_layout(), + }) + .collect(), + ), + handles: images + .iter() + .map(|i| (*i).clone() as Arc) + .collect(), + } + } + + pub fn acceleration_structures( + binding: u32, + acceleration_structures: &[&Arc], + ) -> Self { + let vk_as: Vec = acceleration_structures + .iter() + .map(|a| a.vk_handle()) + .collect(); + + let mut write = DescriptorWrite { + binding, + descriptor_type: VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, + inner: InnerWrite::AS(( + VkWriteDescriptorSetAccelerationStructureKHR::default(), + vk_as, + )), + handles: acceleration_structures + .iter() + .map(|a| (*a).clone() as Arc) + .collect(), + }; + + if let InnerWrite::AS((vk_write_as, vk_as)) = &mut write.inner { + vk_write_as.set_acceleration_structures(vk_as); + } + + write + } + + pub fn change_image_layout(mut self, image_layout: VkImageLayout) -> Self { + if let InnerWrite::Images(ref mut infos) = self.inner { + for info in infos { + info.imageLayout = image_layout; + } + } + + self + } + + fn vk_write(&self, write: &mut VkWriteDescriptorSet) { + match &self.inner { + InnerWrite::Buffers(buffer_infos) => { + write.set_buffer_infos(buffer_infos); + } + InnerWrite::Images(image_infos) => { + write.set_image_infos(image_infos); + } + InnerWrite::AS((as_write, _)) => { + 
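+                // Acceleration-structure descriptors carry no buffer/image info;
+                // instead the KHR write struct is chained into the pNext of the
+                // VkWriteDescriptorSet below, and descriptorCount mirrors the
+                // number of acceleration structures being written.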
write.descriptorCount = as_write.accelerationStructureCount; + write.chain(as_write); + } + } + } +} + +pub struct DescriptorSetBuilder { + device: Arc, + descriptor_pool: Arc, + variable_desc_counts: Vec, + variable_descriptor_count: VkDescriptorSetVariableDescriptorCountAllocateInfoEXT, +} + +impl DescriptorSetBuilder { + pub fn set_variable_descriptor_counts(mut self, descriptor_counts: &[u32]) -> Self { + self.variable_desc_counts = descriptor_counts.to_vec(); + + self + } + + pub fn allocate(mut self) -> Result> { + let layout = self.descriptor_pool.vk_handle(); + + let mut descriptor_set_ci = VkDescriptorSetAllocateInfo::new( + self.descriptor_pool.vk_handle(), + slice::from_ref(&layout), + ); + + if !self.variable_desc_counts.is_empty() { + self.variable_descriptor_count + .set_descriptor_counts(&self.variable_desc_counts); + descriptor_set_ci.chain(&self.variable_descriptor_count); + } + + let descriptor_set = self.device.allocate_descriptor_sets(&descriptor_set_ci)?[0]; + + Ok(Arc::new(DescriptorSet { + device: self.device, + pool: self.descriptor_pool, + descriptor_set, + + handles: Mutex::new(HashMap::new()), + })) + } +} + +#[derive(Debug)] +pub struct DescriptorSet { + device: Arc, + pool: Arc, + descriptor_set: VkDescriptorSet, + + handles: Mutex>>>, +} + +impl DescriptorSet { + pub(crate) fn builder( + device: Arc, + descriptor_pool: Arc, + ) -> DescriptorSetBuilder { + DescriptorSetBuilder { + device, + descriptor_pool, + variable_desc_counts: Vec::new(), + variable_descriptor_count: VkDescriptorSetVariableDescriptorCountAllocateInfoEXT::new( + &[], + ), + } + } + + // TODO: add update function for VkCopyDescriptorSet + pub fn update(&self, writes: &[DescriptorWrite]) -> Result<()> { + debug_assert!(!writes.is_empty()); + + let mut vk_writes = Vec::new(); + let mut handles_lock = self.handles.lock().unwrap(); + + for write in writes { + let mut write_desc = VkWriteDescriptorSet::new( + self.descriptor_set, + write.binding, + 0, + write.descriptor_type, + ); + + write.vk_write(&mut write_desc); + + vk_writes.push(write_desc); + + match handles_lock.get_mut(&write.binding) { + Some(val) => *val = write.handles.clone(), + None => { + handles_lock.insert(write.binding, write.handles.clone()); + } + } + } + + self.device + .update_descriptor_sets(vk_writes.as_slice(), &[]); + + Ok(()) + } +} + +impl VulkanDevice for DescriptorSet { + fn device(&self) -> &Arc { + &self.device + } +} + +impl_vk_handle!(DescriptorSet, VkDescriptorSet, descriptor_set); + +impl VkHandle for DescriptorSet { + fn vk_handle(&self) -> VkDescriptorSetLayout { + self.pool.vk_handle() + } +} + +impl<'a> VkHandle for &'a DescriptorSet { + fn vk_handle(&self) -> VkDescriptorSetLayout { + self.pool.vk_handle() + } +} + +impl VkHandle for Arc { + fn vk_handle(&self) -> VkDescriptorSetLayout { + self.pool.vk_handle() + } +} + +impl<'a> VkHandle for &'a Arc { + fn vk_handle(&self) -> VkDescriptorSetLayout { + self.pool.vk_handle() + } +} + +impl Drop for DescriptorSet { + fn drop(&mut self) { + if let Err(error) = self + .device + .free_descriptor_sets(self.pool.vk_handle(), &[self.descriptor_set]) + { + println!("{}", error); + } + } +} diff --git a/vulkan-rs/src/descriptorsetlayout.rs b/vulkan-rs/src/descriptorsetlayout.rs new file mode 100644 index 0000000..0535cfe --- /dev/null +++ b/vulkan-rs/src/descriptorsetlayout.rs @@ -0,0 +1,130 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::sync::Arc; + +pub struct DescriptorSetLayoutBuilder { + layout_bindings: Vec, + indexing_flags: Vec, + flags: 
VkDescriptorSetLayoutCreateFlagBits, +} + +impl DescriptorSetLayoutBuilder { + pub fn add_layout_binding( + mut self, + binding: u32, + descriptor_type: VkDescriptorType, + stage_flags: impl Into, + indexing_flags: impl Into, + ) -> Self { + self.layout_bindings.push(VkDescriptorSetLayoutBinding::new( + binding, + descriptor_type, + stage_flags, + )); + + let flags = indexing_flags.into(); + self.indexing_flags.push(flags); + + if (flags & VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT) != 0 { + self.flags |= VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT; + } + + self + } + + pub fn change_descriptor_count(mut self, count: u32) -> Self { + if let Some(binding) = self.layout_bindings.last_mut() { + binding.descriptorCount = count; + } + + self + } + + pub fn set_flags(mut self, flags: impl Into) -> Self { + self.flags = flags.into(); + + self + } + + pub fn build(self, device: Arc) -> Result> { + let mut descriptor_set_ci = + VkDescriptorSetLayoutCreateInfo::new(self.flags, &self.layout_bindings); + let binding_flags_ci = + VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::new(&self.indexing_flags); + + if device.enabled_extensions().descriptor_indexing { + descriptor_set_ci.chain(&binding_flags_ci); + + /* + if device.enabled_extensions().maintenance3 { + let mut layout_support = VkDescriptorSetLayoutSupport::default(); + let variable_support = + VkDescriptorSetVariableDescriptorCountLayoutSupportEXT::default(); + + layout_support.chain(&variable_support); + + device.descriptor_set_layout_support(&descriptor_set_ci, &mut layout_support); + } + */ + } + + let descriptor_set_layout = device.create_descriptor_set_layout(&descriptor_set_ci)?; + + let mut pool_sizes = Vec::new(); + + for layout_binding in &self.layout_bindings { + pool_sizes.push(VkDescriptorPoolSize { + ty: layout_binding.descriptorType, + descriptorCount: layout_binding.descriptorCount, + }); + } + + Ok(Arc::new(DescriptorSetLayout { + device, + descriptor_set_layout, + pool_sizes, + })) + } +} + +#[derive(Debug)] +pub struct DescriptorSetLayout { + device: Arc, + descriptor_set_layout: VkDescriptorSetLayout, + pool_sizes: Vec, +} + +impl DescriptorSetLayout { + pub fn builder() -> DescriptorSetLayoutBuilder { + DescriptorSetLayoutBuilder { + layout_bindings: Vec::new(), + indexing_flags: Vec::new(), + flags: 0u32.into(), + } + } + + pub fn pool_sizes(&self) -> &[VkDescriptorPoolSize] { + self.pool_sizes.as_slice() + } +} + +impl VulkanDevice for DescriptorSetLayout { + fn device(&self) -> &Arc { + &self.device + } +} + +impl_vk_handle!( + DescriptorSetLayout, + VkDescriptorSetLayout, + descriptor_set_layout +); + +impl Drop for DescriptorSetLayout { + fn drop(&mut self) { + self.device + .destroy_descriptor_set_layout(self.descriptor_set_layout); + } +} diff --git a/vulkan-rs/src/device.rs b/vulkan-rs/src/device.rs new file mode 100644 index 0000000..db9e5e5 --- /dev/null +++ b/vulkan-rs/src/device.rs @@ -0,0 +1,2943 @@ +pub use super::memory::Memory; + +use anyhow::Result; + +use vma_rs::prelude::*; + +use crate::prelude::*; +use crate::sampler_manager::SamplerManager; + +use std::cmp::min; +use std::fmt; +use std::mem::{size_of, MaybeUninit}; +use std::ptr; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use core::ffi::c_void; + +Extensions!(DeviceExtensions, { + (amd_rasterization_order, "VK_AMD_rasterization_order"), + (maintenance3, "VK_KHR_maintenance3"), + (descriptor_indexing, "VK_EXT_descriptor_indexing"), + (memory_requirements2, "VK_KHR_get_memory_requirements2"), + (swapchain, 
"VK_KHR_swapchain"), + (memory_budget, "VK_EXT_memory_budget"), + (memory_priority, "VK_EXT_memory_priority"), + (debug_marker, "VK_EXT_debug_marker"), + (ray_tracing_pipeline, "VK_KHR_ray_tracing_pipeline"), + (buffer_device_address, "VK_KHR_buffer_device_address"), + (deferred_host_operations, "VK_KHR_deferred_host_operations"), + (pipeline_library, "VK_KHR_pipeline_library"), + (acceleration_structure, "VK_KHR_acceleration_structure"), + (spirv_1_4, "VK_KHR_spirv_1_4"), + (shader_float_controls, "VK_KHR_shader_float_controls"), +}); + +pub use vulkan_sys::prelude::VkPhysicalDeviceFeatures as DeviceFeatures; + +pub struct MemoryHeap { + pub usage: VkDeviceSize, + pub budget: VkDeviceSize, +} + +pub struct Device { + device_functions: DeviceFunctions, + device_wsi_functions: DeviceWSIFunctions, + maintenance3_functions: Maintenance3Functions, + + _acceleration_structure_functions: AccelerationStructureFunctions, + _ray_tracing_pipeline_functions: RayTracingPipelineFunctions, + + deferred_operation_functions: DeferredOperationsFunctions, + + enabled_extensions: DeviceExtensions, + + physical_device: Arc, + device: VkDevice, + + memory_allocator: Allocator, + + sampler_manager: Mutex, +} + +impl Device { + pub fn preinitialized( + device: VkDevice, + proc_addr: PFN_vkGetDeviceProcAddr, + physical_device: Arc, + extensions: &[VkString], + ) -> Result> { + let device_functions = DeviceFunctions::load(|name| { + proc_addr(device, name.as_ptr()) as *const std::ffi::c_void + }); + let device_wsi_functions = DeviceWSIFunctions::load(|name| { + proc_addr(device, name.as_ptr()) as *const std::ffi::c_void + }); + let maintenance3_functions = Maintenance3Functions::load(|name| { + proc_addr(device, name.as_ptr()) as *const std::ffi::c_void + }); + let ray_tracing_functions = RayTracingPipelineFunctions::load(|name| { + proc_addr(device, name.as_ptr()) as *const std::ffi::c_void + }); + let acceleration_structure_functions = AccelerationStructureFunctions::load(|name| { + proc_addr(device, name.as_ptr()) as *const std::ffi::c_void + }); + let deferred_operation_functions = DeferredOperationsFunctions::load(|name| { + proc_addr(device, name.as_ptr()) as *const std::ffi::c_void + }); + + let vma_fns = VmaVulkanFunctions { + vkGetPhysicalDeviceProperties: physical_device + .instance() + .instance_functions + .vkGetPhysicalDeviceProperties, + vkGetPhysicalDeviceMemoryProperties: physical_device + .instance() + .instance_functions + .vkGetPhysicalDeviceMemoryProperties, + vkAllocateMemory: device_functions.vkAllocateMemory, + vkFreeMemory: device_functions.vkFreeMemory, + vkMapMemory: device_functions.vkMapMemory, + vkUnmapMemory: device_functions.vkUnmapMemory, + vkFlushMappedMemoryRanges: device_functions.vkFlushMappedMemoryRanges, + vkInvalidateMappedMemoryRanges: device_functions.vkInvalidateMappedMemoryRanges, + vkBindBufferMemory: device_functions.vkBindBufferMemory, + vkBindImageMemory: device_functions.vkBindImageMemory, + vkGetBufferMemoryRequirements: device_functions.vkGetBufferMemoryRequirements, + vkGetImageMemoryRequirements: device_functions.vkGetImageMemoryRequirements, + vkCreateBuffer: device_functions.vkCreateBuffer, + vkDestroyBuffer: device_functions.vkDestroyBuffer, + vkCreateImage: device_functions.vkCreateImage, + vkDestroyImage: device_functions.vkDestroyImage, + vkCmdCopyBuffer: device_functions.vkCmdCopyBuffer, + vkGetBufferMemoryRequirements2KHR: device_functions.vkGetBufferMemoryRequirements2, + vkGetImageMemoryRequirements2KHR: device_functions.vkGetImageMemoryRequirements2, + 
vkBindBufferMemory2KHR: device_functions.vkBindBufferMemory2, + vkBindImageMemory2KHR: device_functions.vkBindImageMemory2, + vkGetPhysicalDeviceMemoryProperties2KHR: physical_device + .instance() + .instance_functions + .vkGetPhysicalDeviceMemoryProperties2, + }; + + Self::verify_vma_vk_functions(&vma_fns)?; + + let memory_allocator = Allocator::builder() + .set_vulkan_functions(vma_fns) + .set_flags(VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT as u32) + .build( + physical_device.instance().vk_handle(), + device, + physical_device.vk_handle(), + physical_device.instance().api_version(), + )?; + + Ok(Arc::new(Device { + memory_allocator, + + device_functions, + device_wsi_functions, + maintenance3_functions, + + deferred_operation_functions, + + _acceleration_structure_functions: acceleration_structure_functions, + _ray_tracing_pipeline_functions: ray_tracing_functions, + + enabled_extensions: DeviceExtensions::from_list(extensions), + + physical_device, + device, + + sampler_manager: SamplerManager::new(), + })) + } + + pub fn new( + physical_device: Arc, + extensions: DeviceExtensions, + queue_infos: &[VkDeviceQueueCreateInfo], + requested_device_features: DeviceFeatures, + ) -> Result> { + let device_extensions = physical_device.extensions(); + + let mut checked_extensions = Vec::new(); + + let extension_list = extensions.as_list(); + + for extension in extension_list { + for ext_prop in device_extensions { + if *ext_prop == extension { + checked_extensions.push(extension); + break; + } + } + } + + let names = VkNames::new(checked_extensions.as_slice()); + + println!("\nenabled device extensions ({}):", names.len()); + + for extension_name in names.iter() { + println!("\t- {:?}", extension_name); + } + + println!(); + + if !requested_device_features.is_subset_of(&physical_device.features()) { + return Err(anyhow::Error::msg( + "Requested features are not supported by the device", + )); + } + + let mut device_ci = VkDeviceCreateInfo::new( + VK_DEVICE_CREATE_NULL_BIT, + queue_infos, + &names, + &requested_device_features, + ); + + let enabled_extensions = DeviceExtensions::from_list(&checked_extensions); + + if let Err(missing_extensions) = extensions.check_availability(&enabled_extensions) { + for m in missing_extensions { + println!("{}", m); + } + } + + if enabled_extensions.descriptor_indexing { + device_ci.chain(physical_device.descriptor_indexing_features()); + } + + // only required for khr ray tracing + // ----- + if enabled_extensions.buffer_device_address { + device_ci.chain(physical_device.buffer_device_address_features()); + } + + if enabled_extensions.acceleration_structure { + device_ci.chain(physical_device.acceleration_structure_features()); + } + + if enabled_extensions.ray_tracing_pipeline { + device_ci.chain(physical_device.ray_tracing_features()); + } + // ----- + + let instance = physical_device.instance(); + + let device = instance.create_device(physical_device.vk_handle(), &device_ci)?; + + let device_functions = DeviceFunctions::new(&instance.instance_functions, device); + let device_wsi_functions = DeviceWSIFunctions::new(&instance.instance_functions, device); + let maintenance3_functions = + Maintenance3Functions::new(&instance.instance_functions, device); + let ray_tracing_functions = + RayTracingPipelineFunctions::new(&instance.instance_functions, device); + let acceleration_structure_functions = + AccelerationStructureFunctions::new(&instance.instance_functions, device); + let deferred_operation_functions = + 
+            DeferredOperationsFunctions::new(&instance.instance_functions, device);
+
+        let memory_allocator = Allocator::builder()
+            .set_vulkan_functions(VmaVulkanFunctions {
+                vkGetPhysicalDeviceProperties: physical_device
+                    .instance()
+                    .instance_functions
+                    .vkGetPhysicalDeviceProperties,
+                vkGetPhysicalDeviceMemoryProperties: physical_device
+                    .instance()
+                    .instance_functions
+                    .vkGetPhysicalDeviceMemoryProperties,
+                vkAllocateMemory: device_functions.vkAllocateMemory,
+                vkFreeMemory: device_functions.vkFreeMemory,
+                vkMapMemory: device_functions.vkMapMemory,
+                vkUnmapMemory: device_functions.vkUnmapMemory,
+                vkFlushMappedMemoryRanges: device_functions.vkFlushMappedMemoryRanges,
+                vkInvalidateMappedMemoryRanges: device_functions.vkInvalidateMappedMemoryRanges,
+                vkBindBufferMemory: device_functions.vkBindBufferMemory,
+                vkBindImageMemory: device_functions.vkBindImageMemory,
+                vkGetBufferMemoryRequirements: device_functions.vkGetBufferMemoryRequirements,
+                vkGetImageMemoryRequirements: device_functions.vkGetImageMemoryRequirements,
+                vkCreateBuffer: device_functions.vkCreateBuffer,
+                vkDestroyBuffer: device_functions.vkDestroyBuffer,
+                vkCreateImage: device_functions.vkCreateImage,
+                vkDestroyImage: device_functions.vkDestroyImage,
+                vkCmdCopyBuffer: device_functions.vkCmdCopyBuffer,
+                vkGetBufferMemoryRequirements2KHR: device_functions.vkGetBufferMemoryRequirements2,
+                vkGetImageMemoryRequirements2KHR: device_functions.vkGetImageMemoryRequirements2,
+                vkBindBufferMemory2KHR: device_functions.vkBindBufferMemory2,
+                vkBindImageMemory2KHR: device_functions.vkBindImageMemory2,
+                vkGetPhysicalDeviceMemoryProperties2KHR: physical_device
+                    .instance()
+                    .instance_functions
+                    .vkGetPhysicalDeviceMemoryProperties2,
+            })
+            .set_flags(VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT as u32)
+            .build(
+                physical_device.instance().vk_handle(),
+                device,
+                physical_device.vk_handle(),
+                physical_device.instance().api_version(),
+            )?;
+
+        Ok(Arc::new(Device {
+            memory_allocator,
+
+            device_functions,
+            device_wsi_functions,
+            maintenance3_functions,
+
+            deferred_operation_functions,
+
+            _acceleration_structure_functions: acceleration_structure_functions,
+            _ray_tracing_pipeline_functions: ray_tracing_functions,
+
+            enabled_extensions,
+
+            physical_device,
+            device,
+
+            sampler_manager: SamplerManager::new(),
+        }))
+    }
+
+    fn verify_vma_vk_functions(fns: &VmaVulkanFunctions) -> Result<()> {
+        macro_rules!
test_vma_fn { + ($([$var:ident: $pfn:ident],)*) => { + $( + if unsafe { std::mem::transmute::<$pfn, *const c_void>(fns.$var) } == ptr::null() { + return Err(anyhow::anyhow!(format!("vma function {} is null ptr", stringify!($var)))); + } + )* + }; + } + + test_vma_fn!( + [vkGetPhysicalDeviceProperties: PFN_vkGetPhysicalDeviceProperties], + [vkGetPhysicalDeviceMemoryProperties: PFN_vkGetPhysicalDeviceMemoryProperties], + [vkAllocateMemory: PFN_vkAllocateMemory], + [vkFreeMemory: PFN_vkFreeMemory], + [vkMapMemory: PFN_vkMapMemory], + [vkUnmapMemory: PFN_vkUnmapMemory], + [vkFlushMappedMemoryRanges: PFN_vkFlushMappedMemoryRanges], + [vkInvalidateMappedMemoryRanges: PFN_vkInvalidateMappedMemoryRanges], + [vkBindBufferMemory: PFN_vkBindBufferMemory], + [vkBindImageMemory: PFN_vkBindImageMemory], + [vkGetBufferMemoryRequirements: PFN_vkGetBufferMemoryRequirements], + [vkGetImageMemoryRequirements: PFN_vkGetImageMemoryRequirements], + [vkCreateBuffer: PFN_vkCreateBuffer], + [vkDestroyBuffer: PFN_vkDestroyBuffer], + [vkCreateImage: PFN_vkCreateImage], + [vkDestroyImage: PFN_vkDestroyImage], + [vkCmdCopyBuffer: PFN_vkCmdCopyBuffer], + [vkGetBufferMemoryRequirements2KHR: PFN_vkGetBufferMemoryRequirements2], + [vkGetImageMemoryRequirements2KHR: PFN_vkGetImageMemoryRequirements2], + [vkBindBufferMemory2KHR: PFN_vkBindBufferMemory2], + [vkBindImageMemory2KHR: PFN_vkBindImageMemory2], + [ + vkGetPhysicalDeviceMemoryProperties2KHR: + PFN_vkGetPhysicalDeviceMemoryProperties2KHR + ], + ); + + Ok(()) + } + + pub fn get_queue( + self: &Arc, + queue_family_index: u32, + queue_index: u32, + ) -> Arc> { + Queue::new( + self.clone(), + self.get_device_queue(queue_family_index, queue_index), + queue_family_index, + queue_index, + ) + } + + pub fn physical_device(&self) -> &Arc { + &self.physical_device + } + + pub fn wait_for_fences( + &self, + fences: &[&Arc], + wait_all: bool, + timeout: Duration, + ) -> Result<()> { + let vkfences: Vec = fences.iter().map(|fence| fence.vk_handle()).collect(); + + self.device_wait_for_fences(vkfences.as_slice(), wait_all, timeout.as_nanos() as u64)?; + + Ok(()) + } + + pub fn enabled_extensions(&self) -> &DeviceExtensions { + &self.enabled_extensions + } + + pub fn memory_budgets(&self) -> Vec { + let phys_dev = self.physical_device(); + + let (budget, count) = phys_dev + .instance() + .physical_device_memory_budget(phys_dev.vk_handle()); + + let mut heaps = Vec::with_capacity(count as usize); + let usages = budget.heap_usages(count); + let budgets = budget.heap_budgets(count); + + for i in 0..count { + heaps.push(MemoryHeap { + usage: usages[i as usize], + budget: budgets[i as usize], + }) + } + + heaps + } + + pub fn max_supported_sample_count( + &self, + requested_sample_count: VkSampleCountFlags, + ) -> VkSampleCountFlags { + let dev_props = self.physical_device.properties(); + + let phys_counts = min( + dev_props.limits.framebufferColorSampleCounts, + dev_props.limits.framebufferDepthSampleCounts, + ); + + let counts = min(phys_counts, requested_sample_count.into()); + + if (counts & VK_SAMPLE_COUNT_64_BIT) != 0 { + VK_SAMPLE_COUNT_64_BIT + } else if (counts & VK_SAMPLE_COUNT_32_BIT) != 0 { + VK_SAMPLE_COUNT_32_BIT + } else if (counts & VK_SAMPLE_COUNT_16_BIT) != 0 { + VK_SAMPLE_COUNT_16_BIT + } else if (counts & VK_SAMPLE_COUNT_8_BIT) != 0 { + VK_SAMPLE_COUNT_8_BIT + } else if (counts & VK_SAMPLE_COUNT_4_BIT) != 0 { + VK_SAMPLE_COUNT_4_BIT + } else if (counts & VK_SAMPLE_COUNT_2_BIT) != 0 { + VK_SAMPLE_COUNT_2_BIT + } else { + VK_SAMPLE_COUNT_1_BIT + } + } + + pub fn 
memory_statistics(&self) -> VmaStats { + self.memory_allocator.statistics() + } +} + +impl_vk_handle!(Device, VkDevice, device); + +impl fmt::Debug for Device { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "Device {{ device: {:#?}, physical_device: {:#?} }}", + self.device, self.physical_device + ) + } +} + +impl Drop for Device { + fn drop(&mut self) { + unsafe { + self.sampler_manager + .lock() + .expect("failed to lock sampler manager at drop of device") + .clear(self); + } + + self.destroy_device(); + } +} + +impl Device { + #[inline] + pub fn device_proc_addr(&self, name: VkString) -> PFN_vkVoidFunction { + self.physical_device + .instance() + .get_device_proc_addr(self.device, name) + } + + #[inline] + fn destroy_device(&self) { + unsafe { + self.device_functions + .vkDestroyDevice(self.device, ptr::null()); + } + } + + #[inline] + pub fn wait_idle(&self) -> Result<()> { + let result = unsafe { self.device_functions.vkDeviceWaitIdle(self.device) }; + + match result { + VK_SUCCESS => Ok(()), + + _ => Err(anyhow::Error::new(result)), + } + } + + #[inline] + fn get_device_queue(&self, queue_family_index: u32, queue_index: u32) -> VkQueue { + unsafe { + let mut queue = MaybeUninit::uninit(); + + self.device_functions.vkGetDeviceQueue( + self.device, + queue_family_index, + queue_index, + queue.as_mut_ptr(), + ); + + queue.assume_init() + } + } + + #[inline] + fn device_wait_for_fences( + &self, + fences: &[VkFence], + wait_all: impl Into, + timeout: u64, + ) -> Result<()> { + unsafe { + let result = self.device_functions.vkWaitForFences( + self.device, + fences.len() as u32, + fences.as_ptr(), + wait_all.into(), + timeout, + ); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn query_pool_results( + &self, + query_pool: VkQueryPool, + first_query: u32, + query_count: u32, + data: &mut T, + stride: VkDeviceSize, + flags: impl Into, + ) -> Result<()> { + unsafe { + let result = self.device_functions.vkGetQueryPoolResults( + self.device, + query_pool, + first_query, + query_count, + size_of::(), + data as *mut T as *mut c_void, + stride, + flags.into(), + ); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn queue_submit( + &self, + queue: VkQueue, + submits: &[VkSubmitInfo], + fence: VkFence, + ) -> Result<()> { + unsafe { + let result = self.device_functions.vkQueueSubmit( + queue, + submits.len() as u32, + submits.as_ptr(), + fence, + ); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn queue_wait_idle(&self, queue: VkQueue) -> Result<()> { + unsafe { + let result = self.device_functions.vkQueueWaitIdle(queue); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn create_buffer(&self, create_info: &VkBufferCreateInfo) -> Result { + unsafe { + let mut buffer = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateBuffer( + self.device, + create_info, + ptr::null(), + buffer.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(buffer.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_buffer(&self, buffer: VkBuffer) { + unsafe { + self.device_functions + .vkDestroyBuffer(self.device, buffer, ptr::null()) + }; + } + + #[inline] + pub fn buffer_memory_requirements(&self, buffer: VkBuffer) -> VkMemoryRequirements { + unsafe { + 
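+            // Out-parameter pattern used by most wrappers in this impl block:
+            // reserve uninitialized storage, let the Vulkan entry point fill it,
+            // then assume_init() once the call has returned.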
let mut memory_requirements = MaybeUninit::uninit(); + + self.device_functions.vkGetBufferMemoryRequirements( + self.device, + buffer, + memory_requirements.as_mut_ptr(), + ); + + memory_requirements.assume_init() + } + } + + #[inline] + pub fn get_buffer_device_address(&self, buffer: VkBuffer) -> Address { + Address::from(unsafe { + self.device_functions + .vkGetBufferDeviceAddress(self.device, &VkBufferDeviceAddressInfo::new(buffer)) + }) + } + + pub(crate) fn allocator(&self) -> &Allocator { + &self.memory_allocator + } + + #[inline] + pub fn allocate_memory(&self, allocate_info: &VkMemoryAllocateInfo) -> Result { + unsafe { + let mut memory = MaybeUninit::uninit(); + + let result = self.device_functions.vkAllocateMemory( + self.device, + allocate_info, + ptr::null(), + memory.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(memory.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn free_memory(&self, memory: VkDeviceMemory) { + unsafe { + self.device_functions + .vkFreeMemory(self.device, memory, ptr::null()) + }; + } + + #[inline] + pub fn unmap_memory(&self, memory: VkDeviceMemory) { + unsafe { self.device_functions.vkUnmapMemory(self.device, memory) }; + } + + #[inline] + pub fn bind_buffer_memory( + &self, + buffer: VkBuffer, + memory: VkDeviceMemory, + offset: VkDeviceSize, + ) -> Result<()> { + unsafe { + let result = + self.device_functions + .vkBindBufferMemory(self.device, buffer, memory, offset); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn create_render_pass(&self, create_info: &VkRenderPassCreateInfo) -> Result { + unsafe { + let mut render_pass = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateRenderPass( + self.device, + create_info, + ptr::null(), + render_pass.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(render_pass.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_render_pass(&self, render_pass: VkRenderPass) { + unsafe { + self.device_functions + .vkDestroyRenderPass(self.device, render_pass, ptr::null()) + }; + } + + #[inline] + pub fn create_image(&self, create_info: &VkImageCreateInfo) -> Result { + unsafe { + let mut image = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateImage( + self.device, + create_info, + ptr::null(), + image.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(image.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_image(&self, image: VkImage) { + unsafe { + self.device_functions + .vkDestroyImage(self.device, image, ptr::null()) + }; + } + + #[inline] + pub fn image_subresource_layout( + &self, + image: VkImage, + subresource: &VkImageSubresource, + ) -> VkSubresourceLayout { + unsafe { + let mut subresource_layout = MaybeUninit::uninit(); + + self.device_functions.vkGetImageSubresourceLayout( + self.device, + image, + subresource, + subresource_layout.as_mut_ptr(), + ); + + subresource_layout.assume_init() + } + } + + #[inline] + pub fn create_image_view(&self, create_info: &VkImageViewCreateInfo) -> Result { + unsafe { + let mut image_view = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateImageView( + self.device, + create_info, + ptr::null(), + image_view.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(image_view.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_image_view(&self, 
image_view: VkImageView) { + unsafe { + self.device_functions + .vkDestroyImageView(self.device, image_view, ptr::null()) + }; + } + + #[inline] + pub fn image_memory_requirements(&self, image: VkImage) -> VkMemoryRequirements { + unsafe { + let mut memory_requirements = MaybeUninit::uninit(); + + self.device_functions.vkGetImageMemoryRequirements( + self.device, + image, + memory_requirements.as_mut_ptr(), + ); + + memory_requirements.assume_init() + } + } + + #[inline] + pub fn image_sparse_memory_requirements( + &self, + image: VkImage, + ) -> Vec { + let mut count: u32 = 0; + + unsafe { + self.device_functions.vkGetImageSparseMemoryRequirements( + self.device, + image, + &mut count, + ptr::null_mut(), + ) + }; + + let mut sparse_memory_requirements = Vec::with_capacity(count as usize); + unsafe { sparse_memory_requirements.set_len(count as usize) }; + + unsafe { + self.device_functions.vkGetImageSparseMemoryRequirements( + self.device, + image, + &mut count, + sparse_memory_requirements.as_mut_ptr(), + ) + }; + + sparse_memory_requirements + } + + #[inline] + pub fn bind_image_memory( + &self, + image: VkImage, + memory: VkDeviceMemory, + offset: VkDeviceSize, + ) -> Result<()> { + unsafe { + let result = + self.device_functions + .vkBindImageMemory(self.device, image, memory, offset); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub(crate) fn create_sampler_from_manager( + &self, + create_info: VkSamplerCreateInfo, + ) -> Result> { + self.sampler_manager + .lock() + .unwrap() + .create_sampler(create_info, self) + } + + #[inline] + pub fn create_sampler(&self, create_info: &VkSamplerCreateInfo) -> Result { + unsafe { + let mut sampler = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateSampler( + self.device, + create_info, + ptr::null(), + sampler.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(sampler.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_sampler(&self, sampler: VkSampler) { + unsafe { + self.device_functions + .vkDestroySampler(self.device, sampler, ptr::null()) + }; + } + + #[inline] + pub fn create_buffer_view(&self, create_info: &VkBufferViewCreateInfo) -> Result { + unsafe { + let mut buffer_view = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateBufferView( + self.device, + create_info, + ptr::null(), + buffer_view.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(buffer_view.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_buffer_view(&self, buffer_view: VkBufferView) { + unsafe { + self.device_functions + .vkDestroyBufferView(self.device, buffer_view, ptr::null()) + }; + } + + #[inline] + pub fn create_fence(&self, create_info: &VkFenceCreateInfo) -> Result { + unsafe { + let mut fence = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateFence( + self.device, + create_info, + ptr::null(), + fence.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(fence.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_fence(&self, fence: VkFence) { + unsafe { + self.device_functions + .vkDestroyFence(self.device, fence, ptr::null()) + }; + } + + #[inline] + pub fn reset_fences(&self, fences: &[VkFence]) -> Result<()> { + unsafe { + let result = self.device_functions.vkResetFences( + self.device, + fences.len() as u32, + fences.as_ptr(), + ); + + if result == VK_SUCCESS { + Ok(()) + } 
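+            // Fence round trip with the calls in this impl (sketch, assuming
+            // `fence` came from create_fence above):
+            //
+            //     device.queue_submit(queue, &submits, fence)?; // signals on completion
+            //     device.reset_fences(&[fence])?;               // back to unsignaled
+            //     device.destroy_fence(fence);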
else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn create_semaphore(&self, create_info: &VkSemaphoreCreateInfo) -> Result { + unsafe { + let mut semaphore = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateSemaphore( + self.device, + create_info, + ptr::null(), + semaphore.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(semaphore.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_semaphore(&self, semaphore: VkSemaphore) { + unsafe { + self.device_functions + .vkDestroySemaphore(self.device, semaphore, ptr::null()) + }; + } + + #[inline] + pub fn create_shader_module( + &self, + create_info: &VkShaderModuleCreateInfo, + ) -> Result { + unsafe { + let mut shader_module = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateShaderModule( + self.device, + create_info, + ptr::null(), + shader_module.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(shader_module.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_shader_module(&self, shader_module: VkShaderModule) { + unsafe { + self.device_functions + .vkDestroyShaderModule(self.device, shader_module, ptr::null()) + }; + } + + #[inline] + pub fn create_descriptor_pool( + &self, + create_info: &VkDescriptorPoolCreateInfo, + ) -> Result { + unsafe { + let mut descriptor_pool = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateDescriptorPool( + self.device, + create_info, + ptr::null(), + descriptor_pool.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(descriptor_pool.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_descriptor_pool(&self, descriptor_pool: VkDescriptorPool) { + unsafe { + self.device_functions + .vkDestroyDescriptorPool(self.device, descriptor_pool, ptr::null()) + }; + } + + #[inline] + pub fn reset_descriptor_pool( + &self, + descriptor_pool: VkDescriptorPool, + flags: T, + ) -> Result<()> + where + T: Into, + { + unsafe { + let result = self.device_functions.vkResetDescriptorPool( + self.device, + descriptor_pool, + flags.into(), + ); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn create_descriptor_set_layout( + &self, + create_info: &VkDescriptorSetLayoutCreateInfo, + ) -> Result { + unsafe { + let mut descriptor_set_layout = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateDescriptorSetLayout( + self.device, + create_info, + ptr::null(), + descriptor_set_layout.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(descriptor_set_layout.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_descriptor_set_layout(&self, descriptor_set_layout: VkDescriptorSetLayout) { + unsafe { + self.device_functions.vkDestroyDescriptorSetLayout( + self.device, + descriptor_set_layout, + ptr::null(), + ) + }; + } + + #[inline] + pub fn allocate_descriptor_sets<'a>( + &self, + allocate_info: &VkDescriptorSetAllocateInfo<'a>, + ) -> Result> { + unsafe { + let count = allocate_info.descriptorSetCount as usize; + + let mut descriptor_sets = Vec::with_capacity(count); + descriptor_sets.set_len(count); + + let result = self.device_functions.vkAllocateDescriptorSets( + self.device, + allocate_info, + descriptor_sets.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(descriptor_sets) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub 
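+    // Descriptor flow built from the calls in this impl (illustrative
+    // sketch; the create-info values are assumed to exist in the caller):
+    //
+    //     let layout = device.create_descriptor_set_layout(&layout_ci)?;
+    //     let pool = device.create_descriptor_pool(&pool_ci)?;
+    //     let sets = device.allocate_descriptor_sets(&alloc_info)?;
+    //     device.update_descriptor_sets(&writes, &[]);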
fn free_descriptor_sets( + &self, + descriptor_pool: VkDescriptorPool, + descriptor_sets: &[VkDescriptorSet], + ) -> Result<()> { + unsafe { + let result = self.device_functions.vkFreeDescriptorSets( + self.device, + descriptor_pool, + descriptor_sets.len() as u32, + descriptor_sets.as_ptr(), + ); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn update_descriptor_sets( + &self, + writes: &[VkWriteDescriptorSet], + copies: &[VkCopyDescriptorSet], + ) { + unsafe { + self.device_functions.vkUpdateDescriptorSets( + self.device, + writes.len() as u32, + writes.as_ptr(), + copies.len() as u32, + copies.as_ptr(), + ); + } + } + + #[inline] + pub fn create_event(&self, create_info: &VkEventCreateInfo) -> Result { + unsafe { + let mut event = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateEvent( + self.device, + create_info, + ptr::null(), + event.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(event.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_event(&self, event: VkEvent) { + unsafe { + self.device_functions + .vkDestroyEvent(self.device, event, ptr::null()) + }; + } + + #[inline] + pub fn event_status(&self, event: VkEvent) -> Result<()> { + unsafe { + let result = self.device_functions.vkGetEventStatus(self.device, event); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn set_event(&self, event: VkEvent) -> Result<()> { + unsafe { + let result = self.device_functions.vkSetEvent(self.device, event); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn reset_event(&self, event: VkEvent) -> Result<()> { + unsafe { + let result = self.device_functions.vkResetEvent(self.device, event); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn create_command_pool( + &self, + create_info: &VkCommandPoolCreateInfo, + ) -> Result { + unsafe { + let mut command_pool = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateCommandPool( + self.device, + create_info, + ptr::null(), + command_pool.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(command_pool.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_command_pool(&self, command_pool: VkCommandPool) { + unsafe { + self.device_functions + .vkDestroyCommandPool(self.device, command_pool, ptr::null()) + }; + } + + #[inline] + pub fn reset_command_pool( + &self, + command_pool: VkCommandPool, + flags: impl Into, + ) -> Result<()> { + unsafe { + let result = + self.device_functions + .vkResetCommandPool(self.device, command_pool, flags.into()); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn trim_command_pool(&self, command_pool: VkCommandPool, flags: T) + where + T: Into, + { + unsafe { + self.device_functions + .vkTrimCommandPool(self.device, command_pool, flags.into()); + } + } + + #[inline] + pub fn create_framebuffer( + &self, + create_info: &VkFramebufferCreateInfo, + ) -> Result { + unsafe { + let mut framebuffer = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateFramebuffer( + self.device, + create_info, + ptr::null(), + framebuffer.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(framebuffer.assume_init()) + } else { + 
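+                // Command-pool usage from the calls above (sketch; the flag
+                // values are illustrative):
+                //
+                //     let pool = device.create_command_pool(&pool_ci)?;
+                //     device.reset_command_pool(pool, 0u32)?; // recycle per frame
+                //     device.trim_command_pool(pool, 0u32);   // release unused memory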
Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_framebuffer(&self, framebuffer: VkFramebuffer) { + unsafe { + self.device_functions + .vkDestroyFramebuffer(self.device, framebuffer, ptr::null()) + }; + } + + #[inline] + pub fn allocate_command_buffers( + &self, + allocate_info: &VkCommandBufferAllocateInfo, + ) -> Result> { + unsafe { + let count = allocate_info.commandBufferCount as usize; + + let mut command_buffers = Vec::with_capacity(count); + command_buffers.set_len(count); + + let result = self.device_functions.vkAllocateCommandBuffers( + self.device, + allocate_info, + command_buffers.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(command_buffers) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn free_command_buffers( + &self, + command_pool: VkCommandPool, + command_buffers: &[VkCommandBuffer], + ) { + unsafe { + self.device_functions.vkFreeCommandBuffers( + self.device, + command_pool, + command_buffers.len() as u32, + command_buffers.as_ptr(), + ) + } + } + + #[inline] + pub fn create_query_pool(&self, create_info: &VkQueryPoolCreateInfo) -> Result { + unsafe { + let mut query_pool = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreateQueryPool( + self.device, + create_info, + ptr::null(), + query_pool.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(query_pool.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_query_pool(&self, query_pool: VkQueryPool) { + unsafe { + self.device_functions + .vkDestroyQueryPool(self.device, query_pool, ptr::null()) + }; + } + + #[inline] + pub fn create_pipeline_cache( + &self, + create_info: &VkPipelineCacheCreateInfo, + ) -> Result { + unsafe { + let mut pipeline_cache = MaybeUninit::uninit(); + + let result = self.device_functions.vkCreatePipelineCache( + self.device, + create_info, + ptr::null(), + pipeline_cache.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(pipeline_cache.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_pipeline_cache(&self, pipeline_cache: VkPipelineCache) { + unsafe { + self.device_functions + .vkDestroyPipelineCache(self.device, pipeline_cache, ptr::null()) + }; + } + + #[inline] + pub fn pipeline_cache_data(&self, pipeline_cache: VkPipelineCache) -> Result { + let mut count = 0; + + let result = unsafe { + self.device_functions.vkGetPipelineCacheData( + self.device, + pipeline_cache, + &mut count, + ptr::null_mut(), + ) + }; + + if result != VK_SUCCESS || count != size_of::() { + return Err(anyhow::Error::new(result)); + } + + unsafe { + let mut data = MaybeUninit::::uninit(); + + let result = self.device_functions.vkGetPipelineCacheData( + self.device, + pipeline_cache, + &mut count, + data.as_mut_ptr() as *mut c_void, + ); + + if result == VK_SUCCESS { + Ok(data.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn merge_pipeline_cache( + &self, + sources: &[VkPipelineCache], + destination: VkPipelineCache, + ) -> Result<()> { + unsafe { + let result = self.device_functions.vkMergePipelineCaches( + self.device, + destination, + sources.len() as u32, + sources.as_ptr(), + ); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn create_pipeline_layout( + &self, + create_info: &VkPipelineLayoutCreateInfo, + ) -> Result { + unsafe { + let mut pipeline_layout = MaybeUninit::uninit(); + + let result = 
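+            // vkGetPipelineCacheData (wrapped above) normally follows the
+            // two-call sizing dance; a generic sketch with a stand-in
+            // `get_data` helper:
+            //
+            //     let mut len = 0;
+            //     get_data(&mut len, ptr::null_mut());            // query size
+            //     let mut blob = vec![0u8; len];
+            //     get_data(&mut len, blob.as_mut_ptr() as *mut c_void);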
self.device_functions.vkCreatePipelineLayout( + self.device, + create_info, + ptr::null(), + pipeline_layout.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(pipeline_layout.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_pipeline_layout(&self, pipeline_layout: VkPipelineLayout) { + unsafe { + self.device_functions + .vkDestroyPipelineLayout(self.device, pipeline_layout, ptr::null()) + }; + } + + #[inline] + pub fn create_graphics_pipelines( + &self, + pipeline_cache: Option, + create_infos: &[VkGraphicsPipelineCreateInfo], + ) -> Result> { + unsafe { + let count = create_infos.len() as usize; + + let mut pipelines = Vec::with_capacity(count); + pipelines.set_len(count); + + let result = self.device_functions.vkCreateGraphicsPipelines( + self.device, + match pipeline_cache { + Some(cache) => cache, + None => VkPipelineCache::NULL_HANDLE, + }, + create_infos.len() as u32, + create_infos.as_ptr(), + ptr::null(), + pipelines.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(pipelines) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn create_compute_pipelines( + &self, + pipeline_cache: Option, + create_infos: &[VkComputePipelineCreateInfo], + ) -> Result> { + unsafe { + let count = create_infos.len() as usize; + + let mut pipelines = Vec::with_capacity(count); + pipelines.set_len(count); + + let result = self.device_functions.vkCreateComputePipelines( + self.device, + match pipeline_cache { + Some(cache) => cache, + None => VkPipelineCache::NULL_HANDLE, + }, + create_infos.len() as u32, + create_infos.as_ptr(), + ptr::null(), + pipelines.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(pipelines) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_pipeline(&self, pipeline: VkPipeline) { + unsafe { + self.device_functions + .vkDestroyPipeline(self.device, pipeline, ptr::null()) + }; + } + + #[inline] + pub fn queue_present( + &self, + queue: VkQueue, + present_info: &VkPresentInfoKHR, + ) -> Result> { + unsafe { + let result = self + .device_wsi_functions + .vkQueuePresentKHR(queue, present_info); + + if result == VK_SUCCESS { + Ok(OutOfDate::Ok(())) + } else if result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR { + Ok(OutOfDate::OutOfDate) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn create_swapchain( + &self, + create_info: &VkSwapchainCreateInfoKHR, + ) -> Result { + unsafe { + let mut swapchain = MaybeUninit::uninit(); + + let result = self.device_wsi_functions.vkCreateSwapchainKHR( + self.device, + create_info, + ptr::null(), + swapchain.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(swapchain.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_swapchain(&self, swapchain: VkSwapchainKHR) { + unsafe { + self.device_wsi_functions + .vkDestroySwapchainKHR(self.device, swapchain, ptr::null()) + }; + } + + #[inline] + pub fn swapchain_images(&self, swapchain: VkSwapchainKHR) -> Result> { + let mut count = 0; + + let result = unsafe { + self.device_wsi_functions.vkGetSwapchainImagesKHR( + self.device, + swapchain, + &mut count, + ptr::null_mut(), + ) + }; + + if result != VK_SUCCESS { + return Err(anyhow::Error::new(result)); + } + + let mut images = Vec::with_capacity(count as usize); + unsafe { images.set_len(count as usize) }; + + let result = unsafe { + self.device_wsi_functions.vkGetSwapchainImagesKHR( + self.device, + swapchain, + &mut count, + 
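+                // second half of the usual two-call enumeration: the call
+                // above queried `count`, this one fills the Vec reserved
+                // (and set_len'd) to exactly that many elements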
images.as_mut_ptr(), + ) + }; + + if result == VK_SUCCESS { + Ok(images) + } else { + Err(anyhow::Error::new(result)) + } + } + + #[inline] + pub fn acquire_next_image( + &self, + swapchain: VkSwapchainKHR, + timeout: u64, + semaphore: Option, + fence: Option, + ) -> Result> { + unsafe { + let mut image_index = 0; + + let result = self.device_wsi_functions.vkAcquireNextImageKHR( + self.device, + swapchain, + timeout, + match semaphore { + Some(sem) => sem, + None => VkSemaphore::NULL_HANDLE, + }, + match fence { + Some(fence) => fence, + None => VkFence::NULL_HANDLE, + }, + &mut image_index, + ); + + match result { + VK_SUCCESS => Ok(OutOfDate::Ok(image_index)), + VK_ERROR_OUT_OF_DATE_KHR | VK_SUBOPTIMAL_KHR => Ok(OutOfDate::OutOfDate), + VK_TIMEOUT | VK_NOT_READY => Ok(OutOfDate::TimeOut), + _ => Err(anyhow::Error::new(result)), + } + } + } +} + +// command buffer functions +impl Device { + #[inline] + pub fn begin_command_buffer( + &self, + command_buffer: VkCommandBuffer, + begin_info: &VkCommandBufferBeginInfo, + ) -> Result<()> { + unsafe { + if std::mem::transmute::( + self.device_functions.vkBeginCommandBuffer, + ) == ptr::null() + { + return Err(anyhow::anyhow!("vkBeginCommandBuffer is null")); + } + + let result = self + .device_functions + .vkBeginCommandBuffer(command_buffer, begin_info); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn end_command_buffer(&self, command_buffer: VkCommandBuffer) -> Result<()> { + unsafe { + let result = self.device_functions.vkEndCommandBuffer(command_buffer); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn reset_command_buffer( + &self, + command_buffer: VkCommandBuffer, + flags: impl Into, + ) -> Result<()> { + unsafe { + let result = self + .device_functions + .vkResetCommandBuffer(command_buffer, flags.into()); + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn cmd_bind_pipeline( + &self, + command_buffer: VkCommandBuffer, + pipeline_bind_point: VkPipelineBindPoint, + pipeline: VkPipeline, + ) { + unsafe { + self.device_functions + .vkCmdBindPipeline(command_buffer, pipeline_bind_point, pipeline); + } + } + + #[inline] + pub fn cmd_resolve_image( + &self, + command_buffer: VkCommandBuffer, + src_image: VkImage, + src_image_layout: VkImageLayout, + dst_image: VkImage, + dst_image_layout: VkImageLayout, + regions: &[VkImageResolve], + ) { + unsafe { + self.device_functions.vkCmdResolveImage( + command_buffer, + src_image, + src_image_layout, + dst_image, + dst_image_layout, + regions.len() as u32, + regions.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_set_viewport( + &self, + command_buffer: VkCommandBuffer, + first: u32, + viewports: &[VkViewport], + ) { + unsafe { + self.device_functions.vkCmdSetViewport( + command_buffer, + first, + viewports.len() as u32, + viewports.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_set_scissor( + &self, + command_buffer: VkCommandBuffer, + first: u32, + scissors: &[VkRect2D], + ) { + unsafe { + self.device_functions.vkCmdSetScissor( + command_buffer, + first, + scissors.len() as u32, + scissors.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_set_depth_bias( + &self, + command_buffer: VkCommandBuffer, + depth_bias_constant_factor: f32, + depth_bias_clamp: f32, + depth_bias_slope_factor: f32, + ) { + unsafe { + self.device_functions.vkCmdSetDepthBias( + command_buffer, + depth_bias_constant_factor, 
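+                // Per-frame skeleton using acquire_next_image above
+                // (hypothetical caller; `recreate` is a stand-in):
+                //
+                //     let idx = match device.acquire_next_image(sc, u64::MAX, Some(sem), None)? {
+                //         OutOfDate::Ok(i) => i,
+                //         OutOfDate::OutOfDate => return recreate(),
+                //         OutOfDate::TimeOut => return Ok(()),
+                //     };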
+ depth_bias_clamp, + depth_bias_slope_factor, + ) + } + } + + #[inline] + pub fn cmd_bind_descriptor_sets( + &self, + command_buffer: VkCommandBuffer, + pipeline_bind_point: VkPipelineBindPoint, + pipeline_layout: VkPipelineLayout, + first_set: u32, + descriptor_sets: &[VkDescriptorSet], + dynamic_offsets: &[u32], + ) { + unsafe { + self.device_functions.vkCmdBindDescriptorSets( + command_buffer, + pipeline_bind_point, + pipeline_layout, + first_set, + descriptor_sets.len() as u32, + descriptor_sets.as_ptr(), + dynamic_offsets.len() as u32, + dynamic_offsets.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_bind_index_buffer( + &self, + command_buffer: VkCommandBuffer, + buffer: VkBuffer, + offset: VkDeviceSize, + index_type: VkIndexType, + ) { + unsafe { + self.device_functions + .vkCmdBindIndexBuffer(command_buffer, buffer, offset, index_type) + } + } + + #[inline] + pub fn cmd_bind_vertex_buffers( + &self, + command_buffer: VkCommandBuffer, + first_binding: u32, + buffers: &[VkBuffer], + offsets: &[VkDeviceSize], + ) { + // sanity check + debug_assert!(buffers.len() == offsets.len()); + + unsafe { + self.device_functions.vkCmdBindVertexBuffers( + command_buffer, + first_binding, + buffers.len() as u32, + buffers.as_ptr(), + offsets.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_draw( + &self, + command_buffer: VkCommandBuffer, + vertex_count: u32, + instance_count: u32, + first_vertex: u32, + first_instance: u32, + ) { + unsafe { + self.device_functions.vkCmdDraw( + command_buffer, + vertex_count, + instance_count, + first_vertex, + first_instance, + ) + } + } + + #[inline] + pub fn cmd_draw_indexed( + &self, + command_buffer: VkCommandBuffer, + index_count: u32, + instance_count: u32, + first_index: u32, + vertex_offset: i32, + first_instance: u32, + ) { + unsafe { + self.device_functions.vkCmdDrawIndexed( + command_buffer, + index_count, + instance_count, + first_index, + vertex_offset, + first_instance, + ); + } + } + + #[inline] + pub fn cmd_dispatch(&self, command_buffer: VkCommandBuffer, x: u32, y: u32, z: u32) { + unsafe { self.device_functions.vkCmdDispatch(command_buffer, x, y, z) } + } + + #[inline] + pub fn cmd_begin_render_pass( + &self, + command_buffer: VkCommandBuffer, + render_pass_begin: &VkRenderPassBeginInfo, + contents: VkSubpassContents, + ) { + unsafe { + self.device_functions + .vkCmdBeginRenderPass(command_buffer, render_pass_begin, contents) + } + } + + #[inline] + pub fn cmd_next_subpass(&self, command_buffer: VkCommandBuffer, contents: VkSubpassContents) { + unsafe { + self.device_functions + .vkCmdNextSubpass(command_buffer, contents) + } + } + + #[inline] + pub fn cmd_end_render_pass(&self, command_buffer: VkCommandBuffer) { + unsafe { self.device_functions.vkCmdEndRenderPass(command_buffer) } + } + + #[inline] + pub fn cmd_execute_commands( + &self, + command_buffer: VkCommandBuffer, + command_buffers: &[VkCommandBuffer], + ) { + unsafe { + self.device_functions.vkCmdExecuteCommands( + command_buffer, + command_buffers.len() as u32, + command_buffers.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_pipeline_barrier( + &self, + command_buffer: VkCommandBuffer, + src_stage_mask: impl Into, + dst_stage_mask: impl Into, + dependency_flags: impl Into, + memory_barriers: &[VkMemoryBarrier], + buffer_memory_barriers: &[VkBufferMemoryBarrier], + image_memory_barriers: &[VkImageMemoryBarrier], + ) { + unsafe { + self.device_functions.vkCmdPipelineBarrier( + command_buffer, + src_stage_mask.into(), + dst_stage_mask.into(), + dependency_flags.into(), + 
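+                // typical call shape: transition an image before a transfer
+                // (sketch; `barrier` is a caller-built VkImageMemoryBarrier)
+                //
+                //     device.cmd_pipeline_barrier(cb,
+                //         VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+                //         VK_PIPELINE_STAGE_TRANSFER_BIT,
+                //         0u32, &[], &[], &[barrier]);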
memory_barriers.len() as u32, + memory_barriers.as_ptr(), + buffer_memory_barriers.len() as u32, + buffer_memory_barriers.as_ptr(), + image_memory_barriers.len() as u32, + image_memory_barriers.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_copy_buffer( + &self, + command_buffer: VkCommandBuffer, + src_buffer: VkBuffer, + dst_buffer: VkBuffer, + regions: &[VkBufferCopy], + ) { + unsafe { + self.device_functions.vkCmdCopyBuffer( + command_buffer, + src_buffer, + dst_buffer, + regions.len() as u32, + regions.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_copy_image( + &self, + command_buffer: VkCommandBuffer, + src_image: VkImage, + src_image_layout: VkImageLayout, + dst_image: VkImage, + dst_image_layout: VkImageLayout, + regions: &[VkImageCopy], + ) { + unsafe { + self.device_functions.vkCmdCopyImage( + command_buffer, + src_image, + src_image_layout, + dst_image, + dst_image_layout, + regions.len() as u32, + regions.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_blit_image( + &self, + command_buffer: VkCommandBuffer, + src_image: VkImage, + src_image_layout: VkImageLayout, + dst_image: VkImage, + dst_image_layout: VkImageLayout, + regions: &[VkImageBlit], + filter: VkFilter, + ) { + unsafe { + self.device_functions.vkCmdBlitImage( + command_buffer, + src_image, + src_image_layout, + dst_image, + dst_image_layout, + regions.len() as u32, + regions.as_ptr(), + filter, + ) + } + } + + #[inline] + pub fn cmd_copy_buffer_to_image( + &self, + command_buffer: VkCommandBuffer, + src_buffer: VkBuffer, + dst_image: VkImage, + dst_image_layout: VkImageLayout, + regions: &[VkBufferImageCopy], + ) { + unsafe { + self.device_functions.vkCmdCopyBufferToImage( + command_buffer, + src_buffer, + dst_image, + dst_image_layout, + regions.len() as u32, + regions.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_copy_image_to_buffer( + &self, + command_buffer: VkCommandBuffer, + src_image: VkImage, + src_image_layout: VkImageLayout, + dst_buffer: VkBuffer, + regions: &[VkBufferImageCopy], + ) { + unsafe { + self.device_functions.vkCmdCopyImageToBuffer( + command_buffer, + src_image, + src_image_layout, + dst_buffer, + regions.len() as u32, + regions.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_push_constants( + &self, + command_buffer: VkCommandBuffer, + pipeline_layout: VkPipelineLayout, + stage_flags: impl Into, + offset: u32, + data: &T, + ) { + unsafe { + self.device_functions.vkCmdPushConstants( + command_buffer, + pipeline_layout, + stage_flags.into(), + offset, + size_of::() as u32, + data as *const T as *const c_void, + ) + } + } + + #[inline] + pub fn cmd_begin_query( + &self, + command_buffer: VkCommandBuffer, + query_pool: VkQueryPool, + query: u32, + flags: impl Into, + ) { + unsafe { + self.device_functions + .vkCmdBeginQuery(command_buffer, query_pool, query, flags.into()) + } + } + + #[inline] + pub fn cmd_end_query( + &self, + command_buffer: VkCommandBuffer, + query_pool: VkQueryPool, + query: u32, + ) { + unsafe { + self.device_functions + .vkCmdEndQuery(command_buffer, query_pool, query) + } + } + + #[inline] + pub fn cmd_reset_query_pool( + &self, + command_buffer: VkCommandBuffer, + query_pool: VkQueryPool, + first_query: u32, + query_count: u32, + ) { + unsafe { + self.device_functions.vkCmdResetQueryPool( + command_buffer, + query_pool, + first_query, + query_count, + ) + } + } + + #[inline] + pub fn cmd_write_timestamp( + &self, + command_buffer: VkCommandBuffer, + pipeline_stage: impl Into, + query_pool: VkQueryPool, + query: u32, + ) { + unsafe { + 
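+            // timestamps are written in raw ticks; scale by
+            // VkPhysicalDeviceLimits::timestampPeriod (ns per tick) when
+            // reading them back through query_pool_results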
self.device_functions.vkCmdWriteTimestamp( + command_buffer, + pipeline_stage.into(), + query_pool, + query, + ) + } + } + + #[inline] + pub fn cmd_clear_color_image( + &self, + command_buffer: VkCommandBuffer, + image: VkImage, + image_layout: VkImageLayout, + clear_color: VkClearColorValue, + ranges: &[VkImageSubresourceRange], + ) { + unsafe { + self.device_functions.vkCmdClearColorImage( + command_buffer, + image, + image_layout, + &clear_color, + ranges.len() as u32, + ranges.as_ptr(), + ) + } + } + + #[inline] + pub fn descriptor_set_layout_support( + &self, + create_info: &VkDescriptorSetLayoutCreateInfo, + support: &mut VkDescriptorSetLayoutSupport, + ) { + unsafe { + self.maintenance3_functions.vkGetDescriptorSetLayoutSupport( + self.device, + create_info, + support, + ); + } + } +} + +// khr ray tracing pipeline & acceleration structure +impl Device { + #[inline] + pub fn build_acceleration_structures( + &self, + deferred_operation: Option, + infos: &[VkAccelerationStructureBuildGeometryInfoKHR], + range_infos: &[&VkAccelerationStructureBuildRangeInfoKHR], + ) -> Result<()> { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + let result = unsafe { + self._acceleration_structure_functions + .vkBuildAccelerationStructuresKHR( + self.device, + match deferred_operation { + Some(deferred_operation) => deferred_operation, + None => VkDeferredOperationKHR::NULL_HANDLE, + }, + infos.len() as u32, + infos.as_ptr(), + range_infos.as_ptr() as *const *const _, + ) + }; + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + + #[inline] + pub fn cmd_build_acceleration_structure_indirect( + &self, + command_buffer: VkCommandBuffer, + infos: &[VkAccelerationStructureBuildGeometryInfoKHR], + device_addresses: &[VkDeviceAddress], + strides: &[u32], + max_primitive_counts: &[&u32], + ) { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + let count = infos.len(); + + debug_assert_eq!(infos.len(), count); + debug_assert_eq!(device_addresses.len(), count); + debug_assert_eq!(strides.len(), count); + debug_assert_eq!(max_primitive_counts.len(), count); + + unsafe { + self._acceleration_structure_functions + .vkCmdBuildAccelerationStructuresIndirectKHR( + command_buffer, + count as u32, + infos.as_ptr(), + device_addresses.as_ptr(), + strides.as_ptr(), + max_primitive_counts.as_ptr() as *const *const u32, + ) + } + } + + #[inline] + pub fn cmd_build_acceleration_structures( + &self, + command_buffer: VkCommandBuffer, + infos: &[VkAccelerationStructureBuildGeometryInfoKHR], + range_infos: &[&[VkAccelerationStructureBuildRangeInfoKHR]], + ) { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + let range_info_ptr = range_infos + .iter() + .map(|slice| slice.as_ptr()) + .collect::>(); + + unsafe { + self._acceleration_structure_functions + .vkCmdBuildAccelerationStructuresKHR( + command_buffer, + infos.len() as u32, + infos.as_ptr(), + range_info_ptr.as_ptr(), + ) + } + } + + #[inline] + pub fn cmd_copy_acceleration_structure( + &self, + command_buffer: VkCommandBuffer, + info: &VkCopyAccelerationStructureInfoKHR, + ) { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + unsafe { + self._acceleration_structure_functions + .vkCmdCopyAccelerationStructureKHR(command_buffer, info) + 
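+            // for the build_* calls above: infos[i] pairs with range_infos[i],
+            // and each range slice needs one entry per geometry in infos[i]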
} + } + + #[inline] + pub fn cmd_copy_acceleration_structure_to_memory( + &self, + command_buffer: VkCommandBuffer, + info: &VkCopyAccelerationStructureToMemoryInfoKHR, + ) { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + unsafe { + self._acceleration_structure_functions + .vkCmdCopyAccelerationStructureToMemoryKHR(command_buffer, info) + } + } + + #[inline] + pub fn cmd_copy_memory_to_acceleration_structure( + &self, + command_buffer: VkCommandBuffer, + info: &VkCopyMemoryToAccelerationStructureInfoKHR, + ) { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + unsafe { + self._acceleration_structure_functions + .vkCmdCopyMemoryToAccelerationStructureKHR(command_buffer, info) + } + } + + #[inline] + pub fn cmd_trace_rays_indirect( + &self, + command_buffer: VkCommandBuffer, + raygen_shader_binding_table: &VkStridedDeviceAddressRegionKHR, + miss_shader_binding_table: &VkStridedDeviceAddressRegionKHR, + hit_shader_binding_table: &VkStridedDeviceAddressRegionKHR, + callable_shader_binding_table: &VkStridedDeviceAddressRegionKHR, + device_address: VkDeviceAddress, + ) { + debug_assert_eq!( + self.physical_device + .ray_tracing_features() + .rayTracingPipeline, + VK_TRUE + ); + + unsafe { + self._ray_tracing_pipeline_functions + .vkCmdTraceRaysIndirectKHR( + command_buffer, + raygen_shader_binding_table, + miss_shader_binding_table, + hit_shader_binding_table, + callable_shader_binding_table, + device_address, + ) + } + } + + #[inline] + pub fn cmd_trace_rays( + &self, + command_buffer: VkCommandBuffer, + raygen_shader_binding_table: &VkStridedDeviceAddressRegionKHR, + miss_shader_binding_table: &VkStridedDeviceAddressRegionKHR, + hit_shader_binding_table: &VkStridedDeviceAddressRegionKHR, + callable_shader_binding_table: &VkStridedDeviceAddressRegionKHR, + width: u32, + height: u32, + depth: u32, + ) { + debug_assert_eq!( + self.physical_device + .ray_tracing_features() + .rayTracingPipeline, + VK_TRUE + ); + + unsafe { + self._ray_tracing_pipeline_functions.vkCmdTraceRaysKHR( + command_buffer, + raygen_shader_binding_table, + miss_shader_binding_table, + hit_shader_binding_table, + callable_shader_binding_table, + width, + height, + depth, + ) + } + } + + #[inline] + pub fn cmd_write_acceleration_structure_properties( + &self, + command_buffer: VkCommandBuffer, + acceleration_structures: &[VkAccelerationStructureKHR], + query_type: VkQueryType, + query_pool: VkQueryPool, + first_query: u32, + ) { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + unsafe { + self._acceleration_structure_functions + .vkCmdWriteAccelerationStructuresPropertiesKHR( + command_buffer, + acceleration_structures.len() as u32, + acceleration_structures.as_ptr(), + query_type, + query_pool, + first_query, + ) + } + } + + #[inline] + pub fn create_acceleration_structure( + &self, + create_info: &VkAccelerationStructureCreateInfoKHR, + allocator: Option<&VkAllocationCallbacks>, + ) -> Result { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + unsafe { + let mut handle = MaybeUninit::uninit(); + + let result = self + ._acceleration_structure_functions + .vkCreateAccelerationStructureKHR( + self.device, + create_info, + match allocator { + Some(alloc) => alloc, + None => ptr::null(), + }, + handle.as_mut_ptr(), + ); + + if 
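+            // the four VkStridedDeviceAddressRegionKHR arguments to the
+            // trace-rays wrappers above select the raygen / miss / hit /
+            // callable sections of the shader binding table; unused regions
+            // may be zeroed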
result == VK_SUCCESS { + Ok(handle.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn create_ray_tracing_pipelines( + &self, + deferred_operation: Option, + pipeline_cache: Option, + pipeline_create_infos: &[VkRayTracingPipelineCreateInfoKHR], + allocator: Option<&VkAllocationCallbacks>, + ) -> Result> { + debug_assert_eq!( + self.physical_device + .ray_tracing_features() + .rayTracingPipeline, + VK_TRUE + ); + + unsafe { + let count = pipeline_create_infos.len() as usize; + + let mut pipelines = Vec::with_capacity(count); + pipelines.set_len(count); + + let result = self + ._ray_tracing_pipeline_functions + .vkCreateRayTracingPipelinesKHR( + self.device, + match deferred_operation { + Some(deferred_operation) => deferred_operation, + None => VkDeferredOperationKHR::NULL_HANDLE, + }, + match pipeline_cache { + Some(cache) => cache, + None => VkPipelineCache::NULL_HANDLE, + }, + pipeline_create_infos.len() as u32, + pipeline_create_infos.as_ptr(), + match allocator { + Some(alloc) => alloc, + None => ptr::null(), + }, + pipelines.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(pipelines) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn copy_acceleration_structure( + &self, + deferred_operation: Option, + info: &VkCopyAccelerationStructureInfoKHR, + ) -> Result<()> { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + let result = unsafe { + self._acceleration_structure_functions + .vkCopyAccelerationStructureKHR( + self.device, + match deferred_operation { + Some(deferred_operation) => deferred_operation, + None => VkDeferredOperationKHR::NULL_HANDLE, + }, + info, + ) + }; + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + + #[inline] + pub fn copy_acceleration_structure_to_memory( + &self, + deferred_operation: Option, + info: &VkCopyAccelerationStructureToMemoryInfoKHR, + ) -> Result<()> { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + let result = unsafe { + self._acceleration_structure_functions + .vkCopyAccelerationStructureToMemoryKHR( + self.device, + match deferred_operation { + Some(deferred_operation) => deferred_operation, + None => VkDeferredOperationKHR::NULL_HANDLE, + }, + info, + ) + }; + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + + #[inline] + pub fn destroy_acceleration_structure( + &self, + acceleration_structure: VkAccelerationStructureKHR, + allocator: Option<&VkAllocationCallbacks>, + ) { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + unsafe { + self._acceleration_structure_functions + .vkDestroyAccelerationStructureKHR( + self.device, + acceleration_structure, + match allocator { + Some(alloc) => alloc, + None => ptr::null(), + }, + ) + } + } + + #[inline] + pub fn get_acceleration_structure_device_address( + &self, + info: &VkAccelerationStructureDeviceAddressInfoKHR, + ) -> VkDeviceAddress { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + unsafe { + self._acceleration_structure_functions + .vkGetAccelerationStructureDeviceAddressKHR(self.device, info) + } + } + + #[inline] + pub fn get_device_acceleration_structure_compatibility( + &self, + version: &VkAccelerationStructureVersionInfoKHR, + ) -> 
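+    // get_acceleration_structure_device_address above supplies the value for
+    // VkAccelerationStructureInstanceKHR::accelerationStructureReference when
+    // instancing bottom-level structures into a top-level build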
VkAccelerationStructureCompatibilityKHR { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + unsafe { + let mut compatibility = MaybeUninit::zeroed(); + + self._acceleration_structure_functions + .vkGetDeviceAccelerationStructureCompatibilityKHR( + self.device, + version, + compatibility.as_mut_ptr(), + ); + + compatibility.assume_init() + } + } + + #[inline] + pub fn get_ray_tracing_capture_replay_shader_group_handles( + &self, + pipeline: VkPipeline, + first_group: u32, + group_count: u32, + data: &mut [T], + ) -> Result<()> { + debug_assert_eq!( + self.physical_device + .ray_tracing_features() + .rayTracingPipeline, + VK_TRUE + ); + + let result = unsafe { + self._ray_tracing_pipeline_functions + .vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( + self.device, + pipeline, + first_group, + group_count, + (data.len() * size_of::()) as isize, + data.as_mut_ptr() as *mut c_void, + ) + }; + + if result == VK_SUCCESS { + Ok(()) + } else { + Err(anyhow::Error::new(result)) + } + } + + pub fn get_acceleration_structure_build_sizes( + &self, + build_type: VkAccelerationStructureBuildTypeKHR, + build_info: &VkAccelerationStructureBuildGeometryInfoKHR, + max_primitive_counts: &u32, + ) -> VkAccelerationStructureBuildSizesInfoKHR { + debug_assert_eq!( + self.physical_device + .acceleration_structure_features() + .accelerationStructure, + VK_TRUE + ); + + unsafe { + let mut res = VkAccelerationStructureBuildSizesInfoKHR::new(0, 0, 0); + + self._acceleration_structure_functions + .vkGetAccelerationStructureBuildSizesKHR( + self.device, + build_type, + build_info, + max_primitive_counts, + &mut res, + ); + + res + } + } + + pub fn get_ray_tracing_shader_group_handles( + &self, + pipeline: VkPipeline, + first_group: u32, + group_count: u32, + shader_group_handle_size: u32, + ) -> Result> { + debug_assert_eq!( + self.physical_device + .ray_tracing_features() + .rayTracingPipeline, + VK_TRUE + ); + + unsafe { + let mut data = vec![255; (group_count * shader_group_handle_size) as usize]; + + let result = self + ._ray_tracing_pipeline_functions + .vkGetRayTracingShaderGroupHandlesKHR( + self.device, + pipeline, + first_group, + group_count, + data.len() as isize, + data.as_mut_ptr() as *mut c_void, + ); + + if result == VK_SUCCESS { + Ok(data) + } else { + Err(anyhow::Error::new(result)) + } + } + } +} + +// deferred operations +impl Device { + #[inline] + pub fn create_deferred_operation( + &self, + allocator: Option<&VkAllocationCallbacks>, + ) -> Result { + unsafe { + let mut handle = MaybeUninit::uninit(); + + let result = self + .deferred_operation_functions + .vkCreateDeferredOperationKHR( + self.device, + match allocator { + Some(alloc) => alloc, + None => ptr::null(), + }, + handle.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(handle.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_deferred_operation( + &self, + deferred_operation: VkDeferredOperationKHR, + allocator: Option<&VkAllocationCallbacks>, + ) { + unsafe { + self.deferred_operation_functions + .vkDestroyDeferredOperationKHR( + self.device, + deferred_operation, + match allocator { + Some(alloc) => alloc, + None => ptr::null(), + }, + ) + } + } + + #[inline] + pub fn get_deferred_operation_max_concurrency( + &self, + deferred_operation: VkDeferredOperationKHR, + ) -> u32 { + unsafe { + self.deferred_operation_functions + .vkGetDeferredOperationMaxConcurrencyKHR(self.device, deferred_operation) + } + } + 
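+    // Polling sketch for the deferred-operation helpers in this impl
+    // (VK_THREAD_IDLE_KHR is the spec's "come back later" code):
+    //
+    //     let op = device.create_deferred_operation(None)?;
+    //     // ... start a deferred build/copy with `op` ...
+    //     while device.deferred_operation_join(op) == VK_THREAD_IDLE_KHR {}
+    //     let status = device.get_deferred_operation_result(op);
+    //     device.destroy_deferred_operation(op, None);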
+
+    #[inline]
+    pub fn get_deferred_operation_result(
+        &self,
+        deferred_operation: VkDeferredOperationKHR,
+    ) -> VkResult {
+        unsafe {
+            self.deferred_operation_functions
+                .vkGetDeferredOperationResultKHR(self.device, deferred_operation)
+        }
+    }
+
+    #[inline]
+    pub fn deferred_operation_join(&self, deferred_operation: VkDeferredOperationKHR) -> VkResult {
+        unsafe {
+            self.deferred_operation_functions
+                .vkDeferredOperationJoinKHR(self.device, deferred_operation)
+        }
+    }
+}
diff --git a/vulkan-rs/src/fence.rs b/vulkan-rs/src/fence.rs
new file mode 100644
index 0000000..13f7207
--- /dev/null
+++ b/vulkan-rs/src/fence.rs
@@ -0,0 +1,88 @@
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use std::{sync::Arc, time::Duration};
+
+pub struct FenceBuilder {
+    signaled: bool,
+}
+
+impl FenceBuilder {
+    pub fn set_signaled(mut self, signaled: bool) -> Self {
+        self.signaled = signaled;
+
+        self
+    }
+
+    pub fn build(self, device: Arc<Device>) -> Result<Arc<Fence>> {
+        let flag: VkFenceCreateFlagBits = if self.signaled {
+            VK_FENCE_CREATE_SIGNALED_BIT.into()
+        } else {
+            0u32.into()
+        };
+
+        let fence_ci = VkFenceCreateInfo::new(flag);
+
+        let fence = device.create_fence(&fence_ci)?;
+
+        Ok(Arc::new(Fence { device, fence }))
+    }
+}
+
+#[derive(Debug)]
+pub struct Fence {
+    device: Arc<Device>,
+    fence: VkFence,
+}
+
+impl Fence {
+    pub fn builder() -> FenceBuilder {
+        FenceBuilder { signaled: false }
+    }
+
+    pub fn wait(self: &Arc<Self>, timeout: Duration) -> Result<()> {
+        self.device.wait_for_fences(&[self], true, timeout)
+    }
+
+    pub fn reset(&self) -> bool {
+        self.device.reset_fences(&[self.fence]).is_ok()
+    }
+}
+
+impl VulkanDevice for Fence {
+    fn device(&self) -> &Arc<Device> {
+        &self.device
+    }
+}
+
+impl_vk_handle!(Fence, VkFence, fence);
+
+impl Drop for Fence {
+    fn drop(&mut self) {
+        self.device.destroy_fence(self.fence);
+    }
+}
+
+use crate::{ffi::*, handle_ffi_result};
+
+#[no_mangle]
+pub extern "C" fn create_fence(signaled: bool, device: *const Device) -> *const Fence {
+    let device = unsafe { Arc::from_raw(device) };
+
+    let fence_res = Fence::builder().set_signaled(signaled).build(device);
+
+    handle_ffi_result!(fence_res)
+}
+
+#[no_mangle]
+pub extern "C" fn reset_fence(fence: *const Fence) -> bool {
+    // borrow the caller's fence without consuming its reference count;
+    // ownership is only transferred back in destroy_fence below
+    let fence = unsafe { Arc::from_raw(fence) };
+
+    let result = fence.reset();
+    let _ = Arc::into_raw(fence);
+
+    result
+}
+
+#[no_mangle]
+pub extern "C" fn destroy_fence(fence: *const Fence) {
+    let _fence = unsafe { Arc::from_raw(fence) };
+}
diff --git a/vulkan-rs/src/ffi.rs b/vulkan-rs/src/ffi.rs
new file mode 100644
index 0000000..cc4ffc3
--- /dev/null
+++ b/vulkan-rs/src/ffi.rs
@@ -0,0 +1,70 @@
+use std::cell::RefCell;
+use std::os::raw::{c_char, c_int};
+
+#[macro_export]
+macro_rules! handle_ffi_result {
+    ($result: expr) => {
+        match $result {
+            Ok(value) => Arc::into_raw(value),
+            Err(error) => {
+                update_last_error(error);
+
+                std::ptr::null()
+            }
+        }
+    };
+}
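+
+// `handle_ffi_result!` bridges Result<Arc<T>> to C: on Ok the Arc is leaked
+// through Arc::into_raw and handed out as a raw pointer, on Err the message
+// is parked in the thread-local below for later retrieval via
+// last_error_length / last_error_message.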
+thread_local! {
+    static LAST_ERROR: RefCell<Option<Box<String>>> = RefCell::new(None);
+}
+
+pub(crate) fn update_last_error(err: anyhow::Error) {
+    LAST_ERROR.with(|prev| {
+        *prev.borrow_mut() = Some(Box::new(format!("{:?}", err)));
+    });
+}
+
+pub(crate) fn take_last_error() -> Option<Box<String>> {
+    LAST_ERROR.with(|prev| prev.borrow_mut().take())
+}
+
+#[no_mangle]
+pub extern "C" fn last_error_length() -> c_int {
+    LAST_ERROR.with(|prev| match *prev.borrow() {
+        Some(ref err) => err.to_string().len() as c_int + 1,
+        None => 0,
+    })
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn last_error_message(buffer: *mut c_char, length: c_int) -> c_int {
+    if buffer.is_null() {
+        return -1;
+    }
+
+    let last_error = match take_last_error() {
+        Some(err) => err,
+        None => return 0,
+    };
+
+    let error_message = last_error.to_string();
+
+    let buffer = std::slice::from_raw_parts_mut(buffer as *mut u8, length as usize);
+
+    if error_message.len() >= buffer.len() {
+        return -1;
+    }
+
+    std::ptr::copy_nonoverlapping(
+        error_message.as_ptr(),
+        buffer.as_mut_ptr(),
+        error_message.len(),
+    );
+
+    // Add a trailing null so people using the string as a `char *` don't
+    // accidentally read into garbage.
+    buffer[error_message.len()] = 0;
+
+    error_message.len() as c_int
+}
diff --git a/vulkan-rs/src/framebuffer.rs b/vulkan-rs/src/framebuffer.rs
new file mode 100644
index 0000000..93ff619
--- /dev/null
+++ b/vulkan-rs/src/framebuffer.rs
@@ -0,0 +1,145 @@
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use std::sync::Arc;
+
+pub struct FramebufferBuilder<'a> {
+    render_pass: Option<&'a Arc<RenderPass>>,
+    attachments: Vec<&'a Arc<Image>>,
+    width: u32,
+    height: u32,
+    layers: u32,
+}
+
+impl<'a> FramebufferBuilder<'a> {
+    pub fn set_render_pass(mut self, render_pass: &'a Arc<RenderPass>) -> Self {
+        self.render_pass = Some(render_pass);
+
+        self
+    }
+
+    pub fn add_attachment(mut self, image: &'a Arc<Image>) -> Self {
+        self.attachments.push(image);
+
+        self
+    }
+
+    pub fn set_width(mut self, width: u32) -> Self {
+        self.width = width;
+
+        self
+    }
+
+    pub fn set_height(mut self, height: u32) -> Self {
+        self.height = height;
+
+        self
+    }
+
+    pub fn set_layer_count(mut self, layers: u32) -> Self {
+        self.layers = layers;
+
+        self
+    }
+
+    pub fn build(mut self, device: Arc<Device>) -> Result<Arc<Framebuffer>> {
+        if self.attachments.is_empty() {
+            panic!("no attachments added!");
+        }
+
+        // if width or height are not set, use the first attachment as a
+        // reference; this may not work if the images have different sizes
+        if self.width == 0 || self.height == 0 {
+            self.width = self.attachments[0].width();
+            self.height = self.attachments[0].height();
+        }
+
+        let mut image_views = Vec::with_capacity(self.attachments.len());
+        let mut images = Vec::with_capacity(self.attachments.len());
+
+        for attachment in self.attachments {
+            image_views.push(attachment.vk_handle());
+            images.push(attachment.clone());
+        }
+
+        let framebuffer_ci = VkFramebufferCreateInfo::new(
+            VK_FRAMEBUFFER_CREATE_NULL_BIT,
+            match self.render_pass {
+                Some(render_pass) => render_pass.vk_handle(),
+                None => panic!("no render pass set!"),
+            },
+            &image_views,
+            self.width,
+            self.height,
+            self.layers,
+        );
+
+        let framebuffer = device.create_framebuffer(&framebuffer_ci)?;
+
+        Ok(Arc::new(Framebuffer {
+            device,
+            framebuffer,
+            images,
+
+            width: self.width,
+            height: self.height,
+        }))
+    }
+}
+
+#[derive(Debug)]
+pub struct Framebuffer {
+    device: Arc<Device>,
+    framebuffer: VkFramebuffer,
+    images: Vec<Arc<Image>>,
+
+    width: u32,
+    height: u32,
+}
+
+impl Framebuffer {
+    pub fn builder<'a>() -> FramebufferBuilder<'a> {
+        FramebufferBuilder {
+            render_pass: None,
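+            // builder usage (sketch; `render_pass` and `image` are assumed
+            // to exist in the caller):
+            //
+            //     let fb = Framebuffer::builder()
+            //         .set_render_pass(&render_pass)
+            //         .add_attachment(&image)
+            //         .build(device.clone())?;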
attachments: Vec::new(), + width: 0, + height: 0, + layers: 1, + } + } + + pub fn width(&self) -> u32 { + self.width + } + + pub fn height(&self) -> u32 { + self.height + } + + pub fn attachments(&self) -> &[Arc] { + &self.images + } + + pub fn image(&self, index: usize) -> &Arc { + &self.images[index] + } + + pub fn image_count(&self) -> usize { + self.images.len() + } +} + +impl VulkanDevice for Framebuffer { + fn device(&self) -> &Arc { + &self.device + } +} + +impl_vk_handle!(Framebuffer, VkFramebuffer, framebuffer); + +impl Drop for Framebuffer { + fn drop(&mut self) { + self.device.destroy_framebuffer(self.framebuffer); + } +} diff --git a/vulkan-rs/src/image.rs b/vulkan-rs/src/image.rs new file mode 100644 index 0000000..5337176 --- /dev/null +++ b/vulkan-rs/src/image.rs @@ -0,0 +1,1163 @@ +use assetpath::AssetPath; + +use crate::prelude::*; + +use anyhow::Result; + +use std::cmp; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +enum ImageSourceType { + Empty, + Raw(Vec), + Array(Vec>), +} + +struct ImageCreateInfo { + vk_image_create_info: VkImageCreateInfo, + + source_type: ImageSourceType, +} + +impl ImageCreateInfo { + fn default(source_type: ImageSourceType) -> Self { + ImageCreateInfo { + vk_image_create_info: VkImageCreateInfo::new( + 0, + VK_IMAGE_TYPE_2D, + VK_FORMAT_UNDEFINED, + VkExtent3D { + width: 0, + height: 0, + depth: 0, + }, + 1, + 1, + VK_SAMPLE_COUNT_1_BIT, + VK_IMAGE_TILING_OPTIMAL, + VK_IMAGE_USAGE_TRANSFER_DST_BIT, + VK_SHARING_MODE_EXCLUSIVE, + &[], + VK_IMAGE_LAYOUT_UNDEFINED, + ), + + source_type, + } + } +} + +struct PreinitializedImage { + image: VkImage, + format: VkFormat, + + width: u32, + height: u32, + + layers: u32, + sample_count: VkSampleCountFlagBits, + layout: VkImageLayout, + usage: VkImageUsageFlagBits, + assume_layout: bool, +} + +enum ImageBuilderInternalType { + PreinitializedImage(PreinitializedImage), + NewImage(ImageCreateInfo), +} + +/// Implements the builder pattern for Image +pub struct ImageBuilder { + file_name: Option, + builder_type: ImageBuilderInternalType, + components: VkComponentMapping, + view_type: VkImageViewType, + subresource_range: VkImageSubresourceRange, + + sampler: Option>, +} + +impl ImageBuilder { + /// Sets up the ImageBuilder for further use + fn new(internal_type: ImageBuilderInternalType) -> Self { + ImageBuilder { + file_name: None, + builder_type: internal_type, + components: VkComponentMapping::default(), + subresource_range: VkImageSubresourceRange { + aspectMask: VK_IMAGE_ASPECT_COLOR_BIT.into(), + baseMipLevel: 0, + levelCount: 1, + baseArrayLayer: 0, + layerCount: 1, + }, + view_type: VK_IMAGE_VIEW_TYPE_2D, + + sampler: None, + } + } + + pub fn build(self, device: &Arc, queue: &Arc>) -> Result> { + let mut image_view_ci = self.vk_image_view_create_info(); + + match self.builder_type { + ImageBuilderInternalType::PreinitializedImage(preinitialized_image) => { + image_view_ci.image = preinitialized_image.image; + + let image_view = device.create_image_view(&image_view_ci)?; + + let image = Arc::new(Image { + device: device.clone(), + queue: queue.clone(), + + image: preinitialized_image.image, + image_view, + _memory: None, + attached: true, + sampler: self.sampler, + + file_name: self.file_name, + + format: preinitialized_image.format, + image_layout: Mutex::new(VK_IMAGE_LAYOUT_UNDEFINED), + + aspect_mask: self.subresource_range.aspectMask, + + width: preinitialized_image.width, + height: preinitialized_image.height, + layers: preinitialized_image.layers, + levels: 1, + sample_count: 
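+                    // construction normally starts from Image::from_raw /
+                    // from_file / from_preinitialized further down; e.g.
+                    // (sketch, `path` and `sampler` assumed to exist):
+                    //
+                    //     let image = Image::from_file(path)?
+                    //         .attach_sampler(sampler)
+                    //         .build(&device, &queue)?;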
preinitialized_image.sample_count, + _usage: preinitialized_image.usage, + }); + + // TODO: check necessity + if !preinitialized_image.assume_layout { + if preinitialized_image.layout != VK_IMAGE_LAYOUT_UNDEFINED { + Image::convert_layout(&image, preinitialized_image.layout)?; + } + } else { + *image.image_layout.lock().unwrap() = preinitialized_image.layout; + } + + Ok(image) + } + ImageBuilderInternalType::NewImage(ref info) => match info.source_type { + ImageSourceType::Array(ref array) => { + let arc_image = Self::create_from_source( + device, + queue, + info, + self.sampler, + image_view_ci, + self.file_name, + )?; + + copy_images_to_imagearray(device, queue, &arc_image, array)?; + + Ok(arc_image) + } + ImageSourceType::Raw(ref raw) => { + let arc_image = Self::create_from_source( + device, + queue, + info, + self.sampler, + image_view_ci, + self.file_name, + )?; + + Self::optimize_fill(device, queue, raw, &arc_image)?; + + Ok(arc_image) + } + ImageSourceType::Empty => { + let arc_image = Self::create_from_source( + device, + queue, + info, + self.sampler, + image_view_ci, + self.file_name, + )?; + + Ok(arc_image) + } + }, + } + } + + pub fn check_configuration(&self, device: &Arc) -> bool { + match &self.builder_type { + ImageBuilderInternalType::NewImage(create_info) => Image::check_configuration( + device, + create_info.vk_image_create_info.tiling, + create_info.vk_image_create_info.format, + create_info.vk_image_create_info.usage, + ), + _ => false, + } + } + + pub fn view_type(mut self, view_type: VkImageViewType) -> Self { + self.view_type = view_type; + + self + } + + pub fn component_swizzle( + mut self, + r: VkComponentSwizzle, + g: VkComponentSwizzle, + b: VkComponentSwizzle, + a: VkComponentSwizzle, + ) -> Self { + self.components.r = r; + self.components.g = g; + self.components.b = b; + self.components.a = a; + + self + } + + pub fn update_data(mut self, data: Vec) -> Self { + match self.builder_type { + ImageBuilderInternalType::NewImage(ref mut info) => match info.source_type { + ImageSourceType::Raw(ref mut old_data) => *old_data = data, + _ => panic!("wrong source type in ImageBuilder"), + }, + _ => panic!("wrong builder type in ImageBuilder"), + } + + self + } + + pub fn format(mut self, format: VkFormat) -> Self { + match &mut self.builder_type { + ImageBuilderInternalType::NewImage(info) => { + info.vk_image_create_info.format = format; + } + ImageBuilderInternalType::PreinitializedImage(preinitialized_image) => { + preinitialized_image.format = format; + } + } + + self + } + + pub fn sample_count(mut self, sample_count: impl Into) -> Self { + match &mut self.builder_type { + ImageBuilderInternalType::NewImage(info) => { + info.vk_image_create_info.samples = sample_count.into(); + } + ImageBuilderInternalType::PreinitializedImage(preinitialized_image) => { + preinitialized_image.sample_count = sample_count.into(); + } + } + + self + } + + pub fn add_usage(mut self, usage: T) -> Self + where + T: Into, + { + match self.builder_type { + ImageBuilderInternalType::NewImage(ref mut info) => { + info.vk_image_create_info.usage |= usage.into(); + } + _ => panic!("wrong builder type in ImageBuilder"), + } + + self + } + + pub fn flags(mut self, flags: T) -> Self + where + T: Into, + { + match self.builder_type { + ImageBuilderInternalType::NewImage(ref mut info) => { + info.vk_image_create_info.flags = flags.into(); + } + _ => panic!("wrong builder type in ImageBuilder"), + } + + self + } + + pub fn array_layers(mut self, layers: u32) -> Self { + match &mut 
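+        // note: update_data, add_usage and flags above panic when invoked on
+        // the wrong builder variant; they only apply to
+        // ImageBuilderInternalType::NewImage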
self.builder_type { + ImageBuilderInternalType::NewImage(info) => { + info.vk_image_create_info.arrayLayers = layers; + self.subresource_range.layerCount = layers; + } + ImageBuilderInternalType::PreinitializedImage(preinitialized_image) => { + preinitialized_image.layers = layers; + } + } + + self + } + + pub fn attach_sampler(mut self, sampler: Arc) -> Self { + self.sampler = Some(sampler); + + if let ImageBuilderInternalType::NewImage(ref mut info) = self.builder_type { + info.vk_image_create_info.usage |= VK_IMAGE_USAGE_SAMPLED_BIT; + } + + self + } + + // pub fn mip_map_levels(mut self, levels: u32) -> Self { + // match self.builder_type { + // ImageBuilderInternalType::NewImage(ref mut info) => { + // info.vk_image_create_info.mipLevels = levels; + // self.subresource_range.levelCount = levels; + + // info.vk_image_create_info.usage |= + // VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; + + // if let Some(ref mut sampler) = self.sampler_info { + // sampler.maxLod = levels as f32; + // } + // } + // _ => panic!("wrong builder type in ImageBuilder"), + // } + + // self + // } + + pub fn max_mip_map_levels(mut self) -> Self { + match self.builder_type { + ImageBuilderInternalType::NewImage(ref mut info) => { + let levels = Self::calc_mip_map_levels( + info.vk_image_create_info.extent.width, + info.vk_image_create_info.extent.height, + ); + + info.vk_image_create_info.mipLevels = levels; + self.subresource_range.levelCount = levels; + + info.vk_image_create_info.usage |= + VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT; + } + _ => panic!("wrong builder type in ImageBuilder"), + } + + self + } + + pub fn aspect_mask(mut self, mask: VkImageAspectFlags) -> Self { + self.subresource_range.aspectMask = mask.into(); + + self + } + + fn calc_mip_map_levels(width: u32, height: u32) -> u32 { + 1 + (cmp::max(width, height) as f32).log2().floor() as u32 + } + + fn vk_image_view_create_info(&self) -> VkImageViewCreateInfo { + VkImageViewCreateInfo::new( + 0, + VkImage::NULL_HANDLE, + self.view_type, + match &self.builder_type { + ImageBuilderInternalType::NewImage(info) => info.vk_image_create_info.format, + ImageBuilderInternalType::PreinitializedImage(preinitialized_image) => { + preinitialized_image.format + } + }, + self.components.clone(), + self.subresource_range.clone(), + ) + } + + fn create_from_source( + device: &Arc, + queue: &Arc>, + info: &ImageCreateInfo, + sampler: Option>, + mut view_ci: VkImageViewCreateInfo, + file_name: Option, + ) -> Result> { + let format = view_ci.format; + + let (image, memory) = Self::create_texture(device, &info.vk_image_create_info)?; + + view_ci.image = image; + + let image_view = device.create_image_view(&view_ci)?; + + Ok(Arc::new(Image { + device: device.clone(), + queue: queue.clone(), + + image, + image_view, + attached: false, + _memory: Some(memory), + sampler, + + file_name, + + format, + image_layout: Mutex::new(info.vk_image_create_info.initialLayout), + + aspect_mask: view_ci.subresourceRange.aspectMask, + + width: info.vk_image_create_info.extent.width, + height: info.vk_image_create_info.extent.height, + layers: info.vk_image_create_info.arrayLayers, + levels: info.vk_image_create_info.mipLevels, + sample_count: info.vk_image_create_info.samples, + _usage: info.vk_image_create_info.usage, + })) + } + + fn create_texture( + device: &Arc, + image_ci: &VkImageCreateInfo, + ) -> Result<(VkImage, Arc>)> { + let image = Self::create_image(device, image_ci)?; + let memory = Memory::image_memory( + device, + 
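+            // calc_mip_map_levels above is floor(log2(max(w, h))) + 1, so a
+            // 1024x512 image yields 11 levels (1024, 512, ..., 1)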
image, + MemoryUsage::into_vma(Some(MemoryUsage::GpuOnly)), + )?; + + Ok((image, memory)) + } + + fn create_image(device: &Arc<Device>, image_ci: &VkImageCreateInfo) -> Result<VkImage> { + device.create_image(image_ci) + } + + fn optimize_fill( + device: &Arc<Device>, + queue: &Arc<Mutex<Queue>>, + data: &[u8], + image: &Arc<Image>, + ) -> Result<()> { + let staging_buffer = Buffer::builder() + .set_usage(VK_BUFFER_USAGE_TRANSFER_SRC_BIT) + .set_memory_usage(MemoryUsage::CpuToGpu) + .set_data(data) + .build(device.clone())?; + + copy_buffer_to_image(device, queue, &staging_buffer, image)?; + + Ok(()) + } +} + +/// Wrapper type around VkImage +/// +/// handles the VkImage, VkImageView, device memory and optional VkSampler +/// internally, just as you configured it +#[derive(Debug)] +pub struct Image { + // device handle + device: Arc<Device>, + + // queue handle + queue: Arc<Mutex<Queue>>, + + file_name: Option<AssetPath>, + + // image handle + attached: bool, + image: VkImage, + + // image_view + image_view: VkImageView, + + // optional handles + _memory: Option<Arc<Memory<VkImage>>>, + sampler: Option<Arc<Sampler>>, + + // image information + format: VkFormat, + image_layout: Mutex<VkImageLayout>, + + aspect_mask: VkImageAspectFlagBits, + width: u32, + height: u32, + layers: u32, // array layers + levels: u32, // mip map levels + sample_count: VkSampleCountFlagBits, + _usage: VkImageUsageFlagBits, +} + +impl Image { + /// Creates an `ImageBuilder` where you can define the image for your needs + /// + /// For example, this is used to wrap swapchain images + /// + /// # Arguments + /// + /// * `image` - valid VkImage handle + /// * `format` - format of this image + pub fn from_preinitialized( + image: VkImage, + format: VkFormat, + width: u32, + height: u32, + layout: VkImageLayout, + usage: impl Into<VkImageUsageFlagBits>, + assume_layout: bool, + ) -> ImageBuilder { + ImageBuilder::new(ImageBuilderInternalType::PreinitializedImage( + PreinitializedImage { + image, + format, + width, + height, + layers: 1, + sample_count: VK_SAMPLE_COUNT_1_BIT.into(), + layout, + usage: usage.into(), + assume_layout, + }, + )) + } + + /// Creates an `ImageBuilder` where you can define the image for your needs + /// + /// takes the image data in form of a `Vec<u8>` and sets up the `ImageBuilder` + /// for further configuration + /// + /// # Arguments + /// + /// * `source` - The color information for the image + /// * `width` - The target width of the image + /// * `height` - The target height of the image + pub fn from_raw(source: Vec<u8>, width: u32, height: u32) -> ImageBuilder { + let mut create_info = ImageCreateInfo::default(ImageSourceType::Raw(source)); + create_info.vk_image_create_info.extent.width = width; + create_info.vk_image_create_info.extent.height = height; + create_info.vk_image_create_info.extent.depth = 1; + create_info.vk_image_create_info.usage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT; + + ImageBuilder::new(ImageBuilderInternalType::NewImage(create_info)) + } + + /// Creates an `ImageBuilder` where you can define the image for your needs + /// + /// takes a path to a file and does the same as `from_raw`, but it + /// extracts all needed bits from the file + /// + /// # Arguments + /// + /// * `file` - The path to the file + pub fn from_file(file: AssetPath) -> Result<ImageBuilder> { + let texture = match image::open(&file.full_path()) { + Ok(i) => i.to_rgba8(), + Err(err) => return Err(anyhow::Error::new(err).context(file.full_path())), + }; + + let (width, height) = texture.dimensions(); + + let mut builder = + Self::from_raw(texture.into_raw(), width, height).format(VK_FORMAT_R8G8B8A8_UNORM); + + builder.file_name = Some(file); + + Ok(builder) + } + + /// Creates an 
`ImageBuilder` where you can define the image for your needs + /// + /// takes a byte slice and does the same as `from_raw`, but it + /// extracts all needed bits from the slice + /// + /// # Usage + /// + /// ```ignore + /// let mut image_builder = Image::from_slice(include_bytes!("path/to/file"))?; + /// ``` + /// + /// # Arguments + /// + /// * `slice` - Slice of bytes + pub fn from_slice(data: &[u8]) -> Result<ImageBuilder> { + let texture = image::load_from_memory(data)?.to_rgba8(); + + let (width, height) = texture.dimensions(); + + Ok(Self::from_raw(texture.into_raw(), width, height).format(VK_FORMAT_R8G8B8A8_UNORM)) + } + + /// Creates an `ImageBuilder` where you can define the image for your needs + /// + /// takes an array of `Arc<Image>`s and sets up the `ImageBuilder` to create + /// a single `Arc<Image>` holding a 2D image array created from the provided images + /// + /// # Arguments + /// + /// * `array` - Source images + pub fn from_array(array: Vec<Arc<Image>>) -> ImageBuilder { + debug_assert!(!array.is_empty(), "images array must not be empty"); + + let width = array[0].width(); + let height = array[0].height(); + + if cfg!(debug_assertions) { + for image in &array { + if width != image.width() || height != image.height() { + panic!("images are not equally sized"); + } + } + } + + let array_len = array.len() as u32; + let mut create_info = ImageCreateInfo::default(ImageSourceType::Array(array)); + create_info.vk_image_create_info.arrayLayers = array_len; + create_info.vk_image_create_info.imageType = VK_IMAGE_TYPE_2D; + create_info.vk_image_create_info.extent.width = width; + create_info.vk_image_create_info.extent.height = height; + create_info.vk_image_create_info.extent.depth = 1; + + let mut image_builder = ImageBuilder::new(ImageBuilderInternalType::NewImage(create_info)); + image_builder.view_type = VK_IMAGE_VIEW_TYPE_2D_ARRAY; + image_builder.subresource_range.layerCount = array_len; + + image_builder + } + + /// Creates an `ImageBuilder` where you can define the image for your needs + /// + /// takes raw information to set up the `ImageBuilder`, which creates an + /// `Arc<Image>` with no color information + /// + /// # Arguments + /// + /// * `width` - The target width of the image + /// * `height` - The target height of the image + /// * `usage` - `VkImageUsageFlagBits` mask to define the image usage + /// * `sample_count` - `VkSampleCountFlags` to define the image's sample count + pub fn empty( + width: u32, + height: u32, + usage: impl Into<VkImageUsageFlagBits>, + sample_count: VkSampleCountFlags, + ) -> ImageBuilder { + let mut create_info = ImageCreateInfo::default(ImageSourceType::Empty); + create_info.vk_image_create_info.samples = sample_count.into(); + create_info.vk_image_create_info.extent.width = width; + create_info.vk_image_create_info.extent.height = height; + create_info.vk_image_create_info.extent.depth = 1; + create_info.vk_image_create_info.usage = usage.into(); + + ImageBuilder::new(ImageBuilderInternalType::NewImage(create_info)) + } + + pub fn check_configuration( + device: &Arc<Device>, + tiling: VkImageTiling, + format: VkFormat, + usage: impl Into<VkImageUsageFlagBits>, + ) -> bool { + let physical_device = device.physical_device(); + + match tiling { + VK_IMAGE_TILING_OPTIMAL => physical_device.check_optimal_format_features(format, usage), + VK_IMAGE_TILING_LINEAR => physical_device.check_linear_format_features(format, usage), + } + } + + pub fn device(&self) -> &Arc<Device> { + &self.device + } + + pub fn queue(&self) -> &Arc<Mutex<Queue>> { + &self.queue + } + + pub fn file_name(&self) -> Option<&AssetPath> { + self.file_name.as_ref() + } + + pub fn convert_layout(me: &Arc<Image>, 
target_layout: VkImageLayout) -> Result<()> { + into_layout(me, target_layout) + } + + pub fn vk_format(&self) -> VkFormat { + self.format + } + + pub fn sampler(&self) -> &Option<Arc<Sampler>> { + &self.sampler + } + + pub fn width(&self) -> u32 { + self.width + } + + pub fn height(&self) -> u32 { + self.height + } + + pub fn layers(&self) -> u32 { + self.layers + } + + pub fn levels(&self) -> u32 { + self.levels + } + + pub fn sample_count(&self) -> VkSampleCountFlagBits { + self.sample_count + } + + pub fn image_layout(&self) -> VkImageLayout { + *self.image_layout.lock().unwrap() + } + + pub fn set_image_layout(&self, layout: VkImageLayout) { + *self.image_layout.lock().unwrap() = layout; + } + + pub fn full_resource_range(&self) -> VkImageSubresourceRange { + VkImageSubresourceRange { + aspectMask: self.aspect_mask, + baseMipLevel: 0, + levelCount: self.levels, + baseArrayLayer: 0, + layerCount: self.layers, + } + } + + pub fn full_resource_layers(&self) -> VkImageSubresourceLayers { + VkImageSubresourceLayers { + aspectMask: self.aspect_mask, + mipLevel: 0, + baseArrayLayer: 0, + layerCount: self.layers, + } + } + + pub fn src_layout_to_access(image_layout: VkImageLayout) -> VkAccessFlagBits { + match image_layout { + VK_IMAGE_LAYOUT_UNDEFINED => 0u32.into(), + VK_IMAGE_LAYOUT_PREINITIALIZED => VK_ACCESS_HOST_WRITE_BIT.into(), + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL => VK_ACCESS_TRANSFER_WRITE_BIT.into(), + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL => VK_ACCESS_TRANSFER_READ_BIT.into(), + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL => VK_ACCESS_SHADER_READ_BIT.into(), + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL => VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT.into(), + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR => VK_ACCESS_MEMORY_READ_BIT.into(), + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL => { + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT + | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT + } + VK_IMAGE_LAYOUT_GENERAL => VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, + _ => unimplemented!("source image layout ({:?})", image_layout), + } + } + + pub fn dst_layout_to_access(image_layout: VkImageLayout) -> VkAccessFlagBits { + match image_layout { + VK_IMAGE_LAYOUT_UNDEFINED => { + panic!("target image layout must not be VK_IMAGE_LAYOUT_UNDEFINED") + } + VK_IMAGE_LAYOUT_PREINITIALIZED => { + panic!("target image layout must not be VK_IMAGE_LAYOUT_PREINITIALIZED") + } + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL => VK_ACCESS_TRANSFER_WRITE_BIT.into(), + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL => VK_ACCESS_TRANSFER_READ_BIT.into(), + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL => VK_ACCESS_SHADER_READ_BIT.into(), + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL => VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT.into(), + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR => VK_ACCESS_MEMORY_READ_BIT.into(), + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL => { + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT + | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT + } + VK_IMAGE_LAYOUT_GENERAL => VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL => VK_ACCESS_SHADER_READ_BIT.into(), + _ => unimplemented!("target image layout ({:?})", image_layout), + } + } +} + +impl VulkanDevice for Image { + fn device(&self) -> &Arc<Device> { + &self.device + } +} + +impl_vk_handle!(Image, VkImage, image); +impl_vk_handle!(Image, VkImageView, image_view); + +impl Drop for Image { + fn drop(&mut self) { + self.device.destroy_image_view(self.image_view); + + if !self.attached { + self.device.destroy_image(self.image); + } + } +} + +fn into_layout(image: &Arc<Image>, layout: VkImageLayout) -> Result<()> { + // 
create a new command buffer + let command_buffer = + CommandBuffer::new_primary().build(image.device.clone(), image.queue.clone())?; + + { + // begin recording into this command buffer + let mut buffer_recorder = command_buffer.begin(VkCommandBufferBeginInfo::new( + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + ))?; + + // subresource information + let subresource_range = VkImageSubresourceRange { + aspectMask: image.aspect_mask, + baseMipLevel: 0, + levelCount: image.levels(), + baseArrayLayer: 0, + layerCount: image.layers(), + }; + + // change image layout + buffer_recorder.set_image_layout(image, layout, subresource_range); + } + + // submit current queue + let submit = SubmitInfo::default().add_command_buffer(&command_buffer); + let fence = Fence::builder().build(image.device.clone())?; + + image + .queue + .lock() + .map_err(|_| anyhow::Error::msg("Failed getting vulkan device queue lock"))? + .submit(Some(&fence), &[submit])?; + + image + .device + .wait_for_fences(&[&fence], true, Duration::from_secs(1))?; + + Ok(()) +} + +fn copy_buffer_to_image( + device: &Arc, + queue: &Arc>, + buffer: &Arc>, + image: &Arc, +) -> Result<()> +where + T: Copy, +{ + // create a new command buffer + let command_buffer = CommandBuffer::new_primary().build(device.clone(), queue.clone())?; + + { + // begin recording into this command buffer + let mut buffer_recorder = command_buffer.begin(VkCommandBufferBeginInfo::new( + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + ))?; + + // copy info for copying the content of the buffer into the image + let buffer_image_copy = VkBufferImageCopy { + bufferOffset: 0, + bufferRowLength: 0, + bufferImageHeight: 0, + imageSubresource: VkImageSubresourceLayers { + aspectMask: VK_IMAGE_ASPECT_COLOR_BIT.into(), + mipLevel: 0, + baseArrayLayer: 0, + layerCount: 1, + }, + imageOffset: VkOffset3D { x: 0, y: 0, z: 0 }, + imageExtent: VkExtent3D { + width: image.width(), + height: image.height(), + depth: 1, + }, + }; + + // subresource information + let mut subresource_range = VkImageSubresourceRange { + aspectMask: VK_IMAGE_ASPECT_COLOR_BIT.into(), + baseMipLevel: 0, + levelCount: image.levels(), + baseArrayLayer: 0, + layerCount: 1, + }; + + // set image layout to receive content + buffer_recorder.set_image_layout( + image, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + subresource_range.clone(), + ); + + // the actual copy command + buffer_recorder.copy_buffer_to_image( + buffer, + image, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + &[buffer_image_copy], + ); + + // just transition one mip level at a time + subresource_range.levelCount = 1; + + // mip map creation + if image.levels() > 1 { + blit_mip_maps( + &mut buffer_recorder, + image, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + ); + } else { + // set image to be usable inside a shader + buffer_recorder.set_image_layout( + image, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + subresource_range, + ); + } + } + + // submit current queue + let submit = SubmitInfo::default().add_command_buffer(&command_buffer); + let fence = Fence::builder().build(device.clone())?; + + queue + .lock() + .map_err(|_| anyhow::Error::msg("Failed getting vulkan device queue lock"))? 
+ .submit(Some(&fence), &[submit])?; + + device.wait_for_fences(&[&fence], true, Duration::from_secs(1))?; + + Ok(()) +} + +fn copy_images_to_imagearray( + device: &Arc, + queue: &Arc>, + image_array: &Arc, + images: &[Arc], +) -> Result<()> { + // create a new command buffer + let command_buffer = CommandBuffer::new_primary().build(device.clone(), queue.clone())?; + + { + // set command buffer buffer in recording state + let mut buffer_recorder = command_buffer.begin(VkCommandBufferBeginInfo::new( + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, + ))?; + + // subresource range of the receiving image + let array_subresource_range = VkImageSubresourceRange { + aspectMask: VK_IMAGE_ASPECT_COLOR_BIT.into(), + baseMipLevel: 0, + levelCount: image_array.levels(), + baseArrayLayer: 0, + layerCount: image_array.layers(), + }; + + // set the target image into receiving state + buffer_recorder.set_image_layout( + image_array, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + array_subresource_range.clone(), + ); + + for (i, image) in images.iter().enumerate() { + // if source and target image have the same count of + // mip maps or the source has more mip maps, + // we can just copy every mip level into the + // correct target level and layer + if image.levels() >= image_array.levels() { + for k in 0..image_array.levels() { + copy_image_to_image(&mut buffer_recorder, image, image_array, k, i as u32); + } + // if the source image has less mip maps than the target image, + // we just gonna copy the first level and blit the rest + } else { + copy_image_to_image(&mut buffer_recorder, image, image_array, 0, i as u32); + blit_mip_maps( + &mut buffer_recorder, + image_array, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + ); + } + } + + // set the target image into a shader usable state + buffer_recorder.set_image_layout( + image_array, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + array_subresource_range, + ); + } + + // submit current queue + let submit = SubmitInfo::default().add_command_buffer(&command_buffer); + let fence = Fence::builder().build(device.clone())?; + + queue + .lock() + .map_err(|_| anyhow::Error::msg("Failed getting vulkan device queue lock"))? 
+ .submit(Some(&fence), &[submit])?; + + device.wait_for_fences(&[&fence], true, Duration::from_secs(1))?; + + Ok(()) +} + +fn copy_image_to_image( + buffer_recorder: &mut CommandBufferRecorder<'_>, + src_image: &Arc, + dst_image: &Arc, + mip_level: u32, + dst_layer: u32, +) { + // copy information to get every source into the right target slot + let image_copy = VkImageCopy { + srcSubresource: VkImageSubresourceLayers { + aspectMask: VK_IMAGE_ASPECT_COLOR_BIT.into(), + mipLevel: mip_level, + baseArrayLayer: 0, + layerCount: 1, + }, + srcOffset: VkOffset3D { x: 0, y: 0, z: 0 }, + dstSubresource: VkImageSubresourceLayers { + aspectMask: VK_IMAGE_ASPECT_COLOR_BIT.into(), + mipLevel: mip_level, + baseArrayLayer: dst_layer, + layerCount: 1, + }, + dstOffset: VkOffset3D { x: 0, y: 0, z: 0 }, + extent: VkExtent3D { + width: src_image.width(), + height: src_image.height(), + depth: 1, + }, + }; + + let subresource_range = VkImageSubresourceRange { + aspectMask: VK_IMAGE_ASPECT_COLOR_BIT.into(), + baseMipLevel: mip_level, + levelCount: 1, + baseArrayLayer: 0, + layerCount: 1, + }; + + // set the source image into sending state + buffer_recorder.set_image_layout( + src_image, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + subresource_range.clone(), + ); + + // copy the source data into the target slot + buffer_recorder.copy_image( + src_image, + dst_image, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + &[image_copy], + ); + + // set the source image back to a usable state for shaders + buffer_recorder.set_image_layout( + src_image, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + subresource_range, + ); +} + +fn blit_mip_maps( + buffer_recorder: &mut CommandBufferRecorder<'_>, + image: &Arc, + target_image_layout: VkImageLayout, +) { + let mut mip_width = image.width(); + let mut mip_height = image.height(); + + // subresource information + let mut subresource_range = VkImageSubresourceRange { + aspectMask: VK_IMAGE_ASPECT_COLOR_BIT.into(), + baseMipLevel: 0, + levelCount: 1, + baseArrayLayer: 0, + layerCount: 1, + }; + + for i in 1..image.levels() { + let source_mip_level = i - 1; + let target_mip_level = i; + + // transition the previous mip level from destination to source + subresource_range.baseMipLevel = source_mip_level; + buffer_recorder.set_image_layout( + image, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + subresource_range.clone(), + ); + + // create the blit information to blit the data from one mip level to another + let image_blit = VkImageBlit { + srcSubresource: VkImageSubresourceLayers { + aspectMask: VK_IMAGE_ASPECT_COLOR_BIT.into(), + mipLevel: source_mip_level, + baseArrayLayer: 0, + layerCount: 1, + }, + srcOffsets: [ + VkOffset3D { x: 0, y: 0, z: 0 }, + VkOffset3D { + x: mip_width as i32, + y: mip_height as i32, + z: 1, + }, + ], + dstSubresource: VkImageSubresourceLayers { + aspectMask: VK_IMAGE_ASPECT_COLOR_BIT.into(), + mipLevel: target_mip_level, + baseArrayLayer: 0, + layerCount: 1, + }, + dstOffsets: [ + VkOffset3D { x: 0, y: 0, z: 0 }, + VkOffset3D { + x: if mip_width > 1 { + mip_width as i32 / 2 + } else { + 1 + }, + y: if mip_height > 1 { + mip_height as i32 / 2 + } else { + 1 + }, + z: 1, + }, + ], + }; + + // execute the actual blit + buffer_recorder.blit_image( + image, + image, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + &[image_blit], + VK_FILTER_LINEAR, + ); + + // set mip level i - 1 to target layout + buffer_recorder.set_image_layout(image, target_image_layout, subresource_range.clone()); + + 
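+        // worked example of what this loop produces for a 1024x512 source
+        // (calc_mip_map_levels(1024, 512) = 1 + floor(log2(1024)) = 11 levels):
+        //   level 0: 1024x512 -> level 1: 512x256 -> ... -> level 10: 1x1,
+        //   each level blitted from the previous one with linear filtering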
image.set_image_layout(VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); + + mip_width = if mip_width > 1 { mip_width / 2 } else { 1 }; + mip_height = if mip_height > 1 { mip_height / 2 } else { 1 }; + } + + // set last level to be target layout + subresource_range.baseMipLevel = image.levels() - 1; + buffer_recorder.set_image_layout(image, target_image_layout, subresource_range); +} diff --git a/vulkan-rs/src/instance.rs b/vulkan-rs/src/instance.rs new file mode 100644 index 0000000..4e13812 --- /dev/null +++ b/vulkan-rs/src/instance.rs @@ -0,0 +1,1128 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::collections::HashSet; +use std::fmt; +use std::mem::MaybeUninit; +use std::ptr; +use std::sync::Arc; + +use std::os::raw::c_char; +use std::os::raw::c_void; + +use std::ffi::CStr; +use std::ffi::CString; + +Extensions!(InstanceExtensions, { + (xlib_surface, "VK_KHR_xlib_surface"), + (wayland_surface, "VK_KHR_wayland_surface"), + (android_surface, "VK_KHR_android_surface"), + (macos_surface, "VK_KHR_macos_surface"), + (win32_surface, "VK_KHR_win32_surface"), + (surface, "VK_KHR_surface"), + (physical_device_properties2, "VK_KHR_get_physical_device_properties2"), +}); + +#[derive(Copy, Clone, Eq, PartialEq, Debug, Default)] +pub struct VulkanDebugInfo { + pub debugging: bool, + pub steam_layer: bool, + pub verbose: bool, + pub renderdoc: bool, +} + +pub struct Instance { + _static_functions: StaticFunctions, + _entry_functions: EntryFunctions, + pub(crate) instance_functions: InstanceFunctions, + instance_wsi_functions: InstanceWSIFunctions, + physical_device_properties2_functions: PhysicalDeviceProperties2Functions, + + debug_report_callback_functions: DebugReportCallbackFunctions, + instance: VkInstance, + + instance_extensions: InstanceExtensions, + + debug_report: Option, + + api_version: u32, +} + +struct Layer { + props: Vec, +} + +impl Layer { + fn create(entry_functions: &EntryFunctions) -> Result { + Ok(Layer { + props: Instance::enumerate_layer_properties( + entry_functions.vkEnumerateInstanceLayerProperties, + )?, + }) + } + + fn names( + &self, + debugging: bool, + steam_layer: bool, + verbose: bool, + renderdoc: bool, + ) -> Result> { + let mut names = Vec::new(); + + for i in 0..self.props.len() { + let name_string = self.props[i].layer_name()?; + let name = name_string.as_str(); + + if debugging && name == "VK_LAYER_KHRONOS_validation" { + names.push(name_string.clone()); + } + + if verbose && name == "VK_LAYER_LUNARG_api_dump" { + names.push(name_string.clone()); + } + + if renderdoc && name == "VK_LAYER_RENDERDOC_Capture" { + names.push(name_string.clone()); + } + + if steam_layer + && (name == "VK_LAYER_VALVE_steam_overlay_64" + || name == "VK_LAYER_VALVE_steam_overlay_32") + { + names.push(name_string.clone()); + } + } + + Ok(names) + } +} + +impl Instance { + pub fn preinitialized( + instance: VkInstance, + proc_addr: PFN_vkGetInstanceProcAddr, + extensions: &[VkString], + api_version: u32, + ) -> Result> { + let static_functions = StaticFunctions { + _lib: None, + vkGetInstanceProcAddr: proc_addr, + }; + let entry_functions = EntryFunctions::new(&static_functions); + let instance_functions = InstanceFunctions::new(&static_functions, instance); + let instance_wsi_functions = InstanceWSIFunctions::new(&static_functions, instance); + let physical_device_properties2_functions = + PhysicalDeviceProperties2Functions::new(&static_functions, instance); + let debug_report_callback_functions = + DebugReportCallbackFunctions::new(&static_functions, instance); + + 
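+        // every dispatch table above is resolved through the caller-supplied
+        // vkGetInstanceProcAddr (note `_lib: None` in StaticFunctions), so no
+        // Vulkan loader library is opened for a preinitialized instance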
Ok(Arc::new(Instance { + _static_functions: static_functions, + _entry_functions: entry_functions, + instance_functions, + instance_wsi_functions, + physical_device_properties2_functions, + + debug_report_callback_functions, + + instance, + + instance_extensions: InstanceExtensions::from_list(extensions), + + debug_report: None, + + api_version, + })) + } + + pub fn new( + app_info: VkApplicationInfo<'_>, + debug_info: VulkanDebugInfo, + extensions: InstanceExtensions, + ) -> Result> { + let static_functions = StaticFunctions::load()?; + let entry_functions = EntryFunctions::new(&static_functions); + + let layers = if debug_info.debugging { + let layer_object = Layer::create(&entry_functions)?; + + layer_object.names( + true, + debug_info.steam_layer, + debug_info.verbose, + debug_info.renderdoc, + )? + } else if debug_info.renderdoc { + let layer_object = Layer::create(&entry_functions)?; + + // render doc only + layer_object.names(false, false, false, true)? + } else { + Vec::new() + }; + + let mut checked_extensions = Vec::new(); + let mut extension_list = extensions.as_list(); + + if debug_info.debugging || debug_info.renderdoc { + extension_list.push(VkString::new("VK_EXT_debug_report")); + } + + if !extension_list.is_empty() { + let extension_properties = Self::get_extension_properties(&entry_functions, &layers)?; + + for extension in extension_list { + for ext_prop in &extension_properties { + if extension == *ext_prop { + checked_extensions.push(extension); + break; + } + } + } + } + + // instance create info + let layer_names = VkNames::new(layers.as_slice()); + let extension_names = VkNames::new(checked_extensions.as_slice()); + let instance_ci = VkInstanceCreateInfo::new( + VK_INSTANCE_CREATE_NULL_BIT, + &app_info, + &layer_names, + &extension_names, + ); + + println!("enabled layers ({}):", layer_names.len()); + + for layer_name in layer_names.iter() { + println!("\t- {:?}", layer_name); + } + + println!("\nenabled instance extensions ({}):", extension_names.len()); + + for extension_name in extension_names.iter() { + println!("\t- {:?}", extension_name); + } + + println!(); + + let enabled_extensions = InstanceExtensions::from_list(&checked_extensions); + + if let Err(missing_extensions) = extensions.check_availability(&enabled_extensions) { + for m in missing_extensions { + println!("{}", m); + } + } + + let instance = unsafe { + let mut instance = MaybeUninit::uninit(); + let result = + entry_functions.vkCreateInstance(&instance_ci, ptr::null(), instance.as_mut_ptr()); + + if result == VK_SUCCESS { + instance.assume_init() + } else { + return Err(anyhow::Error::new(result)); + } + }; + + let instance_functions = InstanceFunctions::new(&static_functions, instance); + let instance_wsi_functions = InstanceWSIFunctions::new(&static_functions, instance); + let physical_device_properties2_functions = + PhysicalDeviceProperties2Functions::new(&static_functions, instance); + let debug_report_callback_functions = + DebugReportCallbackFunctions::new(&static_functions, instance); + + let mut instance = Instance { + _static_functions: static_functions, + _entry_functions: entry_functions, + instance_functions, + instance_wsi_functions, + physical_device_properties2_functions, + + debug_report_callback_functions, + + instance, + + instance_extensions: enabled_extensions, + + debug_report: None, + + api_version: app_info.apiVersion, + }; + + if !layers.is_empty() { + if let Err(msg) = instance.create_debug_report() { + println!("failed creating debug report: {}", msg); + } + } + + 
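+        // hypothetical call site (sketch only; `app_info` stands for a
+        // VkApplicationInfo prepared by the application):
+        //
+        //     let instance = Instance::new(
+        //         app_info,
+        //         VulkanDebugInfo { debugging: true, ..Default::default() },
+        //         InstanceExtensions::default(),
+        //     )?;
+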
Ok(Arc::new(instance)) + } + + pub(crate) fn api_version(&self) -> u32 { + self.api_version + } + + pub fn enabled_extensions(&self) -> &InstanceExtensions { + &self.instance_extensions + } +} + +impl_vk_handle!(Instance, VkInstance, instance); + +// private +impl Instance { + fn create_debug_report(&mut self) -> Result<()> { + let debug_report_info = VkDebugReportCallbackCreateInfoEXT::new( + VK_DEBUG_REPORT_WARNING_BIT_EXT + | VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT + | VK_DEBUG_REPORT_ERROR_BIT_EXT, + Instance::debug_report_callback, + ); + + let debug_report = self.create_debug_report_callbacks(&debug_report_info)?; + + self.debug_report = Some(debug_report); + + Ok(()) + } + + fn get_extension_properties( + entry_functions: &EntryFunctions, + layers: &[VkString], + ) -> Result> { + let mut properties = HashSet::new(); + + let default_properties = Self::enumerate_extension_properties( + entry_functions.vkEnumerateInstanceExtensionProperties, + None, + )?; + + for property in default_properties { + let prop_string = VkString::new(&property.extension_name()?); + + properties.insert(prop_string); + } + + for layer in layers { + let tmp_properties = Self::enumerate_extension_properties( + entry_functions.vkEnumerateInstanceExtensionProperties, + Some(layer), + )?; + + for property in tmp_properties { + let prop_string = VkString::new(&property.extension_name()?); + + properties.insert(prop_string); + } + } + + Ok(properties.iter().cloned().collect()) + } +} + +// debug +impl Instance { + extern "system" fn debug_report_callback( + flags: VkDebugReportFlagsEXT, + object_type: VkDebugReportObjectTypeEXT, + _src_object: u64, + _location: usize, + _msg_code: i32, + _layer_prefix: *const c_char, + msg: *const c_char, + _user_data: *mut c_void, + ) -> VkBool32 { + let mut output: String = String::new(); + + output += match flags { + VK_DEBUG_REPORT_INFORMATION_BIT_EXT => "INFO", + VK_DEBUG_REPORT_WARNING_BIT_EXT => "WARNING", + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT => "PERFORMANCE", + VK_DEBUG_REPORT_ERROR_BIT_EXT => "ERROR", + VK_DEBUG_REPORT_DEBUG_BIT_EXT => "DEBUG", + }; + + output += ": OBJ( "; + + output += match object_type { + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT => "UNKNOWN", + VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT => "INSTANCE", + VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT => "PHYSICAL DEVICE", + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT => "DEVICE", + VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT => "QUEUE", + VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT => "SEMAPHORE", + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT => "COMMAND BUFFER", + VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT => "FENCE", + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT => "DEVICE MEMORY", + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT => "BUFFER", + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT => "IMAGE", + VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT => "EVENT", + VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT => "QUERY POOL", + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT => "BUFFER VIEW", + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT => "IMAGE VIEW", + VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT => "SHADER MODULE", + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT => "PIPELINE CACHE", + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT => "PIPELINE LAYOUT", + VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT => "RENDER PASS", + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT => "PIPELINE", + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT => "DESCRIPTOR SET LAYOUT", + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT => "SAMPLER", + 
VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT => "DESCRIPTOR POOL", + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT => "DESCRIPTOR SET", + VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT => "FRAME BUFFER", + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT => "COMMAND POOL", + VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT => "SURFACE KHR", + VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT => "SWAPCHAIN KHR", + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT => "DEBUG REPORT", + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT => "DISPLAY KHR", + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT => "DISPLAY MODE KHR", + VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT => "OBJECT TABLE NVX", + VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT => { + "INDIRECT COMMANDS LAYOUT NVX" + } + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT => "VALIDATION CACHE", + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT => "SAMPLER YCBCR CONVERSION", + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT => { + "DESCRIPTOR UPDATE TEMPLATE" + } + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT => { + "ACCELERATION STRUCTURE KHR" + } + }; + + // borrow the driver-owned message instead of taking ownership of it: + // CString::from_raw here would eventually free memory this library + // never allocated + let message = unsafe { CStr::from_ptr(msg) }.to_string_lossy(); + + output += " ):\n\t"; + output += message.as_ref(); + + println!("{}", output); + + VK_TRUE + } +} + +impl fmt::Debug for Instance { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Instance (VkInstance: {:#?})", self.instance) + } +} + +impl Drop for Instance { + fn drop(&mut self) { + if let Some(debug_report) = &self.debug_report { + self.destroy_debug_report_callbacks(*debug_report); + } + + self.destroy_instance(); + } +} + +// private wrapper +impl Instance { + #[inline] + fn enumerate_layer_properties( + enumerate_instance_layer_properties: PFN_vkEnumerateInstanceLayerProperties, + ) -> Result<Vec<VkLayerProperties>> { + let mut property_count: u32 = 0; + + // get the amount of properties + let result = enumerate_instance_layer_properties(&mut property_count, ptr::null_mut()); + + if result != VK_SUCCESS { + return Err(anyhow::Error::new(result)); + } + + let mut properties = Vec::with_capacity(property_count as usize); + unsafe { properties.set_len(property_count as usize) }; + + // get the properties + let result = + enumerate_instance_layer_properties(&mut property_count, properties.as_mut_ptr()); + + if result == VK_SUCCESS { + Ok(properties) + } else { + Err(anyhow::Error::new(result)) + } + } + + #[inline] + fn enumerate_extension_properties( + enumerate_instance_extension_properties: PFN_vkEnumerateInstanceExtensionProperties, + layer_name: Option<&VkString>, + ) -> Result<Vec<VkExtensionProperties>> { + let mut count = 0; + let name = match layer_name { + Some(name) => name.as_ptr(), + None => ptr::null(), + }; + + let mut result = enumerate_instance_extension_properties(name, &mut count, ptr::null_mut()); + + if result != VK_SUCCESS { + return Err(anyhow::Error::new(result)); + } + + let mut properties = Vec::with_capacity(count as usize); + unsafe { properties.set_len(count as usize) }; + + result = enumerate_instance_extension_properties(name, &mut count, properties.as_mut_ptr()); + + if result == VK_SUCCESS { + Ok(properties) + } else { + Err(anyhow::Error::new(result)) + } + } + + #[inline] + fn destroy_instance(&self) { + unsafe { + self.instance_functions + .vkDestroyInstance(self.instance, ptr::null()); + } + } +} + +// public, wrapped vulkan calls +impl 
Instance { + #[inline] + pub fn create_debug_report_callbacks( + &self, + debug_report_callback_create_info: &VkDebugReportCallbackCreateInfoEXT, + ) -> Result { + unsafe { + let mut debug_report_callback = MaybeUninit::uninit(); + + let result = self + .debug_report_callback_functions + .vkCreateDebugReportCallbackEXT( + self.instance, + debug_report_callback_create_info, + ptr::null(), + debug_report_callback.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(debug_report_callback.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn destroy_debug_report_callbacks(&self, debug_report_callback: VkDebugReportCallbackEXT) { + unsafe { + self.debug_report_callback_functions + .vkDestroyDebugReportCallbackEXT(self.instance, debug_report_callback, ptr::null()) + } + } + + #[inline] + pub fn get_device_proc_addr(&self, device: VkDevice, name: VkString) -> PFN_vkVoidFunction { + unsafe { + self.instance_functions + .vkGetDeviceProcAddr(device, name.as_ptr()) + } + } + + #[inline] + pub fn get_device_proc_addr_raw(&self, device: VkDevice, name: &CStr) -> PFN_vkVoidFunction { + unsafe { + self.instance_functions + .vkGetDeviceProcAddr(device, name.as_ptr()) + } + } + + #[inline] + pub fn enumerate_physical_devices(&self) -> Result> { + let mut count = 0; + + let result = unsafe { + self.instance_functions.vkEnumeratePhysicalDevices( + self.instance, + &mut count, + ptr::null_mut(), + ) + }; + + if result != VK_SUCCESS { + return Err(anyhow::Error::new(result)); + } + + let mut physical_devices = Vec::with_capacity(count as usize); + unsafe { physical_devices.set_len(count as usize) }; + + let result = unsafe { + self.instance_functions.vkEnumeratePhysicalDevices( + self.instance, + &mut count, + physical_devices.as_mut_ptr(), + ) + }; + + if result == VK_SUCCESS { + Ok(physical_devices) + } else { + Err(anyhow::Error::new(result)) + } + } + + #[inline] + pub fn physical_device_properties( + &self, + physical_device: VkPhysicalDevice, + ) -> VkPhysicalDeviceProperties { + unsafe { + let mut physical_device_properties = MaybeUninit::uninit(); + + self.instance_functions.vkGetPhysicalDeviceProperties( + physical_device, + physical_device_properties.as_mut_ptr(), + ); + + physical_device_properties.assume_init() + } + } + + #[inline] + pub fn physical_device_features( + &self, + physical_device: VkPhysicalDevice, + ) -> VkPhysicalDeviceFeatures { + unsafe { + let mut physical_device_features = MaybeUninit::uninit(); + + self.instance_functions.vkGetPhysicalDeviceFeatures( + physical_device, + physical_device_features.as_mut_ptr(), + ); + + physical_device_features.assume_init() + } + } + + #[inline] + pub fn physical_device_format_properties( + &self, + physical_device: VkPhysicalDevice, + format: VkFormat, + ) -> VkFormatProperties { + unsafe { + let mut physical_device_format_properties = MaybeUninit::uninit(); + + self.instance_functions.vkGetPhysicalDeviceFormatProperties( + physical_device, + format, + physical_device_format_properties.as_mut_ptr(), + ); + + physical_device_format_properties.assume_init() + } + } + + #[inline] + pub fn physical_device_queue_family_properties( + &self, + physical_device: VkPhysicalDevice, + ) -> Vec { + let mut count = 0; + + unsafe { + self.instance_functions + .vkGetPhysicalDeviceQueueFamilyProperties( + physical_device, + &mut count, + ptr::null_mut(), + ); + } + + let mut queue_family_properties = Vec::with_capacity(count as usize); + unsafe { queue_family_properties.set_len(count as usize) }; + + unsafe { + 
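+            // second half of Vulkan's usual count-then-fill enumeration
+            // pattern: `count` was written by the first call above, and the
+            // vector (whose length is already set) is filled in place here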
self.instance_functions + .vkGetPhysicalDeviceQueueFamilyProperties( + physical_device, + &mut count, + queue_family_properties.as_mut_ptr(), + ); + } + + queue_family_properties + } + + #[inline] + pub fn physical_device_memory_properties( + &self, + physical_device: VkPhysicalDevice, + ) -> VkPhysicalDeviceMemoryProperties { + unsafe { + let mut physical_device_memory_properties = MaybeUninit::uninit(); + + self.instance_functions.vkGetPhysicalDeviceMemoryProperties( + physical_device, + physical_device_memory_properties.as_mut_ptr(), + ); + + physical_device_memory_properties.assume_init() + } + } + + #[inline] + pub fn physical_device_sparse_image_format_properties( + &self, + physical_device: VkPhysicalDevice, + format: VkFormat, + ty: VkImageType, + samples: VkSampleCountFlags, + usage: impl Into, + tiling: VkImageTiling, + ) -> Vec { + let mut count = 0; + let usage = usage.into(); + + unsafe { + self.instance_functions + .vkGetPhysicalDeviceSparseImageFormatProperties( + physical_device, + format, + ty, + samples, + usage, + tiling, + &mut count, + ptr::null_mut(), + ); + } + + let mut sparse_image_formats = Vec::with_capacity(count as usize); + unsafe { sparse_image_formats.set_len(count as usize) }; + + unsafe { + self.instance_functions + .vkGetPhysicalDeviceSparseImageFormatProperties( + physical_device, + format, + ty, + samples, + usage, + tiling, + &mut count, + sparse_image_formats.as_mut_ptr(), + ); + } + + sparse_image_formats + } + + #[inline] + pub fn physical_device_image_format_properties( + &self, + physical_device: VkPhysicalDevice, + format: VkFormat, + image_type: VkImageType, + tiling: VkImageTiling, + usage: impl Into, + flags: impl Into, + ) -> Result { + unsafe { + let mut image_format_properties = MaybeUninit::uninit(); + + let result = self + .instance_functions + .vkGetPhysicalDeviceImageFormatProperties( + physical_device, + format, + image_type, + tiling, + usage.into(), + flags.into(), + image_format_properties.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(image_format_properties.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn create_device<'a>( + &self, + physical_device: VkPhysicalDevice, + device_create_info: &'a VkDeviceCreateInfo<'a>, + ) -> Result { + unsafe { + let mut device = MaybeUninit::uninit(); + + let result = self.instance_functions.vkCreateDevice( + physical_device, + device_create_info, + ptr::null(), + device.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(device.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn physical_device_surface_support( + &self, + physical_device: VkPhysicalDevice, + queue_family_index: u32, + surface: VkSurfaceKHR, + ) -> Result { + unsafe { + let mut supported = MaybeUninit::uninit(); + + let result = self + .instance_wsi_functions + .vkGetPhysicalDeviceSurfaceSupportKHR( + physical_device, + queue_family_index, + surface, + supported.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + Ok(supported.assume_init() == VK_TRUE) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn physical_device_surface_capabilities( + &self, + physical_device: VkPhysicalDevice, + surface: VkSurfaceKHR, + ) -> Result { + unsafe { + let mut surface_capabilities = MaybeUninit::uninit(); + + let result = self + .instance_wsi_functions + .vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + physical_device, + surface, + surface_capabilities.as_mut_ptr(), + ); + + if result == VK_SUCCESS { + 
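+                // VK_SUCCESS guarantees the driver fully wrote the
+                // capabilities struct, so assume_init() is sound here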
Ok(surface_capabilities.assume_init()) + } else { + Err(anyhow::Error::new(result)) + } + } + } + + #[inline] + pub fn physical_device_surface_formats( + &self, + physical_device: VkPhysicalDevice, + surface: VkSurfaceKHR, + ) -> Result> { + let mut count = 0; + + let result = unsafe { + self.instance_wsi_functions + .vkGetPhysicalDeviceSurfaceFormatsKHR( + physical_device, + surface, + &mut count, + ptr::null_mut(), + ) + }; + + if result != VK_SUCCESS { + return Err(anyhow::Error::new(result)); + } + + let mut surface_formats = Vec::with_capacity(count as usize); + unsafe { surface_formats.set_len(count as usize) }; + + let result = unsafe { + self.instance_wsi_functions + .vkGetPhysicalDeviceSurfaceFormatsKHR( + physical_device, + surface, + &mut count, + surface_formats.as_mut_ptr(), + ) + }; + + if result == VK_SUCCESS { + Ok(surface_formats) + } else { + Err(anyhow::Error::new(result)) + } + } + + #[inline] + pub fn physical_device_present_modes( + &self, + physical_device: VkPhysicalDevice, + surface: VkSurfaceKHR, + ) -> Result> { + let mut count = 0; + + let result = unsafe { + self.instance_wsi_functions + .vkGetPhysicalDeviceSurfacePresentModesKHR( + physical_device, + surface, + &mut count, + ptr::null_mut(), + ) + }; + + if result != VK_SUCCESS { + return Err(anyhow::Error::new(result)); + } + + let mut surface_present_modes = Vec::with_capacity(count as usize); + unsafe { surface_present_modes.set_len(count as usize) }; + + let result = unsafe { + self.instance_wsi_functions + .vkGetPhysicalDeviceSurfacePresentModesKHR( + physical_device, + surface, + &mut count, + surface_present_modes.as_mut_ptr(), + ) + }; + + if result == VK_SUCCESS { + Ok(surface_present_modes) + } else { + Err(anyhow::Error::new(result)) + } + } + + #[inline] + pub fn enumerate_device_extensions( + &self, + physical_device: VkPhysicalDevice, + ) -> Result> { + let mut count = 0; + + let result = unsafe { + self.instance_functions + .vkEnumerateDeviceExtensionProperties( + physical_device, + ptr::null(), + &mut count, + ptr::null_mut(), + ) + }; + + if result != VK_SUCCESS { + return Err(anyhow::Error::new(result)); + } + + let mut extension_properties = Vec::with_capacity(count as usize); + unsafe { extension_properties.set_len(count as usize) }; + + let result = unsafe { + self.instance_functions + .vkEnumerateDeviceExtensionProperties( + physical_device, + ptr::null(), + &mut count, + extension_properties.as_mut_ptr(), + ) + }; + + if result == VK_SUCCESS { + Ok(extension_properties) + } else { + Err(anyhow::Error::new(result)) + } + } + + #[inline] + pub fn physical_device_properties2( + &self, + physical_device: VkPhysicalDevice, + device_properties: &mut VkPhysicalDeviceProperties2KHR, + ) { + unsafe { + self.physical_device_properties2_functions + .vkGetPhysicalDeviceProperties2KHR(physical_device, device_properties); + } + } + + #[inline] + pub fn physical_device_features2( + &self, + physical_device: VkPhysicalDevice, + device_features: &mut VkPhysicalDeviceFeatures2KHR, + ) { + unsafe { + self.physical_device_properties2_functions + .vkGetPhysicalDeviceFeatures2KHR(physical_device, device_features); + } + } + + #[inline] + pub fn physical_device_format_properties2( + &self, + physical_device: VkPhysicalDevice, + ) -> VkFormatProperties2KHR<'_> { + unsafe { + let mut handle = MaybeUninit::uninit(); + + self.physical_device_properties2_functions + .vkGetPhysicalDeviceFormatProperties2KHR(physical_device, handle.as_mut_ptr()); + + handle.assume_init() + } + } + + #[inline] + pub fn 
physical_device_image_format_properties2( + &self, + physical_device: VkPhysicalDevice, + image_format_info: &VkPhysicalDeviceImageFormatInfo2KHR, + ) -> VkImageFormatProperties2KHR<'_> { + unsafe { + let mut handle = MaybeUninit::uninit(); + + self.physical_device_properties2_functions + .vkGetPhysicalDeviceImageFormatProperties2KHR( + physical_device, + image_format_info, + handle.as_mut_ptr(), + ); + + handle.assume_init() + } + } + + #[inline] + pub fn physical_device_queue_family_properties2( + &self, + physical_device: VkPhysicalDevice, + ) -> Vec { + let mut count = 0; + + unsafe { + self.physical_device_properties2_functions + .vkGetPhysicalDeviceQueueFamilyProperties2KHR( + physical_device, + &mut count, + ptr::null_mut(), + ) + }; + + let mut family_queue_properties = Vec::with_capacity(count as usize); + unsafe { family_queue_properties.set_len(count as usize) }; + + unsafe { + self.physical_device_properties2_functions + .vkGetPhysicalDeviceQueueFamilyProperties2KHR( + physical_device, + &mut count, + family_queue_properties.as_mut_ptr(), + ) + }; + + family_queue_properties + } + + #[inline] + pub fn physical_device_memory_properties2( + &self, + physical_device: VkPhysicalDevice, + ) -> VkPhysicalDeviceMemoryProperties2KHR { + unsafe { + let mut handle = MaybeUninit::uninit(); + + self.physical_device_properties2_functions + .vkGetPhysicalDeviceMemoryProperties2KHR(physical_device, handle.as_mut_ptr()); + + handle.assume_init() + } + } + + #[inline] + pub fn physical_device_memory_budget( + &self, + physical_device: VkPhysicalDevice, + ) -> (VkPhysicalDeviceMemoryBudgetPropertiesEXT, u32) { + unsafe { + let mut properties = VkPhysicalDeviceMemoryProperties2KHR::default(); + let mut memory_budget = VkPhysicalDeviceMemoryBudgetPropertiesEXT::default(); + properties.chain(&mut memory_budget); + + self.physical_device_properties2_functions + .vkGetPhysicalDeviceMemoryProperties2KHR(physical_device, &mut properties); + + (memory_budget, properties.memoryProperties.memoryHeapCount) + } + } + + #[inline] + pub fn physical_device_sparse_image_format_properties2( + &self, + physical_device: VkPhysicalDevice, + format_info: &VkPhysicalDeviceSparseImageFormatInfo2KHR, + ) -> Vec { + let mut count = 0; + + unsafe { + self.physical_device_properties2_functions + .vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + physical_device, + format_info, + &mut count, + ptr::null_mut(), + ) + }; + + let mut sparse_image_formats = Vec::with_capacity(count as usize); + unsafe { sparse_image_formats.set_len(count as usize) }; + + unsafe { + self.physical_device_properties2_functions + .vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + physical_device, + format_info, + &mut count, + sparse_image_formats.as_mut_ptr(), + ) + }; + + sparse_image_formats + } + + #[inline] + pub fn destroy_surface(&self, surface: VkSurfaceKHR) { + unsafe { + self.instance_wsi_functions + .vkDestroySurfaceKHR(self.instance, surface, ptr::null()) + }; + } +} diff --git a/vulkan-rs/src/lib.rs b/vulkan-rs/src/lib.rs new file mode 100644 index 0000000..c40ebdd --- /dev/null +++ b/vulkan-rs/src/lib.rs @@ -0,0 +1,59 @@ +//! 
`vulkan` module is a collection of abstractions for vulkan functions +#![deny(rust_2018_idioms)] + +pub mod prelude; + +#[macro_use] +mod macros; + +// mod error; + +// pub use error::Result; + +pub mod acceleration_structure; +pub mod address; +pub mod buffer; +pub mod commandbuffer; +pub mod commandpool; +pub mod deferred_operation; +pub mod descriptorpool; +pub mod descriptorset; +pub mod descriptorsetlayout; +pub mod device; +pub mod fence; +pub mod framebuffer; +pub mod image; +pub mod instance; +pub mod memory; +pub mod physicaldevice; +pub mod pipeline; +pub mod pipelinecache; +pub mod pipelinelayout; +pub mod pipelines; +pub mod querypool; +pub mod queue; +pub mod render_target; +pub mod renderpass; +pub mod semaphore; +pub mod shadermodule; +pub mod surface; +pub mod swapchain; + +pub mod ffi; + +mod sampler_manager; + +#[derive(Clone, Debug)] +pub enum OutOfDate<T> { + Ok(T), + OutOfDate, + TimeOut, +} + +pub trait VkHandle<T> { + fn vk_handle(&self) -> T; +} + +pub trait VulkanDevice { + fn device(&self) -> &std::sync::Arc<crate::device::Device>; +} diff --git a/vulkan-rs/src/macros.rs b/vulkan-rs/src/macros.rs new file mode 100644 index 0000000..fe919ac --- /dev/null +++ b/vulkan-rs/src/macros.rs @@ -0,0 +1,165 @@ +macro_rules! impl_vk_handle { + ($struct_name:ident, $target_name:ident, $value:ident) => { + impl VkHandle<$target_name> for $struct_name { + fn vk_handle(&self) -> $target_name { + self.$value + } + } + + impl<'a> VkHandle<$target_name> for &'a $struct_name { + fn vk_handle(&self) -> $target_name { + self.$value + } + } + + impl VkHandle<$target_name> for Arc<$struct_name> { + fn vk_handle(&self) -> $target_name { + self.$value + } + } + + impl<'a> VkHandle<$target_name> for &'a Arc<$struct_name> { + fn vk_handle(&self) -> $target_name { + self.$value + } + } + }; +} + +macro_rules! impl_vk_handle_t { + ($struct_name:ident, $target_name:ident, $value:ident) => { + impl<T> VkHandle<$target_name> for $struct_name<T> { + fn vk_handle(&self) -> $target_name { + self.$value + } + } + + impl<'a, T> VkHandle<$target_name> for &'a $struct_name<T> { + fn vk_handle(&self) -> $target_name { + self.$value + } + } + + impl<T> VkHandle<$target_name> for Arc<$struct_name<T>> { + fn vk_handle(&self) -> $target_name { + self.$value + } + } + + impl<'a, T> VkHandle<$target_name> for &'a Arc<$struct_name<T>> { + fn vk_handle(&self) -> $target_name { + self.$value + } + } + }; +} + +macro_rules! 
Extensions { +($struct_name:ident, { $(($var:ident, $name:expr),)+ }) => { + pub struct $struct_name { + $( + pub $var: bool, + )+ + + raw_names: Vec, + } + + impl $struct_name { + pub fn into_list(self) -> Vec { + let mut list = Vec::new(); + + $( + if self.$var { + list.push(VkString::new($name)); + } + )+ + + list + } + + pub fn as_list(&self) -> Vec { + let mut list = Vec::new(); + + $( + if self.$var { + list.push(VkString::new($name)); + } + )+ + + let mut raw_vk_names = self.raw_names.iter().map(|raw_name| VkString::new(raw_name)).collect(); + list.append(&mut raw_vk_names); + + list + } + + pub fn from_list(list: &[VkString]) -> Self { + let mut extensions = Self::default(); + + $( + if list.contains(&VkString::new($name)) { + extensions.$var = true; + } + )+ + + extensions + } + + pub fn check_availability(&self, other: &$struct_name) -> std::result::Result<(), Vec> { + let mut missings = Vec::new(); + + // requested extensions is not available in other + $( + if self.$var && !other.$var { + missings.push(format!("{} is not available", $name)); + } + )+ + + if missings.is_empty() { + Ok(()) + } else { + Err(missings) + } + } + + pub fn activate(&mut self, extension_name: &str) -> std::result::Result<(), String> { + if self.check(extension_name) { + return Ok(()); + } + + Err(format!("Extension ({}) currently not supported!", extension_name)) + } + + pub unsafe fn add_raw_name(&mut self, extension_name: &str) { + if self.check(extension_name) { + return; + } + + println!("Add raw extension name: {}", extension_name); + self.raw_names.push(extension_name.to_string()); + } + + fn check(&mut self, extension_name: &str) -> bool { + $( + if extension_name == $name { + self.$var = true; + return true; + } + )+ + + false + } + } + + impl Default for $struct_name { + fn default() -> Self { + $struct_name { + $( + $var: false, + )+ + + raw_names: Vec::new(), + } + } + } +}; +} diff --git a/vulkan-rs/src/memory.rs b/vulkan-rs/src/memory.rs new file mode 100644 index 0000000..d0e9766 --- /dev/null +++ b/vulkan-rs/src/memory.rs @@ -0,0 +1,179 @@ +use crate::prelude::*; + +use anyhow::Result; + +use vma_rs::prelude::*; + +use std::marker::PhantomData; +use std::sync::Arc; + +#[derive(Debug, Clone, PartialEq, Hash, Eq)] +pub enum MemoryUsage { + GpuOnly, + CpuOnly, + CpuToGpu, + GpuToCpu, +} + +impl MemoryUsage { + pub fn into_vma(usage: Option) -> VmaMemoryUsage { + match usage { + Some(usage) => usage.into(), + None => VMA_MEMORY_USAGE_UNKNOWN, + } + } +} + +impl Into for MemoryUsage { + fn into(self) -> VmaMemoryUsage { + match self { + Self::GpuOnly => VMA_MEMORY_USAGE_GPU_ONLY, + Self::CpuOnly => VMA_MEMORY_USAGE_CPU_ONLY, + Self::CpuToGpu => VMA_MEMORY_USAGE_CPU_TO_GPU, + Self::GpuToCpu => VMA_MEMORY_USAGE_GPU_TO_CPU, + } + } +} + +#[derive(Debug)] +pub struct Memory { + device: Arc, + + allocation: Allocation, + + data_type: PhantomData, +} + +impl Memory { + pub(crate) fn forced_requirements( + device: &Arc, + memory_requirements: VkMemoryRequirements, + buffer: VkBuffer, + memory_usage: VmaMemoryUsage, + ) -> Result>> { + let mut memory = Self::create_and_bind(device, memory_requirements, memory_usage, ())?; + + if let Some(mut_memory) = Arc::get_mut(&mut memory) { + mut_memory.allocation.bind_buffer_memory(buffer)?; + } + + Ok(memory) + } + + pub(crate) fn buffer_memory( + device: &Arc, + buffer: VkBuffer, + memory_usage: VmaMemoryUsage, + ) -> Result>> { + let memory_requirements = device.buffer_memory_requirements(buffer); + + Self::create_and_bind(device, memory_requirements, 
memory_usage, buffer) + } + + pub(crate) fn image_memory( + device: &Arc, + image: VkImage, + memory_usage: VmaMemoryUsage, + ) -> Result>> { + let memory_requirements = device.image_memory_requirements(image); + + Self::create_and_bind(device, memory_requirements, memory_usage, image) + } + + pub(crate) fn vk_handle(&self) -> VkDeviceMemory { + self.allocation.device_memory() + } +} + +trait MemoryBinder { + fn create_and_bind( + device: &Arc, + memory_requirements: VkMemoryRequirements, + memory_usage: VmaMemoryUsage, + argument: T, + ) -> Result>>; +} + +impl MemoryBinder<(), K> for Memory { + fn create_and_bind( + device: &Arc, + memory_requirements: VkMemoryRequirements, + memory_usage: VmaMemoryUsage, + _: (), + ) -> Result>> { + let allocation = device + .allocator() + .allocate() + .set_usage(memory_usage) + .set_memory_type_bits(memory_requirements.memoryTypeBits.into()) + .build(&memory_requirements)?; + + Ok(Arc::new(Memory { + device: device.clone(), + + allocation, + + data_type: PhantomData, + })) + } +} + +impl MemoryBinder for Memory { + fn create_and_bind( + device: &Arc, + memory_requirements: VkMemoryRequirements, + memory_usage: VmaMemoryUsage, + image: VkImage, + ) -> Result>> { + let allocation = device + .allocator() + .allocate() + .set_usage(memory_usage) + .set_memory_type_bits(memory_requirements.memoryTypeBits.into()) + .build(image)?; + + Ok(Arc::new(Memory { + device: device.clone(), + + allocation, + + data_type: PhantomData, + })) + } +} + +impl MemoryBinder for Memory { + fn create_and_bind( + device: &Arc, + memory_requirements: VkMemoryRequirements, + memory_usage: VmaMemoryUsage, + buffer: VkBuffer, + ) -> Result>> { + let allocation = device + .allocator() + .allocate() + .set_usage(memory_usage) + .set_memory_type_bits(memory_requirements.memoryTypeBits.into()) + .build(buffer)?; + + Ok(Arc::new(Memory { + device: device.clone(), + + allocation, + + data_type: PhantomData, + })) + } +} + +impl VulkanDevice for Memory { + fn device(&self) -> &Arc { + &self.device + } +} + +impl Memory { + pub fn map(&self, length: VkDeviceSize) -> Result> { + self.allocation.map(length) + } +} diff --git a/vulkan-rs/src/physicaldevice.rs b/vulkan-rs/src/physicaldevice.rs new file mode 100644 index 0000000..bc81034 --- /dev/null +++ b/vulkan-rs/src/physicaldevice.rs @@ -0,0 +1,337 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::{ptr, sync::Arc}; + +#[derive(Debug)] +pub struct PhysicalDevice { + instance: Arc, + physical_device: VkPhysicalDevice, + properties: VkPhysicalDeviceProperties, + features: VkPhysicalDeviceFeatures, + memory_properties: VkPhysicalDeviceMemoryProperties, + supported_extensions: Vec, + + // extension info + ray_tracing_properties: VkPhysicalDeviceRayTracingPropertiesKHR, + ray_tracing_features: VkPhysicalDeviceRayTracingFeaturesKHR, + acceleration_structure_properties: VkPhysicalDeviceAccelerationStructurePropertiesKHR, + acceleration_structure_features: VkPhysicalDeviceAccelerationStructureFeaturesKHR, + + descriptor_indexing_features: VkPhysicalDeviceDescriptorIndexingFeaturesEXT, + descriptor_indexing_properties: VkPhysicalDeviceDescriptorIndexingPropertiesEXT, + + buffer_device_address_features: VkPhysicalDeviceBufferDeviceAddressFeaturesEXT, +} + +impl PhysicalDevice { + pub fn new(instance: Arc) -> Result> { + let physical_devices = instance.enumerate_physical_devices()?; + + let (mut physical_device, mut device_properties) = PhysicalDevice::find_phys_dev( + &instance, + &physical_devices, + 
VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU, + ); + + if physical_device.is_none() { + let (_physical_device, _device_properties) = PhysicalDevice::find_phys_dev( + &instance, + &physical_devices, + VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU, + ); + + if _physical_device.is_none() { + return Err(anyhow::Error::msg("Could not find an apropriate device")); + } + + physical_device = _physical_device; + device_properties = _device_properties; + } + + let exported_device = physical_device.unwrap(); + let device_props = device_properties.unwrap(); + + Self::internal_new(instance, exported_device, device_props) + } + + pub fn from_raw( + instance: Arc, + physical_device: VkPhysicalDevice, + ) -> Result> { + let properties = instance.physical_device_properties(physical_device); + + Self::internal_new(instance, physical_device, properties) + } + + fn internal_new( + instance: Arc, + physical_device: VkPhysicalDevice, + properties: VkPhysicalDeviceProperties, + ) -> Result> { + let device_features = instance.physical_device_features(physical_device); + + let device_memory_properties = instance.physical_device_memory_properties(physical_device); + + let extensions = Self::query_extensions(&instance, physical_device)?; + + // get extension properties + let mut device_properties2 = VkPhysicalDeviceProperties2KHR::default(); + + // get ray tracing properties + let mut ray_tracing_properties = VkPhysicalDeviceRayTracingPropertiesKHR::default(); + let mut acceleration_structure_properties = + VkPhysicalDeviceAccelerationStructurePropertiesKHR::default(); + + device_properties2.chain(&mut ray_tracing_properties); + device_properties2.chain(&mut acceleration_structure_properties); + + // get descriptor indexing properties + let mut descriptor_indexing_properties = + VkPhysicalDeviceDescriptorIndexingPropertiesEXT::default(); + + device_properties2.chain(&mut descriptor_indexing_properties); + instance.physical_device_properties2(physical_device, &mut device_properties2); + + // get extension features + let mut device_features2 = VkPhysicalDeviceFeatures2KHR::default(); + + // get ray tracing features + let mut ray_tracing_features = VkPhysicalDeviceRayTracingFeaturesKHR::default(); + let mut acceleration_structure_features = + VkPhysicalDeviceAccelerationStructureFeaturesKHR::default(); + + device_features2.chain(&mut ray_tracing_features); + device_features2.chain(&mut acceleration_structure_features); + + // get buffer device address features + let mut buffer_device_address_features = + VkPhysicalDeviceBufferDeviceAddressFeaturesEXT::default(); + device_features2.chain(&mut buffer_device_address_features); + + // get descriptor indexing features + let mut descriptor_indexing_features = + VkPhysicalDeviceDescriptorIndexingFeaturesEXT::default(); + device_features2.chain(&mut descriptor_indexing_features); + + instance.physical_device_features2(physical_device, &mut device_features2); + + // clear pNext indices for later chaining + buffer_device_address_features.pNext = ptr::null_mut(); + descriptor_indexing_features.pNext = ptr::null_mut(); + ray_tracing_features.pNext = ptr::null_mut(); + acceleration_structure_features.pNext = ptr::null_mut(); + + let (major, minor, patch) = VK_GET_VERSION(properties.apiVersion); + + println!( + "\nVulkan Device ({}, Driver: {}, {}.{}.{})", + properties.device_name(), + properties.driverVersion, + major, + minor, + patch + ); + + Ok(Arc::new(PhysicalDevice { + instance, + physical_device, + properties, + features: device_features, + memory_properties: device_memory_properties, + 
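// Conceptual sketch of the pNext chaining performed in `internal_new`
// above (the `chain` helper is this crate's; the struct names follow
// Vulkan):
//
//     let mut features2 = VkPhysicalDeviceFeatures2KHR::default();
//     let mut rt_features = VkPhysicalDeviceRayTracingFeaturesKHR::default();
//     features2.chain(&mut rt_features);      // features2.pNext -> rt_features
//     instance.physical_device_features2(physical_device, &mut features2);
//     rt_features.pNext = ptr::null_mut();    // detach before storing, as above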
supported_extensions: extensions, + + ray_tracing_properties, + ray_tracing_features, + acceleration_structure_features, + acceleration_structure_properties, + + descriptor_indexing_properties, + descriptor_indexing_features, + + buffer_device_address_features, + })) + } +} + +// getter +impl PhysicalDevice { + pub fn instance(&self) -> &Arc { + &self.instance + } + + pub fn features(&self) -> VkPhysicalDeviceFeatures { + self.features + } + + pub fn memory_properties(&self) -> &VkPhysicalDeviceMemoryProperties { + &self.memory_properties + } + + pub fn extensions(&self) -> &Vec { + &self.supported_extensions + } + + pub fn properties(&self) -> &VkPhysicalDeviceProperties { + &self.properties + } + + pub fn ray_tracing_properties(&self) -> &VkPhysicalDeviceRayTracingPropertiesKHR { + &self.ray_tracing_properties + } + + pub fn ray_tracing_features(&self) -> &VkPhysicalDeviceRayTracingFeaturesKHR { + &self.ray_tracing_features + } + + pub fn acceleration_structure_features( + &self, + ) -> &VkPhysicalDeviceAccelerationStructureFeaturesKHR { + &self.acceleration_structure_features + } + + pub fn acceleration_structure_properties( + &self, + ) -> &VkPhysicalDeviceAccelerationStructurePropertiesKHR { + &self.acceleration_structure_properties + } + + pub fn descriptor_indexing_properties( + &self, + ) -> &VkPhysicalDeviceDescriptorIndexingPropertiesEXT { + &self.descriptor_indexing_properties + } + + pub fn descriptor_indexing_features(&self) -> &VkPhysicalDeviceDescriptorIndexingFeaturesEXT { + &self.descriptor_indexing_features + } + + pub fn buffer_device_address_features( + &self, + ) -> &VkPhysicalDeviceBufferDeviceAddressFeaturesEXT { + &self.buffer_device_address_features + } + + pub fn check_optimal_format_features( + &self, + format: VkFormat, + usage: impl Into, + ) -> bool { + let format_properties = self + .instance + .physical_device_format_properties(self.physical_device, format); + + let features = Self::image_usage_into_features(usage.into()); + + if (format_properties.optimalTilingFeatures & features) == features { + return true; + } + + false + } + + pub fn check_linear_format_features( + &self, + format: VkFormat, + usage: impl Into, + ) -> bool { + let format_properties = self + .instance + .physical_device_format_properties(self.physical_device, format); + + let features = Self::image_usage_into_features(usage.into()); + + if (format_properties.linearTilingFeatures & features) == features { + return true; + } + + false + } + + pub fn check_buffer_format_features( + &self, + format: VkFormat, + features: impl Into, + ) -> bool { + let format_properties = self + .instance + .physical_device_format_properties(self.physical_device, format); + + let features = features.into(); + + if (format_properties.bufferFeatures & features) == features { + return true; + } + + false + } + + fn image_usage_into_features(usage: VkImageUsageFlagBits) -> VkFormatFeatureFlagBits { + let mut features = 0u32.into(); + + if (usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == VK_IMAGE_USAGE_TRANSFER_SRC_BIT { + features |= VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR; + } + + if (usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == VK_IMAGE_USAGE_TRANSFER_DST_BIT { + features |= VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR; + } + + if (usage & VK_IMAGE_USAGE_SAMPLED_BIT) == VK_IMAGE_USAGE_SAMPLED_BIT { + features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT; + } + + if (usage & VK_IMAGE_USAGE_STORAGE_BIT) == VK_IMAGE_USAGE_STORAGE_BIT { + features |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT; + } + + if (usage & 
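// Usage sketch for the format capability checks above (format and usage
// values assumed): confirm optimal-tiling support before creating an image
// with that format.
//
//     if physical_device.check_optimal_format_features(
//         VK_FORMAT_D16_UNORM,
//         VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
//     ) {
//         // safe to create a D16 depth attachment with optimal tiling
//     }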
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT { + features |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT; + } + + if (usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) + == VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT + { + features |= VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT; + } + + features + } +} + +impl_vk_handle!(PhysicalDevice, VkPhysicalDevice, physical_device); + +// private +impl PhysicalDevice { + fn find_phys_dev( + instance: &Arc, + physical_devices: &[VkPhysicalDevice], + device_type: VkPhysicalDeviceType, + ) -> (Option, Option) { + for physical_device in physical_devices { + let properties = instance.physical_device_properties(*physical_device); + + if properties.deviceType == device_type { + return (Some(*physical_device), Some(properties)); + } + } + + (None, None) + } + + fn query_extensions( + instance: &Arc, + physical_device: VkPhysicalDevice, + ) -> Result> { + let extensions = instance.enumerate_device_extensions(physical_device)?; + + let mut vkstrings = Vec::new(); + + for extension_property in extensions { + vkstrings.push(extension_property.extension_name()?); + } + + Ok(vkstrings) + } +} diff --git a/vulkan-rs/src/pipeline.rs b/vulkan-rs/src/pipeline.rs new file mode 100644 index 0000000..61a19f5 --- /dev/null +++ b/vulkan-rs/src/pipeline.rs @@ -0,0 +1,85 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::sync::Arc; + +#[derive(Clone, Copy, Debug, PartialEq)] +pub enum PipelineType { + Graphics, + Compute, + RayTracing, +} + +#[derive(Debug)] +pub struct Pipeline { + device: Arc, + pipeline_layout: Arc, + + pipeline_type: PipelineType, + + pipeline: VkPipeline, +} + +impl Pipeline { + pub(crate) fn new( + device: Arc, + pipeline_layout: Arc, + pipeline_type: PipelineType, + pipeline: VkPipeline, + ) -> Self { + Pipeline { + device, + pipeline_layout, + pipeline_type, + pipeline, + } + } + + pub fn new_graphics() -> GraphicsPipelineBuilder { + GraphicsPipelineBuilder::default() + } + + pub fn new_compute<'a>() -> ComputePipelineBuilder<'a> { + ComputePipelineBuilder::default() + } + + pub fn new_ray_tracing<'a>() -> RayTracingPipelineBuilder<'a> { + RayTracingPipelineBuilder::default() + } + + pub(crate) fn ray_tracing_shader_group_handles( + &self, + group_count: u32, + handle_size: u32, + ) -> Result> { + if self.pipeline_type != PipelineType::RayTracing { + panic!("wrong pipeline type"); + } + + self.device + .get_ray_tracing_shader_group_handles(self.pipeline, 0, group_count, handle_size) + } + + pub fn pipeline_layout(&self) -> &Arc { + &self.pipeline_layout + } + + pub fn pipeline_type(&self) -> PipelineType { + self.pipeline_type + } +} + +impl VulkanDevice for Pipeline { + fn device(&self) -> &Arc { + &self.device + } +} + +impl_vk_handle!(Pipeline, VkPipeline, pipeline); + +impl Drop for Pipeline { + fn drop(&mut self) { + self.device.destroy_pipeline(self.pipeline); + } +} diff --git a/vulkan-rs/src/pipelinecache.rs b/vulkan-rs/src/pipelinecache.rs new file mode 100644 index 0000000..cbd0798 --- /dev/null +++ b/vulkan-rs/src/pipelinecache.rs @@ -0,0 +1,52 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::sync::Arc; + +#[derive(Debug)] +pub struct PipelineCache { + device: Arc, + pipeline_cache: VkPipelineCache, +} + +impl PipelineCache { + pub fn new(device: Arc, data: &T) -> Result> { + let mut pipeline_cache_ci = + VkPipelineCacheCreateInfo::new(VK_PIPELINE_CACHE_CREATE_NULL_BIT); + + pipeline_cache_ci.set_data(data); + + let pipeline_cache = device.create_pipeline_cache(&pipeline_cache_ci)?; + 
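// Round-trip sketch for the cache above (the persistence layer and the
// second cache are assumed): bytes from a previous run can seed a new
// cache, and `merge`, defined just below, folds other caches into self.
//
//     let warm = PipelineCache::new(device.clone(), &bytes_from_disk)?;
//     warm.merge(&[&other_cache])?;   // other_cache: Arc<PipelineCache>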
+ Ok(Arc::new(PipelineCache { + device, + pipeline_cache, + })) + } + + pub fn get_data(&self) -> Result { + self.device.pipeline_cache_data(self.pipeline_cache) + } + + pub fn merge(&self, src_caches: &[&Arc]) -> Result<()> { + let vk_caches: Vec = src_caches.iter().map(|c| c.vk_handle()).collect(); + + self.device + .merge_pipeline_cache(vk_caches.as_slice(), self.pipeline_cache) + } +} + +impl VulkanDevice for PipelineCache { + fn device(&self) -> &Arc { + &self.device + } +} + +impl_vk_handle!(PipelineCache, VkPipelineCache, pipeline_cache); + +impl Drop for PipelineCache { + fn drop(&mut self) { + self.device.destroy_pipeline_cache(self.pipeline_cache); + } +} diff --git a/vulkan-rs/src/pipelinelayout.rs b/vulkan-rs/src/pipelinelayout.rs new file mode 100644 index 0000000..3443e94 --- /dev/null +++ b/vulkan-rs/src/pipelinelayout.rs @@ -0,0 +1,73 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::sync::Arc; + +#[derive(Debug)] +pub struct PipelineLayoutBuilder { + descriptor_set_layouts: Vec, + push_constant_ranges: Vec, +} + +impl PipelineLayoutBuilder { + pub fn add_descriptor_set_layout( + mut self, + descriptor_set_layout: &dyn VkHandle, + ) -> Self { + self.descriptor_set_layouts + .push(descriptor_set_layout.vk_handle()); + + self + } + + pub fn add_push_constant(mut self, push_constant: VkPushConstantRange) -> Self { + self.push_constant_ranges.push(push_constant); + + self + } + + pub fn build(self, device: Arc) -> Result> { + let pipeline_layout_ci = VkPipelineLayoutCreateInfo::new( + VK_PIPELINE_LAYOUT_CREATE_NULL_BIT, + &self.descriptor_set_layouts, + &self.push_constant_ranges, + ); + + let pipeline_layout = device.create_pipeline_layout(&pipeline_layout_ci)?; + + Ok(Arc::new(PipelineLayout { + device, + pipeline_layout, + })) + } +} + +#[derive(Debug)] +pub struct PipelineLayout { + device: Arc, + pipeline_layout: VkPipelineLayout, +} + +impl PipelineLayout { + pub fn builder() -> PipelineLayoutBuilder { + PipelineLayoutBuilder { + descriptor_set_layouts: Vec::new(), + push_constant_ranges: Vec::new(), + } + } +} + +impl VulkanDevice for PipelineLayout { + fn device(&self) -> &Arc { + &self.device + } +} + +impl_vk_handle!(PipelineLayout, VkPipelineLayout, pipeline_layout); + +impl Drop for PipelineLayout { + fn drop(&mut self) { + self.device.destroy_pipeline_layout(self.pipeline_layout); + } +} diff --git a/vulkan-rs/src/pipelines/compute_pipeline.rs b/vulkan-rs/src/pipelines/compute_pipeline.rs new file mode 100644 index 0000000..0f618ce --- /dev/null +++ b/vulkan-rs/src/pipelines/compute_pipeline.rs @@ -0,0 +1,84 @@ +use anyhow::Result; + +use crate::pipeline::PipelineType; +use crate::prelude::*; + +use std::sync::Arc; + +pub struct ComputePipelineBuilder<'a> { + shader_module: Option<&'a Arc>, + pipeline_cache: Option<&'a Arc>, + flags: VkPipelineCreateFlagBits, +} + +impl<'a> ComputePipelineBuilder<'a> { + // TODO: add support for specialization constants + pub fn set_shader_module(mut self, shader_module: &'a Arc) -> Self { + if cfg!(debug_assertions) { + if self.shader_module.is_some() { + panic!("shader already set!"); + } + + if shader_module.shader_type() != ShaderType::Compute { + panic!("shader has wrong type!"); + } + } + + self.shader_module = Some(shader_module); + + self + } + + pub fn set_pipeline_cache(mut self, pipeline_cache: &'a Arc) -> Self { + self.pipeline_cache = Some(pipeline_cache); + + self + } + + pub fn set_flags(mut self, flags: impl Into) -> Self { + self.flags = flags.into(); + + self + } + + pub fn build( + self, + device: 
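// Sketch of the pipeline layout builder above (the set layout is assumed;
// the push constant fields follow the Vulkan C struct, values are
// placeholders):
//
//     let pipeline_layout = PipelineLayout::builder()
//         .add_descriptor_set_layout(&descriptor_set_layout)
//         .add_push_constant(VkPushConstantRange {
//             stageFlags: VK_SHADER_STAGE_VERTEX_BIT.into(),
//             offset: 0,
//             size: 64,
//         })
//         .build(device.clone())?;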
&Arc, + pipeline_layout: &Arc, + ) -> Result> { + let pipeline_ci = match self.shader_module { + Some(module) => VkComputePipelineCreateInfo::new( + self.flags, + module.pipeline_stage_info(), + pipeline_layout.vk_handle(), + ), + None => { + return Err(anyhow::Error::msg( + "Required shader module could not be found", + )) + } + }; + + let pipeline = device.create_compute_pipelines( + self.pipeline_cache.map(|cache| cache.vk_handle()), + &[pipeline_ci], + )?[0]; + + Ok(Arc::new(Pipeline::new( + device.clone(), + pipeline_layout.clone(), + PipelineType::Compute, + pipeline, + ))) + } +} + +impl<'a> Default for ComputePipelineBuilder<'a> { + fn default() -> Self { + ComputePipelineBuilder { + shader_module: None, + pipeline_cache: None, + flags: 0.into(), + } + } +} diff --git a/vulkan-rs/src/pipelines/graphics_pipeline.rs b/vulkan-rs/src/pipelines/graphics_pipeline.rs new file mode 100644 index 0000000..4c05077 --- /dev/null +++ b/vulkan-rs/src/pipelines/graphics_pipeline.rs @@ -0,0 +1,463 @@ +use crate::pipeline::PipelineType; +use crate::prelude::*; + +use anyhow::Result; + +use std::sync::Arc; + +pub struct GraphicsPipelineBuilder { + flags: VkPipelineCreateFlagBits, + + pipeline_cache: Option>, + + amd_rasterization_order: Option, + + vertex_shader: Option>, + vertex_binding_description: Vec, + vertex_attribute_description: Vec, + + input_assembly: Option, + + tesselation_shader: Option<(Arc, Arc)>, + patch_control_points: u32, + + geometry_shader: Option>, + + fragment_shader: Option>, + + viewports: Vec, + scissors: Vec, + + rasterization: Option, + multisample: Option, + depth_stencil: Option, + + blend_attachments: Vec, + color_blend: Option, + + dynamic_states: Vec, +} + +impl GraphicsPipelineBuilder { + // TODO: add support for specialization constants + pub fn set_vertex_shader( + mut self, + shader: Arc, + vertex_binding_description: Vec, + vertex_attribute_description: Vec, + ) -> Self { + if cfg!(debug_assertions) { + assert_eq!(shader.shader_type(), ShaderType::Vertex); + } + + self.vertex_shader = Some(shader); + self.vertex_binding_description = vertex_binding_description; + self.vertex_attribute_description = vertex_attribute_description; + + self + } + + // TODO: add support for specialization constants + pub fn set_tesselation_shader( + mut self, + tesselation_control: Arc, + tesselation_evaluation: Arc, + patch_control_points: u32, + ) -> Self { + if cfg!(debug_assertions) { + assert_eq!( + tesselation_control.shader_type(), + ShaderType::TesselationControl + ); + + assert_eq!( + tesselation_evaluation.shader_type(), + ShaderType::TesselationEvaluation + ); + } + + self.tesselation_shader = Some((tesselation_control, tesselation_evaluation)); + self.patch_control_points = patch_control_points; + + self + } + + // TODO: add support for specialization constants + pub fn set_geometry_shader(mut self, shader: Arc) -> Self { + if cfg!(debug_assertions) { + assert_eq!(shader.shader_type(), ShaderType::Geometry); + } + + self.geometry_shader = Some(shader); + + self + } + + // TODO: add support for specialization constants + pub fn set_fragment_shader(mut self, shader: Arc) -> Self { + if cfg!(debug_assertions) { + assert_eq!(shader.shader_type(), ShaderType::Fragment); + } + + self.fragment_shader = Some(shader); + + self + } + + pub fn set_flags(mut self, flags: impl Into) -> Self { + self.flags = flags.into(); + + self + } + + pub fn enable_rasterization_order(mut self, order: VkRasterizationOrderAMD) -> Self { + self.amd_rasterization_order = Some( + 
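// Compute pipeline sketch using the builder above (shader module and
// layout assumed; `set_shader_module` debug-asserts a Compute-stage
// shader):
//
//     let compute = Pipeline::new_compute()
//         .set_shader_module(&compute_shader)
//         .build(&device, &pipeline_layout)?;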
VkPipelineRasterizationStateRasterizationOrderAMD::new(order), + ); + + self + } + + pub fn input_assembly( + mut self, + topology: VkPrimitiveTopology, + primitive_restart_enable: bool, + ) -> Self { + self.input_assembly = Some(VkPipelineInputAssemblyStateCreateInfo::new( + 0, + topology, + primitive_restart_enable, + )); + + self + } + + pub fn default_rasterization( + mut self, + cull_mode: VkCullModeFlags, + front_face: VkFrontFace, + ) -> Self { + self.rasterization = Some(VkPipelineRasterizationStateCreateInfo::new( + 0, + false, + false, + VK_POLYGON_MODE_FILL, + cull_mode, + front_face, + false, + 0.0, + 0.0, + 0.0, + 1.0, + )); + + self + } + + pub fn custom_rasterization( + mut self, + depth_clamp_enable: bool, + rasterization_discard_enable: bool, + polygon_mode: VkPolygonMode, + cull_mode: VkCullModeFlags, + front_face: VkFrontFace, + depth_bias_enable: bool, + depth_bias_constant_factor: f32, + depth_bias_clamp: f32, + depth_bias_slope_factor: f32, + line_width: f32, + ) -> Self { + self.rasterization = Some(VkPipelineRasterizationStateCreateInfo::new( + 0, + depth_clamp_enable, + rasterization_discard_enable, + polygon_mode, + cull_mode, + front_face, + depth_bias_enable, + depth_bias_constant_factor, + depth_bias_clamp, + depth_bias_slope_factor, + line_width, + )); + + self + } + + pub fn default_multisample(mut self, sample_count: VkSampleCountFlags) -> Self { + self.multisample = Some(VkPipelineMultisampleStateCreateInfo::new( + 0, + sample_count, + false, + 0.0, + &[], + false, + false, + )); + + self + } + + pub fn custom_multisample( + mut self, + sample_count: VkSampleCountFlags, + sample_shading_enable: bool, + min_sample_shading: f32, + sample_masks: &[VkSampleMask], + alpha_to_coverage_enable: bool, + alpha_to_one_enable: bool, + ) -> Self { + self.multisample = Some(VkPipelineMultisampleStateCreateInfo::new( + 0, + sample_count, + sample_shading_enable, + min_sample_shading, + sample_masks, + alpha_to_coverage_enable, + alpha_to_one_enable, + )); + + self + } + + pub fn add_dynamic_state(mut self, dynamic_state: VkDynamicState) -> Self { + self.dynamic_states.push(dynamic_state); + + self + } + + pub fn default_depth_stencil(mut self, depth_test: bool, stencil_test: bool) -> Self { + let stencil_op_state = VkStencilOpState { + failOp: VK_STENCIL_OP_KEEP, + passOp: VK_STENCIL_OP_KEEP, + depthFailOp: VK_STENCIL_OP_KEEP, + compareOp: VK_COMPARE_OP_ALWAYS, + compareMask: 0, + writeMask: 0, + reference: 0, + }; + + self.depth_stencil = Some(VkPipelineDepthStencilStateCreateInfo::new( + VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_NULL_BIT, + depth_test, + depth_test, + VK_COMPARE_OP_LESS, + false, + stencil_test, + stencil_op_state.clone(), + stencil_op_state, + 0.0, + 0.0, + )); + + self + } + + pub fn custom_depth_stencil( + mut self, + depth_test_enable: bool, + depth_write_enable: bool, + depth_compare_op: VkCompareOp, + depth_bounds_test_enable: bool, + stencil_test_enable: bool, + front: VkStencilOpState, + back: VkStencilOpState, + min_depth_bounds: f32, + max_depth_bounds: f32, + ) -> Self { + self.depth_stencil = Some(VkPipelineDepthStencilStateCreateInfo::new( + 0, + depth_test_enable, + depth_write_enable, + depth_compare_op, + depth_bounds_test_enable, + stencil_test_enable, + front, + back, + min_depth_bounds, + max_depth_bounds, + )); + + self + } + + pub fn default_color_blend( + mut self, + attachments: Vec, + ) -> Self { + self.blend_attachments = attachments; + + self.color_blend = Some(VkPipelineColorBlendStateCreateInfo::new( + 0, + false, + 
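// A common alpha-blend attachment state to feed into the color blend
// setup here (field names follow the Vulkan C struct; values are
// illustrative):
//
//     let blend_attachment = VkPipelineColorBlendAttachmentState {
//         blendEnable: true.into(),
//         srcColorBlendFactor: VK_BLEND_FACTOR_SRC_ALPHA,
//         dstColorBlendFactor: VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
//         colorBlendOp: VK_BLEND_OP_ADD,
//         srcAlphaBlendFactor: VK_BLEND_FACTOR_ONE,
//         dstAlphaBlendFactor: VK_BLEND_FACTOR_ZERO,
//         alphaBlendOp: VK_BLEND_OP_ADD,
//         colorWriteMask: VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT
//             | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
//     };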
VK_LOGIC_OP_NO_OP, + &self.blend_attachments, + [1.0, 1.0, 1.0, 1.0], + )); + + self + } + + pub fn custom_color_blend( + mut self, + logic_op_enable: bool, + logic_op: VkLogicOp, + attachments: Vec, + blend_constants: [f32; 4], + ) -> Self { + self.blend_attachments = attachments; + + self.color_blend = Some(VkPipelineColorBlendStateCreateInfo::new( + 0, + logic_op_enable, + logic_op, + &self.blend_attachments, + blend_constants, + )); + + self + } + + pub fn add_viewport(mut self, viewport: VkViewport) -> Self { + self.viewports.push(viewport); + + self + } + + pub fn add_scissor(mut self, scissor: VkRect2D) -> Self { + self.scissors.push(scissor); + + self + } + + pub fn build( + mut self, + device: Arc, + pipeline_layout: &Arc, + render_pass: &Arc, + subpass: u32, + ) -> Result> { + let mut rasterization = self.rasterization.expect("rasterization state is required"); + + if let Some(amd_rasterization_order) = &self.amd_rasterization_order { + if device.enabled_extensions().amd_rasterization_order { + rasterization.chain(amd_rasterization_order); + } + } + + let vertex_input = VkPipelineVertexInputStateCreateInfo::new( + 0, + &self.vertex_binding_description, + &self.vertex_attribute_description, + ); + + let mut stages = Vec::new(); + + if let Some(shader) = &self.vertex_shader { + stages.push(shader.pipeline_stage_info()); + } + + if let Some(shader) = &self.geometry_shader { + stages.push(shader.pipeline_stage_info()); + } + + if let Some((tesselation_control, tesselation_evaluation)) = &self.tesselation_shader { + stages.push(tesselation_control.pipeline_stage_info()); + stages.push(tesselation_evaluation.pipeline_stage_info()); + } + + if let Some(shader) = &self.fragment_shader { + stages.push(shader.pipeline_stage_info()); + } + + if self.viewports.is_empty() { + self.dynamic_states.push(VK_DYNAMIC_STATE_VIEWPORT); + self.viewports.push(VkViewport::default()); + } + + if self.scissors.is_empty() { + self.dynamic_states.push(VK_DYNAMIC_STATE_SCISSOR); + self.scissors.push(VkRect2D::default()); + } + + let viewport_state = + VkPipelineViewportStateCreateInfo::new(0, &self.viewports, &self.scissors); + + let tesselation = if self.patch_control_points != 0 { + Some(VkPipelineTessellationStateCreateInfo::new( + 0, + self.patch_control_points, + )) + } else { + None + }; + + let dynamic_state = VkPipelineDynamicStateCreateInfo::new(0, &self.dynamic_states); + + let pipeline_ci = VkGraphicsPipelineCreateInfo::new( + self.flags, + &stages, + Some(&vertex_input), + self.input_assembly.as_ref(), + tesselation.as_ref(), + Some(&viewport_state), + &rasterization, + self.multisample.as_ref(), + self.depth_stencil.as_ref(), + self.color_blend.as_ref(), + Some(&dynamic_state), + pipeline_layout.vk_handle(), + render_pass.vk_handle(), + subpass, + ); + + let pipeline = device.create_graphics_pipelines( + self.pipeline_cache.map(|cache| cache.vk_handle()), + &[pipeline_ci], + )?[0]; + + Ok(Arc::new(Pipeline::new( + device, + pipeline_layout.clone(), + PipelineType::Graphics, + pipeline, + ))) + } +} + +impl Default for GraphicsPipelineBuilder { + fn default() -> Self { + GraphicsPipelineBuilder { + flags: 0.into(), + + pipeline_cache: None, + + amd_rasterization_order: None, + + vertex_shader: None, + vertex_binding_description: Vec::new(), + vertex_attribute_description: Vec::new(), + + input_assembly: None, + + tesselation_shader: None, + patch_control_points: 0, + + geometry_shader: None, + + fragment_shader: None, + + viewports: Vec::new(), + scissors: Vec::new(), + + rasterization: None, + 
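// End-to-end sketch of the graphics pipeline builder (shaders, layout and
// render pass assumed). Note that when no viewport/scissor is added,
// `build` above injects VK_DYNAMIC_STATE_VIEWPORT / VK_DYNAMIC_STATE_SCISSOR
// automatically.
//
//     let graphics = Pipeline::new_graphics()
//         .set_vertex_shader(vertex_shader, bindings, attributes)
//         .set_fragment_shader(fragment_shader)
//         .input_assembly(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, false)
//         .default_rasterization(VK_CULL_MODE_BACK_BIT.into(), VK_FRONT_FACE_COUNTER_CLOCKWISE)
//         .default_multisample(VK_SAMPLE_COUNT_1_BIT)
//         .default_depth_stencil(true, false)
//         .default_color_blend(vec![blend_attachment])
//         .build(device.clone(), &pipeline_layout, &render_pass, 0)?;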
multisample: None, + depth_stencil: None, + + blend_attachments: Vec::new(), + color_blend: None, + + dynamic_states: Vec::new(), + } + } +} diff --git a/vulkan-rs/src/pipelines/mod.rs b/vulkan-rs/src/pipelines/mod.rs new file mode 100644 index 0000000..7ed0199 --- /dev/null +++ b/vulkan-rs/src/pipelines/mod.rs @@ -0,0 +1,4 @@ +pub mod compute_pipeline; +pub mod graphics_pipeline; +pub mod ray_tracing_pipeline; +pub mod shader_binding_table; diff --git a/vulkan-rs/src/pipelines/ray_tracing_pipeline.rs b/vulkan-rs/src/pipelines/ray_tracing_pipeline.rs new file mode 100644 index 0000000..c166af6 --- /dev/null +++ b/vulkan-rs/src/pipelines/ray_tracing_pipeline.rs @@ -0,0 +1,270 @@ +use crate::pipeline::PipelineType; +use crate::prelude::*; + +use anyhow::Result; + +use std::sync::Arc; + +use super::shader_binding_table::ShaderBindingTableBuilder; + +pub struct Library<'a> { + pipeline: &'a Arc, + + max_payload_size: u32, + max_attribute_size: u32, +} + +impl<'a> Library<'a> { + pub fn new( + pipeline: &'a Arc, + max_payload_size: u32, + max_attribute_size: u32, + ) -> Self { + Library { + pipeline, + + max_payload_size, + max_attribute_size, + } + } +} + +pub struct RayTracingPipelineBuilder<'a> { + shader_modules: Vec<(Arc, Option)>, + + shader_groups: Vec, + + libraries: Vec>, + + dynamic_states: Vec, + + flags: VkPipelineCreateFlagBits, + max_recursion: u32, + + shader_binding_table_builder: ShaderBindingTableBuilder, + + pipeline_cache: Option<&'a Arc>, +} + +impl<'a> RayTracingPipelineBuilder<'a> { + pub fn check_max_recursion(device: &Arc, max_recursion: u32) -> u32 { + max_recursion.min( + device + .physical_device() + .ray_tracing_properties() + .maxRayRecursionDepth, + ) + } + + pub fn add_dynamic_state(mut self, dynamic_state: VkDynamicState) -> Self { + self.dynamic_states.push(dynamic_state); + + self + } + + pub fn set_pipeline_cache(mut self, pipeline_cache: &'a Arc) -> Self { + self.pipeline_cache = Some(pipeline_cache); + + self + } + + pub fn set_flags(mut self, flags: impl Into) -> Self { + self.flags = flags.into(); + + self + } + + pub fn add_library(mut self, library: Library<'a>) -> Self { + self.libraries.push(library); + + self + } + + pub fn add_shader( + mut self, + shader_module: Arc, + data: Option>, + specialization_constants: Option, + ) -> Self { + self.shader_binding_table_builder = match shader_module.shader_type() { + ShaderType::RayGeneration => self + .shader_binding_table_builder + .add_ray_gen_program(self.shader_groups.len() as u32, data), + ShaderType::Miss => self + .shader_binding_table_builder + .add_miss_program(self.shader_groups.len() as u32, data), + _ => panic!( + "unsupported shader type: {:?}, expected RayGen or Miss Shader", + shader_module.shader_type() + ), + }; + + let shader_index = self.shader_modules.len(); + self.shader_modules + .push((shader_module, specialization_constants)); + + self.shader_groups + .push(VkRayTracingShaderGroupCreateInfoKHR::new( + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, + shader_index as u32, + VK_SHADER_UNUSED_KHR, + VK_SHADER_UNUSED_KHR, + VK_SHADER_UNUSED_KHR, + )); + + self + } + + pub fn add_hit_shaders( + mut self, + shader_modules: impl IntoIterator, Option)>, + data: Option>, + ) -> Self { + let mut group = VkRayTracingShaderGroupCreateInfoKHR::new( + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR, + VK_SHADER_UNUSED_KHR, + VK_SHADER_UNUSED_KHR, + VK_SHADER_UNUSED_KHR, + VK_SHADER_UNUSED_KHR, + ); + + for (shader_module, specialization_constant) in shader_modules.into_iter() { + let 
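// Ray tracing assembly sketch (shader modules assumed): raygen and miss
// shaders each become a GENERAL group via `add_shader`, while hit shaders
// are bundled into a single hit group by `add_hit_shaders` below.
//
//     let (rt_pipeline, sbt) = Pipeline::new_ray_tracing()
//         .add_shader(raygen_shader, None, None)
//         .add_shader(miss_shader, None, None)
//         .add_hit_shaders([(closest_hit_shader, None)], None)
//         .max_recursion_depth(2)
//         .build(device.clone(), &pipeline_layout)?;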
shader_index = self.shader_modules.len() as u32; + + match shader_module.shader_type() { + ShaderType::AnyHit => { + // sanity check + if cfg!(debug_assertions) && group.anyHitShader != VK_SHADER_UNUSED_KHR { + panic!("any hit shader already used in current hit group"); + } + + group.anyHitShader = shader_index; + } + ShaderType::ClosestHit => { + // sanity check + if cfg!(debug_assertions) && group.closestHitShader != VK_SHADER_UNUSED_KHR { + panic!("closest hit shader already used in current hit group"); + } + + group.closestHitShader = shader_index; + } + ShaderType::Intersection => { + // sanity check + if cfg!(debug_assertions) && group.intersectionShader != VK_SHADER_UNUSED_KHR { + panic!("intersection shader already used in current hit group"); + } + + group.intersectionShader = shader_index; + group.r#type = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR; + } + _ => panic!("unsupported shader type: {:?}, expected AnyHit, ClosestHit or Intersection Shader", shader_module.shader_type()), + } + + self.shader_modules + .push((shader_module, specialization_constant)); + } + self.shader_binding_table_builder = self + .shader_binding_table_builder + .add_hit_group_program(self.shader_groups.len() as u32, data); + self.shader_groups.push(group); + + self + } + + pub fn max_recursion_depth(mut self, max_recursion_depth: u32) -> Self { + self.max_recursion = max_recursion_depth; + + self + } + + pub fn build( + mut self, + device: Arc, + pipeline_layout: &Arc, + ) -> Result<(Arc, ShaderBindingTable)> { + let shader_stages: Vec = self + .shader_modules + .iter() + .map(|(shader, specialization_constant)| { + let mut stage_info = shader.pipeline_stage_info(); + if let Some(specialization_constant) = specialization_constant { + stage_info.set_specialization_info(specialization_constant.vk_handle()); + } + + stage_info + }) + .collect(); + + // check that we dont exceed the gpu's capabilities + let max_recursion = Self::check_max_recursion(&device, self.max_recursion); + + let pipeline = { + let mut libraries = Vec::with_capacity(self.libraries.len()); + let mut library_interface = VkRayTracingPipelineInterfaceCreateInfoKHR::new(0, 0); + + for library in self.libraries.iter() { + libraries.push(library.pipeline.vk_handle()); + + library_interface.maxPipelineRayPayloadSize = library_interface + .maxPipelineRayPayloadSize + .max(library.max_payload_size); + + library_interface.maxPipelineRayHitAttributeSize = library_interface + .maxPipelineRayHitAttributeSize + .max(library.max_attribute_size); + } + + let lib_create_info = VkPipelineLibraryCreateInfoKHR::new(&libraries); + + let dynamic_states = VkPipelineDynamicStateCreateInfo::new(0, &self.dynamic_states); + + device.create_ray_tracing_pipelines( + None, + self.pipeline_cache.map(|cache| cache.vk_handle()), + &[VkRayTracingPipelineCreateInfoKHR::new( + self.flags, + &shader_stages, // stages + &self.shader_groups, // groups + max_recursion, + &lib_create_info, // libraries + &library_interface, // library interfaces + &dynamic_states, + pipeline_layout.vk_handle(), + )], + None, + )?[0] + }; + + let pipeline = Arc::new(Pipeline::new( + device.clone(), + pipeline_layout.clone(), + PipelineType::RayTracing, + pipeline, + )); + + let sbt = self + .shader_binding_table_builder + .build(&device, &pipeline)?; + + Ok((pipeline, sbt)) + } +} + +impl<'a> Default for RayTracingPipelineBuilder<'a> { + fn default() -> Self { + RayTracingPipelineBuilder { + shader_modules: Vec::new(), + shader_groups: Vec::new(), + + flags: 0.into(), + 
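// Note on `add_library` above: each added library widens the shared
// pipeline interface, so with illustrative sizes
//
//     let builder = builder.add_library(Library::new(&lib_pipeline, 32, 8));
//
// the final pipeline ends up with maxPipelineRayPayloadSize >= 32 and
// maxPipelineRayHitAttributeSize >= 8.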
max_recursion: 2, + + libraries: Vec::new(), + dynamic_states: Vec::new(), + + shader_binding_table_builder: ShaderBindingTableBuilder::new(), + + pipeline_cache: None, + } + } +} diff --git a/vulkan-rs/src/pipelines/shader_binding_table.rs b/vulkan-rs/src/pipelines/shader_binding_table.rs new file mode 100644 index 0000000..babb447 --- /dev/null +++ b/vulkan-rs/src/pipelines/shader_binding_table.rs @@ -0,0 +1,289 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::sync::Arc; + +struct ShaderBindingTableEntry { + group_index: u32, + inline_data: Vec, +} +pub(crate) struct ShaderBindingTableBuilder { + ray_gen_entries: Vec, + miss_entries: Vec, + hit_group_entries: Vec, +} + +pub struct ShaderBindingTable { + _sbt_buffer: Arc>, + + raygen_shader_binding_table: VkStridedDeviceAddressRegionKHR, + miss_shader_binding_table: VkStridedDeviceAddressRegionKHR, + hit_shader_binding_table: VkStridedDeviceAddressRegionKHR, + callable_shader_binding_table: VkStridedDeviceAddressRegionKHR, +} + +impl ShaderBindingTable { + pub fn raygen_shader_binding_table(&self) -> &VkStridedDeviceAddressRegionKHR { + &self.raygen_shader_binding_table + } + + pub fn miss_shader_binding_table(&self) -> &VkStridedDeviceAddressRegionKHR { + &self.miss_shader_binding_table + } + + pub fn hit_shader_binding_table(&self) -> &VkStridedDeviceAddressRegionKHR { + &self.hit_shader_binding_table + } + + pub fn callable_shader_binding_table(&self) -> &VkStridedDeviceAddressRegionKHR { + &self.callable_shader_binding_table + } + + fn create( + sbt_buffer: Arc>, + ray_gen_entry_size: VkDeviceSize, + ray_gen_entry_count: VkDeviceSize, + miss_offset: VkDeviceSize, + miss_entry_size: VkDeviceSize, + miss_entry_count: VkDeviceSize, + hit_group_offset: VkDeviceSize, + hit_group_entry_size: VkDeviceSize, + hit_group_entry_count: VkDeviceSize, + ) -> Self { + let device_address: VkDeviceAddress = sbt_buffer.device_address().into(); + + ShaderBindingTable { + raygen_shader_binding_table: VkStridedDeviceAddressRegionKHR { + deviceAddress: device_address, + stride: ray_gen_entry_size, + size: ray_gen_entry_size * ray_gen_entry_count, + }, + + miss_shader_binding_table: VkStridedDeviceAddressRegionKHR { + deviceAddress: device_address + miss_offset, + stride: miss_entry_size, + size: miss_entry_size * miss_entry_count, + }, + + hit_shader_binding_table: VkStridedDeviceAddressRegionKHR { + deviceAddress: device_address + hit_group_offset, + stride: hit_group_entry_size, + size: hit_group_entry_size * hit_group_entry_count, + }, + + callable_shader_binding_table: VkStridedDeviceAddressRegionKHR { + deviceAddress: 0, + stride: 0, + size: 0, + }, + + _sbt_buffer: sbt_buffer, + } + } +} + +impl ShaderBindingTableBuilder { + pub(crate) fn new() -> ShaderBindingTableBuilder { + ShaderBindingTableBuilder { + ray_gen_entries: Vec::new(), + miss_entries: Vec::new(), + hit_group_entries: Vec::new(), + } + } + + pub(crate) fn add_ray_gen_program(mut self, group_index: u32, data: Option>) -> Self { + self.ray_gen_entries.push(ShaderBindingTableEntry { + group_index, + inline_data: match data { + Some(data) => data, + None => Vec::new(), + }, + }); + + self + } + + pub(crate) fn add_miss_program(mut self, group_index: u32, data: Option>) -> Self { + self.miss_entries.push(ShaderBindingTableEntry { + group_index, + inline_data: match data { + Some(data) => data, + None => Vec::new(), + }, + }); + + self + } + + pub(crate) fn add_hit_group_program(mut self, group_index: u32, data: Option>) -> Self { + 
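// Layout produced by `ShaderBindingTable::create` above, consumed as the
// strided regions of vkCmdTraceRaysKHR (offsets as computed in `build`):
//
//     deviceAddress              +miss_offset             +hit_group_offset
//     | raygen * stride s_rg  |  miss entries * s_miss  |  hit groups * s_hit |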
self.hit_group_entries.push(ShaderBindingTableEntry { + group_index, + inline_data: match data { + Some(data) => data, + None => Vec::new(), + }, + }); + + self + } + + pub(crate) fn build( + &mut self, + device: &Arc, + pipeline: &Arc, + ) -> Result { + let ray_tracing_properties = device.physical_device().ray_tracing_properties(); + + let prog_id_size = ray_tracing_properties.shaderGroupHandleSize; + let base_alignment = ray_tracing_properties.shaderGroupBaseAlignment; + + let ray_gen_entry_size = + Self::entry_size(prog_id_size, &self.ray_gen_entries, prog_id_size as u64); + let miss_entry_size = + Self::entry_size(prog_id_size, &self.miss_entries, prog_id_size as u64); + let hit_group_entry_size = + Self::entry_size(prog_id_size, &self.hit_group_entries, prog_id_size as u64); + + let sbt_size = (ray_gen_entry_size * self.ray_gen_entries.len() as VkDeviceSize) + .max(base_alignment as VkDeviceSize) + + (miss_entry_size * self.miss_entries.len() as VkDeviceSize) + .max(base_alignment as VkDeviceSize) + + hit_group_entry_size * self.hit_group_entries.len() as VkDeviceSize; + + let group_count = + self.ray_gen_entries.len() + self.miss_entries.len() + self.hit_group_entries.len(); + + let shader_handle_storage = + pipeline.ray_tracing_shader_group_handles(group_count as u32, prog_id_size)?; + + let mut sbt_data = vec![0; sbt_size as usize]; + let mut offset = 0; + + Self::copy_shader_data( + &mut sbt_data, + prog_id_size, + &mut offset, + &self.ray_gen_entries, + ray_gen_entry_size, + base_alignment, + &shader_handle_storage, + ); + + let miss_offset = offset; + + Self::copy_shader_data( + &mut sbt_data, + prog_id_size, + &mut offset, + &self.miss_entries, + miss_entry_size, + base_alignment, + &shader_handle_storage, + ); + + let hit_group_offset = offset; + + Self::copy_shader_data( + &mut sbt_data, + prog_id_size, + &mut offset, + &self.hit_group_entries, + hit_group_entry_size, + base_alignment, + &shader_handle_storage, + ); + + let sbt_buffer = Buffer::builder() + .set_usage( + VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR + | VK_BUFFER_USAGE_TRANSFER_SRC_BIT + | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT, + ) + .set_memory_usage(MemoryUsage::CpuToGpu) + .set_data(&sbt_data) + .build(device.clone())?; + + Ok(ShaderBindingTable::create( + sbt_buffer, + ray_gen_entry_size, + self.ray_gen_entries.len() as VkDeviceSize, + miss_offset, + miss_entry_size, + self.miss_entries.len() as VkDeviceSize, + hit_group_offset, + hit_group_entry_size, + self.hit_group_entries.len() as VkDeviceSize, + )) + } +} + +impl ShaderBindingTableBuilder { + #[inline] + fn entry_size( + prog_id_size: u32, + entries: &[ShaderBindingTableEntry], + padding: u64, + ) -> VkDeviceSize { + let mut max_args = 0; + + for entry in entries { + max_args = max_args.max(entry.inline_data.len()); + } + + let mut entry_size = prog_id_size as VkDeviceSize + max_args as VkDeviceSize; + + entry_size = Self::round_up(entry_size, padding); + + entry_size + } + + #[inline] + fn round_up(source: u64, value: u64) -> u64 { + ((source) + (value) - 1) & !((value) - 1) + } + + #[inline] + fn copy_shader_data( + sbt_data: &mut Vec, + prog_id_size: u32, + offset: &mut VkDeviceSize, + entries: &[ShaderBindingTableEntry], + _entry_size: VkDeviceSize, + base_alignment: u32, + shader_handle_storage: &[u8], + ) { + for entry in entries { + // copy the shader identifier + { + let sbt_start = *offset as usize; + let sbt_end = sbt_start + prog_id_size as usize; + + let shs_start = (entry.group_index * prog_id_size) as usize; + let shs_end = shs_start 
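// Worked example for the module-internal `round_up` above: a 32-byte
// shader handle plus 8 bytes of inline data (40 bytes raw) is padded up to
// the next multiple of a 64-byte base alignment:
//
//     round_up(40, 64) == ((40 + 64 - 1) & !(64 - 1)) == 64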
+ prog_id_size as usize; + + sbt_data[sbt_start..sbt_end] + .copy_from_slice(&shader_handle_storage[shs_start..shs_end]); + } + + // copy data if present + if !entry.inline_data.is_empty() { + let tmp_offset = *offset + prog_id_size as VkDeviceSize; + + let sbt_start = tmp_offset as usize; + let sbt_end = sbt_start + entry.inline_data.len(); + + sbt_data[sbt_start..sbt_end].copy_from_slice(&entry.inline_data); + } + + *offset += prog_id_size as VkDeviceSize; + } + + // increase offset with correct alignment + let modulo = *offset % base_alignment as VkDeviceSize; + + if modulo != 0 { + *offset += base_alignment as VkDeviceSize - modulo; + } + } +} diff --git a/vulkan-rs/src/prelude.rs b/vulkan-rs/src/prelude.rs new file mode 100644 index 0000000..4278819 --- /dev/null +++ b/vulkan-rs/src/prelude.rs @@ -0,0 +1,49 @@ +// vulkan structures +pub use super::address::Address; +pub use super::buffer::Buffer; +pub use super::commandbuffer::{ + CommandBuffer, CommandBufferBuilder, CommandBufferRecorder, QueryEnable, +}; +pub use super::deferred_operation::*; +pub use super::descriptorpool::DescriptorPool; +pub use super::descriptorset::*; +pub use super::descriptorsetlayout::DescriptorSetLayout; +pub use super::device::{Device, DeviceExtensions, DeviceFeatures}; +pub use super::fence::Fence; +pub use super::framebuffer::{Framebuffer, FramebufferBuilder}; +pub use super::image::*; +pub use super::instance::*; +pub use super::memory::{Memory, MemoryUsage}; +pub use super::physicaldevice::PhysicalDevice; +pub use super::pipeline::Pipeline; +pub use super::pipelinecache::PipelineCache; +pub use super::pipelinelayout::{PipelineLayout, PipelineLayoutBuilder}; +pub use super::querypool::QueryPool; +pub use super::queue::*; +pub use super::renderpass::RenderPass; +pub use super::sampler_manager::{Sampler, SamplerBuilder}; +pub use super::semaphore::Semaphore; +pub use super::shadermodule::{ + AddSpecializationConstant, ShaderModule, ShaderType, SpecializationConstants, +}; +pub use super::surface::Surface; +pub use super::swapchain::Swapchain; + +pub use super::pipelines::{ + compute_pipeline::ComputePipelineBuilder, graphics_pipeline::GraphicsPipelineBuilder, + shader_binding_table::ShaderBindingTable, +}; + +pub use super::pipelines::ray_tracing_pipeline::RayTracingPipelineBuilder; + +pub use super::acceleration_structure::{AccelerationStructure, AccelerationStructureBuilder}; + +pub use super::{OutOfDate, VkHandle, VulkanDevice}; + +pub use image; +pub use vulkan_sys::prelude::*; + +pub use super::render_target::{ + sub_pass::{ClearValue, CustomTarget, SubPass, SubPassBuilder}, + RenderTarget, +}; diff --git a/vulkan-rs/src/querypool.rs b/vulkan-rs/src/querypool.rs new file mode 100644 index 0000000..fc0aff7 --- /dev/null +++ b/vulkan-rs/src/querypool.rs @@ -0,0 +1,66 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::mem; +use std::sync::Arc; + +#[derive(Debug)] +pub struct QueryPool { + device: Arc, + query_pool: VkQueryPool, + query_count: u32, +} + +impl QueryPool { + pub fn new( + device: Arc, + query_type: VkQueryType, + query_count: u32, + pipeline_statistics: impl Into, + ) -> Result> { + let query_pool_ci = VkQueryPoolCreateInfo::new( + VK_QUERY_POOL_CREATE_NULL_BIT, + query_type, + query_count, + pipeline_statistics, + ); + + let query_pool = device.create_query_pool(&query_pool_ci)?; + + Ok(Arc::new(QueryPool { + device, + query_pool, + query_count, + })) + } + + pub fn get_results(&self) -> Result> { + let mut data = vec![0; self.query_count as usize]; + + 
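// Usage sketch for the query pool above (query type and statistics flags
// assumed; `get_results` reads 64-bit values, matching the
// VK_QUERY_RESULT_64_BIT flag passed below):
//
//     let pool = QueryPool::new(device.clone(), VK_QUERY_TYPE_TIMESTAMP, 2, 0u32)?;
//     let timestamps = pool.get_results()?;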
self.device.query_pool_results( + self.query_pool, + 0, + self.query_count, + &mut data, + mem::size_of::() as u64, + VK_QUERY_RESULT_64_BIT, + )?; + + Ok(data) + } +} + +impl VulkanDevice for QueryPool { + fn device(&self) -> &Arc { + &self.device + } +} + +impl_vk_handle!(QueryPool, VkQueryPool, query_pool); + +impl Drop for QueryPool { + fn drop(&mut self) { + self.device.destroy_query_pool(self.query_pool); + } +} diff --git a/vulkan-rs/src/queue.rs b/vulkan-rs/src/queue.rs new file mode 100644 index 0000000..fc90653 --- /dev/null +++ b/vulkan-rs/src/queue.rs @@ -0,0 +1,242 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::{ + slice, + sync::{Arc, Mutex}, + time::Duration, +}; + +pub struct QueueRequestInfo { + pub queue_create_info: VkDeviceQueueCreateInfo, + pub queue_family_index: u32, + pub queue_index: u32, +} + +#[derive(Debug)] +pub struct Queue { + device: Arc, + queue: VkQueue, + family_index: u32, + queue_index: u32, +} + +impl Queue { + pub fn create_presentable_request_info( + physical_device: &Arc, + surface: &Arc, + queue_type: impl Into, + ) -> Result { + let index = + Self::find_presentable_queue_index(physical_device, surface, queue_type.into())?; + + let priorities = &[0.0f32]; + + Ok(QueueRequestInfo { + queue_create_info: VkDeviceQueueCreateInfo::new(0, index, priorities), + queue_family_index: index, + queue_index: 0, + }) + } + + pub fn create_non_presentable_request_info( + physical_device: &Arc, + queue_type: impl Into, + ) -> Result { + let index = Self::find_non_presentable_queue_index(physical_device, queue_type.into())?; + + let priorities = &[0.0f32]; + + Ok(QueueRequestInfo { + queue_create_info: VkDeviceQueueCreateInfo::new(0, index, priorities), + queue_family_index: index, + queue_index: 0, + }) + } + + pub fn new( + device: Arc, + queue: VkQueue, + family_index: u32, + queue_index: u32, + ) -> Arc> { + Arc::new(Mutex::new(Queue { + device, + queue, + family_index, + queue_index, + })) + } + + pub fn family_index(&self) -> u32 { + self.family_index + } + + pub fn queue_index(&self) -> u32 { + self.queue_index + } + + /// really expensiv call, since its locks the queue until it is idle + pub fn submit(&self, fence: Option<&Arc>, submits: &[SubmitInfo]) -> Result<()> { + let submit_infos: Vec = submits.iter().map(|s| s.as_vk_submit()).collect(); + + let fence = match fence { + Some(fence) => fence.vk_handle(), + None => VkFence::NULL_HANDLE, + }; + + self.device + .queue_submit(self.queue, submit_infos.as_slice(), fence) + } + + pub fn minimal_submit( + &self, + time_out: Duration, + command_buffers: &[Arc], + ) -> Result<()> { + let mut submit = SubmitInfo::default(); + + for command_buffer in command_buffers.iter() { + submit = submit.add_command_buffer(command_buffer); + } + + let fence = Fence::builder().build(self.device.clone())?; + + self.submit(Some(&fence), slice::from_ref(&submit))?; + + // make sure command_buffer is ready + self.device.wait_for_fences(&[&fence], true, time_out)?; + + Ok(()) + } + + pub fn present( + &self, + swapchains: &[&Arc], + image_indices: &[u32], + wait_semaphores: &[&Arc], + ) -> Result> { + let wait_semaphores: Vec = + wait_semaphores.iter().map(|sem| sem.vk_handle()).collect(); + + let swapchains: Vec = swapchains + .iter() + .map(|swapchain| swapchain.vk_handle()) + .collect(); + + let present_info = VkPresentInfoKHR::new( + wait_semaphores.as_slice(), + swapchains.as_slice(), + image_indices, + &mut [], + ); + + self.device.queue_present(self.queue, &present_info) + } + + pub fn wait_idle(&self) -> 
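// Submit sketch for the queue above (fence, semaphores and command buffer
// assumed; `Queue::new` hands the queue out as Arc<Mutex<Queue>>, so lock
// it before submitting). `SubmitInfo` is the builder defined at the end of
// this file.
//
//     let submit = SubmitInfo::default()
//         .add_wait_semaphore(&image_available)
//         .add_wait_stage(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT)
//         .add_command_buffer(&command_buffer)
//         .add_signal_semaphore(&render_finished);
//
//     queue.lock().unwrap().submit(Some(&fence), &[submit])?;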
Result<()> { + self.device.queue_wait_idle(self.queue) + } +} + +impl VulkanDevice for Queue { + fn device(&self) -> &Arc { + &self.device + } +} + +impl_vk_handle!(Queue, VkQueue, queue); + +impl Queue { + fn find_presentable_queue_index( + physical_device: &Arc, + surface: &Arc, + flags: VkQueueFlagBits, + ) -> Result { + let surface = surface.vk_handle(); + let vk_physical_device = physical_device.vk_handle(); + + let queue_family_properties = physical_device + .instance() + .physical_device_queue_family_properties(vk_physical_device); + + for (i, queue) in queue_family_properties.iter().enumerate() { + if (queue.queueFlagBits & flags) == flags { + let presentable = physical_device.instance().physical_device_surface_support( + vk_physical_device, + i as u32, + surface, + )?; + + if presentable { + return Ok(i as u32); + } + } + } + + Err(anyhow::Error::msg("Requested queue could not be found")) + } + + fn find_non_presentable_queue_index( + physical_device: &Arc, + flags: VkQueueFlagBits, + ) -> Result { + let vk_physical_device = physical_device.vk_handle(); + + let queue_family_properties = physical_device + .instance() + .physical_device_queue_family_properties(vk_physical_device); + + for (i, queue) in queue_family_properties.iter().enumerate() { + if (queue.queueFlagBits & flags) == flags { + return Ok(i as u32); + } + } + + Err(anyhow::Error::msg("Requested queue could not be found")) + } +} + +#[derive(Default)] +pub struct SubmitInfo { + wait_semaphores: Vec, + wait_stages: Vec, + command_buffers: Vec, + signal_semaphores: Vec, +} + +impl SubmitInfo { + pub fn add_wait_semaphore(mut self, wait_semaphore: impl VkHandle) -> Self { + self.wait_semaphores.push(wait_semaphore.vk_handle()); + + self + } + + pub fn add_wait_stage(mut self, wait_stage: impl Into) -> Self { + self.wait_stages.push(wait_stage.into()); + + self + } + + pub fn add_command_buffer(mut self, command_buffer: impl VkHandle) -> Self { + self.command_buffers.push(command_buffer.vk_handle()); + + self + } + + pub fn add_signal_semaphore(mut self, signal_semaphore: impl VkHandle) -> Self { + self.signal_semaphores.push(signal_semaphore.vk_handle()); + + self + } + + pub fn as_vk_submit(&self) -> VkSubmitInfo { + VkSubmitInfo::new( + self.wait_semaphores.as_slice(), + self.wait_stages.as_slice(), + self.command_buffers.as_slice(), + self.signal_semaphores.as_slice(), + ) + } +} diff --git a/vulkan-rs/src/render_target/mod.rs b/vulkan-rs/src/render_target/mod.rs new file mode 100644 index 0000000..e04e0d3 --- /dev/null +++ b/vulkan-rs/src/render_target/mod.rs @@ -0,0 +1,390 @@ +use crate::prelude::*; +use anyhow::Result; +use std::sync::Arc; + +pub mod sub_pass; +use sub_pass::{AttachmentInfo, AttachmentInfoUsage, SubPass}; + +pub struct RenderTargetBuilder { + sub_passes: Vec, +} + +impl RenderTargetBuilder { + pub fn add_sub_pass(mut self, sub_pass: SubPass) -> Self { + self.sub_passes.push(sub_pass); + + self + } + + pub fn build(self, device: &Arc) -> Result { + #[cfg(debug_assertions)] + { + // sub passes must not be empty + assert!(!self.sub_passes.is_empty()); + + // sub passes must all have the same extent + let first_extent = self.sub_passes[0].extent(); + + for sub_pass in self.sub_passes.iter() { + assert!(sub_pass.extent() == first_extent); + } + } + + // create render pass + + // gather attachment descriptions + let mut attachments = Vec::new(); + + self.map_attachment(|attachment| { + attachments.push(attachment.description.clone()); + }); + + // create attachment references + let mut 
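// Present sketch for `Queue::present` above (swapchain, image index and
// semaphore assumed); the return value presumably carries the crate's
// `OutOfDate` marker, signalling that the swapchain needs recreation:
//
//     queue.lock().unwrap().present(&[&swapchain], &[image_index], &[&render_finished])?;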
attachment_references: Vec = Vec::new(); + let mut attachment_index = 0; + + // gather all color, depth and resolve attachment and add input attachments from previous sup passes + for sub_pass in self.sub_passes.iter() { + let mut references = SubPassAttachmentReferences::default(); + references.offset = attachment_index as usize; + + for attachment in sub_pass.attachments().iter() { + let attachment_reference = VkAttachmentReference { + attachment: attachment_index, + layout: attachment.layout, + }; + + match attachment.usage { + AttachmentInfoUsage::Output => { + references.color_attachments.push(attachment_reference); + } + AttachmentInfoUsage::Resolve => { + references.resolve_attachments.push(attachment_reference); + } + AttachmentInfoUsage::Depth => { + // make sure only 1 depth attachment is used per subpass + debug_assert!( + references.depth_stencil_attachment.is_none(), + "only 1 depth attachment per sub pass allowed" + ); + + references.depth_stencil_attachment = Some(attachment_reference); + } + } + + attachment_index += 1; + } + + // check if input infos are set + if let Some(input_info) = sub_pass.inputs() { + debug_assert!(input_info.sub_pass_index < attachment_references.len()); + + let input_pass_references = &attachment_references[input_info.sub_pass_index]; + + for input_index in input_info.input_indices.iter() { + references.input_attachments.push(VkAttachmentReference { + attachment: (input_index + input_pass_references.offset) as u32, + layout: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + }); + } + } + + attachment_references.push(references); + } + + // gather sub pass descriptions + let mut descriptions = Vec::new(); + + for attachment_reference in attachment_references.iter() { + descriptions.push(VkSubpassDescription::new( + 0, + &attachment_reference.input_attachments, + &attachment_reference.color_attachments, + &attachment_reference.resolve_attachments, + attachment_reference.depth_stencil_attachment.as_ref(), + &[], + )); + } + + // gather sub pass dependencies + let mut dependencies = Vec::new(); + + dependencies.push(VkSubpassDependency::new( + VK_SUBPASS_EXTERNAL, + 0, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + VK_ACCESS_MEMORY_READ_BIT, + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + VK_DEPENDENCY_BY_REGION_BIT, + )); + + for (index, sub_pass) in self.sub_passes.iter().enumerate() { + dependencies.push(VkSubpassDependency::new( + index as u32, + index as u32 + 1, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + CommandBuffer::access_to_stage(sub_pass.output_usage()), + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + sub_pass.output_usage(), + VK_DEPENDENCY_BY_REGION_BIT, + )); + } + + if let Some(last_dependency) = dependencies.last_mut() { + last_dependency.dstSubpass = VK_SUBPASS_EXTERNAL; + last_dependency.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.into(); + last_dependency.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT.into(); + } + + // dependencies.push(VkSubpassDependency::new( + // self.sub_passes.len() as u32 - 1, + // VK_SUBPASS_EXTERNAL, + // VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, + // VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, + // VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + // VK_ACCESS_MEMORY_READ_BIT, + // VK_DEPENDENCY_BY_REGION_BIT, + // )); + + let render_pass = + RenderPass::new(device.clone(), &descriptions, &attachments, &dependencies)?; + + // create frame buffers + let max_images = 
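// Shape of the dependency chain assembled above:
//
//     EXTERNAL -> subpass 0 -> subpass 1 -> ... -> subpass N-1 -> EXTERNAL
//
// The loop emits one dependency per sub pass transition, and the
// `last_mut()` fixup redirects the final entry back to VK_SUBPASS_EXTERNAL
// with BOTTOM_OF_PIPE / MEMORY_READ as the destination.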
self.max_images(); + let extent = self.sub_passes[0].extent(); + + let framebuffers: Result>> = (0..max_images) + .map(|i| { + let mut framebuffer_builder = Framebuffer::builder() + .set_render_pass(&render_pass) + .set_width(extent.width) + .set_height(extent.height); + + for sub_pass in self.sub_passes.iter() { + for attachment in sub_pass.attachments().iter() { + framebuffer_builder = + framebuffer_builder.add_attachment(attachment.image(i)); + } + } + + framebuffer_builder.build(render_pass.device().clone()) + }) + .collect(); + + let mut clear_values = Vec::new(); + + self.map_attachment(|attachment| { + clear_values.push(attachment.clear_value.clone()); + }); + + Ok(RenderTarget { + render_pass, + framebuffers: framebuffers?, + clear_values, + + extent, + + sub_passes: self.sub_passes, + }) + } + + #[inline] + fn max_images(&self) -> usize { + let mut max_images = 0; + + for sub_pass in self.sub_passes.iter() { + max_images = max_images.max(sub_pass.max_images_per_attachment()); + } + + max_images + } + + #[inline] + fn map_attachment<'a, F>(&'a self, mut f: F) + where + F: FnMut(&'a AttachmentInfo) -> (), + { + for sub_pass in self.sub_passes.iter() { + for attachment in sub_pass.attachments().iter() { + f(attachment); + } + } + } +} + +#[derive(Default)] +struct SubPassAttachmentReferences { + offset: usize, + + input_attachments: Vec, + color_attachments: Vec, + + resolve_attachments: Vec, + depth_stencil_attachment: Option, +} + +pub struct RenderTarget { + render_pass: Arc, + framebuffers: Vec>, + clear_values: Vec, + + extent: VkExtent2D, + + sub_passes: Vec, +} + +impl RenderTarget { + pub fn builder() -> RenderTargetBuilder { + RenderTargetBuilder { + sub_passes: Vec::new(), + } + } + + pub fn render_pass(&self) -> &Arc { + &self.render_pass + } + + pub fn framebuffer(&self, index: usize) -> &Arc { + &self.framebuffers[index] + } + + pub fn sub_pass(&self, index: usize) -> &SubPass { + &self.sub_passes[index] + } + + pub fn width(&self) -> u32 { + self.extent.width + } + + pub fn height(&self) -> u32 { + self.extent.height + } + + pub fn begin( + &self, + buffer_recorder: &CommandBufferRecorder<'_>, + subpass_content: VkSubpassContents, + framebuffer_index: usize, + ) { + let renderpass_begin = VkRenderPassBeginInfo::new( + self.render_pass.vk_handle(), + self.framebuffers[framebuffer_index].vk_handle(), + VkRect2D { + offset: VkOffset2D { x: 0, y: 0 }, + extent: self.extent, + }, + self.clear_values.as_slice(), + ); + + buffer_recorder.begin_render_pass(renderpass_begin, subpass_content); + } + + pub fn next_subpass( + &self, + buffer_recorder: &CommandBufferRecorder<'_>, + subpass_content: VkSubpassContents, + ) { + buffer_recorder.next_subpass(subpass_content); + } + + pub fn end(&self, buffer_recorder: &CommandBufferRecorder<'_>) { + buffer_recorder.end_render_pass(); + } +} + +// impl<'a> RenderTargetBuilder<'a> { +// fn create_images_and_renderpass(&self, device: &Arc) -> Result> { +// let subpass_descriptions = [match resolve_reference { +// Some(resvole_ref) => VkSubpassDescription::new( +// 0, +// &[], +// color_references.as_slice(), +// &[resvole_ref], +// match depth_reference { +// Some(ref depth_ref) => Some(depth_ref), +// None => None, +// }, +// &[], +// ), +// None => VkSubpassDescription::new( +// 0, +// &[], +// color_references.as_slice(), +// &[], +// match depth_reference { +// Some(ref depth_ref) => Some(depth_ref), +// None => None, +// }, +// &[], +// ), +// }]; + +// let dependencies = if color_references.is_empty() { +// // assume, that when no 
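// Usage sketch for the render target API above (sub pass, command buffer
// recorder and image index assumed):
//
//     let render_target = RenderTarget::builder()
//         .add_sub_pass(sub_pass)
//         .build(&device)?;
//
//     render_target.begin(&recorder, VK_SUBPASS_CONTENTS_INLINE, image_index);
//     // ... record draw calls, calling next_subpass(...) between sub passes ...
//     render_target.end(&recorder);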
color references are given, +// // we want to store the depth information for later +// if depth_reference.is_some() { +// for attachment in &mut attachments { +// if attachment.format == VK_FORMAT_D16_UNORM { +// attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE; +// attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; +// break; +// } +// } +// } + +// [ +// VkSubpassDependency::new( +// VK_SUBPASS_EXTERNAL, +// 0, +// VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, +// VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, +// 0, +// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT +// | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, +// VK_DEPENDENCY_BY_REGION_BIT, +// ), +// VkSubpassDependency::new( +// 0, +// VK_SUBPASS_EXTERNAL, +// VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT, +// VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, +// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT +// | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, +// VK_ACCESS_SHADER_READ_BIT, +// VK_DEPENDENCY_BY_REGION_BIT, +// ), +// ] +// } else { +// [ +// VkSubpassDependency::new( +// VK_SUBPASS_EXTERNAL, +// 0, +// VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, +// VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, +// VK_ACCESS_MEMORY_READ_BIT, +// VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, +// VK_DEPENDENCY_BY_REGION_BIT, +// ), +// VkSubpassDependency::new( +// 0, +// VK_SUBPASS_EXTERNAL, +// VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, +// VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, +// VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, +// VK_ACCESS_MEMORY_READ_BIT, +// VK_DEPENDENCY_BY_REGION_BIT, +// ), +// ] +// }; + +// let renderpass = RenderPass::new( +// device.clone(), +// &subpass_descriptions, +// attachments.as_slice(), +// &dependencies, +// )?; + +// Ok(renderpass) +// } +// } diff --git a/vulkan-rs/src/render_target/sub_pass.rs b/vulkan-rs/src/render_target/sub_pass.rs new file mode 100644 index 0000000..1f6b42f --- /dev/null +++ b/vulkan-rs/src/render_target/sub_pass.rs @@ -0,0 +1,439 @@ +use crate::prelude::*; +use anyhow::Result; +use std::sync::{Arc, Mutex}; + +pub enum ClearValue { + Color([f32; 4]), + Depth(f32, u32), +} + +pub struct CustomTarget { + pub usage: VkImageUsageFlagBits, + pub format: VkFormat, + pub clear_on_load: bool, + pub store_on_save: bool, + pub attach_sampler: bool, + pub use_as_input: bool, + pub clear_value: ClearValue, +} + +impl CustomTarget { + pub fn depth() -> CustomTarget { + CustomTarget { + usage: VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.into(), + format: VK_FORMAT_D16_UNORM, + clear_on_load: true, + store_on_save: false, + attach_sampler: false, + use_as_input: false, + clear_value: ClearValue::Depth(1.0, 0), + } + } + + fn to_attachment_info( + &self, + device: &Arc, + queue: &Arc>, + width: u32, + height: u32, + sample_count: VkSampleCountFlags, + ) -> Result { + let clear_operation = SubPassBuilder::clear_op(self.clear_on_load); + let store_operation = SubPassBuilder::store_op(self.store_on_save); + + // set clear values + let clear_value = match self.clear_value { + ClearValue::Color(color) => VkClearValue::color(VkClearColorValue::float32(color)), + ClearValue::Depth(depth, stencil) => { + VkClearValue::depth_stencil(VkClearDepthStencilValue { depth, stencil }) + } + }; + + // check for color attachment flag + let (format, aspect, description, usage, layout) = + if (self.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) != 0 { + // set color attachment + let description = VkAttachmentDescription::new( + 0, + self.format, + 
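// A color variant of `CustomTarget::depth()` above (format and flag values
// are illustrative):
//
//     let color_target = CustomTarget {
//         usage: VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.into(),
//         format: VK_FORMAT_B8G8R8A8_UNORM,
//         clear_on_load: true,
//         store_on_save: true,
//         attach_sampler: true,   // sampled later -> SHADER_READ_ONLY_OPTIMAL
//         use_as_input: false,
//         clear_value: ClearValue::Color([0.0, 0.0, 0.0, 1.0]),
//     };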
sample_count,
+                    clear_operation,
+                    store_operation,
+                    VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+                    VK_ATTACHMENT_STORE_OP_DONT_CARE,
+                    VK_IMAGE_LAYOUT_UNDEFINED,
+                    if self.attach_sampler || self.use_as_input {
+                        VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
+                    } else {
+                        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+                    },
+                );
+
+                (
+                    self.format,
+                    VK_IMAGE_ASPECT_COLOR_BIT,
+                    description,
+                    AttachmentInfoUsage::Output,
+                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+                )
+            // check for depth attachment flag
+            } else if (self.usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0 {
+                // set depth attachment
+                let description = VkAttachmentDescription::new(
+                    0,
+                    self.format,
+                    sample_count,
+                    clear_operation,
+                    store_operation,
+                    VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+                    VK_ATTACHMENT_STORE_OP_DONT_CARE,
+                    VK_IMAGE_LAYOUT_UNDEFINED,
+                    VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+                );
+
+                // take format and aspect mask
+                (
+                    self.format,
+                    VK_IMAGE_ASPECT_DEPTH_BIT,
+                    description,
+                    AttachmentInfoUsage::Depth,
+                    VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+                )
+            } else {
+                // TODO: add more as required
+                unimplemented!();
+            };
+
+        let mut image_builder = Image::empty(width, height, self.usage, sample_count)
+            .format(format)
+            .aspect_mask(aspect);
+
+        if self.attach_sampler {
+            image_builder =
+                image_builder.attach_sampler(Sampler::nearest_sampler().build(device)?);
+        }
+
+        let image = image_builder.build(device, queue)?;
+
+        match aspect {
+            VK_IMAGE_ASPECT_DEPTH_BIT => {
+                Image::convert_layout(&image, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL)?
+            }
+            VK_IMAGE_ASPECT_COLOR_BIT => {
+                Image::convert_layout(&image, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)?
+            }
+            _ => unimplemented!(),
+        }
+
+        Ok(AttachmentInfo {
+            images: vec![image],
+            clear_value,
+            layout,
+            description,
+            usage,
+        })
+    }
+}
+
+pub enum ResolveTarget {
+    CustomTarget(CustomTarget),
+    PreparedTargets(Vec<Arc<Image>>),
+}
+
+impl From<CustomTarget> for ResolveTarget {
+    fn from(custom_target: CustomTarget) -> Self {
+        Self::CustomTarget(custom_target)
+    }
+}
+
+impl From<Vec<Arc<Image>>> for ResolveTarget {
+    fn from(prepared_targets: Vec<Arc<Image>>) -> Self {
+        Self::PreparedTargets(prepared_targets)
+    }
+}
+
+impl<'a> From<&'a Vec<Arc<Image>>> for ResolveTarget {
+    fn from(prepared_targets: &'a Vec<Arc<Image>>) -> Self {
+        Self::PreparedTargets(prepared_targets.clone())
+    }
+}
+
+pub struct InputAttachmentInfo {
+    pub sub_pass_index: usize,
+    pub input_indices: Vec<usize>,
+}
+
+pub struct SubPassBuilder<'a> {
+    width: u32,
+    height: u32,
+    sample_count: VkSampleCountFlags,
+
+    target_infos: Vec<CustomTarget>,
+
+    input_info: Option<InputAttachmentInfo>,
+
+    // (images, index, clear_color, clear_on_load)
+    prepared_targets: Option<(&'a [Arc<Image>], usize, [f32; 4], bool)>,
+    resolve_targets: Vec<ResolveTarget>,
+
+    output_usage: VkAccessFlagBits,
+}
+
+impl<'a> SubPassBuilder<'a> {
+    pub fn set_sample_count(mut self, sample_count: VkSampleCountFlags) -> Self {
+        self.sample_count = sample_count;
+
+        self
+    }
+
+    pub fn add_target_info(mut self, target: CustomTarget) -> Self {
+        self.target_infos.push(target);
+
+        self
+    }
+
+    pub fn set_input_attachment_info(mut self, input_info: InputAttachmentInfo) -> Self {
+        self.input_info = Some(input_info);
+
+        self
+    }
+
+    pub fn set_output_usage(mut self, output_usage: impl Into<VkAccessFlagBits>) -> Self {
+        self.output_usage = output_usage.into();
+
+        self
+    }
+
+    pub fn set_prepared_targets(
+        mut self,
+        prepared_targets: &'a [Arc<Image>],
+        target_index: usize,
+        clear_color: impl Into<[f32; 4]>,
+        clear_on_load: bool,
+    ) -> Self {
+        self.prepared_targets = Some((
+            prepared_targets,
+            target_index,
+            clear_color.into(),
+            clear_on_load,
+        ));
+
+        self
+    }
+
+    pub fn add_resolve_targets(mut self, resolve_target: impl Into<ResolveTarget>) -> Self {
+        self.resolve_targets.push(resolve_target.into());
+
+        self
+    }
+
+    pub fn build(self, device: &Arc<Device>, queue: &Arc<Mutex<Queue>>) -> Result<SubPass> {
+        let attachments = self.create_images(device, queue)?;
+
+        Ok(SubPass {
+            extent: VkExtent2D {
+                width: self.width,
+                height: self.height,
+            },
+
+            input_info: self.input_info,
+            attachments,
+
+            output_usage: self.output_usage,
+        })
+    }
+
+    #[inline]
+    fn create_images(
+        &self,
+        device: &Arc<Device>,
+        queue: &Arc<Mutex<Queue>>,
+    ) -> Result<Vec<AttachmentInfo>> {
+        // check for correct sample count
+        let checked_sample_count = device.max_supported_sample_count(self.sample_count);
+
+        // refuse resolve targets when multisampling is not in use
+        if checked_sample_count == VK_SAMPLE_COUNT_1_BIT && !self.resolve_targets.is_empty() {
+            panic!("Sample count 1 and using resolve target is not supported");
+        }
+
+        let mut attachment_infos = Vec::new();
+
+        for target_info in self.target_infos.iter() {
+            attachment_infos.push(target_info.to_attachment_info(
+                device,
+                queue,
+                self.width,
+                self.height,
+                self.sample_count,
+            )?);
+        }
+
+        // insert prepared images
+        if let Some((prepared_images, index, clear_color, clear_on_load)) = self.prepared_targets {
+            let clear_operation = Self::clear_op(clear_on_load);
+
+            attachment_infos.insert(
+                index,
+                AttachmentInfo {
+                    images: prepared_images.iter().map(|image| image.clone()).collect(),
+                    clear_value: VkClearValue::color(VkClearColorValue::float32(clear_color)),
+                    layout: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+                    description: VkAttachmentDescription::new(
+                        0,
+                        prepared_images[0].vk_format(),
+                        VK_SAMPLE_COUNT_1_BIT,
+                        clear_operation,
+                        VK_ATTACHMENT_STORE_OP_STORE,
+                        VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+                        VK_ATTACHMENT_STORE_OP_DONT_CARE,
+                        prepared_images[0].image_layout(),
+                        prepared_images[0].image_layout(),
+                    ),
+                    usage: AttachmentInfoUsage::Output,
+                },
+            );
+        }
+
+        // add resolve targets if present
+        for resolve_target in self.resolve_targets.iter() {
+            match resolve_target {
+                ResolveTarget::CustomTarget(custom_target) => {
+                    let mut attachment_info = custom_target.to_attachment_info(
+                        device,
+                        queue,
+                        self.width,
+                        self.height,
+                        VK_SAMPLE_COUNT_1_BIT,
+                    )?;
+
+                    attachment_info.usage = AttachmentInfoUsage::Resolve;
+
+                    attachment_infos.push(attachment_info);
+                }
+                ResolveTarget::PreparedTargets(prepared_targets) => {
+                    attachment_infos.push(AttachmentInfo {
+                        images: prepared_targets.iter().map(|image| image.clone()).collect(),
+                        clear_value: VkClearValue::color(VkClearColorValue::float32([
+                            0.0, 0.0, 0.0, 1.0,
+                        ])),
+                        layout: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+                        description: VkAttachmentDescription::new(
+                            0,
+                            prepared_targets[0].vk_format(),
+                            VK_SAMPLE_COUNT_1_BIT,
+                            VK_ATTACHMENT_LOAD_OP_CLEAR,
+                            VK_ATTACHMENT_STORE_OP_STORE,
+                            VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+                            VK_ATTACHMENT_STORE_OP_DONT_CARE,
+                            VK_IMAGE_LAYOUT_UNDEFINED,
+                            prepared_targets[0].image_layout(),
+                        ),
+                        usage: AttachmentInfoUsage::Resolve,
+                    });
+                }
+            }
+        }
+
+        Ok(attachment_infos)
+    }
+
+    #[inline]
+    fn clear_op(clear_on_load: bool) -> VkAttachmentLoadOp {
+        if clear_on_load {
+            VK_ATTACHMENT_LOAD_OP_CLEAR
+        } else {
+            VK_ATTACHMENT_LOAD_OP_LOAD
+        }
+    }
+
+    #[inline]
+    fn store_op(store_on_save: bool) -> VkAttachmentStoreOp {
+        if store_on_save {
+            VK_ATTACHMENT_STORE_OP_STORE
+        } else {
+            VK_ATTACHMENT_STORE_OP_DONT_CARE
+        }
+    }
+}
+
+#[derive(Eq, PartialEq, Hash, Clone, Copy)]
+pub enum AttachmentInfoUsage {
+    Depth,
+    Resolve,
+    Output,
+}
+
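// Editor's note: usage sketch added during review, not part of the original
// patch. A minimal depth-only sub pass built with the API above; `device` and
// `queue` are assumed to come from the surrounding application:
//
//     fn depth_sub_pass(
//         device: &Arc<Device>,
//         queue: &Arc<Mutex<Queue>>,
//     ) -> Result<SubPass> {
//         SubPass::builder(1280, 720)
//             .add_target_info(CustomTarget::depth())
//             .build(device, queue)
//     }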
+pub struct AttachmentInfo {
+    images: Vec<Arc<Image>>,
+    pub(crate) clear_value: VkClearValue,
+    pub(crate) layout: VkImageLayout,
+    pub(crate) description: VkAttachmentDescription,
+    pub(crate) usage: AttachmentInfoUsage,
+}
+
+impl AttachmentInfo {
+    pub fn image(&self, mut index: usize) -> &Arc<Image> {
+        debug_assert!(!self.images.is_empty());
+
+        // clamp to the last image, so an attachment backed by a single image
+        // can be shared across all swapchain frames
+        if index >= self.images.len() {
+            index = self.images.len() - 1;
+        }
+
+        &self.images[index]
+    }
+}
+
+pub struct SubPass {
+    extent: VkExtent2D,
+
+    input_info: Option<InputAttachmentInfo>,
+    attachments: Vec<AttachmentInfo>,
+
+    output_usage: VkAccessFlagBits,
+}
+
+impl SubPass {
+    pub fn builder<'a>(width: u32, height: u32) -> SubPassBuilder<'a> {
+        SubPassBuilder {
+            width,
+            height,
+            sample_count: VK_SAMPLE_COUNT_1_BIT,
+
+            input_info: None,
+
+            target_infos: Vec::new(),
+
+            prepared_targets: None,
+            resolve_targets: Vec::new(),
+
+            output_usage: VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
+                | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
+        }
+    }
+
+    pub(crate) fn inputs(&self) -> Option<&InputAttachmentInfo> {
+        self.input_info.as_ref()
+    }
+
+    pub(crate) fn output_usage(&self) -> VkAccessFlagBits {
+        self.output_usage
+    }
+
+    pub fn extent(&self) -> VkExtent2D {
+        self.extent
+    }
+
+    pub fn attachments(&self) -> &[AttachmentInfo] {
+        &self.attachments
+    }
+
+    pub fn max_images_per_attachment(&self) -> usize {
+        let mut max_images = 0;
+
+        for attachment in self.attachments.iter() {
+            max_images = max_images.max(attachment.images.len());
+        }
+
+        max_images
+    }
+}
diff --git a/vulkan-rs/src/renderpass.rs b/vulkan-rs/src/renderpass.rs
new file mode 100644
index 0000000..ec1d254
--- /dev/null
+++ b/vulkan-rs/src/renderpass.rs
@@ -0,0 +1,48 @@
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use std::sync::Arc;
+
+#[derive(Debug)]
+pub struct RenderPass {
+    device: Arc<Device>,
+    render_pass: VkRenderPass,
+}
+
+impl RenderPass {
+    pub fn new(
+        device: Arc<Device>,
+        sub_passes: &[VkSubpassDescription],
+        attachments: &[VkAttachmentDescription],
+        dependencies: &[VkSubpassDependency],
+    ) -> Result<Arc<RenderPass>> {
+        let render_pass_ci = VkRenderPassCreateInfo::new(
+            VK_RENDERPASS_CREATE_NULL_BIT,
+            attachments,
+            sub_passes,
+            dependencies,
+        );
+
+        let render_pass = device.create_render_pass(&render_pass_ci)?;
+
+        Ok(Arc::new(RenderPass {
+            device,
+            render_pass,
+        }))
+    }
+}
+
+impl VulkanDevice for RenderPass {
+    fn device(&self) -> &Arc<Device> {
+        &self.device
+    }
+}
+
+impl_vk_handle!(RenderPass, VkRenderPass, render_pass);
+
+impl Drop for RenderPass {
+    fn drop(&mut self) {
+        self.device.destroy_render_pass(self.render_pass);
+    }
+}
diff --git a/vulkan-rs/src/sampler_manager.rs b/vulkan-rs/src/sampler_manager.rs
new file mode 100644
index 0000000..be878ad
--- /dev/null
+++ b/vulkan-rs/src/sampler_manager.rs
@@ -0,0 +1,182 @@
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
+
+pub struct SamplerBuilder {
+    create_info: VkSamplerCreateInfo,
+}
+
+impl SamplerBuilder {
+    pub fn min_mag_filter(mut self, min_filter: VkFilter, mag_filter: VkFilter) -> Self {
+        self.create_info.minFilter = min_filter;
+        self.create_info.magFilter = mag_filter;
+
+        self
+    }
+
+    pub fn mip_map_mode(mut self, mode: VkSamplerMipmapMode) -> Self {
+        self.create_info.mipmapMode = mode;
+
+        self
+    }
+
+    pub fn address_mode(
+        mut self,
+        u: VkSamplerAddressMode,
+        v: VkSamplerAddressMode,
+        w: VkSamplerAddressMode,
+    ) -> Self {
+        self.create_info.addressModeU = u;
+        self.create_info.addressModeV = v;
+        self.create_info.addressModeW = w;
+
+        self
+    }
+
+    pub fn mip_lod_bias(mut self, bias: f32) -> Self {
self.create_info.mipLodBias = bias;
+
+        self
+    }
+
+    pub fn anisotropy(mut self, anisotropy: f32) -> Self {
+        self.create_info.anisotropyEnable = VK_TRUE;
+        self.create_info.maxAnisotropy = anisotropy;
+
+        self
+    }
+
+    pub fn compare(mut self, compare_op: VkCompareOp) -> Self {
+        self.create_info.compareEnable = VK_TRUE;
+        self.create_info.compareOp = compare_op;
+
+        self
+    }
+
+    pub fn min_max_lod(mut self, min_lod: f32, max_lod: f32) -> Self {
+        self.create_info.minLod = min_lod;
+        self.create_info.maxLod = max_lod;
+
+        self
+    }
+
+    pub fn border_color(mut self, border_color: VkBorderColor) -> Self {
+        self.create_info.borderColor = border_color;
+
+        self
+    }
+
+    pub fn coordinates<T>(mut self, unnormalized_coordinates: T) -> Self
+    where
+        T: Into<VkBool32>,
+    {
+        self.create_info.unnormalizedCoordinates = unnormalized_coordinates.into();
+
+        self
+    }
+
+    pub fn build(self, device: &Device) -> Result<Arc<Sampler>> {
+        device.create_sampler_from_manager(self.create_info)
+    }
+}
+
+#[derive(Debug)]
+pub struct Sampler {
+    sampler: VkSampler,
+}
+
+impl Sampler {
+    pub fn nearest_sampler() -> SamplerBuilder {
+        SamplerBuilder {
+            create_info: VkSamplerCreateInfo::new(
+                0,
+                VK_FILTER_NEAREST,
+                VK_FILTER_NEAREST,
+                VK_SAMPLER_MIPMAP_MODE_NEAREST,
+                VK_SAMPLER_ADDRESS_MODE_REPEAT,
+                VK_SAMPLER_ADDRESS_MODE_REPEAT,
+                VK_SAMPLER_ADDRESS_MODE_REPEAT,
+                0.0,
+                false,
+                1.0,
+                false,
+                VK_COMPARE_OP_NEVER,
+                0.0,
+                0.0,
+                VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
+                false,
+            ),
+        }
+    }
+
+    pub fn pretty_sampler() -> SamplerBuilder {
+        SamplerBuilder {
+            create_info: VkSamplerCreateInfo::new(
+                0,
+                VK_FILTER_LINEAR,
+                VK_FILTER_LINEAR,
+                VK_SAMPLER_MIPMAP_MODE_LINEAR,
+                VK_SAMPLER_ADDRESS_MODE_REPEAT,
+                VK_SAMPLER_ADDRESS_MODE_REPEAT,
+                VK_SAMPLER_ADDRESS_MODE_REPEAT,
+                0.0,
+                true,
+                8.0,
+                false,
+                VK_COMPARE_OP_NEVER,
+                0.0,
+                0.0,
+                VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
+                false,
+            ),
+        }
+    }
+}
+
+impl_vk_handle!(Sampler, VkSampler, sampler);
+
+pub struct SamplerManager {
+    samplers: HashMap<VkSamplerCreateInfo, Arc<Sampler>>,
+}
+
+unsafe impl Sync for SamplerManager {}
+unsafe impl Send for SamplerManager {}
+
+impl SamplerManager {
+    pub fn new() -> Mutex<SamplerManager> {
+        Mutex::new(SamplerManager {
+            samplers: HashMap::new(),
+        })
+    }
+
+    pub fn create_sampler(
+        &mut self,
+        create_info: VkSamplerCreateInfo,
+        device: &Device,
+    ) -> Result<Arc<Sampler>> {
+        match self.samplers.get(&create_info) {
+            Some(sampler) => Ok(sampler.clone()),
+            None => {
+                let new_sampler = Arc::new(Sampler {
+                    sampler: device.create_sampler(&create_info)?,
+                });
+
+                self.samplers.insert(create_info, new_sampler.clone());
+
+                Ok(new_sampler)
+            }
+        }
+    }
+
+    /// This will destroy all VkSampler handles, no matter if they are in use or not
+    pub unsafe fn clear(&mut self, device: &Device) {
+        self.samplers
+            .iter()
+            .for_each(|(_, sampler)| device.destroy_sampler(sampler.vk_handle()));
+
+        self.samplers.clear();
+    }
+}
diff --git a/vulkan-rs/src/semaphore.rs b/vulkan-rs/src/semaphore.rs
new file mode 100644
index 0000000..a0c3377
--- /dev/null
+++ b/vulkan-rs/src/semaphore.rs
@@ -0,0 +1,49 @@
+use crate::prelude::*;
+
+use anyhow::Result;
+
+use std::sync::Arc;
+
+#[derive(Debug)]
+pub struct Semaphore {
+    device: Arc<Device>,
+    semaphore: VkSemaphore,
+}
+
+impl Semaphore {
+    pub fn new(device: Arc<Device>) -> Result<Arc<Semaphore>> {
+        let semaphore_ci = VkSemaphoreCreateInfo::new(VK_SEMAPHORE_CREATE_NULL_BIT);
+
+        let semaphore = device.create_semaphore(&semaphore_ci)?;
+
+        Ok(Arc::new(Semaphore { device, semaphore }))
+    }
+}
+
+impl VulkanDevice for Semaphore {
+    fn device(&self) -> &Arc<Device> {
+        &self.device
+    }
+}
+
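// Editor's note: usage sketch added during review, not part of the original
// patch. It assumes `Swapchain` and `OutOfDate` are re-exported through
// `crate::prelude` (see swapchain.rs further down) and shows how a
// `Semaphore` gates image acquisition:
//
//     fn acquire(device: Arc<Device>, swapchain: &Swapchain) -> Result<u32> {
//         // signalled by the presentation engine once the image is free
//         let image_ready = Semaphore::new(device)?;
//
//         match swapchain.acquire_next_image(u64::MAX, Some(&image_ready), None)? {
//             OutOfDate::Ok(index) => Ok(index),
//             // the surface changed (e.g. resize): recreate and let the caller retry
//             _ => {
//                 swapchain.recreate()?;
//                 Err(anyhow::Error::msg("swapchain out of date"))
//             }
//         }
//     }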
+impl_vk_handle!(Semaphore, VkSemaphore, semaphore);
+
+impl Drop for Semaphore {
+    fn drop(&mut self) {
+        self.device.destroy_semaphore(self.semaphore);
+    }
+}
+
+use crate::{ffi::*, handle_ffi_result};
+
+#[no_mangle]
+pub extern "C" fn create_semaphore(device: *const Device) -> *const Semaphore {
+    let device = unsafe { Arc::from_raw(device) };
+
+    handle_ffi_result!(Semaphore::new(device))
+}
+
+#[no_mangle]
+pub extern "C" fn destroy_semaphore(semaphore: *const Semaphore) {
+    let _semaphore = unsafe { Arc::from_raw(semaphore) };
+}
diff --git a/vulkan-rs/src/shadermodule.rs b/vulkan-rs/src/shadermodule.rs
new file mode 100644
index 0000000..7cce834
--- /dev/null
+++ b/vulkan-rs/src/shadermodule.rs
@@ -0,0 +1,178 @@
+use crate::prelude::*;
+
+use anyhow::{Context, Result};
+
+use std::fs::File;
+use std::io::Read;
+use std::sync::Arc;
+
+#[allow(clippy::cast_ptr_alignment)]
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum ShaderType {
+    None,
+    Vertex,
+    Fragment,
+    Geometry,
+    TesselationControl,
+    TesselationEvaluation,
+    Compute,
+    RayGeneration,
+    ClosestHit,
+    Miss,
+    AnyHit,
+    Intersection,
+}
+
+impl Default for ShaderType {
+    fn default() -> Self {
+        ShaderType::None
+    }
+}
+
+#[derive(Debug)]
+pub struct ShaderModule {
+    device: Arc<Device>,
+    shader_module: VkShaderModule,
+    shader_type: ShaderType,
+}
+
+impl ShaderModule {
+    pub fn new(
+        device: Arc<Device>,
+        path: &str,
+        shader_type: ShaderType,
+    ) -> Result<Arc<ShaderModule>> {
+        let code = Self::shader_code(path)?;
+
+        Self::from_slice(device, code.as_slice(), shader_type)
+    }
+
+    pub fn from_slice(
+        device: Arc<Device>,
+        code: &[u8],
+        shader_type: ShaderType,
+    ) -> Result<Arc<ShaderModule>> {
+        let shader_module_ci =
+            VkShaderModuleCreateInfo::new(VK_SHADER_MODULE_CREATE_NULL_BIT, code);
+
+        let shader_module = device.create_shader_module(&shader_module_ci)?;
+
+        Ok(Arc::new(ShaderModule {
+            device,
+            shader_module,
+            shader_type,
+        }))
+    }
+
+    fn shader_code(path: &str) -> Result<Vec<u8>> {
+        let mut file = File::open(path).with_context({
+            let path = path.to_string();
+            || path
+        })?;
+
+        let mut code: Vec<u8> = Vec::new();
+
+        file.read_to_end(&mut code)?;
+
+        Ok(code)
+    }
+
+    pub fn shader_type(&self) -> ShaderType {
+        self.shader_type
+    }
+
+    pub fn pipeline_stage_info(&self) -> VkPipelineShaderStageCreateInfo {
+        match self.shader_type {
+            ShaderType::None => unimplemented!(),
+            ShaderType::Vertex => VkPipelineShaderStageCreateInfo::vertex(self.shader_module),
+            ShaderType::Geometry => VkPipelineShaderStageCreateInfo::geometry(self.shader_module),
+            ShaderType::TesselationControl => {
+                VkPipelineShaderStageCreateInfo::tesselation_control(self.shader_module)
+            }
+            ShaderType::TesselationEvaluation => {
+                VkPipelineShaderStageCreateInfo::tesselation_evaluation(self.shader_module)
+            }
+            ShaderType::Fragment => VkPipelineShaderStageCreateInfo::fragment(self.shader_module),
+            ShaderType::Compute => VkPipelineShaderStageCreateInfo::compute(self.shader_module),
+            ShaderType::AnyHit => VkPipelineShaderStageCreateInfo::any_hit(self.shader_module),
+            ShaderType::Intersection => {
+                VkPipelineShaderStageCreateInfo::intersection(self.shader_module)
+            }
+            ShaderType::ClosestHit => {
+                VkPipelineShaderStageCreateInfo::closest_hit(self.shader_module)
+            }
+            ShaderType::RayGeneration => {
+                VkPipelineShaderStageCreateInfo::ray_generation(self.shader_module)
+            }
+            ShaderType::Miss => VkPipelineShaderStageCreateInfo::miss(self.shader_module),
+        }
+    }
+}
+
+impl VulkanDevice for ShaderModule {
+    fn device(&self) -> &Arc<Device> {
+        &self.device
+    }
+}
+
+impl_vk_handle!(ShaderModule, VkShaderModule,
shader_module); + +impl Drop for ShaderModule { + fn drop(&mut self) { + self.device.destroy_shader_module(self.shader_module); + } +} + +pub trait AddSpecializationConstant { + fn add(&mut self, value: T, id: u32); +} + +pub struct SpecializationConstants { + // store data as raw bytes + data: Vec, + entries: Vec, + + info: VkSpecializationInfo, +} + +impl SpecializationConstants { + pub fn new() -> Self { + let mut me = SpecializationConstants { + data: Vec::new(), + entries: Vec::new(), + + info: VkSpecializationInfo::empty(), + }; + + me.info.set_data(&me.data); + me.info.set_map_entries(&me.entries); + + me + } + + pub fn vk_handle(&self) -> &VkSpecializationInfo { + &self.info + } +} + +macro_rules! impl_add_specialization_constant { + ($($type: ty),+) => { + $( + impl AddSpecializationConstant<$type> for SpecializationConstants { + fn add(&mut self, value: $type, id: u32) { + let bytes = value.to_ne_bytes(); + + self.entries.push(VkSpecializationMapEntry { + constantID: id, + offset: self.data.len() as u32, + size: bytes.len(), + }); + + self.data.extend(&bytes); + } + } + )+ + }; +} + +impl_add_specialization_constant!(f32, f64, u64, i64, u32, i32, u16, i16, u8, i8, usize, isize); diff --git a/vulkan-rs/src/surface.rs b/vulkan-rs/src/surface.rs new file mode 100644 index 0000000..9719ddd --- /dev/null +++ b/vulkan-rs/src/surface.rs @@ -0,0 +1,80 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::sync::Arc; + +const UNORM_FORMATS: [VkFormat; 2] = [VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_B8G8R8A8_UNORM]; + +#[derive(Debug)] +pub struct Surface { + external_source: bool, + instance: Arc, + surface: VkSurfaceKHR, +} + +impl Surface { + pub fn from_vk_surface(surface: VkSurfaceKHR, instance: &Arc) -> Arc { + Arc::new(Surface { + external_source: true, + instance: instance.clone(), + surface, + }) + } + + pub fn capabilities(&self, device: &Arc) -> Result { + self.instance.physical_device_surface_capabilities( + device.physical_device().vk_handle(), + self.surface, + ) + } + + pub fn format_colorspace( + &self, + device: &Arc, + prefered_format: VkFormat, + ) -> Result<(VkFormat, VkColorSpaceKHR)> { + let surface_formats = self + .instance + .physical_device_surface_formats(device.physical_device().vk_handle(), self.surface)?; + + // if there is a single undefined format, assume the preferred mode + if (surface_formats.len() == 1) && (surface_formats[0].format == VK_FORMAT_UNDEFINED) { + return Ok((prefered_format, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR)); + } + + // look for prefered_format + for surface_format in &surface_formats { + if surface_format.format == prefered_format { + return Ok((surface_format.format, surface_format.colorSpace)); + } + } + + // prefer UNORM formats + for surface_format in &surface_formats { + for unorm_format in &UNORM_FORMATS { + if *unorm_format == surface_format.format { + return Ok((surface_format.format, surface_format.colorSpace)); + } + } + } + + // if nothing was found, take the first one + Ok((surface_formats[0].format, surface_formats[0].colorSpace)) + } + + pub fn present_modes(&self, device: &Arc) -> Result> { + self.instance + .physical_device_present_modes(device.physical_device().vk_handle(), self.surface) + } +} + +impl_vk_handle!(Surface, VkSurfaceKHR, surface); + +impl Drop for Surface { + fn drop(&mut self) { + if !self.external_source { + self.instance.destroy_surface(self.surface) + } + } +} diff --git a/vulkan-rs/src/swapchain.rs b/vulkan-rs/src/swapchain.rs new file mode 100644 index 0000000..ae23942 --- /dev/null +++ 
b/vulkan-rs/src/swapchain.rs @@ -0,0 +1,319 @@ +use crate::prelude::*; + +use anyhow::Result; + +use std::cmp; +use std::sync::{ + atomic::{AtomicU32, Ordering::SeqCst}, + Arc, Mutex, +}; + +#[derive(Debug)] +pub struct Swapchain { + width: AtomicU32, + height: AtomicU32, + index: AtomicU32, + + device: Arc, + surface: Arc, + + create_info: Mutex, + swapchain: Mutex, + usage: VkImageUsageFlagBits, + + raw: bool, +} + +impl Swapchain { + pub fn new( + device: Arc, + surface: &Arc, + vsync: bool, + image_count: u32, + image_usage: impl Into, + prefered_format: VkFormat, + array_layers: u32, + ) -> Result> { + let surface_caps = surface.capabilities(&device)?; + + let extent = if surface_caps.currentExtent.width == u32::max_value() { + return Err(anyhow::Error::msg("Surface has no extent")); + } else { + VkExtent2D { + width: surface_caps.currentExtent.width, + height: surface_caps.currentExtent.height, + } + }; + + let mut present_mode = VK_PRESENT_MODE_FIFO_KHR; + + if !vsync { + for present_mode_iter in surface.present_modes(&device)? { + if present_mode_iter == VK_PRESENT_MODE_MAILBOX_KHR { + present_mode = VK_PRESENT_MODE_MAILBOX_KHR; + break; + } else if present_mode_iter == VK_PRESENT_MODE_IMMEDIATE_KHR { + present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR; + } + } + } + + let swapchain_image_count = if surface_caps.maxImageCount < surface_caps.minImageCount { + cmp::max(image_count, surface_caps.minImageCount) + } else { + cmp::max( + cmp::min(image_count, surface_caps.maxImageCount), + surface_caps.minImageCount, + ) + }; + + let pretransform = + if (surface_caps.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) != 0 { + VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR.into() + } else { + surface_caps.currentTransform + }; + + let (format, colorspace) = surface.format_colorspace(&device, prefered_format)?; + + let swapchain_ci = VkSwapchainCreateInfoKHR::new( + 0, + surface.vk_handle(), + swapchain_image_count, + format, + colorspace, + extent, + array_layers, + image_usage, + VK_SHARING_MODE_EXCLUSIVE, + &[], + pretransform, + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR, + present_mode, + device.physical_device().features().shaderClipDistance, + ); + + let swapchain = device.create_swapchain(&swapchain_ci)?; + + Ok(Arc::new(Swapchain { + width: AtomicU32::new(extent.width), + height: AtomicU32::new(extent.height), + usage: swapchain_ci.imageUsage, + index: AtomicU32::new(0), + + device, + surface: surface.clone(), + + create_info: Mutex::new(swapchain_ci), + + swapchain: Mutex::new(swapchain), + + raw: false, + })) + } + + pub fn from_ci( + device: Arc, + swapchain_ci: &VkSwapchainCreateInfoKHR, + ) -> Result> { + Self::from_raw( + device.clone(), + swapchain_ci, + device.create_swapchain(swapchain_ci)?, + ) + } + + pub fn from_raw( + device: Arc, + swapchain_ci: &VkSwapchainCreateInfoKHR, + swapchain: VkSwapchainKHR, + ) -> Result> { + Ok(Arc::new(Swapchain { + width: AtomicU32::new(swapchain_ci.imageExtent.width), + height: AtomicU32::new(swapchain_ci.imageExtent.height), + usage: swapchain_ci.imageUsage, + index: AtomicU32::new(0), + + surface: Surface::from_vk_surface( + swapchain_ci.surface, + device.physical_device().instance(), + ), + device, + + create_info: Mutex::new(swapchain_ci.clone()), + + swapchain: Mutex::new(swapchain), + + raw: true, + })) + } + + pub fn recreate(&self) -> Result<()> { + // wait for the device to get idle + self.device.wait_idle()?; + + let surface_caps = self.surface.capabilities(&self.device)?; + + let extent = if surface_caps.currentExtent.width == 
u32::max_value() + || surface_caps.currentExtent.height == u32::max_value() + { + return Err(anyhow::Error::msg("Surface has no extent")); + } else if surface_caps.currentExtent.width == 0 || surface_caps.currentExtent.height == 0 { + // don't recreate swapchain + return Ok(()); + } else { + VkExtent2D { + width: surface_caps.currentExtent.width, + height: surface_caps.currentExtent.height, + } + }; + + let mut swapchain_ci = self.create_info.lock().unwrap(); + swapchain_ci.imageExtent = extent; + swapchain_ci.set_old_swapchain(*self.swapchain.lock().unwrap()); + + let swapchain = self.device.create_swapchain(&swapchain_ci)?; + + // destroy the old swapchain + self.destroy(); + + // replace swapchain + *self.swapchain.lock().unwrap() = swapchain; + + // set new surface size + self.width.store(extent.width, SeqCst); + self.height.store(extent.height, SeqCst); + + Ok(()) + } + + pub fn acquire_next_image( + &self, + time_out: u64, + present_complete_semaphore: Option<&Arc>, + fence: Option<&Arc>, + ) -> Result> { + let res = self.device.acquire_next_image( + *self.swapchain.lock().unwrap(), + time_out, + present_complete_semaphore.map(|sem| sem.vk_handle()), + fence.map(|fence| fence.vk_handle()), + ); + + if let Ok(r) = &res { + if let OutOfDate::Ok(i) = r { + self.index.store(*i, SeqCst); + } + } + + res + } + + /// set current + /// only use when externally acquired next index !!! + pub unsafe fn set_image_index(&self, index: u32) { + self.index.store(index, SeqCst); + } + + pub fn current_index(&self) -> u32 { + self.index.load(SeqCst) + } + + pub fn vk_images(&self) -> Result> { + self.device + .swapchain_images(*self.swapchain.lock().unwrap()) + } + + pub fn wrap_images( + &self, + images: &[VkImage], + queue: &Arc>, + assume_layout: bool, + ) -> Result>> { + let format = self.format(); + let tiling = VK_IMAGE_TILING_OPTIMAL; + + if !Image::check_configuration(&self.device, tiling, format, self.usage) { + return Err(anyhow::Error::msg(format!( + "Image configuration not allowed (tiling: {:?}, format: {:?}, usage: {:?})", + tiling, format, self.usage, + ))); + } + + let mut swapchain_images = Vec::new(); + + for image in images { + swapchain_images.push( + Image::from_preinitialized( + *image, + format, + self.width(), + self.height(), + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + self.usage, + assume_layout, + ) + .attach_sampler(Sampler::nearest_sampler().build(&self.device)?) 
+ .build(&self.device, queue)?, + ); + } + + Ok(swapchain_images) + } + + pub fn width(&self) -> u32 { + self.width.load(SeqCst) + } + + pub fn height(&self) -> u32 { + self.height.load(SeqCst) + } + + pub fn format(&self) -> VkFormat { + self.create_info.lock().unwrap().imageFormat + } + + #[inline] + fn destroy(&self) { + self.device + .destroy_swapchain(*self.swapchain.lock().unwrap()) + } +} + +impl VulkanDevice for Swapchain { + fn device(&self) -> &Arc { + &self.device + } +} + +impl VkHandle for Swapchain { + fn vk_handle(&self) -> VkSwapchainKHR { + *self.swapchain.lock().unwrap() + } +} + +impl<'a> VkHandle for &'a Swapchain { + fn vk_handle(&self) -> VkSwapchainKHR { + *self.swapchain.lock().unwrap() + } +} + +impl VkHandle for Arc { + fn vk_handle(&self) -> VkSwapchainKHR { + *self.swapchain.lock().unwrap() + } +} + +impl<'a> VkHandle for &'a Arc { + fn vk_handle(&self) -> VkSwapchainKHR { + *self.swapchain.lock().unwrap() + } +} + +impl Drop for Swapchain { + fn drop(&mut self) { + if !self.raw { + self.destroy(); + } + } +} diff --git a/vulkan-sys/Cargo.toml b/vulkan-sys/Cargo.toml new file mode 100644 index 0000000..92c4331 --- /dev/null +++ b/vulkan-sys/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "vulkan-sys" +version = "0.1.0" +authors = ["hodasemi "] +edition = "2021" + +[dependencies] +library_loader = { path = "../library_loader" } +paste = "1.0.11" +shared_library = "0.1.9" +anyhow = { version = "1.0.68", features = ["backtrace"] } \ No newline at end of file diff --git a/vulkan-sys/src/custom/mappedmemory.rs b/vulkan-sys/src/custom/mappedmemory.rs new file mode 100644 index 0000000..1345476 --- /dev/null +++ b/vulkan-sys/src/custom/mappedmemory.rs @@ -0,0 +1,67 @@ +use core::slice::{Iter, IterMut}; +use std::ops::{Index, IndexMut}; +use std::{fmt, fmt::Debug}; + +pub struct VkMappedMemory<'a, T> +where + T: Clone, +{ + data: &'a mut [T], + + unmap: Option>, +} + +impl<'a, T: Clone> VkMappedMemory<'a, T> { + pub fn new(data: &'a mut [T]) -> VkMappedMemory<'a, T> { + VkMappedMemory { data, unmap: None } + } + + pub fn set_unmap(&mut self, f: F) + where + F: Fn() + 'static, + { + self.unmap = Some(Box::new(f)); + } + + pub fn copy(&mut self, data: &[T]) { + self.data.clone_from_slice(data); + } + + pub fn iter(&self) -> Iter<'_, T> { + self.data.iter() + } + + pub fn iter_mut(&mut self) -> IterMut<'_, T> { + self.data.iter_mut() + } +} + +impl<'a, T: Clone> Index for VkMappedMemory<'a, T> { + type Output = T; + + fn index(&self, index: usize) -> &T { + &self.data[index] + } +} + +impl<'a, T: Clone> IndexMut for VkMappedMemory<'a, T> { + fn index_mut(&mut self, index: usize) -> &mut T { + &mut self.data[index] + } +} + +impl<'a, T: Clone> Drop for VkMappedMemory<'a, T> { + fn drop(&mut self) { + if let Some(unmap) = &self.unmap { + unmap(); + } + } +} + +impl<'a, T: Clone + Debug> fmt::Debug for VkMappedMemory<'a, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("VkMappedMemory") + .field("data", &self.data) + .finish() + } +} diff --git a/vulkan-sys/src/custom/mod.rs b/vulkan-sys/src/custom/mod.rs new file mode 100644 index 0000000..6e61245 --- /dev/null +++ b/vulkan-sys/src/custom/mod.rs @@ -0,0 +1,5 @@ +pub mod mappedmemory; +pub mod names; +pub mod string; + +pub mod prelude; diff --git a/vulkan-sys/src/custom/names.rs b/vulkan-sys/src/custom/names.rs new file mode 100644 index 0000000..cec4db8 --- /dev/null +++ b/vulkan-sys/src/custom/names.rs @@ -0,0 +1,32 @@ +use crate::prelude::*; + +use std::os::raw::c_char; +use 
std::slice::Iter; + +pub struct VkNames { + r_names: Vec, + c_names: Vec<*const c_char>, +} + +impl VkNames { + pub fn new(names: &[VkString]) -> Self { + let local: Vec = names.to_vec(); + + VkNames { + c_names: local.iter().map(|s| s.as_ptr()).collect(), + r_names: local, + } + } + + pub fn len(&self) -> usize { + self.r_names.len() + } + + pub fn iter(&self) -> Iter<'_, VkString> { + self.r_names.iter() + } + + pub fn c_names(&self) -> &Vec<*const c_char> { + &self.c_names + } +} diff --git a/vulkan-sys/src/custom/prelude.rs b/vulkan-sys/src/custom/prelude.rs new file mode 100644 index 0000000..4388ccd --- /dev/null +++ b/vulkan-sys/src/custom/prelude.rs @@ -0,0 +1,3 @@ +pub use super::mappedmemory::*; +pub use super::names::VkNames; +pub use super::string::VkString; diff --git a/vulkan-sys/src/custom/string.rs b/vulkan-sys/src/custom/string.rs new file mode 100644 index 0000000..dca368b --- /dev/null +++ b/vulkan-sys/src/custom/string.rs @@ -0,0 +1,70 @@ +use std::ffi::{CString, CStr}; +use std::fmt; +use std::ops::Deref; +use std::os::raw::c_char; +use std::str::Utf8Error; + +#[derive(Clone, Eq, Hash)] +pub struct VkString { + rust_text: String, + cstring_text: CString, +} + +impl VkString { + pub fn new(text: &str) -> VkString { + let owned = String::from(text); + let cstring = CString::new(owned.clone()) + .unwrap_or_else(|_| panic!("could not create CString ({})", text)); + + VkString { + rust_text: owned, + cstring_text: cstring, + } + } + + pub fn as_ptr(&self) -> *const c_char { + self.cstring_text.as_ptr() + } + + pub fn as_str(&self) -> &str { + &self.rust_text + } + + pub fn as_string(&self) -> String { + self.rust_text.clone() + } + + pub fn into_string(self) -> String { + self.rust_text + } +} + +impl TryFrom<*const c_char> for VkString { + type Error = Utf8Error; + + fn try_from(value: *const c_char) -> Result { + let cstr = unsafe { CStr::from_ptr(value) }; + let str = cstr.to_str()?; + Ok(VkString::new(str)) + } +} + +impl Deref for VkString { + type Target = String; + + fn deref(&self) -> &String { + &self.rust_text + } +} + +impl PartialEq for VkString { + fn eq(&self, other: &VkString) -> bool { + self.rust_text == other.rust_text + } +} + +impl fmt::Debug for VkString { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "VkString {{ {} }}", self.rust_text) + } +} diff --git a/vulkan-sys/src/enums/accessflags.rs b/vulkan-sys/src/enums/accessflags.rs new file mode 100644 index 0000000..549c7f7 --- /dev/null +++ b/vulkan-sys/src/enums/accessflags.rs @@ -0,0 +1,39 @@ +pub use VkAccessFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkAccessFlags { + VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x0000_0001, + VK_ACCESS_INDEX_READ_BIT = 0x0000_0002, + VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x0000_0004, + VK_ACCESS_UNIFORM_READ_BIT = 0x0000_0008, + VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x0000_0010, + VK_ACCESS_SHADER_READ_BIT = 0x0000_0020, + VK_ACCESS_SHADER_WRITE_BIT = 0x0000_0040, + VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x0000_0080, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x0000_0100, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x0000_0200, + VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x0000_0400, + VK_ACCESS_TRANSFER_READ_BIT = 0x0000_0800, + VK_ACCESS_TRANSFER_WRITE_BIT = 0x0000_1000, + VK_ACCESS_HOST_READ_BIT = 0x0000_2000, + VK_ACCESS_HOST_WRITE_BIT = 0x0000_4000, + VK_ACCESS_MEMORY_READ_BIT = 0x0000_8000, + VK_ACCESS_MEMORY_WRITE_BIT = 0x0001_0000, + VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x0200_0000, + 
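    // NOTE (editor): the EXT/NVX/NV/KHR values from here on are
    // extension-reserved bits, which is why they are not in ascending order;
    // each extension claims fixed positions in the 32-bit mask.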
VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x0400_0000, + VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x0800_0000, + VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x0010_0000, + VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 0x0002_0000, + VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 0x0004_0000, + VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x0008_0000, + VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 0x0080_0000, + VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x0020_0000, + VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x0040_0000, + VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x0100_0000, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkAccessFlagBits(u32); +SetupVkFlags!(VkAccessFlags, VkAccessFlagBits); diff --git a/vulkan-sys/src/enums/amd/mod.rs b/vulkan-sys/src/enums/amd/mod.rs new file mode 100644 index 0000000..1015ecf --- /dev/null +++ b/vulkan-sys/src/enums/amd/mod.rs @@ -0,0 +1,3 @@ +pub mod rasterizationorderamd; + +pub mod prelude; diff --git a/vulkan-sys/src/enums/amd/prelude.rs b/vulkan-sys/src/enums/amd/prelude.rs new file mode 100644 index 0000000..0c03b6f --- /dev/null +++ b/vulkan-sys/src/enums/amd/prelude.rs @@ -0,0 +1 @@ +pub use super::rasterizationorderamd::*; diff --git a/vulkan-sys/src/enums/amd/rasterizationorderamd.rs b/vulkan-sys/src/enums/amd/rasterizationorderamd.rs new file mode 100644 index 0000000..0e74045 --- /dev/null +++ b/vulkan-sys/src/enums/amd/rasterizationorderamd.rs @@ -0,0 +1,9 @@ +pub use VkRasterizationOrderAMD::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkRasterizationOrderAMD { + VK_RASTERIZATION_ORDER_STRICT_AMD = 0, + VK_RASTERIZATION_ORDER_RELAXED_AMD = 1, + VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFF_FFFF, +} diff --git a/vulkan-sys/src/enums/androidsurfacecreateflagskhr.rs b/vulkan-sys/src/enums/androidsurfacecreateflagskhr.rs new file mode 100644 index 0000000..5458bce --- /dev/null +++ b/vulkan-sys/src/enums/androidsurfacecreateflagskhr.rs @@ -0,0 +1,15 @@ +pub use VkAndroidSurfaceCreateFlagsKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkAndroidSurfaceCreateFlagsKHR { + VK_ANDROID_SURFACE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkAndroidSurfaceCreateFlagBitsKHR(u32); +SetupVkFlags!( + VkAndroidSurfaceCreateFlagsKHR, + VkAndroidSurfaceCreateFlagBitsKHR +); diff --git a/vulkan-sys/src/enums/attachmentdescriptionflags.rs b/vulkan-sys/src/enums/attachmentdescriptionflags.rs new file mode 100644 index 0000000..826bb33 --- /dev/null +++ b/vulkan-sys/src/enums/attachmentdescriptionflags.rs @@ -0,0 +1,15 @@ +pub use VkAttachmentDescriptionFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkAttachmentDescriptionFlags { + VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x0000_0001, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkAttachmentDescriptionFlagBits(u32); +SetupVkFlags!( + VkAttachmentDescriptionFlags, + VkAttachmentDescriptionFlagBits +); diff --git a/vulkan-sys/src/enums/attachmentloadop.rs b/vulkan-sys/src/enums/attachmentloadop.rs new file mode 100644 index 0000000..0e8d29d --- /dev/null +++ b/vulkan-sys/src/enums/attachmentloadop.rs @@ -0,0 +1,9 @@ +pub use VkAttachmentLoadOp::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkAttachmentLoadOp { + VK_ATTACHMENT_LOAD_OP_LOAD = 0, + VK_ATTACHMENT_LOAD_OP_CLEAR = 1, + VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2, +} diff --git 
a/vulkan-sys/src/enums/attachmentstoreop.rs b/vulkan-sys/src/enums/attachmentstoreop.rs new file mode 100644 index 0000000..27a4d93 --- /dev/null +++ b/vulkan-sys/src/enums/attachmentstoreop.rs @@ -0,0 +1,8 @@ +pub use VkAttachmentStoreOp::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkAttachmentStoreOp { + VK_ATTACHMENT_STORE_OP_STORE = 0, + VK_ATTACHMENT_STORE_OP_DONT_CARE = 1, +} diff --git a/vulkan-sys/src/enums/blendfactor.rs b/vulkan-sys/src/enums/blendfactor.rs new file mode 100644 index 0000000..5197554 --- /dev/null +++ b/vulkan-sys/src/enums/blendfactor.rs @@ -0,0 +1,25 @@ +pub use VkBlendFactor::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkBlendFactor { + VK_BLEND_FACTOR_ZERO = 0, + VK_BLEND_FACTOR_ONE = 1, + VK_BLEND_FACTOR_SRC_COLOR = 2, + VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3, + VK_BLEND_FACTOR_DST_COLOR = 4, + VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5, + VK_BLEND_FACTOR_SRC_ALPHA = 6, + VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7, + VK_BLEND_FACTOR_DST_ALPHA = 8, + VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9, + VK_BLEND_FACTOR_CONSTANT_COLOR = 10, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11, + VK_BLEND_FACTOR_CONSTANT_ALPHA = 12, + VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13, + VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14, + VK_BLEND_FACTOR_SRC1_COLOR = 15, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16, + VK_BLEND_FACTOR_SRC1_ALPHA = 17, + VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18, +} diff --git a/vulkan-sys/src/enums/blendop.rs b/vulkan-sys/src/enums/blendop.rs new file mode 100644 index 0000000..97a9387 --- /dev/null +++ b/vulkan-sys/src/enums/blendop.rs @@ -0,0 +1,11 @@ +pub use VkBlendOp::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkBlendOp { + VK_BLEND_OP_ADD = 0, + VK_BLEND_OP_SUBTRACT = 1, + VK_BLEND_OP_REVERSE_SUBTRACT = 2, + VK_BLEND_OP_MIN = 3, + VK_BLEND_OP_MAX = 4, +} diff --git a/vulkan-sys/src/enums/bool32.rs b/vulkan-sys/src/enums/bool32.rs new file mode 100644 index 0000000..ee0bfa1 --- /dev/null +++ b/vulkan-sys/src/enums/bool32.rs @@ -0,0 +1,33 @@ +pub use VkBool32::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum VkBool32 { + VK_FALSE = 0, + VK_TRUE = 1, +} + +impl From for VkBool32 { + fn from(b: bool) -> VkBool32 { + if b { + VK_TRUE + } else { + VK_FALSE + } + } +} + +impl Into for VkBool32 { + fn into(self) -> bool { + match self { + VK_FALSE => false, + VK_TRUE => true, + } + } +} + +impl Default for VkBool32 { + fn default() -> Self { + VK_FALSE + } +} diff --git a/vulkan-sys/src/enums/bordercolor.rs b/vulkan-sys/src/enums/bordercolor.rs new file mode 100644 index 0000000..831e8d7 --- /dev/null +++ b/vulkan-sys/src/enums/bordercolor.rs @@ -0,0 +1,12 @@ +pub use VkBorderColor::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum VkBorderColor { + VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0, + VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1, + VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2, + VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3, + VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4, + VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5, +} diff --git a/vulkan-sys/src/enums/buffercreateflags.rs b/vulkan-sys/src/enums/buffercreateflags.rs new file mode 100644 index 0000000..52f268f --- /dev/null +++ b/vulkan-sys/src/enums/buffercreateflags.rs @@ -0,0 +1,17 @@ +pub use VkBufferCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkBufferCreateFlags { + VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x0000_0001, + 
VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x0000_0002, + VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x0000_0004, + VK_BUFFER_CREATE_PROTECTED_BIT = 0x0000_0008, + VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT = 0x0000_0010, + VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFF_FFFF, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkBufferCreateFlagBits(u32); +SetupVkFlags!(VkBufferCreateFlags, VkBufferCreateFlagBits); diff --git a/vulkan-sys/src/enums/bufferusageflags.rs b/vulkan-sys/src/enums/bufferusageflags.rs new file mode 100644 index 0000000..3b86a66 --- /dev/null +++ b/vulkan-sys/src/enums/bufferusageflags.rs @@ -0,0 +1,30 @@ +pub use VkBufferUsageFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkBufferUsageFlags { + VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x0000_0001, + VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x0000_0002, + VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x0000_0004, + VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x0000_0008, + VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x0000_0010, + VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x0000_0020, + VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x0000_0040, + VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x0000_0080, + VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x0000_0100, + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x0000_0800, + VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x0000_1000, + VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x0000_0200, + VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR = 0x0000_0400, + VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x0002_0000, + // Provided by VK_KHR_acceleration_structure + VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x0008_0000, + // Provided by VK_KHR_acceleration_structure + VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x0010_0000, + // VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash, Default)] +pub struct VkBufferUsageFlagBits(u32); +SetupVkFlags!(VkBufferUsageFlags, VkBufferUsageFlagBits); diff --git a/vulkan-sys/src/enums/bufferviewcreateflags.rs b/vulkan-sys/src/enums/bufferviewcreateflags.rs new file mode 100644 index 0000000..b719a24 --- /dev/null +++ b/vulkan-sys/src/enums/bufferviewcreateflags.rs @@ -0,0 +1,12 @@ +pub use VkBufferViewCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkBufferViewCreateFlags { + VK_BUFFER_VIEW_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkBufferViewCreateFlagBits(u32); +SetupVkFlags!(VkBufferViewCreateFlags, VkBufferViewCreateFlagBits); diff --git a/vulkan-sys/src/enums/colorcomponentflags.rs b/vulkan-sys/src/enums/colorcomponentflags.rs new file mode 100644 index 0000000..bd810de --- /dev/null +++ b/vulkan-sys/src/enums/colorcomponentflags.rs @@ -0,0 +1,15 @@ +pub use VkColorComponentFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkColorComponentFlags { + VK_COLOR_COMPONENT_R_BIT = 0x0000_0001, + VK_COLOR_COMPONENT_G_BIT = 0x0000_0002, + VK_COLOR_COMPONENT_B_BIT = 0x0000_0004, + VK_COLOR_COMPONENT_A_BIT = 0x0000_0008, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkColorComponentFlagBits(u32); +SetupVkFlags!(VkColorComponentFlags, VkColorComponentFlagBits); diff --git a/vulkan-sys/src/enums/colorspacekhr.rs b/vulkan-sys/src/enums/colorspacekhr.rs new file mode 100644 index 0000000..d221c5b --- /dev/null +++ 
b/vulkan-sys/src/enums/colorspacekhr.rs @@ -0,0 +1,25 @@ +pub use VkColorSpaceKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkColorSpaceKHR { + VK_COLOR_SPACE_SRGB_NONLINEAR_KHR = 0, + VK_COLOR_SPACE_DISPLAY_P3_LINEAR_EXT = 1000104001, + VK_COLOR_SPACE_DISPLAY_P3_NONLINEAR_EXT = 1000104002, + VK_COLOR_SPACE_SCRGB_LINEAR_EXT = 1000104003, + VK_COLOR_SPACE_SCRGB_NONLINEAR_EXT = 1000104004, + VK_COLOR_SPACE_DCI_P3_LINEAR_EXT = 1000104005, + VK_COLOR_SPACE_DCI_P3_NONLINEAR_EXT = 1000104006, + VK_COLOR_SPACE_BT709_LINEAR_EXT = 1000104007, + VK_COLOR_SPACE_BT709_NONLINEAR_EXT = 1000104008, + VK_COLOR_SPACE_BT2020_LINEAR_EXT = 1000104009, + VK_COLOR_SPACE_BT2020_NONLINEAR_EXT = 1000104010, + VK_COLOR_SPACE_ADOBERGB_LINEAR_EXT = 1000104011, + VK_COLOR_SPACE_ADOBERGB_NONLINEAR_EXT = 1000104012, +} + +impl Default for VkColorSpaceKHR { + fn default() -> Self { + VK_COLOR_SPACE_SRGB_NONLINEAR_KHR + } +} diff --git a/vulkan-sys/src/enums/commandbufferlevel.rs b/vulkan-sys/src/enums/commandbufferlevel.rs new file mode 100644 index 0000000..3fcba56 --- /dev/null +++ b/vulkan-sys/src/enums/commandbufferlevel.rs @@ -0,0 +1,8 @@ +pub use VkCommandBufferLevel::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkCommandBufferLevel { + VK_COMMAND_BUFFER_LEVEL_PRIMARY = 0, + VK_COMMAND_BUFFER_LEVEL_SECONDARY = 1, +} diff --git a/vulkan-sys/src/enums/commandbufferresetflags.rs b/vulkan-sys/src/enums/commandbufferresetflags.rs new file mode 100644 index 0000000..c26112c --- /dev/null +++ b/vulkan-sys/src/enums/commandbufferresetflags.rs @@ -0,0 +1,12 @@ +pub use VkCommandBufferResetFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkCommandBufferResetFlags { + VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT = 0x00000001, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkCommandBufferResetFlagBits(u32); +SetupVkFlags!(VkCommandBufferResetFlags, VkCommandBufferResetFlagBits); diff --git a/vulkan-sys/src/enums/commandbufferusageflags.rs b/vulkan-sys/src/enums/commandbufferusageflags.rs new file mode 100644 index 0000000..b695f65 --- /dev/null +++ b/vulkan-sys/src/enums/commandbufferusageflags.rs @@ -0,0 +1,14 @@ +pub use VkCommandBufferUsageFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkCommandBufferUsageFlags { + VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT = 0x00000001, + VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT = 0x00000002, + VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT = 0x00000004, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkCommandBufferUsageFlagBits(u32); +SetupVkFlags!(VkCommandBufferUsageFlags, VkCommandBufferUsageFlagBits); diff --git a/vulkan-sys/src/enums/commandpoolcreateflags.rs b/vulkan-sys/src/enums/commandpoolcreateflags.rs new file mode 100644 index 0000000..2aea2d7 --- /dev/null +++ b/vulkan-sys/src/enums/commandpoolcreateflags.rs @@ -0,0 +1,13 @@ +pub use VkCommandPoolCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkCommandPoolCreateFlags { + VK_COMMAND_POOL_CREATE_TRANSIENT_BIT = 0x00000001, + VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT = 0x00000002, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkCommandPoolCreateFlagBits(u32); +SetupVkFlags!(VkCommandPoolCreateFlags, VkCommandPoolCreateFlagBits); diff --git a/vulkan-sys/src/enums/commandpoolresetflags.rs b/vulkan-sys/src/enums/commandpoolresetflags.rs new file mode 100644 index 
0000000..8f2ce48 --- /dev/null +++ b/vulkan-sys/src/enums/commandpoolresetflags.rs @@ -0,0 +1,12 @@ +pub use VkCommandPoolResetFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkCommandPoolResetFlags { + VK_COMMAND_POOL_RESET_RELEASE_RESOURCES_BIT = 0x00000001, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkCommandPoolResetFlagBits(u32); +SetupVkFlags!(VkCommandPoolResetFlags, VkCommandPoolResetFlagBits); diff --git a/vulkan-sys/src/enums/commandpooltrimflags.rs b/vulkan-sys/src/enums/commandpooltrimflags.rs new file mode 100644 index 0000000..107ec0c --- /dev/null +++ b/vulkan-sys/src/enums/commandpooltrimflags.rs @@ -0,0 +1,12 @@ +pub use VkCommandPoolTrimFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkCommandPoolTrimFlags { + VK_COMMAND_POOL_TRIM_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkCommandPoolTrimFlagBits(u32); +SetupVkFlags!(VkCommandPoolTrimFlags, VkCommandPoolTrimFlagBits); diff --git a/vulkan-sys/src/enums/compareop.rs b/vulkan-sys/src/enums/compareop.rs new file mode 100644 index 0000000..94efcf9 --- /dev/null +++ b/vulkan-sys/src/enums/compareop.rs @@ -0,0 +1,14 @@ +pub use VkCompareOp::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum VkCompareOp { + VK_COMPARE_OP_NEVER = 0, + VK_COMPARE_OP_LESS = 1, + VK_COMPARE_OP_EQUAL = 2, + VK_COMPARE_OP_LESS_OR_EQUAL = 3, + VK_COMPARE_OP_GREATER = 4, + VK_COMPARE_OP_NOT_EQUAL = 5, + VK_COMPARE_OP_GREATER_OR_EQUAL = 6, + VK_COMPARE_OP_ALWAYS = 7, +} diff --git a/vulkan-sys/src/enums/componentswizzle.rs b/vulkan-sys/src/enums/componentswizzle.rs new file mode 100644 index 0000000..6317b1c --- /dev/null +++ b/vulkan-sys/src/enums/componentswizzle.rs @@ -0,0 +1,13 @@ +pub use VkComponentSwizzle::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkComponentSwizzle { + VK_COMPONENT_SWIZZLE_IDENTITY = 0, + VK_COMPONENT_SWIZZLE_ZERO = 1, + VK_COMPONENT_SWIZZLE_ONE = 2, + VK_COMPONENT_SWIZZLE_R = 3, + VK_COMPONENT_SWIZZLE_G = 4, + VK_COMPONENT_SWIZZLE_B = 5, + VK_COMPONENT_SWIZZLE_A = 6, +} diff --git a/vulkan-sys/src/enums/compositealphaflagskhr.rs b/vulkan-sys/src/enums/compositealphaflagskhr.rs new file mode 100644 index 0000000..b253ba6 --- /dev/null +++ b/vulkan-sys/src/enums/compositealphaflagskhr.rs @@ -0,0 +1,21 @@ +pub use VkCompositeAlphaFlagsKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkCompositeAlphaFlagsKHR { + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR = 0x00000002, + VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR = 0x00000004, + VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR = 0x00000008, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkCompositeAlphaFlagBitsKHR(u32); +SetupVkFlags!(VkCompositeAlphaFlagsKHR, VkCompositeAlphaFlagBitsKHR); + +impl Default for VkCompositeAlphaFlagBitsKHR { + fn default() -> Self { + VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR.into() + } +} diff --git a/vulkan-sys/src/enums/cullmodeflags.rs b/vulkan-sys/src/enums/cullmodeflags.rs new file mode 100644 index 0000000..80bd9eb --- /dev/null +++ b/vulkan-sys/src/enums/cullmodeflags.rs @@ -0,0 +1,15 @@ +pub use VkCullModeFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkCullModeFlags { + VK_CULL_MODE_NONE = 0, + VK_CULL_MODE_FRONT_BIT = 0x0000_0001, + VK_CULL_MODE_BACK_BIT = 0x0000_0002, + VK_CULL_MODE_FRONT_AND_BACK = 0x3, +} + 
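// Editor's note: sketch added during review, not part of the original patch.
// Assuming the SetupVkFlags! macro below provides the usual bit operators,
// plain enum variants combine into the `VkCullModeFlagBits` mask type:
//
//     let both = VK_CULL_MODE_FRONT_BIT | VK_CULL_MODE_BACK_BIT;
//     // `both` now equals VK_CULL_MODE_FRONT_AND_BACK (0x3)
//     assert!((both & VK_CULL_MODE_FRONT_BIT) != 0);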
+#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkCullModeFlagBits(u32); +SetupVkFlags!(VkCullModeFlags, VkCullModeFlagBits); diff --git a/vulkan-sys/src/enums/debugreporterrorext.rs b/vulkan-sys/src/enums/debugreporterrorext.rs new file mode 100644 index 0000000..30a28a8 --- /dev/null +++ b/vulkan-sys/src/enums/debugreporterrorext.rs @@ -0,0 +1,8 @@ +pub use VkDebugReportErrorEXT::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDebugReportErrorEXT { + VK_DEBUG_REPORT_ERROR_NONE_EXT = 0, + VK_DEBUG_REPORT_ERROR_CALLBACK_REF_EXT = 1, +} diff --git a/vulkan-sys/src/enums/debugreportflagsext.rs b/vulkan-sys/src/enums/debugreportflagsext.rs new file mode 100644 index 0000000..0fc8e25 --- /dev/null +++ b/vulkan-sys/src/enums/debugreportflagsext.rs @@ -0,0 +1,16 @@ +pub use VkDebugReportFlagsEXT::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDebugReportFlagsEXT { + VK_DEBUG_REPORT_INFORMATION_BIT_EXT = 0x00000001, + VK_DEBUG_REPORT_WARNING_BIT_EXT = 0x00000002, + VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT = 0x00000004, + VK_DEBUG_REPORT_ERROR_BIT_EXT = 0x00000008, + VK_DEBUG_REPORT_DEBUG_BIT_EXT = 0x00000010, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDebugReportFlagBitsEXT(u32); +SetupVkFlags!(VkDebugReportFlagsEXT, VkDebugReportFlagBitsEXT); diff --git a/vulkan-sys/src/enums/debugreportobjecttypeext.rs b/vulkan-sys/src/enums/debugreportobjecttypeext.rs new file mode 100644 index 0000000..96bb69e --- /dev/null +++ b/vulkan-sys/src/enums/debugreportobjecttypeext.rs @@ -0,0 +1,43 @@ +pub use VkDebugReportObjectTypeEXT::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDebugReportObjectTypeEXT { + VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT = 0, + VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT = 1, + VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT = 2, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT = 3, + VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT = 4, + VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT = 5, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT = 6, + VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT = 7, + VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT = 8, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT = 9, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT = 10, + VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT = 11, + VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT = 12, + VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT = 13, + VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT = 14, + VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT = 15, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT = 16, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT = 17, + VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT = 18, + VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT = 19, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT = 20, + VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT = 21, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT = 22, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT = 23, + VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT = 24, + VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT = 25, + VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT = 26, + VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT = 27, + VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT = 28, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT = 29, + VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT = 30, + VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT = 31, + VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT = 32, + VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT_EXT = 33, + 
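    // NOTE (editor): the values below follow Vulkan's extension enum scheme,
    // 1_000_000_000 + (extension_number - 1) * 1_000 + offset, which is why
    // they jump from small integers to the 1_000_xxx_xxx range.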
VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT = 1_000_156_000, + VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT = 1_000_085_000, + VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR_EXT = 1_000_165_000, +} diff --git a/vulkan-sys/src/enums/debugutilsmessageseverityflagsext.rs b/vulkan-sys/src/enums/debugutilsmessageseverityflagsext.rs new file mode 100644 index 0000000..e745ad7 --- /dev/null +++ b/vulkan-sys/src/enums/debugutilsmessageseverityflagsext.rs @@ -0,0 +1,18 @@ +pub use VkDebugUtilsMessageSeverityFlagsEXT::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDebugUtilsMessageSeverityFlagsEXT { + VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT = 0x00000010, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT = 0x00000100, + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT = 0x00001000, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDebugUtilsMessageSeverityFlagBitsEXT(u32); +SetupVkFlags!( + VkDebugUtilsMessageSeverityFlagsEXT, + VkDebugUtilsMessageSeverityFlagBitsEXT +); diff --git a/vulkan-sys/src/enums/debugutilsmessagetypeflagsext.rs b/vulkan-sys/src/enums/debugutilsmessagetypeflagsext.rs new file mode 100644 index 0000000..221113b --- /dev/null +++ b/vulkan-sys/src/enums/debugutilsmessagetypeflagsext.rs @@ -0,0 +1,17 @@ +pub use VkDebugUtilsMessageTypeFlagsEXT::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDebugUtilsMessageTypeFlagsEXT { + VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT = 0x00000001, + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT = 0x00000002, + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT = 0x00000004, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDebugUtilsMessageTypeFlagBitsEXT(u32); +SetupVkFlags!( + VkDebugUtilsMessageTypeFlagsEXT, + VkDebugUtilsMessageTypeFlagBitsEXT +); diff --git a/vulkan-sys/src/enums/debugutilsmessengercallbackdataflagsext.rs b/vulkan-sys/src/enums/debugutilsmessengercallbackdataflagsext.rs new file mode 100644 index 0000000..07c70cf --- /dev/null +++ b/vulkan-sys/src/enums/debugutilsmessengercallbackdataflagsext.rs @@ -0,0 +1,15 @@ +pub use VkDebugUtilsMessengerCallbackDataFlagsEXT::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDebugUtilsMessengerCallbackDataFlagsEXT { + VK_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDebugUtilsMessengerCallbackDataFlagBitsEXT(u32); +SetupVkFlags!( + VkDebugUtilsMessengerCallbackDataFlagsEXT, + VkDebugUtilsMessengerCallbackDataFlagBitsEXT +); diff --git a/vulkan-sys/src/enums/debugutilsmessengercreateflags.rs b/vulkan-sys/src/enums/debugutilsmessengercreateflags.rs new file mode 100644 index 0000000..18b25ca --- /dev/null +++ b/vulkan-sys/src/enums/debugutilsmessengercreateflags.rs @@ -0,0 +1,15 @@ +pub use VkDebugUtilsMessengerCreateFlagsEXT::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDebugUtilsMessengerCreateFlagsEXT { + VK_DEBUG_UTILS_MESSENGER_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDebugUtilsMessengerCreateFlagBitsEXT(u32); +SetupVkFlags!( + VkDebugUtilsMessengerCreateFlagsEXT, + VkDebugUtilsMessengerCreateFlagBitsEXT +); diff --git a/vulkan-sys/src/enums/dependencyflags.rs b/vulkan-sys/src/enums/dependencyflags.rs new file mode 100644 index 0000000..8eb2ffc --- /dev/null +++ 
b/vulkan-sys/src/enums/dependencyflags.rs @@ -0,0 +1,15 @@ +pub use VkDependencyFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDependencyFlags { + VK_DEPENDENCY_BY_REGION_BIT = 0x0000_0001, + VK_DEPENDENCY_DEVICE_GROUP_BIT = 0x0000_0004, + VK_DEPENDENCY_VIEW_LOCAL_BIT = 0x0000_0002, + VK_DEPENDENCY_FLAG_BITS_MAX_ENUM = 0x7FFF_FFFF, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDependencyFlagBits(u32); +SetupVkFlags!(VkDependencyFlags, VkDependencyFlagBits); diff --git a/vulkan-sys/src/enums/descriptorpoolcreateflags.rs b/vulkan-sys/src/enums/descriptorpoolcreateflags.rs new file mode 100644 index 0000000..fa618c6 --- /dev/null +++ b/vulkan-sys/src/enums/descriptorpoolcreateflags.rs @@ -0,0 +1,13 @@ +pub use VkDescriptorPoolCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDescriptorPoolCreateFlags { + VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT = 0x0000_0001, + VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT = 0x0000_0002, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDescriptorPoolCreateFlagBits(u32); +SetupVkFlags!(VkDescriptorPoolCreateFlags, VkDescriptorPoolCreateFlagBits); diff --git a/vulkan-sys/src/enums/descriptorpoolresetflags.rs b/vulkan-sys/src/enums/descriptorpoolresetflags.rs new file mode 100644 index 0000000..3aafa74 --- /dev/null +++ b/vulkan-sys/src/enums/descriptorpoolresetflags.rs @@ -0,0 +1,12 @@ +pub use VkDescriptorPoolResetFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDescriptorPoolResetFlags { + VK_DESCRIPTOR_POOL_RESET_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDescriptorPoolResetFlagBits(u32); +SetupVkFlags!(VkDescriptorPoolResetFlags, VkDescriptorPoolResetFlagBits); diff --git a/vulkan-sys/src/enums/descriptorsetlayoutcreateflags.rs b/vulkan-sys/src/enums/descriptorsetlayoutcreateflags.rs new file mode 100644 index 0000000..aa37e3c --- /dev/null +++ b/vulkan-sys/src/enums/descriptorsetlayoutcreateflags.rs @@ -0,0 +1,16 @@ +pub use VkDescriptorSetLayoutCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDescriptorSetLayoutCreateFlags { + VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR = 0x0000_0001, + VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT = 0x0000_0002, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDescriptorSetLayoutCreateFlagBits(u32); +SetupVkFlags!( + VkDescriptorSetLayoutCreateFlags, + VkDescriptorSetLayoutCreateFlagBits +); diff --git a/vulkan-sys/src/enums/descriptortype.rs b/vulkan-sys/src/enums/descriptortype.rs new file mode 100644 index 0000000..44c78fd --- /dev/null +++ b/vulkan-sys/src/enums/descriptortype.rs @@ -0,0 +1,31 @@ +pub use VkDescriptorType::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDescriptorType { + VK_DESCRIPTOR_TYPE_SAMPLER = 0, + VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER = 1, + VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE = 2, + VK_DESCRIPTOR_TYPE_STORAGE_IMAGE = 3, + VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER = 4, + VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER = 5, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER = 6, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER = 7, + VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC = 8, + VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC = 9, + VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT = 10, + VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT = 1_000_138_000, + // Provided by VK_KHR_acceleration_structure 
+ VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR = 1_000_150_000, + // Provided by VK_NV_ray_tracing + VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV = 1_000_165_000, + // Provided by VK_VALVE_mutable_descriptor_type + VK_DESCRIPTOR_TYPE_MUTABLE_VALVE = 1_000_351_000, + VK_DESCRIPTOR_TYPE_MAX_ENUM = 0x7FFF_FFFF, +} + +impl Default for VkDescriptorType { + fn default() -> Self { + VK_DESCRIPTOR_TYPE_MAX_ENUM + } +} diff --git a/vulkan-sys/src/enums/devicecreateflags.rs b/vulkan-sys/src/enums/devicecreateflags.rs new file mode 100644 index 0000000..e9fab9e --- /dev/null +++ b/vulkan-sys/src/enums/devicecreateflags.rs @@ -0,0 +1,12 @@ +pub use VkDeviceCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDeviceCreateFlags { + VK_DEVICE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDeviceCreateFlagBits(u32); +SetupVkFlags!(VkDeviceCreateFlags, VkDeviceCreateFlagBits); diff --git a/vulkan-sys/src/enums/devicequeuecreateflags.rs b/vulkan-sys/src/enums/devicequeuecreateflags.rs new file mode 100644 index 0000000..b95c713 --- /dev/null +++ b/vulkan-sys/src/enums/devicequeuecreateflags.rs @@ -0,0 +1,12 @@ +pub use VkDeviceQueueCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDeviceQueueCreateFlags { + VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT = 0x0000_0001, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDeviceQueueCreateFlagBits(u32); +SetupVkFlags!(VkDeviceQueueCreateFlags, VkDeviceQueueCreateFlagBits); diff --git a/vulkan-sys/src/enums/displaymodecreateflagskhr.rs b/vulkan-sys/src/enums/displaymodecreateflagskhr.rs new file mode 100644 index 0000000..eeba7d0 --- /dev/null +++ b/vulkan-sys/src/enums/displaymodecreateflagskhr.rs @@ -0,0 +1,15 @@ +pub use VkDisplayModeCreateFlagsKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDisplayModeCreateFlagsKHR { + VK_DISPLAY_PLANE_ALPHA_OPAQUE_BIT_KHR = 0x00000001, + VK_DISPLAY_PLANE_ALPHA_GLOBAL_BIT_KHR = 0x00000002, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_BIT_KHR = 0x00000004, + VK_DISPLAY_PLANE_ALPHA_PER_PIXEL_PREMULTIPLIED_BIT_KHR = 0x00000008, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDisplayModeCreateFlagBitsKHR(u32); +SetupVkFlags!(VkDisplayModeCreateFlagsKHR, VkDisplayModeCreateFlagBitsKHR); diff --git a/vulkan-sys/src/enums/displayplanealphaflagskhr.rs b/vulkan-sys/src/enums/displayplanealphaflagskhr.rs new file mode 100644 index 0000000..5e28ae7 --- /dev/null +++ b/vulkan-sys/src/enums/displayplanealphaflagskhr.rs @@ -0,0 +1,12 @@ +pub use VkDisplayPlaneAlphaFlagsKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDisplayPlaneAlphaFlagsKHR { + VK_DISPLAY_PLANE_ALPHA_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDisplayPlaneAlphaFlagBitsKHR(u32); +SetupVkFlags!(VkDisplayPlaneAlphaFlagsKHR, VkDisplayPlaneAlphaFlagBitsKHR); diff --git a/vulkan-sys/src/enums/displaysurfacecreateflagskhr.rs b/vulkan-sys/src/enums/displaysurfacecreateflagskhr.rs new file mode 100644 index 0000000..e0efa42 --- /dev/null +++ b/vulkan-sys/src/enums/displaysurfacecreateflagskhr.rs @@ -0,0 +1,15 @@ +pub use VkDisplaySurfaceCreateFlagsKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDisplaySurfaceCreateFlagsKHR { + VK_DISPLAY_SURFACE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct 
VkDisplaySurfaceCreateFlagBitsKHR(u32); +SetupVkFlags!( + VkDisplaySurfaceCreateFlagsKHR, + VkDisplaySurfaceCreateFlagBitsKHR +); diff --git a/vulkan-sys/src/enums/dynamicstate.rs b/vulkan-sys/src/enums/dynamicstate.rs new file mode 100644 index 0000000..6429727 --- /dev/null +++ b/vulkan-sys/src/enums/dynamicstate.rs @@ -0,0 +1,22 @@ +pub use VkDynamicState::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDynamicState { + VK_DYNAMIC_STATE_VIEWPORT = 0, + VK_DYNAMIC_STATE_SCISSOR = 1, + VK_DYNAMIC_STATE_LINE_WIDTH = 2, + VK_DYNAMIC_STATE_DEPTH_BIAS = 3, + VK_DYNAMIC_STATE_BLEND_CONSTANTS = 4, + VK_DYNAMIC_STATE_DEPTH_BOUNDS = 5, + VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK = 6, + VK_DYNAMIC_STATE_STENCIL_WRITE_MASK = 7, + VK_DYNAMIC_STATE_STENCIL_REFERENCE = 8, + VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV = 1_000_087_000, + VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT = 1_000_099_000, + VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT = 1_000_143_000, + VK_DYNAMIC_STATE_VIEWPORT_SHADING_RATE_PALETTE_NV = 1_000_164_004, + VK_DYNAMIC_STATE_VIEWPORT_COARSE_SAMPLE_ORDER_NV = 1_000_164_006, + VK_DYNAMIC_STATE_EXCLUSIVE_SCISSOR_NV = 1_000_205_001, + VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR = 1_000_347_000, +} diff --git a/vulkan-sys/src/enums/eventcreateflags.rs b/vulkan-sys/src/enums/eventcreateflags.rs new file mode 100644 index 0000000..5cb90ad --- /dev/null +++ b/vulkan-sys/src/enums/eventcreateflags.rs @@ -0,0 +1,12 @@ +pub use VkEventCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkEventCreateFlags { + VK_EVENT_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkEventCreateFlagBits(u32); +SetupVkFlags!(VkEventCreateFlags, VkEventCreateFlagBits); diff --git a/vulkan-sys/src/enums/ext/descriptorbindingflagsext.rs b/vulkan-sys/src/enums/ext/descriptorbindingflagsext.rs new file mode 100644 index 0000000..a9cb5d1 --- /dev/null +++ b/vulkan-sys/src/enums/ext/descriptorbindingflagsext.rs @@ -0,0 +1,16 @@ +pub use VkDescriptorBindingFlagsEXT::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkDescriptorBindingFlagsEXT { + VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT = 0x0000_0001, + VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT = 0x0000_0002, + VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT = 0x0000_0004, + VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT = 0x0000_0008, + VK_DESCRIPTOR_BINDING_FLAG_BITS_MAX_ENUM_EXT = 0x7FFF_FFFF, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkDescriptorBindingFlagBitsEXT(u32); +SetupVkFlags!(VkDescriptorBindingFlagsEXT, VkDescriptorBindingFlagBitsEXT); diff --git a/vulkan-sys/src/enums/ext/mod.rs b/vulkan-sys/src/enums/ext/mod.rs new file mode 100644 index 0000000..8fc4da0 --- /dev/null +++ b/vulkan-sys/src/enums/ext/mod.rs @@ -0,0 +1,3 @@ +pub mod descriptorbindingflagsext; + +pub mod prelude; diff --git a/vulkan-sys/src/enums/ext/prelude.rs b/vulkan-sys/src/enums/ext/prelude.rs new file mode 100644 index 0000000..60b23bc --- /dev/null +++ b/vulkan-sys/src/enums/ext/prelude.rs @@ -0,0 +1 @@ +pub use super::descriptorbindingflagsext::*; diff --git a/vulkan-sys/src/enums/externalmemoryhandletypeflags.rs b/vulkan-sys/src/enums/externalmemoryhandletypeflags.rs new file mode 100644 index 0000000..b6c96f1 --- /dev/null +++ b/vulkan-sys/src/enums/externalmemoryhandletypeflags.rs @@ -0,0 +1,25 @@ +pub use VkExternalMemoryHandleTypeFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, 
PartialEq)] +pub enum VkExternalMemoryHandleTypeFlags { + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT = 0x0000_0001, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_BIT = 0x0000_0002, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT_BIT = 0x0000_0004, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT = 0x0000_0008, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_KMT_BIT = 0x0000_0010, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP_BIT = 0x0000_0020, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE_BIT = 0x0000_0040, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT = 0x0000_0080, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_MAPPED_FOREIGN_MEMORY_BIT_EXT = 0x0000_0100, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT = 0x0000_0200, + VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID = 0x0000_0400, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkExternalMemoryHandleTypeFlagBits(u32); +SetupVkFlags!( + VkExternalMemoryHandleTypeFlags, + VkExternalMemoryHandleTypeFlagBits +); diff --git a/vulkan-sys/src/enums/fencecreateflags.rs b/vulkan-sys/src/enums/fencecreateflags.rs new file mode 100644 index 0000000..982be62 --- /dev/null +++ b/vulkan-sys/src/enums/fencecreateflags.rs @@ -0,0 +1,12 @@ +pub use VkFenceCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkFenceCreateFlags { + VK_FENCE_CREATE_SIGNALED_BIT = 0x0000_0001, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkFenceCreateFlagBits(u32); +SetupVkFlags!(VkFenceCreateFlags, VkFenceCreateFlagBits); diff --git a/vulkan-sys/src/enums/filter.rs b/vulkan-sys/src/enums/filter.rs new file mode 100644 index 0000000..8d9b7ae --- /dev/null +++ b/vulkan-sys/src/enums/filter.rs @@ -0,0 +1,8 @@ +pub use VkFilter::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum VkFilter { + VK_FILTER_NEAREST = 0, + VK_FILTER_LINEAR = 1, +} diff --git a/vulkan-sys/src/enums/format.rs b/vulkan-sys/src/enums/format.rs new file mode 100644 index 0000000..3175dc7 --- /dev/null +++ b/vulkan-sys/src/enums/format.rs @@ -0,0 +1,384 @@ +pub use VkFormat::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkFormat { + VK_FORMAT_UNDEFINED = 0, + VK_FORMAT_R4G4_UNORM_PACK8 = 1, + VK_FORMAT_R4G4B4A4_UNORM_PACK16 = 2, + VK_FORMAT_B4G4R4A4_UNORM_PACK16 = 3, + VK_FORMAT_R5G6B5_UNORM_PACK16 = 4, + VK_FORMAT_B5G6R5_UNORM_PACK16 = 5, + VK_FORMAT_R5G5B5A1_UNORM_PACK16 = 6, + VK_FORMAT_B5G5R5A1_UNORM_PACK16 = 7, + VK_FORMAT_A1R5G5B5_UNORM_PACK16 = 8, + VK_FORMAT_R8_UNORM = 9, + VK_FORMAT_R8_SNORM = 10, + VK_FORMAT_R8_USCALED = 11, + VK_FORMAT_R8_SSCALED = 12, + VK_FORMAT_R8_UINT = 13, + VK_FORMAT_R8_SINT = 14, + VK_FORMAT_R8_SRGB = 15, + VK_FORMAT_R8G8_UNORM = 16, + VK_FORMAT_R8G8_SNORM = 17, + VK_FORMAT_R8G8_USCALED = 18, + VK_FORMAT_R8G8_SSCALED = 19, + VK_FORMAT_R8G8_UINT = 20, + VK_FORMAT_R8G8_SINT = 21, + VK_FORMAT_R8G8_SRGB = 22, + VK_FORMAT_R8G8B8_UNORM = 23, + VK_FORMAT_R8G8B8_SNORM = 24, + VK_FORMAT_R8G8B8_USCALED = 25, + VK_FORMAT_R8G8B8_SSCALED = 26, + VK_FORMAT_R8G8B8_UINT = 27, + VK_FORMAT_R8G8B8_SINT = 28, + VK_FORMAT_R8G8B8_SRGB = 29, + VK_FORMAT_B8G8R8_UNORM = 30, + VK_FORMAT_B8G8R8_SNORM = 31, + VK_FORMAT_B8G8R8_USCALED = 32, + VK_FORMAT_B8G8R8_SSCALED = 33, + VK_FORMAT_B8G8R8_UINT = 34, + VK_FORMAT_B8G8R8_SINT = 35, + VK_FORMAT_B8G8R8_SRGB = 36, + VK_FORMAT_R8G8B8A8_UNORM = 37, + VK_FORMAT_R8G8B8A8_SNORM = 38, + VK_FORMAT_R8G8B8A8_USCALED = 39, + VK_FORMAT_R8G8B8A8_SSCALED = 40, + VK_FORMAT_R8G8B8A8_UINT = 41, + 
VK_FORMAT_R8G8B8A8_SINT = 42, + VK_FORMAT_R8G8B8A8_SRGB = 43, + VK_FORMAT_B8G8R8A8_UNORM = 44, + VK_FORMAT_B8G8R8A8_SNORM = 45, + VK_FORMAT_B8G8R8A8_USCALED = 46, + VK_FORMAT_B8G8R8A8_SSCALED = 47, + VK_FORMAT_B8G8R8A8_UINT = 48, + VK_FORMAT_B8G8R8A8_SINT = 49, + VK_FORMAT_B8G8R8A8_SRGB = 50, + VK_FORMAT_A8B8G8R8_UNORM_PACK32 = 51, + VK_FORMAT_A8B8G8R8_SNORM_PACK32 = 52, + VK_FORMAT_A8B8G8R8_USCALED_PACK32 = 53, + VK_FORMAT_A8B8G8R8_SSCALED_PACK32 = 54, + VK_FORMAT_A8B8G8R8_UINT_PACK32 = 55, + VK_FORMAT_A8B8G8R8_SINT_PACK32 = 56, + VK_FORMAT_A8B8G8R8_SRGB_PACK32 = 57, + VK_FORMAT_A2R10G10B10_UNORM_PACK32 = 58, + VK_FORMAT_A2R10G10B10_SNORM_PACK32 = 59, + VK_FORMAT_A2R10G10B10_USCALED_PACK32 = 60, + VK_FORMAT_A2R10G10B10_SSCALED_PACK32 = 61, + VK_FORMAT_A2R10G10B10_UINT_PACK32 = 62, + VK_FORMAT_A2R10G10B10_SINT_PACK32 = 63, + VK_FORMAT_A2B10G10R10_UNORM_PACK32 = 64, + VK_FORMAT_A2B10G10R10_SNORM_PACK32 = 65, + VK_FORMAT_A2B10G10R10_USCALED_PACK32 = 66, + VK_FORMAT_A2B10G10R10_SSCALED_PACK32 = 67, + VK_FORMAT_A2B10G10R10_UINT_PACK32 = 68, + VK_FORMAT_A2B10G10R10_SINT_PACK32 = 69, + VK_FORMAT_R16_UNORM = 70, + VK_FORMAT_R16_SNORM = 71, + VK_FORMAT_R16_USCALED = 72, + VK_FORMAT_R16_SSCALED = 73, + VK_FORMAT_R16_UINT = 74, + VK_FORMAT_R16_SINT = 75, + VK_FORMAT_R16_SFLOAT = 76, + VK_FORMAT_R16G16_UNORM = 77, + VK_FORMAT_R16G16_SNORM = 78, + VK_FORMAT_R16G16_USCALED = 79, + VK_FORMAT_R16G16_SSCALED = 80, + VK_FORMAT_R16G16_UINT = 81, + VK_FORMAT_R16G16_SINT = 82, + VK_FORMAT_R16G16_SFLOAT = 83, + VK_FORMAT_R16G16B16_UNORM = 84, + VK_FORMAT_R16G16B16_SNORM = 85, + VK_FORMAT_R16G16B16_USCALED = 86, + VK_FORMAT_R16G16B16_SSCALED = 87, + VK_FORMAT_R16G16B16_UINT = 88, + VK_FORMAT_R16G16B16_SINT = 89, + VK_FORMAT_R16G16B16_SFLOAT = 90, + VK_FORMAT_R16G16B16A16_UNORM = 91, + VK_FORMAT_R16G16B16A16_SNORM = 92, + VK_FORMAT_R16G16B16A16_USCALED = 93, + VK_FORMAT_R16G16B16A16_SSCALED = 94, + VK_FORMAT_R16G16B16A16_UINT = 95, + VK_FORMAT_R16G16B16A16_SINT = 96, + VK_FORMAT_R16G16B16A16_SFLOAT = 97, + VK_FORMAT_R32_UINT = 98, + VK_FORMAT_R32_SINT = 99, + VK_FORMAT_R32_SFLOAT = 100, + VK_FORMAT_R32G32_UINT = 101, + VK_FORMAT_R32G32_SINT = 102, + VK_FORMAT_R32G32_SFLOAT = 103, + VK_FORMAT_R32G32B32_UINT = 104, + VK_FORMAT_R32G32B32_SINT = 105, + VK_FORMAT_R32G32B32_SFLOAT = 106, + VK_FORMAT_R32G32B32A32_UINT = 107, + VK_FORMAT_R32G32B32A32_SINT = 108, + VK_FORMAT_R32G32B32A32_SFLOAT = 109, + VK_FORMAT_R64_UINT = 110, + VK_FORMAT_R64_SINT = 111, + VK_FORMAT_R64_SFLOAT = 112, + VK_FORMAT_R64G64_UINT = 113, + VK_FORMAT_R64G64_SINT = 114, + VK_FORMAT_R64G64_SFLOAT = 115, + VK_FORMAT_R64G64B64_UINT = 116, + VK_FORMAT_R64G64B64_SINT = 117, + VK_FORMAT_R64G64B64_SFLOAT = 118, + VK_FORMAT_R64G64B64A64_UINT = 119, + VK_FORMAT_R64G64B64A64_SINT = 120, + VK_FORMAT_R64G64B64A64_SFLOAT = 121, + VK_FORMAT_B10G11R11_UFLOAT_PACK32 = 122, + VK_FORMAT_E5B9G9R9_UFLOAT_PACK32 = 123, + VK_FORMAT_D16_UNORM = 124, + VK_FORMAT_X8_D24_UNORM_PACK32 = 125, + VK_FORMAT_D32_SFLOAT = 126, + VK_FORMAT_S8_UINT = 127, + VK_FORMAT_D16_UNORM_S8_UINT = 128, + VK_FORMAT_D24_UNORM_S8_UINT = 129, + VK_FORMAT_D32_SFLOAT_S8_UINT = 130, + VK_FORMAT_BC1_RGB_UNORM_BLOCK = 131, + VK_FORMAT_BC1_RGB_SRGB_BLOCK = 132, + VK_FORMAT_BC1_RGBA_UNORM_BLOCK = 133, + VK_FORMAT_BC1_RGBA_SRGB_BLOCK = 134, + VK_FORMAT_BC2_UNORM_BLOCK = 135, + VK_FORMAT_BC2_SRGB_BLOCK = 136, + VK_FORMAT_BC3_UNORM_BLOCK = 137, + VK_FORMAT_BC3_SRGB_BLOCK = 138, + VK_FORMAT_BC4_UNORM_BLOCK = 139, + VK_FORMAT_BC4_SNORM_BLOCK = 140, + VK_FORMAT_BC5_UNORM_BLOCK = 141, + 
VK_FORMAT_BC5_SNORM_BLOCK = 142,
+    VK_FORMAT_BC6H_UFLOAT_BLOCK = 143,
+    VK_FORMAT_BC6H_SFLOAT_BLOCK = 144,
+    VK_FORMAT_BC7_UNORM_BLOCK = 145,
+    VK_FORMAT_BC7_SRGB_BLOCK = 146,
+    VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK = 147,
+    VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK = 148,
+    VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK = 149,
+    VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK = 150,
+    VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK = 151,
+    VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK = 152,
+    VK_FORMAT_EAC_R11_UNORM_BLOCK = 153,
+    VK_FORMAT_EAC_R11_SNORM_BLOCK = 154,
+    VK_FORMAT_EAC_R11G11_UNORM_BLOCK = 155,
+    VK_FORMAT_EAC_R11G11_SNORM_BLOCK = 156,
+    VK_FORMAT_ASTC_4x4_UNORM_BLOCK = 157,
+    VK_FORMAT_ASTC_4x4_SRGB_BLOCK = 158,
+    VK_FORMAT_ASTC_5x4_UNORM_BLOCK = 159,
+    VK_FORMAT_ASTC_5x4_SRGB_BLOCK = 160,
+    VK_FORMAT_ASTC_5x5_UNORM_BLOCK = 161,
+    VK_FORMAT_ASTC_5x5_SRGB_BLOCK = 162,
+    VK_FORMAT_ASTC_6x5_UNORM_BLOCK = 163,
+    VK_FORMAT_ASTC_6x5_SRGB_BLOCK = 164,
+    VK_FORMAT_ASTC_6x6_UNORM_BLOCK = 165,
+    VK_FORMAT_ASTC_6x6_SRGB_BLOCK = 166,
+    VK_FORMAT_ASTC_8x5_UNORM_BLOCK = 167,
+    VK_FORMAT_ASTC_8x5_SRGB_BLOCK = 168,
+    VK_FORMAT_ASTC_8x6_UNORM_BLOCK = 169,
+    VK_FORMAT_ASTC_8x6_SRGB_BLOCK = 170,
+    VK_FORMAT_ASTC_8x8_UNORM_BLOCK = 171,
+    VK_FORMAT_ASTC_8x8_SRGB_BLOCK = 172,
+    VK_FORMAT_ASTC_10x5_UNORM_BLOCK = 173,
+    VK_FORMAT_ASTC_10x5_SRGB_BLOCK = 174,
+    VK_FORMAT_ASTC_10x6_UNORM_BLOCK = 175,
+    VK_FORMAT_ASTC_10x6_SRGB_BLOCK = 176,
+    VK_FORMAT_ASTC_10x8_UNORM_BLOCK = 177,
+    VK_FORMAT_ASTC_10x8_SRGB_BLOCK = 178,
+    VK_FORMAT_ASTC_10x10_UNORM_BLOCK = 179,
+    VK_FORMAT_ASTC_10x10_SRGB_BLOCK = 180,
+    VK_FORMAT_ASTC_12x10_UNORM_BLOCK = 181,
+    VK_FORMAT_ASTC_12x10_SRGB_BLOCK = 182,
+    VK_FORMAT_ASTC_12x12_UNORM_BLOCK = 183,
+    VK_FORMAT_ASTC_12x12_SRGB_BLOCK = 184,
+}
+
+impl From<u32> for VkFormat {
+    fn from(n: u32) -> Self {
+        match n {
+            0 => VK_FORMAT_UNDEFINED,
+            1 => VK_FORMAT_R4G4_UNORM_PACK8,
+            2 => VK_FORMAT_R4G4B4A4_UNORM_PACK16,
+            3 => VK_FORMAT_B4G4R4A4_UNORM_PACK16,
+            4 => VK_FORMAT_R5G6B5_UNORM_PACK16,
+            5 => VK_FORMAT_B5G6R5_UNORM_PACK16,
+            6 => VK_FORMAT_R5G5B5A1_UNORM_PACK16,
+            7 => VK_FORMAT_B5G5R5A1_UNORM_PACK16,
+            8 => VK_FORMAT_A1R5G5B5_UNORM_PACK16,
+            9 => VK_FORMAT_R8_UNORM,
+            10 => VK_FORMAT_R8_SNORM,
+            11 => VK_FORMAT_R8_USCALED,
+            12 => VK_FORMAT_R8_SSCALED,
+            13 => VK_FORMAT_R8_UINT,
+            14 => VK_FORMAT_R8_SINT,
+            15 => VK_FORMAT_R8_SRGB,
+            16 => VK_FORMAT_R8G8_UNORM,
+            17 => VK_FORMAT_R8G8_SNORM,
+            18 => VK_FORMAT_R8G8_USCALED,
+            19 => VK_FORMAT_R8G8_SSCALED,
+            20 => VK_FORMAT_R8G8_UINT,
+            21 => VK_FORMAT_R8G8_SINT,
+            22 => VK_FORMAT_R8G8_SRGB,
+            23 => VK_FORMAT_R8G8B8_UNORM,
+            24 => VK_FORMAT_R8G8B8_SNORM,
+            25 => VK_FORMAT_R8G8B8_USCALED,
+            26 => VK_FORMAT_R8G8B8_SSCALED,
+            27 => VK_FORMAT_R8G8B8_UINT,
+            28 => VK_FORMAT_R8G8B8_SINT,
+            29 => VK_FORMAT_R8G8B8_SRGB,
+            30 => VK_FORMAT_B8G8R8_UNORM,
+            31 => VK_FORMAT_B8G8R8_SNORM,
+            32 => VK_FORMAT_B8G8R8_USCALED,
+            33 => VK_FORMAT_B8G8R8_SSCALED,
+            34 => VK_FORMAT_B8G8R8_UINT,
+            35 => VK_FORMAT_B8G8R8_SINT,
+            36 => VK_FORMAT_B8G8R8_SRGB,
+            37 => VK_FORMAT_R8G8B8A8_UNORM,
+            38 => VK_FORMAT_R8G8B8A8_SNORM,
+            39 => VK_FORMAT_R8G8B8A8_USCALED,
+            40 => VK_FORMAT_R8G8B8A8_SSCALED,
+            41 => VK_FORMAT_R8G8B8A8_UINT,
+            42 => VK_FORMAT_R8G8B8A8_SINT,
+            43 => VK_FORMAT_R8G8B8A8_SRGB,
+            44 => VK_FORMAT_B8G8R8A8_UNORM,
+            45 => VK_FORMAT_B8G8R8A8_SNORM,
+            46 => VK_FORMAT_B8G8R8A8_USCALED,
+            47 => VK_FORMAT_B8G8R8A8_SSCALED,
+            48 => VK_FORMAT_B8G8R8A8_UINT,
+            49 => VK_FORMAT_B8G8R8A8_SINT,
+            50 => VK_FORMAT_B8G8R8A8_SRGB,
+            51 => VK_FORMAT_A8B8G8R8_UNORM_PACK32,
+            52 => VK_FORMAT_A8B8G8R8_SNORM_PACK32,
+
53 => VK_FORMAT_A8B8G8R8_USCALED_PACK32, + 54 => VK_FORMAT_A8B8G8R8_SSCALED_PACK32, + 55 => VK_FORMAT_A8B8G8R8_UINT_PACK32, + 56 => VK_FORMAT_A8B8G8R8_SINT_PACK32, + 57 => VK_FORMAT_A8B8G8R8_SRGB_PACK32, + 58 => VK_FORMAT_A2R10G10B10_UNORM_PACK32, + 59 => VK_FORMAT_A2R10G10B10_SNORM_PACK32, + 60 => VK_FORMAT_A2R10G10B10_USCALED_PACK32, + 61 => VK_FORMAT_A2R10G10B10_SSCALED_PACK32, + 62 => VK_FORMAT_A2R10G10B10_UINT_PACK32, + 63 => VK_FORMAT_A2R10G10B10_SINT_PACK32, + 64 => VK_FORMAT_A2B10G10R10_UNORM_PACK32, + 65 => VK_FORMAT_A2B10G10R10_SNORM_PACK32, + 66 => VK_FORMAT_A2B10G10R10_USCALED_PACK32, + 67 => VK_FORMAT_A2B10G10R10_SSCALED_PACK32, + 68 => VK_FORMAT_A2B10G10R10_UINT_PACK32, + 69 => VK_FORMAT_A2B10G10R10_SINT_PACK32, + 70 => VK_FORMAT_R16_UNORM, + 71 => VK_FORMAT_R16_SNORM, + 72 => VK_FORMAT_R16_USCALED, + 73 => VK_FORMAT_R16_SSCALED, + 74 => VK_FORMAT_R16_UINT, + 75 => VK_FORMAT_R16_SINT, + 76 => VK_FORMAT_R16_SFLOAT, + 77 => VK_FORMAT_R16G16_UNORM, + 78 => VK_FORMAT_R16G16_SNORM, + 79 => VK_FORMAT_R16G16_USCALED, + 80 => VK_FORMAT_R16G16_SSCALED, + 81 => VK_FORMAT_R16G16_UINT, + 82 => VK_FORMAT_R16G16_SINT, + 83 => VK_FORMAT_R16G16_SFLOAT, + 84 => VK_FORMAT_R16G16B16_UNORM, + 85 => VK_FORMAT_R16G16B16_SNORM, + 86 => VK_FORMAT_R16G16B16_USCALED, + 87 => VK_FORMAT_R16G16B16_SSCALED, + 88 => VK_FORMAT_R16G16B16_UINT, + 89 => VK_FORMAT_R16G16B16_SINT, + 90 => VK_FORMAT_R16G16B16_SFLOAT, + 91 => VK_FORMAT_R16G16B16A16_UNORM, + 92 => VK_FORMAT_R16G16B16A16_SNORM, + 93 => VK_FORMAT_R16G16B16A16_USCALED, + 94 => VK_FORMAT_R16G16B16A16_SSCALED, + 95 => VK_FORMAT_R16G16B16A16_UINT, + 96 => VK_FORMAT_R16G16B16A16_SINT, + 97 => VK_FORMAT_R16G16B16A16_SFLOAT, + 98 => VK_FORMAT_R32_UINT, + 99 => VK_FORMAT_R32_SINT, + 100 => VK_FORMAT_R32_SFLOAT, + 101 => VK_FORMAT_R32G32_UINT, + 102 => VK_FORMAT_R32G32_SINT, + 103 => VK_FORMAT_R32G32_SFLOAT, + 104 => VK_FORMAT_R32G32B32_UINT, + 105 => VK_FORMAT_R32G32B32_SINT, + 106 => VK_FORMAT_R32G32B32_SFLOAT, + 107 => VK_FORMAT_R32G32B32A32_UINT, + 108 => VK_FORMAT_R32G32B32A32_SINT, + 109 => VK_FORMAT_R32G32B32A32_SFLOAT, + 110 => VK_FORMAT_R64_UINT, + 111 => VK_FORMAT_R64_SINT, + 112 => VK_FORMAT_R64_SFLOAT, + 113 => VK_FORMAT_R64G64_UINT, + 114 => VK_FORMAT_R64G64_SINT, + 115 => VK_FORMAT_R64G64_SFLOAT, + 116 => VK_FORMAT_R64G64B64_UINT, + 117 => VK_FORMAT_R64G64B64_SINT, + 118 => VK_FORMAT_R64G64B64_SFLOAT, + 119 => VK_FORMAT_R64G64B64A64_UINT, + 120 => VK_FORMAT_R64G64B64A64_SINT, + 121 => VK_FORMAT_R64G64B64A64_SFLOAT, + 122 => VK_FORMAT_B10G11R11_UFLOAT_PACK32, + 123 => VK_FORMAT_E5B9G9R9_UFLOAT_PACK32, + 124 => VK_FORMAT_D16_UNORM, + 125 => VK_FORMAT_X8_D24_UNORM_PACK32, + 126 => VK_FORMAT_D32_SFLOAT, + 127 => VK_FORMAT_S8_UINT, + 128 => VK_FORMAT_D16_UNORM_S8_UINT, + 129 => VK_FORMAT_D24_UNORM_S8_UINT, + 130 => VK_FORMAT_D32_SFLOAT_S8_UINT, + 131 => VK_FORMAT_BC1_RGB_UNORM_BLOCK, + 132 => VK_FORMAT_BC1_RGB_SRGB_BLOCK, + 133 => VK_FORMAT_BC1_RGBA_UNORM_BLOCK, + 134 => VK_FORMAT_BC1_RGBA_SRGB_BLOCK, + 135 => VK_FORMAT_BC2_UNORM_BLOCK, + 136 => VK_FORMAT_BC2_SRGB_BLOCK, + 137 => VK_FORMAT_BC3_UNORM_BLOCK, + 138 => VK_FORMAT_BC3_SRGB_BLOCK, + 139 => VK_FORMAT_BC4_UNORM_BLOCK, + 140 => VK_FORMAT_BC4_SNORM_BLOCK, + 141 => VK_FORMAT_BC5_UNORM_BLOCK, + 142 => VK_FORMAT_BC5_SNORM_BLOCK, + 143 => VK_FORMAT_BC6H_UFLOAT_BLOCK, + 144 => VK_FORMAT_BC6H_SFLOAT_BLOCK, + 145 => VK_FORMAT_BC7_UNORM_BLOCK, + 146 => VK_FORMAT_BC7_SRGB_BLOCK, + 147 => VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, + 148 => VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK, + 149 => 
VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, + 150 => VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, + 151 => VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, + 152 => VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK, + 153 => VK_FORMAT_EAC_R11_UNORM_BLOCK, + 154 => VK_FORMAT_EAC_R11_SNORM_BLOCK, + 155 => VK_FORMAT_EAC_R11G11_UNORM_BLOCK, + 156 => VK_FORMAT_EAC_R11G11_SNORM_BLOCK, + 157 => VK_FORMAT_ASTC_4x4_UNORM_BLOCK, + 158 => VK_FORMAT_ASTC_4x4_SRGB_BLOCK, + 159 => VK_FORMAT_ASTC_5x4_UNORM_BLOCK, + 160 => VK_FORMAT_ASTC_5x4_SRGB_BLOCK, + 161 => VK_FORMAT_ASTC_5x5_UNORM_BLOCK, + 162 => VK_FORMAT_ASTC_5x5_SRGB_BLOCK, + 163 => VK_FORMAT_ASTC_6x5_UNORM_BLOCK, + 164 => VK_FORMAT_ASTC_6x5_SRGB_BLOCK, + 165 => VK_FORMAT_ASTC_6x6_UNORM_BLOCK, + 166 => VK_FORMAT_ASTC_6x6_SRGB_BLOCK, + 167 => VK_FORMAT_ASTC_8x5_UNORM_BLOCK, + 168 => VK_FORMAT_ASTC_8x5_SRGB_BLOCK, + 169 => VK_FORMAT_ASTC_8x6_UNORM_BLOCK, + 170 => VK_FORMAT_ASTC_8x6_SRGB_BLOCK, + 171 => VK_FORMAT_ASTC_8x8_UNORM_BLOCK, + 172 => VK_FORMAT_ASTC_8x8_SRGB_BLOCK, + 173 => VK_FORMAT_ASTC_10x5_UNORM_BLOCK, + 174 => VK_FORMAT_ASTC_10x5_SRGB_BLOCK, + 175 => VK_FORMAT_ASTC_10x6_UNORM_BLOCK, + 176 => VK_FORMAT_ASTC_10x6_SRGB_BLOCK, + 177 => VK_FORMAT_ASTC_10x8_UNORM_BLOCK, + 178 => VK_FORMAT_ASTC_10x8_SRGB_BLOCK, + 179 => VK_FORMAT_ASTC_10x10_UNORM_BLOCK, + 180 => VK_FORMAT_ASTC_10x10_SRGB_BLOCK, + 181 => VK_FORMAT_ASTC_12x10_UNORM_BLOCK, + 182 => VK_FORMAT_ASTC_12x10_SRGB_BLOCK, + 183 => VK_FORMAT_ASTC_12x12_UNORM_BLOCK, + 184 => VK_FORMAT_ASTC_12x12_SRGB_BLOCK, + _ => VK_FORMAT_UNDEFINED, + } + } +} diff --git a/vulkan-sys/src/enums/formatfeatureflags.rs b/vulkan-sys/src/enums/formatfeatureflags.rs new file mode 100644 index 0000000..7a67e19 --- /dev/null +++ b/vulkan-sys/src/enums/formatfeatureflags.rs @@ -0,0 +1,30 @@ +pub use VkFormatFeatureFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkFormatFeatureFlags { + VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT = 0x0000_0001, + VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT = 0x0000_0002, + VK_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT = 0x0000_0004, + VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT = 0x0000_0008, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT = 0x0000_0010, + VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_ATOMIC_BIT = 0x0000_0020, + VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT = 0x0000_0040, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT = 0x0000_0080, + VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT = 0x0000_0100, + VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x0000_0200, + VK_FORMAT_FEATURE_BLIT_SRC_BIT = 0x0000_0400, + VK_FORMAT_FEATURE_BLIT_DST_BIT = 0x0000_0800, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT = 0x0000_1000, + VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR = 0x0000_4000, + VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR = 0x0000_8000, + VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT = 0x0002_0000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_LINEAR_FILTER_BIT = 0x0004_0000, + VK_FORMAT_FEATURE_SAMPLED_IMAGE_YCBCR_CONVERSION_SEPARATE_RECONSTRUCTION_FILTER_BIT = + 0x0008_0000, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkFormatFeatureFlagBits(u32); +SetupVkFlags!(VkFormatFeatureFlags, VkFormatFeatureFlagBits); diff --git a/vulkan-sys/src/enums/framebuffercreateflags.rs b/vulkan-sys/src/enums/framebuffercreateflags.rs new file mode 100644 index 0000000..a3c8e08 --- /dev/null +++ b/vulkan-sys/src/enums/framebuffercreateflags.rs @@ -0,0 +1,12 @@ +pub use VkFramebufferCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkFramebufferCreateFlags { + 
VK_FRAMEBUFFER_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkFramebufferCreateFlagBits(u32); +SetupVkFlags!(VkFramebufferCreateFlags, VkFramebufferCreateFlagBits); diff --git a/vulkan-sys/src/enums/frontface.rs b/vulkan-sys/src/enums/frontface.rs new file mode 100644 index 0000000..3cbb8d5 --- /dev/null +++ b/vulkan-sys/src/enums/frontface.rs @@ -0,0 +1,8 @@ +pub use VkFrontFace::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkFrontFace { + VK_FRONT_FACE_COUNTER_CLOCKWISE = 0, + VK_FRONT_FACE_CLOCKWISE = 1, +} diff --git a/vulkan-sys/src/enums/imageaspectflags.rs b/vulkan-sys/src/enums/imageaspectflags.rs new file mode 100644 index 0000000..9baec53 --- /dev/null +++ b/vulkan-sys/src/enums/imageaspectflags.rs @@ -0,0 +1,15 @@ +pub use VkImageAspectFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkImageAspectFlags { + VK_IMAGE_ASPECT_COLOR_BIT = 0x0000_0001, + VK_IMAGE_ASPECT_DEPTH_BIT = 0x0000_0002, + VK_IMAGE_ASPECT_STENCIL_BIT = 0x0000_0004, + VK_IMAGE_ASPECT_METADATA_BIT = 0x0000_0008, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash, Default)] +pub struct VkImageAspectFlagBits(u32); +SetupVkFlags!(VkImageAspectFlags, VkImageAspectFlagBits); diff --git a/vulkan-sys/src/enums/imagecreateflags.rs b/vulkan-sys/src/enums/imagecreateflags.rs new file mode 100644 index 0000000..759b879 --- /dev/null +++ b/vulkan-sys/src/enums/imagecreateflags.rs @@ -0,0 +1,26 @@ +pub use VkImageCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkImageCreateFlags { + VK_IMAGE_CREATE_SPARSE_BINDING_BIT = 0x0000_0001, + VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT = 0x0000_0002, + VK_IMAGE_CREATE_SPARSE_ALIASED_BIT = 0x0000_0004, + VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT = 0x0000_0008, + VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT = 0x0000_0010, + VK_IMAGE_CREATE_ALIAS_BIT = 0x0000_0400, + VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT = 0x0000_0040, + VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT = 0x0000_0020, + VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT = 0x0000_0080, + VK_IMAGE_CREATE_EXTENDED_USAGE_BIT = 0x0000_0100, + VK_IMAGE_CREATE_PROTECTED_BIT = 0x0000_0800, + VK_IMAGE_CREATE_DISJOINT_BIT = 0x0000_0200, + VK_IMAGE_CREATE_CORNER_SAMPLED_BIT_NV = 0x0000_2000, + VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT = 0x0000_1000, + VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT = 0x0000_4000, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkImageCreateFlagBits(u32); +SetupVkFlags!(VkImageCreateFlags, VkImageCreateFlagBits); diff --git a/vulkan-sys/src/enums/imagelayout.rs b/vulkan-sys/src/enums/imagelayout.rs new file mode 100644 index 0000000..3b58d50 --- /dev/null +++ b/vulkan-sys/src/enums/imagelayout.rs @@ -0,0 +1,22 @@ +pub use VkImageLayout::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkImageLayout { + VK_IMAGE_LAYOUT_UNDEFINED = 0, + VK_IMAGE_LAYOUT_GENERAL = 1, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL = 2, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL = 3, + VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL = 4, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL = 5, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL = 6, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL = 7, + VK_IMAGE_LAYOUT_PREINITIALIZED = 8, + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR = 1_000_001_002, +} + +impl Default for VkImageLayout { + fn default() -> Self { + VK_IMAGE_LAYOUT_UNDEFINED + } +} diff --git a/vulkan-sys/src/enums/imagetiling.rs 
b/vulkan-sys/src/enums/imagetiling.rs new file mode 100644 index 0000000..c538061 --- /dev/null +++ b/vulkan-sys/src/enums/imagetiling.rs @@ -0,0 +1,8 @@ +pub use VkImageTiling::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkImageTiling { + VK_IMAGE_TILING_OPTIMAL = 0, + VK_IMAGE_TILING_LINEAR = 1, +} diff --git a/vulkan-sys/src/enums/imagetype.rs b/vulkan-sys/src/enums/imagetype.rs new file mode 100644 index 0000000..82d2685 --- /dev/null +++ b/vulkan-sys/src/enums/imagetype.rs @@ -0,0 +1,9 @@ +pub use VkImageType::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkImageType { + VK_IMAGE_TYPE_1D = 0, + VK_IMAGE_TYPE_2D = 1, + VK_IMAGE_TYPE_3D = 2, +} diff --git a/vulkan-sys/src/enums/imageusageflags.rs b/vulkan-sys/src/enums/imageusageflags.rs new file mode 100644 index 0000000..3074522 --- /dev/null +++ b/vulkan-sys/src/enums/imageusageflags.rs @@ -0,0 +1,19 @@ +pub use VkImageUsageFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkImageUsageFlags { + VK_IMAGE_USAGE_TRANSFER_SRC_BIT = 0x0000_0001, + VK_IMAGE_USAGE_TRANSFER_DST_BIT = 0x0000_0002, + VK_IMAGE_USAGE_SAMPLED_BIT = 0x0000_0004, + VK_IMAGE_USAGE_STORAGE_BIT = 0x0000_0008, + VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT = 0x0000_0010, + VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT = 0x0000_0020, + VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT = 0x0000_0040, + VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT = 0x0000_0080, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash, Default)] +pub struct VkImageUsageFlagBits(u32); +SetupVkFlags!(VkImageUsageFlags, VkImageUsageFlagBits); diff --git a/vulkan-sys/src/enums/imageviewcreateflags.rs b/vulkan-sys/src/enums/imageviewcreateflags.rs new file mode 100644 index 0000000..b162e7c --- /dev/null +++ b/vulkan-sys/src/enums/imageviewcreateflags.rs @@ -0,0 +1,12 @@ +pub use VkImageViewCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkImageViewCreateFlags { + VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT = 0x0000_0001, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkImageViewCreateFlagBits(u32); +SetupVkFlags!(VkImageViewCreateFlags, VkImageViewCreateFlagBits); diff --git a/vulkan-sys/src/enums/imageviewtype.rs b/vulkan-sys/src/enums/imageviewtype.rs new file mode 100644 index 0000000..8ee39a4 --- /dev/null +++ b/vulkan-sys/src/enums/imageviewtype.rs @@ -0,0 +1,13 @@ +pub use VkImageViewType::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkImageViewType { + VK_IMAGE_VIEW_TYPE_1D = 0, + VK_IMAGE_VIEW_TYPE_2D = 1, + VK_IMAGE_VIEW_TYPE_3D = 2, + VK_IMAGE_VIEW_TYPE_CUBE = 3, + VK_IMAGE_VIEW_TYPE_1D_ARRAY = 4, + VK_IMAGE_VIEW_TYPE_2D_ARRAY = 5, + VK_IMAGE_VIEW_TYPE_CUBE_ARRAY = 6, +} diff --git a/vulkan-sys/src/enums/indextype.rs b/vulkan-sys/src/enums/indextype.rs new file mode 100644 index 0000000..8683540 --- /dev/null +++ b/vulkan-sys/src/enums/indextype.rs @@ -0,0 +1,10 @@ +pub use VkIndexType::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkIndexType { + VK_INDEX_TYPE_UINT16 = 0, + VK_INDEX_TYPE_UINT32 = 1, + VK_INDEX_TYPE_NONE_KHR = 1_000_165_000, + VK_INDEX_TYPE_UINT8_EXT = 1_000_265_000, +} diff --git a/vulkan-sys/src/enums/instancecreateflags.rs b/vulkan-sys/src/enums/instancecreateflags.rs new file mode 100644 index 0000000..fdc53fa --- /dev/null +++ b/vulkan-sys/src/enums/instancecreateflags.rs @@ -0,0 +1,12 @@ +pub use VkInstanceCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, 
Copy, Debug, Eq, PartialEq)] +pub enum VkInstanceCreateFlags { + VK_INSTANCE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkInstanceCreateFlagBits(u32); +SetupVkFlags!(VkInstanceCreateFlags, VkInstanceCreateFlagBits); diff --git a/vulkan-sys/src/enums/internalallocationtype.rs b/vulkan-sys/src/enums/internalallocationtype.rs new file mode 100644 index 0000000..24b8520 --- /dev/null +++ b/vulkan-sys/src/enums/internalallocationtype.rs @@ -0,0 +1,7 @@ +pub use VkInternalAllocationType::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkInternalAllocationType { + VK_INTERNAL_ALLOCATION_TYPE_EXECUTABLE = 0, +} diff --git a/vulkan-sys/src/enums/iossurfacecreateflagsmvk.rs b/vulkan-sys/src/enums/iossurfacecreateflagsmvk.rs new file mode 100644 index 0000000..149fbdc --- /dev/null +++ b/vulkan-sys/src/enums/iossurfacecreateflagsmvk.rs @@ -0,0 +1,12 @@ +pub use VkIOSSurfaceCreateFlagsMVK::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkIOSSurfaceCreateFlagsMVK { + VK_IOS_SURFACE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkIOSSurfaceCreateFlagBitsMVK(u32); +SetupVkFlags!(VkIOSSurfaceCreateFlagsMVK, VkIOSSurfaceCreateFlagBitsMVK); diff --git a/vulkan-sys/src/enums/khr/acceleration_structure_build_type.rs b/vulkan-sys/src/enums/khr/acceleration_structure_build_type.rs new file mode 100644 index 0000000..d5ef70e --- /dev/null +++ b/vulkan-sys/src/enums/khr/acceleration_structure_build_type.rs @@ -0,0 +1,9 @@ +pub use VkAccelerationStructureBuildTypeKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkAccelerationStructureBuildTypeKHR { + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1, + VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2, +} diff --git a/vulkan-sys/src/enums/khr/acceleration_structure_compatibility.rs b/vulkan-sys/src/enums/khr/acceleration_structure_compatibility.rs new file mode 100644 index 0000000..92d5a0f --- /dev/null +++ b/vulkan-sys/src/enums/khr/acceleration_structure_compatibility.rs @@ -0,0 +1,8 @@ +pub use VkAccelerationStructureCompatibilityKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkAccelerationStructureCompatibilityKHR { + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_COMPATIBLE_KHR = 0, + VK_ACCELERATION_STRUCTURE_COMPATIBILITY_INCOMPATIBLE_KHR = 1, +} diff --git a/vulkan-sys/src/enums/khr/acceleration_structure_create_flags.rs b/vulkan-sys/src/enums/khr/acceleration_structure_create_flags.rs new file mode 100644 index 0000000..5350eb8 --- /dev/null +++ b/vulkan-sys/src/enums/khr/acceleration_structure_create_flags.rs @@ -0,0 +1,15 @@ +pub use VkAccelerationStructureCreateFlagsKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkAccelerationStructureCreateFlagsKHR { + VK_ACCELERATION_STRUCTURE_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_KHR = 0x0000_0001, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkAccelerationStructureCreateFlagBitsKHR(u32); +SetupVkFlags!( + VkAccelerationStructureCreateFlagsKHR, + VkAccelerationStructureCreateFlagBitsKHR +); diff --git a/vulkan-sys/src/enums/khr/acceleration_structure_memory_requirements_type.rs b/vulkan-sys/src/enums/khr/acceleration_structure_memory_requirements_type.rs new file mode 100644 index 0000000..aa52f0d --- /dev/null +++ 
b/vulkan-sys/src/enums/khr/acceleration_structure_memory_requirements_type.rs
@@ -0,0 +1,9 @@
+pub use VkAccelerationStructureMemoryRequirementsTypeKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkAccelerationStructureMemoryRequirementsTypeKHR {
+    VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_OBJECT_KHR = 0,
+    VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_BUILD_SCRATCH_KHR = 1,
+    VK_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_TYPE_UPDATE_SCRATCH_KHR = 2,
+}
diff --git a/vulkan-sys/src/enums/khr/acceleration_structure_type.rs b/vulkan-sys/src/enums/khr/acceleration_structure_type.rs
new file mode 100644
index 0000000..02ef7d1
--- /dev/null
+++ b/vulkan-sys/src/enums/khr/acceleration_structure_type.rs
@@ -0,0 +1,9 @@
+pub use VkAccelerationStructureTypeKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkAccelerationStructureTypeKHR {
+    VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR = 0,
+    VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR = 1,
+    VK_ACCELERATION_STRUCTURE_TYPE_GENERIC_KHR = 2,
+}
diff --git a/vulkan-sys/src/enums/khr/build_acceleration_structure_flags.rs b/vulkan-sys/src/enums/khr/build_acceleration_structure_flags.rs
new file mode 100644
index 0000000..60d3416
--- /dev/null
+++ b/vulkan-sys/src/enums/khr/build_acceleration_structure_flags.rs
@@ -0,0 +1,19 @@
+pub use VkBuildAccelerationStructureFlagsKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkBuildAccelerationStructureFlagsKHR {
+    VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR = 0x0000_0001,
+    VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_COMPACTION_BIT_KHR = 0x0000_0002,
+    VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR = 0x0000_0004,
+    VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_BUILD_BIT_KHR = 0x0000_0008,
+    VK_BUILD_ACCELERATION_STRUCTURE_LOW_MEMORY_BIT_KHR = 0x0000_0010,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkBuildAccelerationStructureFlagBitsKHR(u32);
+SetupVkFlags!(
+    VkBuildAccelerationStructureFlagsKHR,
+    VkBuildAccelerationStructureFlagBitsKHR
+);
diff --git a/vulkan-sys/src/enums/khr/build_acceleration_structure_mode.rs b/vulkan-sys/src/enums/khr/build_acceleration_structure_mode.rs
new file mode 100644
index 0000000..81cb160
--- /dev/null
+++ b/vulkan-sys/src/enums/khr/build_acceleration_structure_mode.rs
@@ -0,0 +1,8 @@
+pub use VkBuildAccelerationStructureModeKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkBuildAccelerationStructureModeKHR {
+    VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR = 0,
+    VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR = 1,
+}
diff --git a/vulkan-sys/src/enums/khr/copy_acceleration_structure_mode.rs b/vulkan-sys/src/enums/khr/copy_acceleration_structure_mode.rs
new file mode 100644
index 0000000..77492e8
--- /dev/null
+++ b/vulkan-sys/src/enums/khr/copy_acceleration_structure_mode.rs
@@ -0,0 +1,10 @@
+pub use VkCopyAccelerationStructureModeKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkCopyAccelerationStructureModeKHR {
+    VK_COPY_ACCELERATION_STRUCTURE_MODE_CLONE_KHR = 0,
+    VK_COPY_ACCELERATION_STRUCTURE_MODE_COMPACT_KHR = 1,
+    VK_COPY_ACCELERATION_STRUCTURE_MODE_SERIALIZE_KHR = 2,
+    VK_COPY_ACCELERATION_STRUCTURE_MODE_DESERIALIZE_KHR = 3,
+}
diff --git a/vulkan-sys/src/enums/khr/geometry_flags.rs b/vulkan-sys/src/enums/khr/geometry_flags.rs
new file mode 100644
index 0000000..5ccbd4a
--- /dev/null
+++ b/vulkan-sys/src/enums/khr/geometry_flags.rs
@@ -0,0 +1,13 @@
+pub use
VkGeometryFlagsKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkGeometryFlagsKHR { + VK_GEOMETRY_OPAQUE_BIT_KHR = 0x0000_0001, + VK_GEOMETRY_NO_DUPLICATE_ANY_HIT_INVOCATION_BIT_KHR = 0x0000_0002, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkGeometryFlagBitsKHR(u32); +SetupVkFlags!(VkGeometryFlagsKHR, VkGeometryFlagBitsKHR); diff --git a/vulkan-sys/src/enums/khr/geometry_instance_flags.rs b/vulkan-sys/src/enums/khr/geometry_instance_flags.rs new file mode 100644 index 0000000..2d824d1 --- /dev/null +++ b/vulkan-sys/src/enums/khr/geometry_instance_flags.rs @@ -0,0 +1,15 @@ +pub use VkGeometryInstanceFlagsKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkGeometryInstanceFlagsKHR { + VK_GEOMETRY_INSTANCE_TRIANGLE_FACING_CULL_DISABLE_BIT_KHR = 0x0000_0001, + VK_GEOMETRY_INSTANCE_TRIANGLE_FRONT_COUNTERCLOCKWISE_BIT_KHR = 0x0000_0002, + VK_GEOMETRY_INSTANCE_FORCE_OPAQUE_BIT_KHR = 0x0000_0004, + VK_GEOMETRY_INSTANCE_FORCE_NO_OPAQUE_BIT_KHR = 0x0000_0008, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkGeometryInstanceFlagBitsKHR(u32); +SetupVkFlags!(VkGeometryInstanceFlagsKHR, VkGeometryInstanceFlagBitsKHR); diff --git a/vulkan-sys/src/enums/khr/geometry_type.rs b/vulkan-sys/src/enums/khr/geometry_type.rs new file mode 100644 index 0000000..a03f764 --- /dev/null +++ b/vulkan-sys/src/enums/khr/geometry_type.rs @@ -0,0 +1,9 @@ +pub use VkGeometryTypeKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkGeometryTypeKHR { + VK_GEOMETRY_TYPE_TRIANGLES_KHR = 0, + VK_GEOMETRY_TYPE_AABBS_KHR = 1, + VK_GEOMETRY_TYPE_INSTANCES_KHR = 2, +} diff --git a/vulkan-sys/src/enums/khr/mod.rs b/vulkan-sys/src/enums/khr/mod.rs new file mode 100644 index 0000000..ee76a58 --- /dev/null +++ b/vulkan-sys/src/enums/khr/mod.rs @@ -0,0 +1,15 @@ +pub mod acceleration_structure_build_type; +pub mod acceleration_structure_compatibility; +pub mod acceleration_structure_create_flags; +pub mod acceleration_structure_memory_requirements_type; +pub mod acceleration_structure_type; +pub mod build_acceleration_structure_flags; +pub mod build_acceleration_structure_mode; +pub mod copy_acceleration_structure_mode; +pub mod geometry_flags; +pub mod geometry_instance_flags; +pub mod geometry_type; +pub mod ray_tracing_shader_group_type; +pub mod shader_group_shader; + +pub mod prelude; diff --git a/vulkan-sys/src/enums/khr/prelude.rs b/vulkan-sys/src/enums/khr/prelude.rs new file mode 100644 index 0000000..0943e6c --- /dev/null +++ b/vulkan-sys/src/enums/khr/prelude.rs @@ -0,0 +1,13 @@ +pub use super::acceleration_structure_build_type::*; +pub use super::acceleration_structure_compatibility::*; +pub use super::acceleration_structure_create_flags::*; +pub use super::acceleration_structure_memory_requirements_type::*; +pub use super::acceleration_structure_type::*; +pub use super::build_acceleration_structure_flags::*; +pub use super::build_acceleration_structure_mode::*; +pub use super::copy_acceleration_structure_mode::*; +pub use super::geometry_flags::*; +pub use super::geometry_instance_flags::*; +pub use super::geometry_type::*; +pub use super::ray_tracing_shader_group_type::*; +pub use super::shader_group_shader::*; diff --git a/vulkan-sys/src/enums/khr/ray_tracing_shader_group_type.rs b/vulkan-sys/src/enums/khr/ray_tracing_shader_group_type.rs new file mode 100644 index 0000000..b117ea9 --- /dev/null +++ b/vulkan-sys/src/enums/khr/ray_tracing_shader_group_type.rs @@ -0,0 +1,9 @@ +pub 
use VkRayTracingShaderGroupTypeKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkRayTracingShaderGroupTypeKHR { + VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR = 0, + VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR = 1, + VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR = 2, +} diff --git a/vulkan-sys/src/enums/khr/shader_group_shader.rs b/vulkan-sys/src/enums/khr/shader_group_shader.rs new file mode 100644 index 0000000..01cc65f --- /dev/null +++ b/vulkan-sys/src/enums/khr/shader_group_shader.rs @@ -0,0 +1,10 @@ +pub use VkShaderGroupShaderKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkShaderGroupShaderKHR { + VK_SHADER_GROUP_SHADER_GENERAL_KHR = 0, + VK_SHADER_GROUP_SHADER_CLOSEST_HIT_KHR = 1, + VK_SHADER_GROUP_SHADER_ANY_HIT_KHR = 2, + VK_SHADER_GROUP_SHADER_INTERSECTION_KHR = 3, +} diff --git a/vulkan-sys/src/enums/logicop.rs b/vulkan-sys/src/enums/logicop.rs new file mode 100644 index 0000000..7e434f8 --- /dev/null +++ b/vulkan-sys/src/enums/logicop.rs @@ -0,0 +1,22 @@ +pub use VkLogicOp::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkLogicOp { + VK_LOGIC_OP_CLEAR = 0, + VK_LOGIC_OP_AND = 1, + VK_LOGIC_OP_AND_REVERSE = 2, + VK_LOGIC_OP_COPY = 3, + VK_LOGIC_OP_AND_INVERTED = 4, + VK_LOGIC_OP_NO_OP = 5, + VK_LOGIC_OP_XOR = 6, + VK_LOGIC_OP_OR = 7, + VK_LOGIC_OP_NOR = 8, + VK_LOGIC_OP_EQUIVALENT = 9, + VK_LOGIC_OP_INVERT = 10, + VK_LOGIC_OP_OR_REVERSE = 11, + VK_LOGIC_OP_COPY_INVERTED = 12, + VK_LOGIC_OP_OR_INVERTED = 13, + VK_LOGIC_OP_NAND = 14, + VK_LOGIC_OP_SET = 15, +} diff --git a/vulkan-sys/src/enums/macossurfacecreateflagsmvk.rs b/vulkan-sys/src/enums/macossurfacecreateflagsmvk.rs new file mode 100644 index 0000000..5ffa407 --- /dev/null +++ b/vulkan-sys/src/enums/macossurfacecreateflagsmvk.rs @@ -0,0 +1,15 @@ +pub use VkMacOSSurfaceCreateFlagsMVK::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkMacOSSurfaceCreateFlagsMVK { + VK_MACOS_SURFACE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkMacOSSurfaceCreateFlagBitsMVK(u32); +SetupVkFlags!( + VkMacOSSurfaceCreateFlagsMVK, + VkMacOSSurfaceCreateFlagBitsMVK +); diff --git a/vulkan-sys/src/enums/macros.rs b/vulkan-sys/src/enums/macros.rs new file mode 100644 index 0000000..7aff186 --- /dev/null +++ b/vulkan-sys/src/enums/macros.rs @@ -0,0 +1,255 @@ +macro_rules! 
SetupVkFlags {
+    ($flags: ident, $bits: ident) => {
+        use std::cmp::Ordering;
+        use std::cmp::PartialEq;
+        use std::convert::From;
+        use std::fmt;
+        use std::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign};
+
+        impl fmt::Debug for $bits {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                write!(f, "{}(0x{:08x?})", stringify!($bits), self.0)
+            }
+        }
+
+        impl BitAnd for $bits {
+            type Output = Self;
+
+            fn bitand(self, rhs: Self) -> Self {
+                $bits(self.0 & rhs.0)
+            }
+        }
+
+        impl BitAndAssign for $bits {
+            fn bitand_assign(&mut self, rhs: Self) {
+                *self = $bits(self.0 & rhs.0)
+            }
+        }
+
+        impl BitOr for $bits {
+            type Output = Self;
+
+            fn bitor(self, rhs: Self) -> Self {
+                $bits(self.0 | rhs.0)
+            }
+        }
+
+        impl BitOrAssign for $bits {
+            fn bitor_assign(&mut self, rhs: Self) {
+                *self = $bits(self.0 | rhs.0)
+            }
+        }
+
+        impl BitXor for $bits {
+            type Output = Self;
+
+            fn bitxor(self, rhs: Self) -> Self {
+                $bits(self.0 ^ rhs.0)
+            }
+        }
+
+        impl BitXorAssign for $bits {
+            fn bitxor_assign(&mut self, rhs: Self) {
+                *self = $bits(self.0 ^ rhs.0)
+            }
+        }
+
+        impl PartialEq<u8> for $bits {
+            fn eq(&self, rhs: &u8) -> bool {
+                self.0 == *rhs as u32
+            }
+        }
+
+        impl PartialEq<u16> for $bits {
+            fn eq(&self, rhs: &u16) -> bool {
+                self.0 == *rhs as u32
+            }
+        }
+
+        impl PartialEq<u32> for $bits {
+            fn eq(&self, rhs: &u32) -> bool {
+                self.0 == *rhs
+            }
+        }
+
+        impl PartialEq<i32> for $bits {
+            fn eq(&self, rhs: &i32) -> bool {
+                self.0 as i32 == *rhs
+            }
+        }
+
+        impl PartialEq<u64> for $bits {
+            fn eq(&self, rhs: &u64) -> bool {
+                self.0 as u64 == *rhs
+            }
+        }
+
+        impl PartialEq<$flags> for $bits {
+            fn eq(&self, rhs: &$flags) -> bool {
+                self.0 as u32 == *rhs as u32
+            }
+        }
+
+        impl BitOr<$flags> for $bits {
+            type Output = $bits;
+
+            fn bitor(self, rhs: $flags) -> $bits {
+                $bits(self.0 | rhs as u32)
+            }
+        }
+
+        impl BitOr<$bits> for $flags {
+            type Output = $bits;
+
+            fn bitor(self, rhs: $bits) -> $bits {
+                $bits(self as u32 | rhs.0)
+            }
+        }
+
+        impl BitOr<$flags> for $flags {
+            type Output = $bits;
+
+            fn bitor(self, rhs: $flags) -> $bits {
+                $bits(self as u32 | rhs as u32)
+            }
+        }
+
+        impl BitOr<u32> for $bits {
+            type Output = u32;
+
+            fn bitor(self, rhs: u32) -> u32 {
+                self.0 | rhs
+            }
+        }
+
+        impl BitOrAssign<$flags> for $bits {
+            fn bitor_assign(&mut self, rhs: $flags) {
+                *self = $bits(self.0 | rhs as u32)
+            }
+        }
+
+        impl BitAnd<$flags> for $bits {
+            type Output = $bits;
+
+            fn bitand(self, rhs: $flags) -> $bits {
+                $bits(self.0 & rhs as u32)
+            }
+        }
+
+        impl BitAnd<$bits> for $flags {
+            type Output = $bits;
+
+            fn bitand(self, rhs: $bits) -> $bits {
+                $bits(self as u32 & rhs.0)
+            }
+        }
+
+        impl BitAnd<$flags> for $flags {
+            type Output = $bits;
+
+            fn bitand(self, rhs: $flags) -> $bits {
+                $bits(self as u32 & rhs as u32)
+            }
+        }
+
+        impl BitAnd<u32> for $bits {
+            type Output = u32;
+
+            fn bitand(self, rhs: u32) -> u32 {
+                self.0 & rhs
+            }
+        }
+
+        impl BitAndAssign<$flags> for $bits {
+            fn bitand_assign(&mut self, rhs: $flags) {
+                *self = $bits(self.0 & rhs as u32)
+            }
+        }
+
+        impl BitXor<$flags> for $bits {
+            type Output = $bits;
+
+            fn bitxor(self, rhs: $flags) -> $bits {
+                $bits(self.0 ^ rhs as u32)
+            }
+        }
+
+        impl BitXor<$bits> for $flags {
+            type Output = $bits;
+
+            fn bitxor(self, rhs: $bits) -> $bits {
+                $bits(self as u32 ^ rhs.0)
+            }
+        }
+
+        impl BitXor<$flags> for $flags {
+            type Output = $bits;
+
+            fn bitxor(self, rhs: $flags) -> $bits {
+                $bits(self as u32 ^ rhs as u32)
+            }
+        }
+
+        impl BitXor<u32> for $bits {
+            type Output = u32;
+
+            fn bitxor(self, rhs: u32) -> u32 {
+                self.0 ^ rhs
+            }
+        }
+
+        impl BitXorAssign<$flags> for $bits {
+            fn bitxor_assign(&mut self, rhs: $flags) {
+                *self = $bits(self.0 ^ rhs as u32)
+            }
+        }
+
+        impl Into<u32> for $bits {
+            fn into(self) -> u32 {
+                self.0 as u32
+            }
+        }
+
+        impl From<u32> for $bits {
+            fn from(n: u32) -> $bits {
+                $bits(n)
+            }
+        }
+
+        impl Into<u32> for $flags {
+            fn into(self) -> u32 {
+                self as u32
+            }
+        }
+
+        impl From<$flags> for $bits {
+            fn from(flags: $flags) -> $bits {
+                $bits(flags as u32)
+            }
+        }
+
+        impl PartialOrd for $flags {
+            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+                Some(self.cmp(other))
+            }
+        }
+
+        impl Ord for $flags {
+            fn cmp(&self, other: &Self) -> Ordering {
+                (*self as u32).cmp(&(*other as u32))
+            }
+        }
+
+        impl PartialOrd for $bits {
+            fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+                Some(self.cmp(other))
+            }
+        }
+
+        impl Ord for $bits {
+            fn cmp(&self, other: &Self) -> Ordering {
+                self.0.cmp(&other.0)
+            }
+        }
+    };
+}
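+
+// Illustrative usage (editor's sketch, not from the original commit):
+// `TestFlags`/`TestFlagBits` are hypothetical stand-ins. A real invocation
+// pairs an enum of named bit values with a transparent u32 wrapper, exactly
+// as the enum modules elsewhere in this crate do.
+#[cfg(test)]
+mod setup_vk_flags_tests {
+    #[allow(non_camel_case_types)]
+    #[repr(u32)]
+    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
+    pub enum TestFlags {
+        TEST_A_BIT = 0x0000_0001,
+        TEST_B_BIT = 0x0000_0002,
+    }
+    pub use TestFlags::*;
+
+    #[repr(C)]
+    #[derive(Clone, Copy, Eq, PartialEq, Hash)]
+    pub struct TestFlagBits(u32);
+    SetupVkFlags!(TestFlags, TestFlagBits);
+
+    #[test]
+    fn flags_combine_into_bits() {
+        // OR-ing two enum values yields the wrapper type with both bits set...
+        let bits = TEST_A_BIT | TEST_B_BIT;
+        assert_eq!(bits, 0x0000_0003u32);
+        // ...and AND-ing against a single flag isolates that bit.
+        assert_eq!(bits & TEST_A_BIT, 0x0000_0001u32);
+    }
+}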
diff --git a/vulkan-sys/src/enums/memoryheapflags.rs b/vulkan-sys/src/enums/memoryheapflags.rs
new file mode 100644
index 0000000..b5692a3
--- /dev/null
+++ b/vulkan-sys/src/enums/memoryheapflags.rs
@@ -0,0 +1,12 @@
+pub use VkMemoryHeapFlags::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkMemoryHeapFlags {
+    VK_MEMORY_HEAP_DEVICE_LOCAL_BIT = 0x0000_0001,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash, Default)]
+pub struct VkMemoryHeapFlagBits(u32);
+SetupVkFlags!(VkMemoryHeapFlags, VkMemoryHeapFlagBits);
diff --git a/vulkan-sys/src/enums/memorymapflags.rs b/vulkan-sys/src/enums/memorymapflags.rs
new file mode 100644
index 0000000..7849770
--- /dev/null
+++ b/vulkan-sys/src/enums/memorymapflags.rs
@@ -0,0 +1,12 @@
+pub use VkMemoryMapFlags::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkMemoryMapFlags {
+    VK_MEMORY_MAP_NULL_BIT = 0,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkMemoryMapFlagBits(u32);
+SetupVkFlags!(VkMemoryMapFlags, VkMemoryMapFlagBits);
diff --git a/vulkan-sys/src/enums/memorypropertyflags.rs b/vulkan-sys/src/enums/memorypropertyflags.rs
new file mode 100644
index 0000000..602d61a
--- /dev/null
+++ b/vulkan-sys/src/enums/memorypropertyflags.rs
@@ -0,0 +1,19 @@
+pub use VkMemoryPropertyFlags::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkMemoryPropertyFlags {
+    VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT = 0x0000_0001,
+    VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT = 0x0000_0002,
+    VK_MEMORY_PROPERTY_HOST_COHERENT_BIT = 0x0000_0004,
+    VK_MEMORY_PROPERTY_HOST_CACHED_BIT = 0x0000_0008,
+    VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT = 0x0000_0010,
+    VK_MEMORY_PROPERTY_PROTECTED_BIT = 0x0000_0020,
+    VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD = 0x0000_0040,
+    VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD = 0x0000_0080,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash, Default)]
+pub struct VkMemoryPropertyFlagBits(u32);
+SetupVkFlags!(VkMemoryPropertyFlags, VkMemoryPropertyFlagBits);
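+
+// Editor's sketch of the typical read-side use of the generated flag types:
+// checking whether a memory type exposes every property a host-visible
+// staging buffer needs. `is_host_visible_coherent` and the raw masks below
+// are hypothetical; real masks come from VkPhysicalDeviceMemoryProperties.
+#[cfg(test)]
+mod memory_property_flag_tests {
+    use super::*;
+
+    // A memory type qualifies if it has every wanted bit set.
+    fn is_host_visible_coherent(property_flags: VkMemoryPropertyFlagBits) -> bool {
+        let wanted = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
+        (property_flags & wanted) == wanted
+    }
+
+    #[test]
+    fn detects_host_visible_coherent_types() {
+        // 0x6 == HOST_VISIBLE | HOST_COHERENT; 0x1 == DEVICE_LOCAL only.
+        assert!(is_host_visible_coherent(VkMemoryPropertyFlagBits::from(0x0000_0006u32)));
+        assert!(!is_host_visible_coherent(VkMemoryPropertyFlagBits::from(0x0000_0001u32)));
+    }
+}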
diff --git a/vulkan-sys/src/enums/mirsurfacecreateflagskhr.rs b/vulkan-sys/src/enums/mirsurfacecreateflagskhr.rs
new file mode 100644
index 0000000..f48cf9e
--- /dev/null
+++ b/vulkan-sys/src/enums/mirsurfacecreateflagskhr.rs
@@ -0,0 +1,12 @@
+pub use VkMirSurfaceCreateFlagsKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkMirSurfaceCreateFlagsKHR {
+    VK_MIR_SURFACE_CREATE_NULL_BIT = 0,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkMirSurfaceCreateFlagBitsKHR(u32);
+SetupVkFlags!(VkMirSurfaceCreateFlagsKHR, VkMirSurfaceCreateFlagBitsKHR);
diff --git a/vulkan-sys/src/enums/mod.rs b/vulkan-sys/src/enums/mod.rs
new file mode 100644
index 0000000..041d9a0
--- /dev/null
+++ b/vulkan-sys/src/enums/mod.rs
@@ -0,0 +1,129 @@
+#[macro_use]
+mod macros;
+
+pub mod accessflags;
+pub mod androidsurfacecreateflagskhr;
+pub mod attachmentdescriptionflags;
+pub mod attachmentloadop;
+pub mod attachmentstoreop;
+pub mod blendfactor;
+pub mod blendop;
+pub mod bool32;
+pub mod bordercolor;
+pub mod buffercreateflags;
+pub mod bufferusageflags;
+pub mod bufferviewcreateflags;
+pub mod colorcomponentflags;
+pub mod colorspacekhr;
+pub mod commandbufferlevel;
+pub mod commandbufferresetflags;
+pub mod commandbufferusageflags;
+pub mod commandpoolcreateflags;
+pub mod commandpoolresetflags;
+pub mod commandpooltrimflags;
+pub mod compareop;
+pub mod componentswizzle;
+pub mod compositealphaflagskhr;
+pub mod cullmodeflags;
+pub mod debugreporterrorext;
+pub mod debugreportflagsext;
+pub mod debugreportobjecttypeext;
+pub mod debugutilsmessageseverityflagsext;
+pub mod debugutilsmessagetypeflagsext;
+pub mod debugutilsmessengercallbackdataflagsext;
+pub mod debugutilsmessengercreateflags;
+pub mod dependencyflags;
+pub mod descriptorpoolcreateflags;
+pub mod descriptorpoolresetflags;
+pub mod descriptorsetlayoutcreateflags;
+pub mod descriptortype;
+pub mod devicecreateflags;
+pub mod devicequeuecreateflags;
+pub mod displaymodecreateflagskhr;
+pub mod displayplanealphaflagskhr;
+pub mod displaysurfacecreateflagskhr;
+pub mod dynamicstate;
+pub mod eventcreateflags;
+pub mod externalmemoryhandletypeflags;
+pub mod fencecreateflags;
+pub mod filter;
+pub mod format;
+pub mod formatfeatureflags;
+pub mod framebuffercreateflags;
+pub mod frontface;
+pub mod imageaspectflags;
+pub mod imagecreateflags;
+pub mod imagelayout;
+pub mod imagetiling;
+pub mod imagetype;
+pub mod imageusageflags;
+pub mod imageviewcreateflags;
+pub mod imageviewtype;
+pub mod indextype;
+pub mod instancecreateflags;
+pub mod internalallocationtype;
+pub mod iossurfacecreateflagsmvk;
+pub mod logicop;
+pub mod macossurfacecreateflagsmvk;
+pub mod memoryheapflags;
+pub mod memorymapflags;
+pub mod memorypropertyflags;
+pub mod mirsurfacecreateflagskhr;
+pub mod objecttype;
+pub mod physicaldevicetype;
+pub mod pipelinebindpoint;
+pub mod pipelinecachecreateflags;
+pub mod pipelinecacheheaderversion;
+pub mod pipelinecolorblendstatecreateflags;
+pub mod pipelinecreateflags;
+pub mod pipelinedepthstencilstatecreateflags;
+pub mod pipelinedynamicstatecreateflags;
+pub mod pipelineinputassemblystatecreateflags;
+pub mod pipelinelayoutcreateflags;
+pub mod pipelinemultisamplestatecreateflags;
+pub mod pipelinerasterizationstatecreateflags;
+pub mod pipelineshaderstagecreateflags;
+pub mod pipelinestageflags;
+pub mod pipelinetesselationstatecreateflags;
+pub mod pipelinevertexinputstatecreateflags;
+pub mod pipelineviewportstatecreateflags;
+pub mod polygonmode;
+pub mod presentmodekhr;
+pub mod primitivetopology;
+pub mod querycontrolflags;
+pub mod querypipelinestatisticsflags;
+pub mod querypoolcreateflags;
+pub mod queryresultflags;
+pub mod querytype;
+pub mod queueflags;
+pub mod renderpasscreateflags;
+pub mod result;
+pub mod samplecountflags;
+pub mod sampleraddressmode;
+pub mod samplercreateflags;
+pub mod samplermipmapmode;
+pub mod semaphorecreateflags;
+pub mod shadermodulecreateflags;
+pub mod shaderstageflags;
+pub mod sharingmode;
+pub mod sparseimageformatflags;
+pub mod sparsememorybindflags;
+pub mod stencilfaceflags;
stencilop; +pub mod structuretype; +pub mod subpasscontents; +pub mod subpassdescriptionflags; +pub mod surfacetransformflagskhr; +pub mod swapchaincreateflagskhr; +pub mod systemallocationscope; +pub mod vertexinputrate; +pub mod waylandsurfacecreateflagskhr; +pub mod win32surfacecreateflagskhr; +pub mod xcbsurfacecreateflagskhr; +pub mod xlibsurfacecreateflagskhr; + +pub mod amd; +pub mod ext; +pub mod khr; + +pub mod prelude; diff --git a/vulkan-sys/src/enums/objecttype.rs b/vulkan-sys/src/enums/objecttype.rs new file mode 100644 index 0000000..b51c01b --- /dev/null +++ b/vulkan-sys/src/enums/objecttype.rs @@ -0,0 +1,45 @@ +pub use VkObjectType::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkObjectType { + VK_OBJECT_TYPE_UNKNOWN = 0, + VK_OBJECT_TYPE_INSTANCE = 1, + VK_OBJECT_TYPE_PHYSICAL_DEVICE = 2, + VK_OBJECT_TYPE_DEVICE = 3, + VK_OBJECT_TYPE_QUEUE = 4, + VK_OBJECT_TYPE_SEMAPHORE = 5, + VK_OBJECT_TYPE_COMMAND_BUFFER = 6, + VK_OBJECT_TYPE_FENCE = 7, + VK_OBJECT_TYPE_DEVICE_MEMORY = 8, + VK_OBJECT_TYPE_BUFFER = 9, + VK_OBJECT_TYPE_IMAGE = 10, + VK_OBJECT_TYPE_EVENT = 11, + VK_OBJECT_TYPE_QUERY_POOL = 12, + VK_OBJECT_TYPE_BUFFER_VIEW = 13, + VK_OBJECT_TYPE_IMAGE_VIEW = 14, + VK_OBJECT_TYPE_SHADER_MODULE = 15, + VK_OBJECT_TYPE_PIPELINE_CACHE = 16, + VK_OBJECT_TYPE_PIPELINE_LAYOUT = 17, + VK_OBJECT_TYPE_RENDER_PASS = 18, + VK_OBJECT_TYPE_PIPELINE = 19, + VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT = 20, + VK_OBJECT_TYPE_SAMPLER = 21, + VK_OBJECT_TYPE_DESCRIPTOR_POOL = 22, + VK_OBJECT_TYPE_DESCRIPTOR_SET = 23, + VK_OBJECT_TYPE_FRAMEBUFFER = 24, + VK_OBJECT_TYPE_COMMAND_POOL = 25, + VK_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION = 1_000_156_000, + VK_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE = 1_000_085_000, + VK_OBJECT_TYPE_SURFACE_KHR = 1_000_000_000, + VK_OBJECT_TYPE_SWAPCHAIN_KHR = 1_000_001_000, + VK_OBJECT_TYPE_DISPLAY_KHR = 1_000_002_000, + VK_OBJECT_TYPE_DISPLAY_MODE_KHR = 1_000_002_001, + VK_OBJECT_TYPE_DEBUG_REPORT_CALLBACK_EXT = 1_000_011_000, + VK_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1_000_128_000, + VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR = 1_000_165_000, + VK_OBJECT_TYPE_VALIDATION_CACHE_EXT = 1_000_160_000, + VK_OBJECT_TYPE_PERFORMANCE_CONFIGURATION_INTEL = 1_000_210_000, + VK_OBJECT_TYPE_DEFERRED_OPERATION_KHR = 1_000_268_000, + VK_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NV = 1_000_277_000, +} diff --git a/vulkan-sys/src/enums/physicaldevicetype.rs b/vulkan-sys/src/enums/physicaldevicetype.rs new file mode 100644 index 0000000..f21c603 --- /dev/null +++ b/vulkan-sys/src/enums/physicaldevicetype.rs @@ -0,0 +1,11 @@ +pub use VkPhysicalDeviceType::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPhysicalDeviceType { + VK_PHYSICAL_DEVICE_TYPE_OTHER = 0, + VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU = 1, + VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU = 2, + VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU = 3, + VK_PHYSICAL_DEVICE_TYPE_CPU = 4, +} diff --git a/vulkan-sys/src/enums/pipelinebindpoint.rs b/vulkan-sys/src/enums/pipelinebindpoint.rs new file mode 100644 index 0000000..42c97c8 --- /dev/null +++ b/vulkan-sys/src/enums/pipelinebindpoint.rs @@ -0,0 +1,9 @@ +pub use VkPipelineBindPoint::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineBindPoint { + VK_PIPELINE_BIND_POINT_GRAPHICS = 0, + VK_PIPELINE_BIND_POINT_COMPUTE = 1, + VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR = 1_000_165_000, +} diff --git a/vulkan-sys/src/enums/pipelinecachecreateflags.rs b/vulkan-sys/src/enums/pipelinecachecreateflags.rs new file mode 100644 index 
0000000..27e0dbb --- /dev/null +++ b/vulkan-sys/src/enums/pipelinecachecreateflags.rs @@ -0,0 +1,12 @@ +pub use VkPipelineCacheCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineCacheCreateFlags { + VK_PIPELINE_CACHE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineCacheCreateFlagBits(u32); +SetupVkFlags!(VkPipelineCacheCreateFlags, VkPipelineCacheCreateFlagBits); diff --git a/vulkan-sys/src/enums/pipelinecacheheaderversion.rs b/vulkan-sys/src/enums/pipelinecacheheaderversion.rs new file mode 100644 index 0000000..25c4236 --- /dev/null +++ b/vulkan-sys/src/enums/pipelinecacheheaderversion.rs @@ -0,0 +1,7 @@ +pub use VkPipelineCacheHeaderVersion::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineCacheHeaderVersion { + VK_PIPELINE_CACHE_HEADER_VERSION_ONE = 1, +} diff --git a/vulkan-sys/src/enums/pipelinecolorblendstatecreateflags.rs b/vulkan-sys/src/enums/pipelinecolorblendstatecreateflags.rs new file mode 100644 index 0000000..230c72e --- /dev/null +++ b/vulkan-sys/src/enums/pipelinecolorblendstatecreateflags.rs @@ -0,0 +1,15 @@ +pub use VkPipelineColorBlendStateCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineColorBlendStateCreateFlags { + VK_PIPELINE_COLOR_BLEND_STATE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineColorBlendStateCreateFlagBits(u32); +SetupVkFlags!( + VkPipelineColorBlendStateCreateFlags, + VkPipelineColorBlendStateCreateFlagBits +); diff --git a/vulkan-sys/src/enums/pipelinecreateflags.rs b/vulkan-sys/src/enums/pipelinecreateflags.rs new file mode 100644 index 0000000..b7daec7 --- /dev/null +++ b/vulkan-sys/src/enums/pipelinecreateflags.rs @@ -0,0 +1,30 @@ +pub use VkPipelineCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineCreateFlags { + VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT = 0x0000_0001, + VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT = 0x0000_0002, + VK_PIPELINE_CREATE_DERIVATIVE_BIT = 0x0000_0004, + VK_PIPELINE_CREATE_VIEW_INDEX_FROM_DEVICE_INDEX_BIT = 0x0000_0008, + VK_PIPELINE_CREATE_DISPATCH_BASE_BIT = 0x0000_0010, + VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV = 0x0000_0020, + VK_PIPELINE_CREATE_CAPTURE_STATISTICS_BIT_KHR = 0x0000_0040, + VK_PIPELINE_CREATE_CAPTURE_INTERNAL_REPRESENTATIONS_BIT_KHR = 0x0000_0080, + VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT = 0x0000_0100, + VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT = 0x0000_0200, + VK_PIPELINE_CREATE_LIBRARY_BIT_KHR = 0x0000_0800, + VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR = 0x0000_1000, + VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR = 0x0000_2000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR = 0x0000_4000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR = 0x0000_8000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR = 0x0001_0000, + VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR = 0x0002_0000, + VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR = 0x0008_0000, + VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV = 0x0004_0000, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineCreateFlagBits(u32); +SetupVkFlags!(VkPipelineCreateFlags, VkPipelineCreateFlagBits); diff --git a/vulkan-sys/src/enums/pipelinedepthstencilstatecreateflags.rs 
b/vulkan-sys/src/enums/pipelinedepthstencilstatecreateflags.rs new file mode 100644 index 0000000..9796105 --- /dev/null +++ b/vulkan-sys/src/enums/pipelinedepthstencilstatecreateflags.rs @@ -0,0 +1,15 @@ +pub use VkPipelineDepthStencilStateCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineDepthStencilStateCreateFlags { + VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineDepthStencilStateCreateFlagBits(u32); +SetupVkFlags!( + VkPipelineDepthStencilStateCreateFlags, + VkPipelineDepthStencilStateCreateFlagBits +); diff --git a/vulkan-sys/src/enums/pipelinedynamicstatecreateflags.rs b/vulkan-sys/src/enums/pipelinedynamicstatecreateflags.rs new file mode 100644 index 0000000..c51fa94 --- /dev/null +++ b/vulkan-sys/src/enums/pipelinedynamicstatecreateflags.rs @@ -0,0 +1,15 @@ +pub use VkPipelineDynamicStateCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineDynamicStateCreateFlags { + VK_PIPELINE_DYNAMIC_STATE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineDynamicStateCreateFlagBits(u32); +SetupVkFlags!( + VkPipelineDynamicStateCreateFlags, + VkPipelineDynamicStateCreateFlagBits +); diff --git a/vulkan-sys/src/enums/pipelineinputassemblystatecreateflags.rs b/vulkan-sys/src/enums/pipelineinputassemblystatecreateflags.rs new file mode 100644 index 0000000..5ee6763 --- /dev/null +++ b/vulkan-sys/src/enums/pipelineinputassemblystatecreateflags.rs @@ -0,0 +1,15 @@ +pub use VkPipelineInputAssemblyStateCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineInputAssemblyStateCreateFlags { + VK_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineInputAssemblyStateCreateFlagBits(u32); +SetupVkFlags!( + VkPipelineInputAssemblyStateCreateFlags, + VkPipelineInputAssemblyStateCreateFlagBits +); diff --git a/vulkan-sys/src/enums/pipelinelayoutcreateflags.rs b/vulkan-sys/src/enums/pipelinelayoutcreateflags.rs new file mode 100644 index 0000000..16a1796 --- /dev/null +++ b/vulkan-sys/src/enums/pipelinelayoutcreateflags.rs @@ -0,0 +1,12 @@ +pub use VkPipelineLayoutCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineLayoutCreateFlags { + VK_PIPELINE_LAYOUT_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineLayoutCreateFlagBits(u32); +SetupVkFlags!(VkPipelineLayoutCreateFlags, VkPipelineLayoutCreateFlagBits); diff --git a/vulkan-sys/src/enums/pipelinemultisamplestatecreateflags.rs b/vulkan-sys/src/enums/pipelinemultisamplestatecreateflags.rs new file mode 100644 index 0000000..9e63351 --- /dev/null +++ b/vulkan-sys/src/enums/pipelinemultisamplestatecreateflags.rs @@ -0,0 +1,15 @@ +pub use VkPipelineMultisampleStateCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineMultisampleStateCreateFlags { + VK_PIPELINE_MULTISAMPLE_STATE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineMultisampleStateCreateFlagBits(u32); +SetupVkFlags!( + VkPipelineMultisampleStateCreateFlags, + VkPipelineMultisampleStateCreateFlagBits +); diff --git a/vulkan-sys/src/enums/pipelinerasterizationstatecreateflags.rs b/vulkan-sys/src/enums/pipelinerasterizationstatecreateflags.rs new file 
mode 100644 index 0000000..8fde54d --- /dev/null +++ b/vulkan-sys/src/enums/pipelinerasterizationstatecreateflags.rs @@ -0,0 +1,15 @@ +pub use VkPipelineRasterizationStateCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineRasterizationStateCreateFlags { + VK_PIPELINE_RASTERIZATION_STATE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineRasterizationStateCreateFlagBits(u32); +SetupVkFlags!( + VkPipelineRasterizationStateCreateFlags, + VkPipelineRasterizationStateCreateFlagBits +); diff --git a/vulkan-sys/src/enums/pipelineshaderstagecreateflags.rs b/vulkan-sys/src/enums/pipelineshaderstagecreateflags.rs new file mode 100644 index 0000000..5fe793e --- /dev/null +++ b/vulkan-sys/src/enums/pipelineshaderstagecreateflags.rs @@ -0,0 +1,15 @@ +pub use VkPipelineShaderStageCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineShaderStageCreateFlags { + VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineShaderStageCreateFlagBits(u32); +SetupVkFlags!( + VkPipelineShaderStageCreateFlags, + VkPipelineShaderStageCreateFlagBits +); diff --git a/vulkan-sys/src/enums/pipelinestageflags.rs b/vulkan-sys/src/enums/pipelinestageflags.rs new file mode 100644 index 0000000..f6a6aac --- /dev/null +++ b/vulkan-sys/src/enums/pipelinestageflags.rs @@ -0,0 +1,38 @@ +pub use VkPipelineStageFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPipelineStageFlags { + VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT = 0x0000_0001, + VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT = 0x0000_0002, + VK_PIPELINE_STAGE_VERTEX_INPUT_BIT = 0x0000_0004, + VK_PIPELINE_STAGE_VERTEX_SHADER_BIT = 0x0000_0008, + VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT = 0x0000_0010, + VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT = 0x0000_0020, + VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT = 0x0000_0040, + VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT = 0x0000_0080, + VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT = 0x0000_0100, + VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT = 0x0000_0200, + VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT = 0x0000_0400, + VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT = 0x0000_0800, + VK_PIPELINE_STAGE_TRANSFER_BIT = 0x0000_1000, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT = 0x0000_2000, + VK_PIPELINE_STAGE_HOST_BIT = 0x0000_4000, + VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT = 0x0000_8000, + VK_PIPELINE_STAGE_ALL_COMMANDS_BIT = 0x0001_0000, + VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT = 0x0100_0000, + VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x0004_0000, + VK_PIPELINE_STAGE_COMMAND_PROCESS_BIT_NVX = 0x0002_0000, + VK_PIPELINE_STAGE_SHADING_RATE_IMAGE_BIT_NV = 0x0040_0000, + VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR = 0x0020_0000, + VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR = 0x0200_0000, + VK_PIPELINE_STAGE_TASK_SHADER_BIT_NV = 0x0008_0000, + VK_PIPELINE_STAGE_MESH_SHADER_BIT_NV = 0x0010_0000, + VK_PIPELINE_STAGE_FRAGMENT_DENSITY_PROCESS_BIT_EXT = 0x0080_0000, + VK_PIPELINE_STAGE_FLAG_BITS_MAX_ENUM = 0x7FFF_FFFF, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkPipelineStageFlagBits(u32); +SetupVkFlags!(VkPipelineStageFlags, VkPipelineStageFlagBits); diff --git a/vulkan-sys/src/enums/pipelinetesselationstatecreateflags.rs b/vulkan-sys/src/enums/pipelinetesselationstatecreateflags.rs new file mode 100644 index 0000000..26fc8a7 --- /dev/null +++ 
b/vulkan-sys/src/enums/pipelinetesselationstatecreateflags.rs
@@ -0,0 +1,15 @@
+pub use VkPipelineTessellationStateCreateFlags::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkPipelineTessellationStateCreateFlags {
+    VK_PIPELINE_TESSELLATION_STATE_CREATE_NULL_BIT = 0,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkPipelineTessellationStateCreateFlagBits(u32);
+SetupVkFlags!(
+    VkPipelineTessellationStateCreateFlags,
+    VkPipelineTessellationStateCreateFlagBits
+);
diff --git a/vulkan-sys/src/enums/pipelinevertexinputstatecreateflags.rs b/vulkan-sys/src/enums/pipelinevertexinputstatecreateflags.rs
new file mode 100644
index 0000000..52374a2
--- /dev/null
+++ b/vulkan-sys/src/enums/pipelinevertexinputstatecreateflags.rs
@@ -0,0 +1,15 @@
+pub use VkPipelineVertexInputStateCreateFlags::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkPipelineVertexInputStateCreateFlags {
+    VK_PIPELINE_VERTEX_INPUT_STATE_CREATE_NULL_BIT = 0,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkPipelineVertexInputStateCreateFlagBits(u32);
+SetupVkFlags!(
+    VkPipelineVertexInputStateCreateFlags,
+    VkPipelineVertexInputStateCreateFlagBits
+);
diff --git a/vulkan-sys/src/enums/pipelineviewportstatecreateflags.rs b/vulkan-sys/src/enums/pipelineviewportstatecreateflags.rs
new file mode 100644
index 0000000..b08d99a
--- /dev/null
+++ b/vulkan-sys/src/enums/pipelineviewportstatecreateflags.rs
@@ -0,0 +1,15 @@
+pub use VkPipelineViewportStateCreateFlags::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkPipelineViewportStateCreateFlags {
+    VK_PIPELINE_VIEWPORT_STATE_CREATE_NULL_BIT = 0,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkPipelineViewportStateCreateFlagBits(u32);
+SetupVkFlags!(
+    VkPipelineViewportStateCreateFlags,
+    VkPipelineViewportStateCreateFlagBits
+);
diff --git a/vulkan-sys/src/enums/polygonmode.rs b/vulkan-sys/src/enums/polygonmode.rs
new file mode 100644
index 0000000..59572e6
--- /dev/null
+++ b/vulkan-sys/src/enums/polygonmode.rs
@@ -0,0 +1,9 @@
+pub use VkPolygonMode::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkPolygonMode {
+    VK_POLYGON_MODE_FILL = 0,
+    VK_POLYGON_MODE_LINE = 1,
+    VK_POLYGON_MODE_POINT = 2,
+}
diff --git a/vulkan-sys/src/enums/prelude.rs b/vulkan-sys/src/enums/prelude.rs
new file mode 100644
index 0000000..28ad5bf
--- /dev/null
+++ b/vulkan-sys/src/enums/prelude.rs
@@ -0,0 +1,124 @@
+pub use super::accessflags::*;
+pub use super::androidsurfacecreateflagskhr::*;
+pub use super::attachmentdescriptionflags::*;
+pub use super::attachmentloadop::*;
+pub use super::attachmentstoreop::*;
+pub use super::blendfactor::*;
+pub use super::blendop::*;
+pub use super::bool32::*;
+pub use super::bordercolor::*;
+pub use super::buffercreateflags::*;
+pub use super::bufferusageflags::*;
+pub use super::bufferviewcreateflags::*;
+pub use super::colorcomponentflags::*;
+pub use super::colorspacekhr::*;
+pub use super::commandbufferlevel::*;
+pub use super::commandbufferresetflags::*;
+pub use super::commandbufferusageflags::*;
+pub use super::commandpoolcreateflags::*;
+pub use super::commandpoolresetflags::*;
+pub use super::commandpooltrimflags::*;
+pub use super::compareop::*;
+pub use super::componentswizzle::*;
+pub use super::compositealphaflagskhr::*;
+pub use super::cullmodeflags::*;
+pub use super::debugreporterrorext::*;
+pub use super::debugreportflagsext::*;
+pub use
super::debugreportobjecttypeext::*; +pub use super::debugutilsmessageseverityflagsext::*; +pub use super::debugutilsmessagetypeflagsext::*; +pub use super::debugutilsmessengercallbackdataflagsext::*; +pub use super::debugutilsmessengercreateflags::*; +pub use super::dependencyflags::*; +pub use super::descriptorpoolcreateflags::*; +pub use super::descriptorpoolresetflags::*; +pub use super::descriptorsetlayoutcreateflags::*; +pub use super::descriptortype::*; +pub use super::devicecreateflags::*; +pub use super::devicequeuecreateflags::*; +pub use super::displaymodecreateflagskhr::*; +pub use super::displayplanealphaflagskhr::*; +pub use super::displaysurfacecreateflagskhr::*; +pub use super::dynamicstate::*; +pub use super::eventcreateflags::*; +pub use super::externalmemoryhandletypeflags::*; +pub use super::fencecreateflags::*; +pub use super::filter::*; +pub use super::format::*; +pub use super::formatfeatureflags::*; +pub use super::framebuffercreateflags::*; +pub use super::frontface::*; +pub use super::imageaspectflags::*; +pub use super::imagecreateflags::*; +pub use super::imagelayout::*; +pub use super::imagetiling::*; +pub use super::imagetype::*; +pub use super::imageusageflags::*; +pub use super::imageviewcreateflags::*; +pub use super::imageviewtype::*; +pub use super::indextype::*; +pub use super::instancecreateflags::*; +pub use super::internalallocationtype::*; +pub use super::iossurfacecreateflagsmvk::*; +pub use super::logicop::*; +pub use super::macossurfacecreateflagsmvk::*; +pub use super::memoryheapflags::*; +pub use super::memorymapflags::*; +pub use super::memorypropertyflags::*; +pub use super::mirsurfacecreateflagskhr::*; +pub use super::objecttype::*; +pub use super::physicaldevicetype::*; +pub use super::pipelinebindpoint::*; +pub use super::pipelinecachecreateflags::*; +pub use super::pipelinecacheheaderversion::*; +pub use super::pipelinecolorblendstatecreateflags::*; +pub use super::pipelinecreateflags::*; +pub use super::pipelinedepthstencilstatecreateflags::*; +pub use super::pipelinedynamicstatecreateflags::*; +pub use super::pipelineinputassemblystatecreateflags::*; +pub use super::pipelinelayoutcreateflags::*; +pub use super::pipelinemultisamplestatecreateflags::*; +pub use super::pipelinerasterizationstatecreateflags::*; +pub use super::pipelineshaderstagecreateflags::*; +pub use super::pipelinestageflags::*; +pub use super::pipelinetesselationstatecreateflags::*; +pub use super::pipelinevertexinputstatecreateflags::*; +pub use super::pipelineviewportstatecreateflags::*; +pub use super::polygonmode::*; +pub use super::presentmodekhr::*; +pub use super::primitivetopology::*; +pub use super::querycontrolflags::*; +pub use super::querypipelinestatisticsflags::*; +pub use super::querypoolcreateflags::*; +pub use super::queryresultflags::*; +pub use super::querytype::*; +pub use super::queueflags::*; +pub use super::renderpasscreateflags::*; +pub use super::result::*; +pub use super::samplecountflags::*; +pub use super::sampleraddressmode::*; +pub use super::samplercreateflags::*; +pub use super::samplermipmapmode::*; +pub use super::semaphorecreateflags::*; +pub use super::shadermodulecreateflags::*; +pub use super::shaderstageflags::*; +pub use super::sharingmode::*; +pub use super::sparseimageformatflags::*; +pub use super::sparsememorybindflags::*; +pub use super::stencilfaceflags::*; +pub use super::stencilop::*; +pub use super::structuretype::*; +pub use super::subpasscontents::*; +pub use super::subpassdescriptionflags::*; +pub use 
super::surfacetransformflagskhr::*; +pub use super::swapchaincreateflagskhr::*; +pub use super::systemallocationscope::*; +pub use super::vertexinputrate::*; +pub use super::waylandsurfacecreateflagskhr::*; +pub use super::win32surfacecreateflagskhr::*; +pub use super::xcbsurfacecreateflagskhr::*; +pub use super::xlibsurfacecreateflagskhr::*; + +pub use super::amd::prelude::*; +pub use super::ext::prelude::*; +pub use super::khr::prelude::*; diff --git a/vulkan-sys/src/enums/presentmodekhr.rs b/vulkan-sys/src/enums/presentmodekhr.rs new file mode 100644 index 0000000..f0b35e4 --- /dev/null +++ b/vulkan-sys/src/enums/presentmodekhr.rs @@ -0,0 +1,10 @@ +pub use VkPresentModeKHR::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPresentModeKHR { + VK_PRESENT_MODE_IMMEDIATE_KHR = 0, + VK_PRESENT_MODE_MAILBOX_KHR = 1, + VK_PRESENT_MODE_FIFO_KHR = 2, + VK_PRESENT_MODE_FIFO_RELAXED_KHR = 3, +} diff --git a/vulkan-sys/src/enums/primitivetopology.rs b/vulkan-sys/src/enums/primitivetopology.rs new file mode 100644 index 0000000..0527596 --- /dev/null +++ b/vulkan-sys/src/enums/primitivetopology.rs @@ -0,0 +1,17 @@ +pub use VkPrimitiveTopology::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkPrimitiveTopology { + VK_PRIMITIVE_TOPOLOGY_POINT_LIST = 0, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST = 1, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP = 2, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST = 3, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP = 4, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN = 5, + VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY = 6, + VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY = 7, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY = 8, + VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY = 9, + VK_PRIMITIVE_TOPOLOGY_PATCH_LIST = 10, +} diff --git a/vulkan-sys/src/enums/querycontrolflags.rs b/vulkan-sys/src/enums/querycontrolflags.rs new file mode 100644 index 0000000..f81681d --- /dev/null +++ b/vulkan-sys/src/enums/querycontrolflags.rs @@ -0,0 +1,12 @@ +pub use VkQueryControlFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkQueryControlFlags { + VK_QUERY_CONTROL_PRECISE_BIT = 0x0000_0001, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkQueryControlFlagBits(u32); +SetupVkFlags!(VkQueryControlFlags, VkQueryControlFlagBits); diff --git a/vulkan-sys/src/enums/querypipelinestatisticsflags.rs b/vulkan-sys/src/enums/querypipelinestatisticsflags.rs new file mode 100644 index 0000000..fb2e46e --- /dev/null +++ b/vulkan-sys/src/enums/querypipelinestatisticsflags.rs @@ -0,0 +1,25 @@ +pub use VkQueryPipelineStatisticFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkQueryPipelineStatisticFlags { + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_VERTICES_BIT = 0x0000_0001, + VK_QUERY_PIPELINE_STATISTIC_INPUT_ASSEMBLY_PRIMITIVES_BIT = 0x0000_0002, + VK_QUERY_PIPELINE_STATISTIC_VERTEX_SHADER_INVOCATIONS_BIT = 0x0000_0004, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_INVOCATIONS_BIT = 0x0000_0008, + VK_QUERY_PIPELINE_STATISTIC_GEOMETRY_SHADER_PRIMITIVES_BIT = 0x0000_0010, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT = 0x0000_0020, + VK_QUERY_PIPELINE_STATISTIC_CLIPPING_PRIMITIVES_BIT = 0x0000_0040, + VK_QUERY_PIPELINE_STATISTIC_FRAGMENT_SHADER_INVOCATIONS_BIT = 0x0000_0080, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_CONTROL_SHADER_PATCHES_BIT = 0x0000_0100, + VK_QUERY_PIPELINE_STATISTIC_TESSELLATION_EVALUATION_SHADER_INVOCATIONS_BIT = 0x0000_0200, + 
VK_QUERY_PIPELINE_STATISTIC_COMPUTE_SHADER_INVOCATIONS_BIT = 0x0000_0400, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkQueryPipelineStatisticFlagBits(u32); +SetupVkFlags!( + VkQueryPipelineStatisticFlags, + VkQueryPipelineStatisticFlagBits +); diff --git a/vulkan-sys/src/enums/querypoolcreateflags.rs b/vulkan-sys/src/enums/querypoolcreateflags.rs new file mode 100644 index 0000000..9263618 --- /dev/null +++ b/vulkan-sys/src/enums/querypoolcreateflags.rs @@ -0,0 +1,12 @@ +pub use VkQueryPoolCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkQueryPoolCreateFlags { + VK_QUERY_POOL_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkQueryPoolCreateFlagBits(u32); +SetupVkFlags!(VkQueryPoolCreateFlags, VkQueryPoolCreateFlagBits); diff --git a/vulkan-sys/src/enums/queryresultflags.rs b/vulkan-sys/src/enums/queryresultflags.rs new file mode 100644 index 0000000..3d92196 --- /dev/null +++ b/vulkan-sys/src/enums/queryresultflags.rs @@ -0,0 +1,15 @@ +pub use VkQueryResultFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkQueryResultFlags { + VK_QUERY_RESULT_64_BIT = 0x0000_0001, + VK_QUERY_RESULT_WAIT_BIT = 0x0000_0002, + VK_QUERY_RESULT_WITH_AVAILABILITY_BIT = 0x0000_0004, + VK_QUERY_RESULT_PARTIAL_BIT = 0x0000_0008, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkQueryResultFlagBits(u32); +SetupVkFlags!(VkQueryResultFlags, VkQueryResultFlagBits); diff --git a/vulkan-sys/src/enums/querytype.rs b/vulkan-sys/src/enums/querytype.rs new file mode 100644 index 0000000..bdbde7f --- /dev/null +++ b/vulkan-sys/src/enums/querytype.rs @@ -0,0 +1,12 @@ +pub use VkQueryType::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkQueryType { + VK_QUERY_TYPE_OCCLUSION = 0, + VK_QUERY_TYPE_PIPELINE_STATISTICS = 1, + VK_QUERY_TYPE_TIMESTAMP = 2, + VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT = 1_000_028_004, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_COMPACTED_SIZE_KHR = 1_000_165_000, + VK_QUERY_TYPE_ACCELERATION_STRUCTURE_SERIALIZATION_SIZE_KHR = 1_000_150_000, +} diff --git a/vulkan-sys/src/enums/queueflags.rs b/vulkan-sys/src/enums/queueflags.rs new file mode 100644 index 0000000..f98fef7 --- /dev/null +++ b/vulkan-sys/src/enums/queueflags.rs @@ -0,0 +1,15 @@ +pub use VkQueueFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkQueueFlags { + VK_QUEUE_GRAPHICS_BIT = 0x0000_0001, + VK_QUEUE_COMPUTE_BIT = 0x0000_0002, + VK_QUEUE_TRANSFER_BIT = 0x0000_0004, + VK_QUEUE_SPARSE_BINDING_BIT = 0x0000_0008, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkQueueFlagBits(u32); +SetupVkFlags!(VkQueueFlags, VkQueueFlagBits); diff --git a/vulkan-sys/src/enums/renderpasscreateflags.rs b/vulkan-sys/src/enums/renderpasscreateflags.rs new file mode 100644 index 0000000..ce83836 --- /dev/null +++ b/vulkan-sys/src/enums/renderpasscreateflags.rs @@ -0,0 +1,12 @@ +pub use VkRenderPassCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkRenderPassCreateFlags { + VK_RENDERPASS_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkRenderPassCreateFlagBits(u32); +SetupVkFlags!(VkRenderPassCreateFlags, VkRenderPassCreateFlagBits); diff --git a/vulkan-sys/src/enums/result.rs b/vulkan-sys/src/enums/result.rs new file mode 100644 index 0000000..189652a --- /dev/null +++ b/vulkan-sys/src/enums/result.rs @@ -0,0 +1,53 
@@
+use std::fmt::{Display, Formatter};
+
+pub use VkResult::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkResult {
+    VK_SUCCESS = 0,
+    VK_NOT_READY = 1,
+    VK_TIMEOUT = 2,
+    VK_EVENT_SET = 3,
+    VK_EVENT_RESET = 4,
+    VK_INCOMPLETE = 5,
+    VK_ERROR_OUT_OF_HOST_MEMORY = -1i32 as u32,
+    VK_ERROR_OUT_OF_DEVICE_MEMORY = -2i32 as u32,
+    VK_ERROR_INITIALIZATION_FAILED = -3i32 as u32,
+    VK_ERROR_DEVICE_LOST = -4i32 as u32,
+    VK_ERROR_MEMORY_MAP_FAILED = -5i32 as u32,
+    VK_ERROR_LAYER_NOT_PRESENT = -6i32 as u32,
+    VK_ERROR_EXTENSION_NOT_PRESENT = -7i32 as u32,
+    VK_ERROR_FEATURE_NOT_PRESENT = -8i32 as u32,
+    VK_ERROR_INCOMPATIBLE_DRIVER = -9i32 as u32,
+    VK_ERROR_TOO_MANY_OBJECTS = -10i32 as u32,
+    VK_ERROR_FORMAT_NOT_SUPPORTED = -11i32 as u32,
+    VK_ERROR_FRAGMENTED_POOL = -12i32 as u32,
+    VK_ERROR_OUT_OF_POOL_MEMORY = -1_000_069_000i32 as u32,
+    VK_ERROR_INVALID_EXTERNAL_HANDLE = -1_000_072_003i32 as u32,
+    VK_ERROR_SURFACE_LOST_KHR = -1_000_000_000i32 as u32,
+    VK_ERROR_NATIVE_WINDOW_IN_USE_KHR = -1_000_000_001i32 as u32,
+    VK_SUBOPTIMAL_KHR = 1_000_001_003i32 as u32,
+    VK_ERROR_OUT_OF_DATE_KHR = -1_000_001_004i32 as u32,
+    VK_ERROR_INCOMPATIBLE_DISPLAY_KHR = -1_000_003_001i32 as u32,
+    VK_ERROR_VALIDATION_FAILED_EXT = -1_000_011_001i32 as u32,
+    VK_ERROR_INVALID_SHADER_NV = -1_000_012_000i32 as u32,
+    VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT = -1_000_158_000i32 as u32,
+    VK_ERROR_FRAGMENTATION_EXT = -1_000_161_000i32 as u32,
+    VK_ERROR_NOT_PERMITTED_EXT = -1_000_174_001i32 as u32,
+    VK_ERROR_INVALID_DEVICE_ADDRESS_EXT = -1_000_244_000i32 as u32,
+    VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT = -1_000_255_000i32 as u32,
+    VK_THREAD_IDLE_KHR = 1_000_268_000,
+    VK_THREAD_DONE_KHR = 1_000_268_001,
+    VK_OPERATION_DEFERRED_KHR = 1_000_268_002,
+    VK_OPERATION_NOT_DEFERRED_KHR = 1_000_268_003,
+    VK_ERROR_PIPELINE_COMPILE_REQUIRED_EXT = 1_000_297_000,
+}
+
+impl Display for VkResult {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl std::error::Error for VkResult {}
diff --git a/vulkan-sys/src/enums/samplecountflags.rs b/vulkan-sys/src/enums/samplecountflags.rs
new file mode 100644
index 0000000..b7bae78
--- /dev/null
+++ b/vulkan-sys/src/enums/samplecountflags.rs
@@ -0,0 +1,46 @@
+pub use VkSampleCountFlags::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkSampleCountFlags {
+    VK_SAMPLE_COUNT_1_BIT = 0x0000_0001,
+    VK_SAMPLE_COUNT_2_BIT = 0x0000_0002,
+    VK_SAMPLE_COUNT_4_BIT = 0x0000_0004,
+    VK_SAMPLE_COUNT_8_BIT = 0x0000_0008,
+    VK_SAMPLE_COUNT_16_BIT = 0x0000_0010,
+    VK_SAMPLE_COUNT_32_BIT = 0x0000_0020,
+    VK_SAMPLE_COUNT_64_BIT = 0x0000_0040,
+}
+
+impl From<u32> for VkSampleCountFlags {
+    fn from(n: u32) -> Self {
+        match n {
+            1 => Self::VK_SAMPLE_COUNT_1_BIT,
+            2 => Self::VK_SAMPLE_COUNT_2_BIT,
+            4 => Self::VK_SAMPLE_COUNT_4_BIT,
+            8 => Self::VK_SAMPLE_COUNT_8_BIT,
+            16 => Self::VK_SAMPLE_COUNT_16_BIT,
+            32 => Self::VK_SAMPLE_COUNT_32_BIT,
+            64 => Self::VK_SAMPLE_COUNT_64_BIT,
+
+            _ => panic!("SampleCount {} does not exist", n),
+        }
+    }
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkSampleCountFlagBits(u32);
+SetupVkFlags!(VkSampleCountFlags, VkSampleCountFlagBits);
+
+impl Default for VkSampleCountFlags {
+    fn default() -> Self {
+        VK_SAMPLE_COUNT_1_BIT
+    }
+}
+
+impl Default for VkSampleCountFlagBits {
+    fn default() -> Self {
+        VK_SAMPLE_COUNT_1_BIT.into()
+    }
+}
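+
+// Editor's sketch, not part of the original patch. Two points worth noting in
+// the files above. First, `VkResult` doubles as the crate's error type: the
+// negative Vulkan error codes are written as `-1i32 as u32` etc. so the enum
+// can stay `#[repr(u32)]` while preserving the exact bit pattern the C API
+// returns, and the `Display`/`Error` impls let it travel as a normal Rust
+// error. A minimal (hypothetical) call-site helper could look like:
+//
+//     fn check(result: VkResult) -> Result<(), VkResult> {
+//         if result == VK_SUCCESS { Ok(()) } else { Err(result) }
+//     }
+//
+// Second, `VkSampleCountFlags::from(4)` maps a plain sample count to its flag
+// bit and deliberately panics on counts Vulkan does not define.
diff --git a/vulkan-sys/src/enums/sampleraddressmode.rs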
b/vulkan-sys/src/enums/sampleraddressmode.rs new file mode 100644 index 0000000..c68f789 --- /dev/null +++ b/vulkan-sys/src/enums/sampleraddressmode.rs @@ -0,0 +1,11 @@ +pub use VkSamplerAddressMode::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum VkSamplerAddressMode { + VK_SAMPLER_ADDRESS_MODE_REPEAT = 0, + VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT = 1, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2, + VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER = 3, + VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE = 4, +} diff --git a/vulkan-sys/src/enums/samplercreateflags.rs b/vulkan-sys/src/enums/samplercreateflags.rs new file mode 100644 index 0000000..fa7c012 --- /dev/null +++ b/vulkan-sys/src/enums/samplercreateflags.rs @@ -0,0 +1,13 @@ +pub use VkSamplerCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkSamplerCreateFlags { + VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT = 0x0000_0001, + VK_SAMPLER_CREATE_SUBSAMPLED_COARSE_RECONSTRUCTION_BIT_EXT = 0x0000_0002, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkSamplerCreateFlagBits(u32); +SetupVkFlags!(VkSamplerCreateFlags, VkSamplerCreateFlagBits); diff --git a/vulkan-sys/src/enums/samplermipmapmode.rs b/vulkan-sys/src/enums/samplermipmapmode.rs new file mode 100644 index 0000000..6cf0aa9 --- /dev/null +++ b/vulkan-sys/src/enums/samplermipmapmode.rs @@ -0,0 +1,8 @@ +pub use VkSamplerMipmapMode::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] +pub enum VkSamplerMipmapMode { + VK_SAMPLER_MIPMAP_MODE_NEAREST = 0, + VK_SAMPLER_MIPMAP_MODE_LINEAR = 1, +} diff --git a/vulkan-sys/src/enums/semaphorecreateflags.rs b/vulkan-sys/src/enums/semaphorecreateflags.rs new file mode 100644 index 0000000..4ba657b --- /dev/null +++ b/vulkan-sys/src/enums/semaphorecreateflags.rs @@ -0,0 +1,12 @@ +pub use VkSemaphoreCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkSemaphoreCreateFlags { + VK_SEMAPHORE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkSemaphoreCreateFlagBits(u32); +SetupVkFlags!(VkSemaphoreCreateFlags, VkSemaphoreCreateFlagBits); diff --git a/vulkan-sys/src/enums/shadermodulecreateflags.rs b/vulkan-sys/src/enums/shadermodulecreateflags.rs new file mode 100644 index 0000000..68cedbe --- /dev/null +++ b/vulkan-sys/src/enums/shadermodulecreateflags.rs @@ -0,0 +1,12 @@ +pub use VkShaderModuleCreateFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkShaderModuleCreateFlags { + VK_SHADER_MODULE_CREATE_NULL_BIT = 0, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkShaderModuleCreateFlagBits(u32); +SetupVkFlags!(VkShaderModuleCreateFlags, VkShaderModuleCreateFlagBits); diff --git a/vulkan-sys/src/enums/shaderstageflags.rs b/vulkan-sys/src/enums/shaderstageflags.rs new file mode 100644 index 0000000..6ad6093 --- /dev/null +++ b/vulkan-sys/src/enums/shaderstageflags.rs @@ -0,0 +1,27 @@ +pub use VkShaderStageFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkShaderStageFlags { + VK_SHADER_STAGE_VERTEX_BIT = 0x0000_0001, + VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT = 0x0000_0002, + VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT = 0x0000_0004, + VK_SHADER_STAGE_GEOMETRY_BIT = 0x0000_0008, + VK_SHADER_STAGE_FRAGMENT_BIT = 0x0000_0010, + VK_SHADER_STAGE_COMPUTE_BIT = 0x0000_0020, + VK_SHADER_STAGE_ALL_GRAPHICS = 0x1F, + VK_SHADER_STAGE_ALL = 0x7FFF_FFFF, + VK_SHADER_STAGE_RAYGEN_BIT_KHR = 
0x0000_0100, + VK_SHADER_STAGE_ANY_HIT_BIT_KHR = 0x0000_0200, + VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR = 0x0000_0400, + VK_SHADER_STAGE_MISS_BIT_KHR = 0x0000_0800, + VK_SHADER_STAGE_INTERSECTION_BIT_KHR = 0x0000_1000, + VK_SHADER_STAGE_CALLABLE_BIT_KHR = 0x0000_2000, + VK_SHADER_STAGE_TASK_BIT_NV = 0x0000_0040, + VK_SHADER_STAGE_MESH_BIT_NV = 0x0000_0080, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkShaderStageFlagBits(u32); +SetupVkFlags!(VkShaderStageFlags, VkShaderStageFlagBits); diff --git a/vulkan-sys/src/enums/sharingmode.rs b/vulkan-sys/src/enums/sharingmode.rs new file mode 100644 index 0000000..ce86281 --- /dev/null +++ b/vulkan-sys/src/enums/sharingmode.rs @@ -0,0 +1,14 @@ +pub use VkSharingMode::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkSharingMode { + VK_SHARING_MODE_EXCLUSIVE = 0, + VK_SHARING_MODE_CONCURRENT = 1, +} + +impl Default for VkSharingMode { + fn default() -> Self { + VK_SHARING_MODE_EXCLUSIVE + } +} diff --git a/vulkan-sys/src/enums/sparseimageformatflags.rs b/vulkan-sys/src/enums/sparseimageformatflags.rs new file mode 100644 index 0000000..6e58ff6 --- /dev/null +++ b/vulkan-sys/src/enums/sparseimageformatflags.rs @@ -0,0 +1,14 @@ +pub use VkSparseImageFormatFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkSparseImageFormatFlags { + VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT = 0x0000_0001, + VK_SPARSE_IMAGE_FORMAT_ALIGNED_MIP_SIZE_BIT = 0x0000_0002, + VK_SPARSE_IMAGE_FORMAT_NONSTANDARD_BLOCK_SIZE_BIT = 0x0000_0004, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkSparseImageFormatFlagBits(u32); +SetupVkFlags!(VkSparseImageFormatFlags, VkSparseImageFormatFlagBits); diff --git a/vulkan-sys/src/enums/sparsememorybindflags.rs b/vulkan-sys/src/enums/sparsememorybindflags.rs new file mode 100644 index 0000000..5fa462d --- /dev/null +++ b/vulkan-sys/src/enums/sparsememorybindflags.rs @@ -0,0 +1,12 @@ +pub use VkSparseMemoryBindFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkSparseMemoryBindFlags { + VK_SPARSE_MEMORY_BIND_METADATA_BIT = 0x0000_0001, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkSparseMemoryBindFlagBits(u32); +SetupVkFlags!(VkSparseMemoryBindFlags, VkSparseMemoryBindFlagBits); diff --git a/vulkan-sys/src/enums/stencilfaceflags.rs b/vulkan-sys/src/enums/stencilfaceflags.rs new file mode 100644 index 0000000..8b8b87f --- /dev/null +++ b/vulkan-sys/src/enums/stencilfaceflags.rs @@ -0,0 +1,14 @@ +pub use VkStencilFaceFlags::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkStencilFaceFlags { + VK_STENCIL_FACE_FRONT_BIT = 0x00000001, + VK_STENCIL_FACE_BACK_BIT = 0x00000002, + VK_STENCIL_FRONT_AND_BACK = 0x3, +} + +#[repr(C)] +#[derive(Clone, Copy, Eq, PartialEq, Hash)] +pub struct VkStencilFaceFlagBits(u32); +SetupVkFlags!(VkStencilFaceFlags, VkStencilFaceFlagBits); diff --git a/vulkan-sys/src/enums/stencilop.rs b/vulkan-sys/src/enums/stencilop.rs new file mode 100644 index 0000000..c84c1c0 --- /dev/null +++ b/vulkan-sys/src/enums/stencilop.rs @@ -0,0 +1,14 @@ +pub use VkStencilOp::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkStencilOp { + VK_STENCIL_OP_KEEP = 0, + VK_STENCIL_OP_ZERO = 1, + VK_STENCIL_OP_REPLACE = 2, + VK_STENCIL_OP_INCREMENT_AND_CLAMP = 3, + VK_STENCIL_OP_DECREMENT_AND_CLAMP = 4, + VK_STENCIL_OP_INVERT = 5, + VK_STENCIL_OP_INCREMENT_AND_WRAP = 6, + VK_STENCIL_OP_DECREMENT_AND_WRAP = 7, +} diff --git 
a/vulkan-sys/src/enums/structuretype.rs b/vulkan-sys/src/enums/structuretype.rs new file mode 100644 index 0000000..df66f7f --- /dev/null +++ b/vulkan-sys/src/enums/structuretype.rs @@ -0,0 +1,903 @@ +pub use VkStructureType::*; + +#[repr(u32)] +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum VkStructureType { + VK_STRUCTURE_TYPE_APPLICATION_INFO = 0, + VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO = 1, + VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO = 2, + VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO = 3, + VK_STRUCTURE_TYPE_SUBMIT_INFO = 4, + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO = 5, + VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE = 6, + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO = 7, + VK_STRUCTURE_TYPE_FENCE_CREATE_INFO = 8, + VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO = 9, + VK_STRUCTURE_TYPE_EVENT_CREATE_INFO = 10, + VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO = 11, + VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO = 12, + VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO = 13, + VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO = 14, + VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO = 15, + VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO = 16, + VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO = 17, + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO = 18, + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO = 19, + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO = 20, + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO = 21, + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO = 22, + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO = 23, + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO = 24, + VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO = 25, + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO = 26, + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO = 27, + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO = 28, + VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO = 29, + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO = 30, + VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO = 31, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO = 32, + VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO = 33, + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO = 34, + VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET = 35, + VK_STRUCTURE_TYPE_COPY_DESCRIPTOR_SET = 36, + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO = 37, + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO = 38, + VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO = 39, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO = 40, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO = 41, + VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO = 42, + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO = 43, + VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER = 44, + VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER = 45, + VK_STRUCTURE_TYPE_MEMORY_BARRIER = 46, + VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO = 47, + VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO = 48, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES = 1000094000, + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO = 1000157000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO = 1000157001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES = 1000083000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS = 1000127000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO = 1000127001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO = 1000060000, + // Provided by VK_VERSION_1_1 + 
VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO = 1000060003, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_DEVICE_GROUP_COMMAND_BUFFER_BEGIN_INFO = 1000060004, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_DEVICE_GROUP_SUBMIT_INFO = 1000060005, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_DEVICE_GROUP_BIND_SPARSE_INFO = 1000060006, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO = 1000060013, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO = 1000060014, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_GROUP_PROPERTIES = 1000070000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO = 1000070001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2 = 1000146000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2 = 1000146001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_IMAGE_SPARSE_MEMORY_REQUIREMENTS_INFO_2 = 1000146002, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2 = 1000146003, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_SPARSE_IMAGE_MEMORY_REQUIREMENTS_2 = 1000146004, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 = 1000059000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2 = 1000059001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2 = 1000059002, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2 = 1000059003, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2 = 1000059004, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2 = 1000059005, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2 = 1000059006, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2 = 1000059007, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2 = 1000059008, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES = 1000117000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_RENDER_PASS_INPUT_ATTACHMENT_ASPECT_CREATE_INFO = 1000117001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO = 1000117002, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO = 1000117003, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_RENDER_PASS_MULTIVIEW_CREATE_INFO = 1000053000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES = 1000053001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES = 1000053002, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTERS_FEATURES = 1000120000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PROTECTED_SUBMIT_INFO = 1000145000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES = 1000145001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES = 1000145002, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2 = 1000145003, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO = 1000156000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO = 
1000156001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO = 1000156002, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO = 1000156003, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES = 1000156004, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_IMAGE_FORMAT_PROPERTIES = 1000156005, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO = 1000085000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_IMAGE_FORMAT_INFO = 1000071000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_EXTERNAL_IMAGE_FORMAT_PROPERTIES = 1000071001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_BUFFER_INFO = 1000071002, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_EXTERNAL_BUFFER_PROPERTIES = 1000071003, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES = 1000071004, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO = 1000072000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO = 1000072001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO = 1000072002, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_FENCE_INFO = 1000112000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_EXTERNAL_FENCE_PROPERTIES = 1000112001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_EXPORT_FENCE_CREATE_INFO = 1000113000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO = 1000077000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO = 1000076000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES = 1000076001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES = 1000168000, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT = 1000168001, + // Provided by VK_VERSION_1_1 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETERS_FEATURES = 1000063000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_FEATURES = 49, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_1_PROPERTIES = 50, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES = 51, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_PROPERTIES = 52, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO = 1000147000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2 = 1000109000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2 = 1000109001, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2 = 1000109002, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2 = 1000109003, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2 = 1000109004, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO = 1000109005, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_SUBPASS_END_INFO = 1000109006, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES = 1000177000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES = 1000196000, + // Provided by 
VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_INT64_FEATURES = 1000180000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES = 1000082000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FLOAT_CONTROLS_PROPERTIES = 1000197000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO = 1000161000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES = 1000161001, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES = 1000161002, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO = 1000161003, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT = 1000161004, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_STENCIL_RESOLVE_PROPERTIES = 1000199000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_DEPTH_STENCIL_RESOLVE = 1000199001, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SCALAR_BLOCK_LAYOUT_FEATURES = 1000221000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_IMAGE_STENCIL_USAGE_CREATE_INFO = 1000246000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES = 1000130000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_SAMPLER_REDUCTION_MODE_CREATE_INFO = 1000130001, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_MEMORY_MODEL_FEATURES = 1000211000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGELESS_FRAMEBUFFER_FEATURES = 1000108000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENTS_CREATE_INFO = 1000108001, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_FRAMEBUFFER_ATTACHMENT_IMAGE_INFO = 1000108002, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO = 1000108003, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_UNIFORM_BUFFER_STANDARD_LAYOUT_FEATURES = 1000253000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES = 1000175000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SEPARATE_DEPTH_STENCIL_LAYOUTS_FEATURES = 1000241000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_STENCIL_LAYOUT = 1000241001, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_STENCIL_LAYOUT = 1000241002, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_HOST_QUERY_RESET_FEATURES = 1000261000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_FEATURES = 1000207000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TIMELINE_SEMAPHORE_PROPERTIES = 1000207001, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_SEMAPHORE_TYPE_CREATE_INFO = 1000207002, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO = 1000207003, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_SEMAPHORE_WAIT_INFO = 1000207004, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_SEMAPHORE_SIGNAL_INFO = 1000207005, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES = 1000257000, + // Provided by VK_VERSION_1_2 + VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO = 1000244001, + // Provided 
by VK_VERSION_1_2
+    VK_STRUCTURE_TYPE_BUFFER_OPAQUE_CAPTURE_ADDRESS_CREATE_INFO = 1000257002,
+    // Provided by VK_VERSION_1_2
+    VK_STRUCTURE_TYPE_MEMORY_OPAQUE_CAPTURE_ADDRESS_ALLOCATE_INFO = 1000257003,
+    // Provided by VK_VERSION_1_2
+    VK_STRUCTURE_TYPE_DEVICE_MEMORY_OPAQUE_CAPTURE_ADDRESS_INFO = 1000257004,
+    // Provided by VK_KHR_swapchain
+    VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR = 1000001000,
+    // Provided by VK_KHR_swapchain
+    VK_STRUCTURE_TYPE_PRESENT_INFO_KHR = 1000001001,
+    // Provided by VK_KHR_swapchain with VK_VERSION_1_1, VK_KHR_device_group with VK_KHR_surface
+    VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_CAPABILITIES_KHR = 1000060007,
+    // Provided by VK_KHR_swapchain with VK_VERSION_1_1, VK_KHR_device_group with VK_KHR_swapchain
+    VK_STRUCTURE_TYPE_IMAGE_SWAPCHAIN_CREATE_INFO_KHR = 1000060008,
+    // Provided by VK_KHR_swapchain with VK_VERSION_1_1, VK_KHR_device_group with VK_KHR_swapchain
+    VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_SWAPCHAIN_INFO_KHR = 1000060009,
+    // Provided by VK_KHR_swapchain with VK_VERSION_1_1, VK_KHR_device_group with VK_KHR_swapchain
+    VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHR = 1000060010,
+    // Provided by VK_KHR_swapchain with VK_VERSION_1_1, VK_KHR_device_group with VK_KHR_swapchain
+    VK_STRUCTURE_TYPE_DEVICE_GROUP_PRESENT_INFO_KHR = 1000060011,
+    // Provided by VK_KHR_swapchain with VK_VERSION_1_1, VK_KHR_device_group with VK_KHR_swapchain
+    VK_STRUCTURE_TYPE_DEVICE_GROUP_SWAPCHAIN_CREATE_INFO_KHR = 1000060012,
+    // Provided by VK_KHR_display
+    VK_STRUCTURE_TYPE_DISPLAY_MODE_CREATE_INFO_KHR = 1000002000,
+    // Provided by VK_KHR_display
+    VK_STRUCTURE_TYPE_DISPLAY_SURFACE_CREATE_INFO_KHR = 1000002001,
+    // Provided by VK_KHR_display_swapchain
+    VK_STRUCTURE_TYPE_DISPLAY_PRESENT_INFO_KHR = 1000003000,
+    // Provided by VK_KHR_xlib_surface
+    VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR = 1000004000,
+    // Provided by VK_KHR_xcb_surface
+    VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR = 1000005000,
+    // Provided by VK_KHR_wayland_surface
+    VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR = 1000006000,
+    // Provided by VK_KHR_android_surface
+    VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR = 1000008000,
+    // Provided by VK_KHR_win32_surface
+    VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR = 1000009000,
+    // Provided by VK_EXT_debug_report
+    VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT = 1000011000,
+    // Provided by VK_AMD_rasterization_order
+    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD = 1000018000,
+    // Provided by VK_EXT_debug_marker
+    VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT = 1000022000,
+    // Provided by VK_EXT_debug_marker
+    VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_TAG_INFO_EXT = 1000022001,
+    // Provided by VK_EXT_debug_marker
+    VK_STRUCTURE_TYPE_DEBUG_MARKER_MARKER_INFO_EXT = 1000022002,
+    // Provided by VK_NV_dedicated_allocation
+    VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_IMAGE_CREATE_INFO_NV = 1000026000,
+    // Provided by VK_NV_dedicated_allocation
+    VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_BUFFER_CREATE_INFO_NV = 1000026001,
+    // Provided by VK_NV_dedicated_allocation
+    VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV = 1000026002,
+    // Provided by VK_EXT_transform_feedback
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT = 1000028000,
+    // Provided by VK_EXT_transform_feedback
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT = 1000028001,
+    // Provided by VK_EXT_transform_feedback
+    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_STREAM_CREATE_INFO_EXT = 1000028002,
+    // Provided by VK_NVX_image_view_handle
+    VK_STRUCTURE_TYPE_IMAGE_VIEW_HANDLE_INFO_NVX = 1000030000,
+    // Provided by VK_NVX_image_view_handle
+    VK_STRUCTURE_TYPE_IMAGE_VIEW_ADDRESS_PROPERTIES_NVX = 1000030001,
+    // Provided by VK_AMD_texture_gather_bias_lod
+    VK_STRUCTURE_TYPE_TEXTURE_LOD_GATHER_FORMAT_PROPERTIES_AMD = 1000041000,
+    // Provided by VK_GGP_stream_descriptor_surface
+    VK_STRUCTURE_TYPE_STREAM_DESCRIPTOR_SURFACE_CREATE_INFO_GGP = 1000049000,
+    // Provided by VK_NV_corner_sampled_image
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CORNER_SAMPLED_IMAGE_FEATURES_NV = 1000050000,
+    // Provided by VK_NV_external_memory
+    VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_NV = 1000056000,
+    // Provided by VK_NV_external_memory
+    VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_NV = 1000056001,
+    // Provided by VK_NV_external_memory_win32
+    VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057000,
+    // Provided by VK_NV_external_memory_win32
+    VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_NV = 1000057001,
+    // Provided by VK_NV_win32_keyed_mutex
+    VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_NV = 1000058000,
+    // Provided by VK_EXT_validation_flags
+    VK_STRUCTURE_TYPE_VALIDATION_FLAGS_EXT = 1000061000,
+    // Provided by VK_NN_vi_surface
+    VK_STRUCTURE_TYPE_VI_SURFACE_CREATE_INFO_NN = 1000062000,
+    // Provided by VK_EXT_texture_compression_astc_hdr
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXTURE_COMPRESSION_ASTC_HDR_FEATURES_EXT = 1000066000,
+    // Provided by VK_EXT_astc_decode_mode
+    VK_STRUCTURE_TYPE_IMAGE_VIEW_ASTC_DECODE_MODE_EXT = 1000067000,
+    // Provided by VK_EXT_astc_decode_mode
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ASTC_DECODE_FEATURES_EXT = 1000067001,
+    // Provided by VK_KHR_external_memory_win32
+    VK_STRUCTURE_TYPE_IMPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073000,
+    // Provided by VK_KHR_external_memory_win32
+    VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR = 1000073001,
+    // Provided by VK_KHR_external_memory_win32
+    VK_STRUCTURE_TYPE_MEMORY_WIN32_HANDLE_PROPERTIES_KHR = 1000073002,
+    // Provided by VK_KHR_external_memory_win32
+    VK_STRUCTURE_TYPE_MEMORY_GET_WIN32_HANDLE_INFO_KHR = 1000073003,
+    // Provided by VK_KHR_external_memory_fd
+    VK_STRUCTURE_TYPE_IMPORT_MEMORY_FD_INFO_KHR = 1000074000,
+    // Provided by VK_KHR_external_memory_fd
+    VK_STRUCTURE_TYPE_MEMORY_FD_PROPERTIES_KHR = 1000074001,
+    // Provided by VK_KHR_external_memory_fd
+    VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR = 1000074002,
+    // Provided by VK_KHR_win32_keyed_mutex
+    VK_STRUCTURE_TYPE_WIN32_KEYED_MUTEX_ACQUIRE_RELEASE_INFO_KHR = 1000075000,
+    // Provided by VK_KHR_external_semaphore_win32
+    VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078000,
+    // Provided by VK_KHR_external_semaphore_win32
+    VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_WIN32_HANDLE_INFO_KHR = 1000078001,
+    // Provided by VK_KHR_external_semaphore_win32
+    VK_STRUCTURE_TYPE_D3D12_FENCE_SUBMIT_INFO_KHR = 1000078002,
+    // Provided by VK_KHR_external_semaphore_win32
+    VK_STRUCTURE_TYPE_SEMAPHORE_GET_WIN32_HANDLE_INFO_KHR = 1000078003,
+    // Provided by VK_KHR_external_semaphore_fd
+    VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR = 1000079000,
+    // Provided by VK_KHR_external_semaphore_fd
+    VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR = 1000079001,
+    // Provided by VK_KHR_push_descriptor
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR = 1000080000,
+    // Provided by VK_EXT_conditional_rendering
+    VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT = 1000081000,
+    // Provided by VK_EXT_conditional_rendering
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT = 1000081001,
+    // Provided by VK_EXT_conditional_rendering
+    VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT = 1000081002,
+    // Provided by VK_KHR_incremental_present
+    VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR = 1000084000,
+    // Provided by VK_NV_clip_space_w_scaling
+    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_W_SCALING_STATE_CREATE_INFO_NV = 1000087000,
+    // Provided by VK_EXT_display_surface_counter
+    VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_EXT = 1000090000,
+    // Provided by VK_EXT_display_control
+    VK_STRUCTURE_TYPE_DISPLAY_POWER_INFO_EXT = 1000091000,
+    // Provided by VK_EXT_display_control
+    VK_STRUCTURE_TYPE_DEVICE_EVENT_INFO_EXT = 1000091001,
+    // Provided by VK_EXT_display_control
+    VK_STRUCTURE_TYPE_DISPLAY_EVENT_INFO_EXT = 1000091002,
+    // Provided by VK_EXT_display_control
+    VK_STRUCTURE_TYPE_SWAPCHAIN_COUNTER_CREATE_INFO_EXT = 1000091003,
+    // Provided by VK_GOOGLE_display_timing
+    VK_STRUCTURE_TYPE_PRESENT_TIMES_INFO_GOOGLE = 1000092000,
+    // Provided by VK_NVX_multiview_per_view_attributes
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PER_VIEW_ATTRIBUTES_PROPERTIES_NVX = 1000097000,
+    // Provided by VK_NV_viewport_swizzle
+    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV = 1000098000,
+    // Provided by VK_EXT_discard_rectangles
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT = 1000099000,
+    // Provided by VK_EXT_discard_rectangles
+    VK_STRUCTURE_TYPE_PIPELINE_DISCARD_RECTANGLE_STATE_CREATE_INFO_EXT = 1000099001,
+    // Provided by VK_EXT_conservative_rasterization
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT = 1000101000,
+    // Provided by VK_EXT_conservative_rasterization
+    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_CONSERVATIVE_STATE_CREATE_INFO_EXT = 1000101001,
+    // Provided by VK_EXT_depth_clip_enable
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEPTH_CLIP_ENABLE_FEATURES_EXT = 1000102000,
+    // Provided by VK_EXT_depth_clip_enable
+    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_DEPTH_CLIP_STATE_CREATE_INFO_EXT = 1000102001,
+    // Provided by VK_EXT_hdr_metadata
+    VK_STRUCTURE_TYPE_HDR_METADATA_EXT = 1000105000,
+    // Provided by VK_KHR_shared_presentable_image
+    VK_STRUCTURE_TYPE_SHARED_PRESENT_SURFACE_CAPABILITIES_KHR = 1000111000,
+    // Provided by VK_KHR_external_fence_win32
+    VK_STRUCTURE_TYPE_IMPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114000,
+    // Provided by VK_KHR_external_fence_win32
+    VK_STRUCTURE_TYPE_EXPORT_FENCE_WIN32_HANDLE_INFO_KHR = 1000114001,
+    // Provided by VK_KHR_external_fence_win32
+    VK_STRUCTURE_TYPE_FENCE_GET_WIN32_HANDLE_INFO_KHR = 1000114002,
+    // Provided by VK_KHR_external_fence_fd
+    VK_STRUCTURE_TYPE_IMPORT_FENCE_FD_INFO_KHR = 1000115000,
+    // Provided by VK_KHR_external_fence_fd
+    VK_STRUCTURE_TYPE_FENCE_GET_FD_INFO_KHR = 1000115001,
+    // Provided by VK_KHR_performance_query
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_FEATURES_KHR = 1000116000,
+    // Provided by VK_KHR_performance_query
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PERFORMANCE_QUERY_PROPERTIES_KHR = 1000116001,
+    // Provided by VK_KHR_performance_query
+    VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_CREATE_INFO_KHR = 1000116002,
+    // Provided by VK_KHR_performance_query
+    VK_STRUCTURE_TYPE_PERFORMANCE_QUERY_SUBMIT_INFO_KHR = 1000116003,
+    // Provided by VK_KHR_performance_query
+    VK_STRUCTURE_TYPE_ACQUIRE_PROFILING_LOCK_INFO_KHR = 1000116004,
+    // Provided by VK_KHR_performance_query
+    VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_KHR = 1000116005,
+    // Provided by VK_KHR_performance_query
+    VK_STRUCTURE_TYPE_PERFORMANCE_COUNTER_DESCRIPTION_KHR = 1000116006,
+    // Provided by VK_KHR_get_surface_capabilities2
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SURFACE_INFO_2_KHR = 1000119000,
+    // Provided by VK_KHR_get_surface_capabilities2
+    VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR = 1000119001,
+    // Provided by VK_KHR_get_surface_capabilities2
+    VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR = 1000119002,
+    // Provided by VK_KHR_get_display_properties2
+    VK_STRUCTURE_TYPE_DISPLAY_PROPERTIES_2_KHR = 1000121000,
+    // Provided by VK_KHR_get_display_properties2
+    VK_STRUCTURE_TYPE_DISPLAY_PLANE_PROPERTIES_2_KHR = 1000121001,
+    // Provided by VK_KHR_get_display_properties2
+    VK_STRUCTURE_TYPE_DISPLAY_MODE_PROPERTIES_2_KHR = 1000121002,
+    // Provided by VK_KHR_get_display_properties2
+    VK_STRUCTURE_TYPE_DISPLAY_PLANE_INFO_2_KHR = 1000121003,
+    // Provided by VK_KHR_get_display_properties2
+    VK_STRUCTURE_TYPE_DISPLAY_PLANE_CAPABILITIES_2_KHR = 1000121004,
+    // Provided by VK_MVK_ios_surface
+    VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK = 1000122000,
+    // Provided by VK_MVK_macos_surface
+    VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK = 1000123000,
+    // Provided by VK_EXT_debug_utils
+    VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT = 1000128000,
+    // Provided by VK_EXT_debug_utils
+    VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_TAG_INFO_EXT = 1000128001,
+    // Provided by VK_EXT_debug_utils
+    VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT = 1000128002,
+    // Provided by VK_EXT_debug_utils
+    VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT = 1000128003,
+    // Provided by VK_EXT_debug_utils
+    VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT = 1000128004,
+    // Provided by VK_ANDROID_external_memory_android_hardware_buffer
+    VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_USAGE_ANDROID = 1000129000,
+    // Provided by VK_ANDROID_external_memory_android_hardware_buffer
+    VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID = 1000129001,
+    // Provided by VK_ANDROID_external_memory_android_hardware_buffer
+    VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID = 1000129002,
+    // Provided by VK_ANDROID_external_memory_android_hardware_buffer
+    VK_STRUCTURE_TYPE_IMPORT_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129003,
+    // Provided by VK_ANDROID_external_memory_android_hardware_buffer
+    VK_STRUCTURE_TYPE_MEMORY_GET_ANDROID_HARDWARE_BUFFER_INFO_ANDROID = 1000129004,
+    // Provided by VK_ANDROID_external_memory_android_hardware_buffer
+    VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID = 1000129005,
+    // Provided by VK_EXT_inline_uniform_block
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT = 1000138000,
+    // Provided by VK_EXT_inline_uniform_block
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_PROPERTIES_EXT = 1000138001,
+    // Provided by VK_EXT_inline_uniform_block
+    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT = 1000138002,
+    // Provided by VK_EXT_inline_uniform_block
+    VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_INLINE_UNIFORM_BLOCK_CREATE_INFO_EXT = 1000138003,
+    // Provided by VK_EXT_sample_locations
+    VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT = 1000143000,
+    // Provided by VK_EXT_sample_locations
+    VK_STRUCTURE_TYPE_RENDER_PASS_SAMPLE_LOCATIONS_BEGIN_INFO_EXT = 1000143001,
+    // Provided by VK_EXT_sample_locations
+    VK_STRUCTURE_TYPE_PIPELINE_SAMPLE_LOCATIONS_STATE_CREATE_INFO_EXT = 1000143002,
+    // Provided by VK_EXT_sample_locations
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLE_LOCATIONS_PROPERTIES_EXT = 1000143003,
+    // Provided by VK_EXT_sample_locations
+    VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT = 1000143004,
+    // Provided by VK_EXT_blend_operation_advanced
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT = 1000148000,
+    // Provided by VK_EXT_blend_operation_advanced
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_PROPERTIES_EXT = 1000148001,
+    // Provided by VK_EXT_blend_operation_advanced
+    VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_ADVANCED_STATE_CREATE_INFO_EXT = 1000148002,
+    // Provided by VK_NV_fragment_coverage_to_color
+    VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_TO_COLOR_STATE_CREATE_INFO_NV = 1000149000,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR = 1000150007,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR = 1000150000,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR = 1000150002,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR = 1000150003,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR = 1000150004,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR = 1000150005,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR = 1000150006,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR = 1000150009,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR = 1000150010,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR = 1000150011,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR = 1000150012,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR = 1000150013,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR = 1000150014,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR = 1000150017,
+    // Provided by VK_KHR_acceleration_structure
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR = 1000150020,
+    // Provided by VK_KHR_ray_tracing_pipeline
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR = 1000347000,
+    // Provided by VK_KHR_ray_tracing_pipeline
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR = 1000347001,
+    // Provided by VK_KHR_ray_tracing_pipeline
+    VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR = 1000150015,
+    // Provided by VK_KHR_ray_tracing_pipeline
+    VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR = 1000150016,
+    // Provided by VK_KHR_ray_tracing_pipeline
+    VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR = 1000150018,
+    // Provided by VK_KHR_ray_query
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR = 1000348013,
+    // Provided by VK_NV_framebuffer_mixed_samples
+    VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_MODULATION_STATE_CREATE_INFO_NV = 1000152000,
+    // Provided by VK_NV_shader_sm_builtins
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_FEATURES_NV = 1000154000,
+    // Provided by VK_NV_shader_sm_builtins
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SM_BUILTINS_PROPERTIES_NV = 1000154001,
+    // Provided by VK_EXT_image_drm_format_modifier
+    VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT = 1000158000,
+    // Provided by VK_EXT_image_drm_format_modifier
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_DRM_FORMAT_MODIFIER_INFO_EXT = 1000158002,
+    // Provided by VK_EXT_image_drm_format_modifier
+    VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT = 1000158003,
+    // Provided by VK_EXT_image_drm_format_modifier
+    VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT = 1000158004,
+    // Provided by VK_EXT_image_drm_format_modifier
+    VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT = 1000158005,
+    // Provided by VK_EXT_validation_cache
+    VK_STRUCTURE_TYPE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160000,
+    // Provided by VK_EXT_validation_cache
+    VK_STRUCTURE_TYPE_SHADER_MODULE_VALIDATION_CACHE_CREATE_INFO_EXT = 1000160001,
+    // Provided by VK_KHR_portability_subset
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR = 1000163000,
+    // Provided by VK_KHR_portability_subset
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_PROPERTIES_KHR = 1000163001,
+    // Provided by VK_NV_shading_rate_image
+    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SHADING_RATE_IMAGE_STATE_CREATE_INFO_NV = 1000164000,
+    // Provided by VK_NV_shading_rate_image
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_FEATURES_NV = 1000164001,
+    // Provided by VK_NV_shading_rate_image
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADING_RATE_IMAGE_PROPERTIES_NV = 1000164002,
+    // Provided by VK_NV_shading_rate_image
+    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_COARSE_SAMPLE_ORDER_STATE_CREATE_INFO_NV = 1000164005,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV = 1000165000,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_NV = 1000165001,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_GEOMETRY_NV = 1000165003,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_GEOMETRY_TRIANGLES_NV = 1000165004,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_GEOMETRY_AABB_NV = 1000165005,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_BIND_ACCELERATION_STRUCTURE_MEMORY_INFO_NV = 1000165006,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_NV = 1000165007,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_MEMORY_REQUIREMENTS_INFO_NV = 1000165008,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV = 1000165009,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV = 1000165011,
+    // Provided by VK_NV_ray_tracing
+    VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_INFO_NV = 1000165012,
+    // Provided by VK_NV_representative_fragment_test
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_REPRESENTATIVE_FRAGMENT_TEST_FEATURES_NV = 1000166000,
+    // Provided by VK_NV_representative_fragment_test
+    VK_STRUCTURE_TYPE_PIPELINE_REPRESENTATIVE_FRAGMENT_TEST_STATE_CREATE_INFO_NV = 1000166001,
+    // Provided by VK_EXT_filter_cubic
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_VIEW_IMAGE_FORMAT_INFO_EXT = 1000170000,
+    // Provided by VK_EXT_filter_cubic
+    VK_STRUCTURE_TYPE_FILTER_CUBIC_IMAGE_VIEW_IMAGE_FORMAT_PROPERTIES_EXT = 1000170001,
+    // Provided by VK_EXT_global_priority
+    VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT = 1000174000,
+    // Provided by VK_EXT_external_memory_host
+    VK_STRUCTURE_TYPE_IMPORT_MEMORY_HOST_POINTER_INFO_EXT = 1000178000,
+    // Provided by VK_EXT_external_memory_host
+    VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT = 1000178001,
+    // Provided by VK_EXT_external_memory_host
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT = 1000178002,
+    // Provided by VK_KHR_shader_clock
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CLOCK_FEATURES_KHR = 1000181000,
+    // Provided by VK_AMD_pipeline_compiler_control
+    VK_STRUCTURE_TYPE_PIPELINE_COMPILER_CONTROL_CREATE_INFO_AMD = 1000183000,
+    // Provided by VK_EXT_calibrated_timestamps
+    VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_EXT = 1000184000,
+    // Provided by VK_AMD_shader_core_properties
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD = 1000185000,
+    // Provided by VK_AMD_memory_overallocation_behavior
+    VK_STRUCTURE_TYPE_DEVICE_MEMORY_OVERALLOCATION_CREATE_INFO_AMD = 1000189000,
+    // Provided by VK_EXT_vertex_attribute_divisor
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT = 1000190000,
+    // Provided by VK_EXT_vertex_attribute_divisor
+    VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_DIVISOR_STATE_CREATE_INFO_EXT = 1000190001,
+    // Provided by VK_EXT_vertex_attribute_divisor
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT = 1000190002,
+    // Provided by VK_GGP_frame_token
+    VK_STRUCTURE_TYPE_PRESENT_FRAME_TOKEN_GGP = 1000191000,
+    // Provided by VK_EXT_pipeline_creation_feedback
+    VK_STRUCTURE_TYPE_PIPELINE_CREATION_FEEDBACK_CREATE_INFO_EXT = 1000192000,
+    // Provided by VK_NV_compute_shader_derivatives
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COMPUTE_SHADER_DERIVATIVES_FEATURES_NV = 1000201000,
+    // Provided by VK_NV_mesh_shader
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_FEATURES_NV = 1000202000,
+    // Provided by VK_NV_mesh_shader
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MESH_SHADER_PROPERTIES_NV = 1000202001,
+    // Provided by VK_NV_fragment_shader_barycentric
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_BARYCENTRIC_FEATURES_NV = 1000203000,
+    // Provided by VK_NV_shader_image_footprint
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_FOOTPRINT_FEATURES_NV = 1000204000,
+    // Provided by VK_NV_scissor_exclusive
+    VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_EXCLUSIVE_SCISSOR_STATE_CREATE_INFO_NV = 1000205000,
+    // Provided by VK_NV_scissor_exclusive
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXCLUSIVE_SCISSOR_FEATURES_NV = 1000205002,
+    // Provided by VK_NV_device_diagnostic_checkpoints
+    VK_STRUCTURE_TYPE_CHECKPOINT_DATA_NV = 1000206000,
+    // Provided by VK_NV_device_diagnostic_checkpoints
+    VK_STRUCTURE_TYPE_QUEUE_FAMILY_CHECKPOINT_PROPERTIES_NV = 1000206001,
+    // Provided by VK_INTEL_shader_integer_functions2
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_INTEGER_FUNCTIONS_2_FEATURES_INTEL = 1000209000,
+    // Provided by VK_INTEL_performance_query
+    VK_STRUCTURE_TYPE_QUERY_POOL_PERFORMANCE_QUERY_CREATE_INFO_INTEL = 1000210000,
+    // Provided by VK_INTEL_performance_query
+    VK_STRUCTURE_TYPE_INITIALIZE_PERFORMANCE_API_INFO_INTEL = 1000210001,
+    // Provided by VK_INTEL_performance_query
+    VK_STRUCTURE_TYPE_PERFORMANCE_MARKER_INFO_INTEL = 1000210002,
+    // Provided by VK_INTEL_performance_query
+    VK_STRUCTURE_TYPE_PERFORMANCE_STREAM_MARKER_INFO_INTEL = 1000210003,
+    // Provided by VK_INTEL_performance_query
+    VK_STRUCTURE_TYPE_PERFORMANCE_OVERRIDE_INFO_INTEL = 1000210004,
+    // Provided by VK_INTEL_performance_query
+    VK_STRUCTURE_TYPE_PERFORMANCE_CONFIGURATION_ACQUIRE_INFO_INTEL = 1000210005,
+    // Provided by VK_EXT_pci_bus_info
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT = 1000212000,
+    // Provided by VK_AMD_display_native_hdr
+    VK_STRUCTURE_TYPE_DISPLAY_NATIVE_HDR_SURFACE_CAPABILITIES_AMD = 1000213000,
+    // Provided by VK_AMD_display_native_hdr
+    VK_STRUCTURE_TYPE_SWAPCHAIN_DISPLAY_NATIVE_HDR_CREATE_INFO_AMD = 1000213001,
+    // Provided by VK_FUCHSIA_imagepipe_surface
+    VK_STRUCTURE_TYPE_IMAGEPIPE_SURFACE_CREATE_INFO_FUCHSIA = 1000214000,
+    // Provided by VK_KHR_shader_terminate_invocation
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_TERMINATE_INVOCATION_FEATURES_KHR = 1000215000,
+    // Provided by VK_EXT_metal_surface
+    VK_STRUCTURE_TYPE_METAL_SURFACE_CREATE_INFO_EXT = 1000217000,
+    // Provided by VK_EXT_fragment_density_map
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_FEATURES_EXT = 1000218000,
+    // Provided by VK_EXT_fragment_density_map
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_PROPERTIES_EXT = 1000218001,
+    // Provided by VK_EXT_fragment_density_map
+    VK_STRUCTURE_TYPE_RENDER_PASS_FRAGMENT_DENSITY_MAP_CREATE_INFO_EXT = 1000218002,
+    // Provided by VK_EXT_subgroup_size_control
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT = 1000225000,
+    // Provided by VK_EXT_subgroup_size_control
+    VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT = 1000225001,
+    // Provided by VK_EXT_subgroup_size_control
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT = 1000225002,
+    // Provided by VK_KHR_fragment_shading_rate
+    VK_STRUCTURE_TYPE_FRAGMENT_SHADING_RATE_ATTACHMENT_INFO_KHR = 1000226000,
+    // Provided by VK_KHR_fragment_shading_rate
+    VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_STATE_CREATE_INFO_KHR = 1000226001,
+    // Provided by VK_KHR_fragment_shading_rate
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_PROPERTIES_KHR = 1000226002,
+    // Provided by VK_KHR_fragment_shading_rate
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_FEATURES_KHR = 1000226003,
+    // Provided by VK_KHR_fragment_shading_rate
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_KHR = 1000226004,
+    // Provided by VK_AMD_shader_core_properties2
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_2_AMD = 1000227000,
+    // Provided by VK_AMD_device_coherent_memory
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COHERENT_MEMORY_FEATURES_AMD = 1000229000,
+    // Provided by VK_EXT_shader_image_atomic_int64
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_IMAGE_ATOMIC_INT64_FEATURES_EXT = 1000234000,
+    // Provided by VK_EXT_memory_budget
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT = 1000237000,
+    // Provided by VK_EXT_memory_priority
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PRIORITY_FEATURES_EXT = 1000238000,
+    // Provided by VK_EXT_memory_priority
+    VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT = 1000238001,
+    // Provided by VK_KHR_surface_protected_capabilities
+    VK_STRUCTURE_TYPE_SURFACE_PROTECTED_CAPABILITIES_KHR = 1000239000,
+    // Provided by VK_NV_dedicated_allocation_image_aliasing
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEDICATED_ALLOCATION_IMAGE_ALIASING_FEATURES_NV = 1000240000,
+    // Provided by VK_EXT_buffer_device_address
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES_EXT = 1000244000,
+    // Provided by VK_EXT_buffer_device_address
+    VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT = 1000244002,
+    // Provided by VK_EXT_tooling_info
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TOOL_PROPERTIES_EXT = 1000245000,
+    // Provided by VK_EXT_validation_features
+    VK_STRUCTURE_TYPE_VALIDATION_FEATURES_EXT = 1000247000,
+    // Provided by VK_NV_cooperative_matrix
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_FEATURES_NV = 1000249000,
+    // Provided by VK_NV_cooperative_matrix
+    VK_STRUCTURE_TYPE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249001,
+    // Provided by VK_NV_cooperative_matrix
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COOPERATIVE_MATRIX_PROPERTIES_NV = 1000249002,
+    // Provided by VK_NV_coverage_reduction_mode
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_COVERAGE_REDUCTION_MODE_FEATURES_NV = 1000250000,
+    // Provided by VK_NV_coverage_reduction_mode
+    VK_STRUCTURE_TYPE_PIPELINE_COVERAGE_REDUCTION_STATE_CREATE_INFO_NV = 1000250001,
+    // Provided by VK_NV_coverage_reduction_mode
+    VK_STRUCTURE_TYPE_FRAMEBUFFER_MIXED_SAMPLES_COMBINATION_NV = 1000250002,
+    // Provided by VK_EXT_fragment_shader_interlock
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADER_INTERLOCK_FEATURES_EXT = 1000251000,
+    // Provided by VK_EXT_ycbcr_image_arrays
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_YCBCR_IMAGE_ARRAYS_FEATURES_EXT = 1000252000,
+    // Provided by VK_EXT_full_screen_exclusive
+    VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_INFO_EXT = 1000255000,
+    // Provided by VK_EXT_full_screen_exclusive
+    VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_FULL_SCREEN_EXCLUSIVE_EXT = 1000255002,
+    // Provided by VK_EXT_full_screen_exclusive with VK_KHR_win32_surface
+    VK_STRUCTURE_TYPE_SURFACE_FULL_SCREEN_EXCLUSIVE_WIN32_INFO_EXT = 1000255001,
+    // Provided by VK_EXT_headless_surface
+    VK_STRUCTURE_TYPE_HEADLESS_SURFACE_CREATE_INFO_EXT = 1000256000,
+    // Provided by VK_EXT_line_rasterization
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_FEATURES_EXT = 1000259000,
+    // Provided by VK_EXT_line_rasterization
+    VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_LINE_STATE_CREATE_INFO_EXT = 1000259001,
+    // Provided by VK_EXT_line_rasterization
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_LINE_RASTERIZATION_PROPERTIES_EXT = 1000259002,
+    // Provided by VK_EXT_shader_atomic_float
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_ATOMIC_FLOAT_FEATURES_EXT = 1000260000,
+    // Provided by VK_EXT_index_type_uint8
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INDEX_TYPE_UINT8_FEATURES_EXT = 1000265000,
+    // Provided by VK_EXT_extended_dynamic_state
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTENDED_DYNAMIC_STATE_FEATURES_EXT = 1000267000,
+    // Provided by VK_KHR_pipeline_executable_properties
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_EXECUTABLE_PROPERTIES_FEATURES_KHR = 1000269000,
+    // Provided by VK_KHR_pipeline_executable_properties
+    VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR = 1000269001,
+    // Provided by VK_KHR_pipeline_executable_properties
+    VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_PROPERTIES_KHR = 1000269002,
+    // Provided by VK_KHR_pipeline_executable_properties
+    VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INFO_KHR = 1000269003,
+    // Provided by VK_KHR_pipeline_executable_properties
+    VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_STATISTIC_KHR = 1000269004,
+    // Provided by VK_KHR_pipeline_executable_properties
+    VK_STRUCTURE_TYPE_PIPELINE_EXECUTABLE_INTERNAL_REPRESENTATION_KHR = 1000269005,
+    // Provided by VK_EXT_shader_demote_to_helper_invocation
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DEMOTE_TO_HELPER_INVOCATION_FEATURES_EXT = 1000276000,
+    // Provided by VK_NV_device_generated_commands
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_PROPERTIES_NV = 1000277000,
+    // Provided by VK_NV_device_generated_commands
+    VK_STRUCTURE_TYPE_GRAPHICS_SHADER_GROUP_CREATE_INFO_NV = 1000277001,
+    // Provided by VK_NV_device_generated_commands
+    VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_SHADER_GROUPS_CREATE_INFO_NV = 1000277002,
+    // Provided by VK_NV_device_generated_commands
+    VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_TOKEN_NV = 1000277003,
+    // Provided by VK_NV_device_generated_commands
+    VK_STRUCTURE_TYPE_INDIRECT_COMMANDS_LAYOUT_CREATE_INFO_NV = 1000277004,
+    // Provided by VK_NV_device_generated_commands
+    VK_STRUCTURE_TYPE_GENERATED_COMMANDS_INFO_NV = 1000277005,
+    // Provided by VK_NV_device_generated_commands
+    VK_STRUCTURE_TYPE_GENERATED_COMMANDS_MEMORY_REQUIREMENTS_INFO_NV = 1000277006,
+    // Provided by VK_NV_device_generated_commands
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_GENERATED_COMMANDS_FEATURES_NV = 1000277007,
+    // Provided by VK_EXT_texel_buffer_alignment
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_FEATURES_EXT = 1000281000,
+    // Provided by VK_EXT_texel_buffer_alignment
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT = 1000281001,
+    // Provided by VK_QCOM_render_pass_transform
+    VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_RENDER_PASS_TRANSFORM_INFO_QCOM = 1000282000,
+    // Provided by VK_QCOM_render_pass_transform
+    VK_STRUCTURE_TYPE_RENDER_PASS_TRANSFORM_BEGIN_INFO_QCOM = 1000282001,
+    // Provided by VK_EXT_device_memory_report
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DEVICE_MEMORY_REPORT_FEATURES_EXT = 1000284000,
+    // Provided by VK_EXT_device_memory_report
+    VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT = 1000284001,
+    // Provided by VK_EXT_device_memory_report
+    VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT = 1000284002,
+    // Provided by VK_EXT_robustness2
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_FEATURES_EXT = 1000286000,
+    // Provided by VK_EXT_robustness2
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT = 1000286001,
+    // Provided by VK_EXT_custom_border_color
+    VK_STRUCTURE_TYPE_SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT = 1000287000,
+    // Provided by VK_EXT_custom_border_color
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_PROPERTIES_EXT = 1000287001,
+    // Provided by VK_EXT_custom_border_color
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CUSTOM_BORDER_COLOR_FEATURES_EXT = 1000287002,
+    // Provided by VK_KHR_pipeline_library
+    VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR = 1000290000,
+    // Provided by VK_EXT_private_data
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PRIVATE_DATA_FEATURES_EXT = 1000295000,
+    // Provided by VK_EXT_private_data
+    VK_STRUCTURE_TYPE_DEVICE_PRIVATE_DATA_CREATE_INFO_EXT = 1000295001,
+    // Provided by VK_EXT_private_data
+    VK_STRUCTURE_TYPE_PRIVATE_DATA_SLOT_CREATE_INFO_EXT = 1000295002,
+    // Provided by VK_EXT_pipeline_creation_cache_control
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PIPELINE_CREATION_CACHE_CONTROL_FEATURES_EXT = 1000297000,
+    // Provided by VK_NV_device_diagnostics_config
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DIAGNOSTICS_CONFIG_FEATURES_NV = 1000300000,
+    // Provided by VK_NV_device_diagnostics_config
+    VK_STRUCTURE_TYPE_DEVICE_DIAGNOSTICS_CONFIG_CREATE_INFO_NV = 1000300001,
+    // Provided by VK_NV_fragment_shading_rate_enums
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_PROPERTIES_NV = 1000326000,
+    // Provided by VK_NV_fragment_shading_rate_enums
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_SHADING_RATE_ENUMS_FEATURES_NV = 1000326001,
+    // Provided by VK_NV_fragment_shading_rate_enums
+    VK_STRUCTURE_TYPE_PIPELINE_FRAGMENT_SHADING_RATE_ENUM_STATE_CREATE_INFO_NV = 1000326002,
+    // Provided by VK_EXT_fragment_density_map2
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_FEATURES_EXT = 1000332000,
+    // Provided by VK_EXT_fragment_density_map2
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FRAGMENT_DENSITY_MAP_2_PROPERTIES_EXT = 1000332001,
+    // Provided by VK_QCOM_rotated_copy_commands
+    VK_STRUCTURE_TYPE_COPY_COMMAND_TRANSFORM_INFO_QCOM = 1000333000,
+    // Provided by VK_EXT_image_robustness
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_ROBUSTNESS_FEATURES_EXT = 1000335000,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_COPY_BUFFER_INFO_2_KHR = 1000337000,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_COPY_IMAGE_INFO_2_KHR = 1000337001,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_COPY_BUFFER_TO_IMAGE_INFO_2_KHR = 1000337002,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_COPY_IMAGE_TO_BUFFER_INFO_2_KHR = 1000337003,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_BLIT_IMAGE_INFO_2_KHR = 1000337004,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_RESOLVE_IMAGE_INFO_2_KHR = 1000337005,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_BUFFER_COPY_2_KHR = 1000337006,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_IMAGE_COPY_2_KHR = 1000337007,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_IMAGE_BLIT_2_KHR = 1000337008,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_BUFFER_IMAGE_COPY_2_KHR = 1000337009,
+    // Provided by VK_KHR_copy_commands2
+    VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR = 1000337010,
+    // Provided by VK_EXT_4444_formats
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_4444_FORMATS_FEATURES_EXT = 1000340000,
+    // Provided by VK_EXT_directfb_surface
+    VK_STRUCTURE_TYPE_DIRECTFB_SURFACE_CREATE_INFO_EXT = 1000346000,
+    // Provided by VK_VALVE_mutable_descriptor_type
+    VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MUTABLE_DESCRIPTOR_TYPE_FEATURES_VALVE = 1000351000,
+    // Provided by VK_VALVE_mutable_descriptor_type
+    VK_STRUCTURE_TYPE_MUTABLE_DESCRIPTOR_TYPE_CREATE_INFO_VALVE = 1000351002,
+    VK_STRUCTURE_TYPE_MAX_ENUM = 0x7FFFFFFF,
+}
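The block above finishes the crate's mirror of VkStructureType. These discriminants are what the loader and drivers read out of every `sType` field, so a cheap guard against transcription slips is a unit test that pins a few values; a minimal sketch, assuming the variants are glob-re-exported the same way as in the other enum modules:

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn structure_type_discriminants_match_vulkan_core_h() {
        // Values must agree with vulkan_core.h bit-for-bit, since they cross FFI.
        assert_eq!(VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR as u32, 1000001000);
        assert_eq!(VK_STRUCTURE_TYPE_PRESENT_INFO_KHR as u32, 1000001001);
        assert_eq!(VK_STRUCTURE_TYPE_MAX_ENUM as u32, 0x7FFF_FFFF);
    }
}
```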
diff --git a/vulkan-sys/src/enums/subpasscontents.rs b/vulkan-sys/src/enums/subpasscontents.rs
new file mode 100644
index 0000000..f7f5a61
--- /dev/null
+++ b/vulkan-sys/src/enums/subpasscontents.rs
@@ -0,0 +1,8 @@
+pub use VkSubpassContents::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkSubpassContents {
+    VK_SUBPASS_CONTENTS_INLINE = 0,
+    VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS = 1,
+}
diff --git a/vulkan-sys/src/enums/subpassdescriptionflags.rs b/vulkan-sys/src/enums/subpassdescriptionflags.rs
new file mode 100644
index 0000000..6bd0535
--- /dev/null
+++ b/vulkan-sys/src/enums/subpassdescriptionflags.rs
@@ -0,0 +1,13 @@
+pub use VkSubpassDescriptionFlags::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkSubpassDescriptionFlags {
+    VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX = 0x0000_0001,
+    VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX = 0x0000_0002,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkSubpassDescriptionFlagBits(u32);
+SetupVkFlags!(VkSubpassDescriptionFlags, VkSubpassDescriptionFlagBits);
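This is the first appearance in this section of the two-type pattern used for every flag type in the patch: the `#[repr(u32)]` enum names the individual bits, while the `#[repr(C)]` newtype is the combined mask that actually crosses the FFI boundary. A hedged sketch of how the pair is presumably meant to be used, assuming `SetupVkFlags!` (defined elsewhere in this patch) generates the usual `From` and bitwise-operator impls:

```rust
// Assumption: SetupVkFlags! provides From<VkSubpassDescriptionFlags> and BitOr,
// so single bits convert into the mask newtype and can be combined.
let single: VkSubpassDescriptionFlagBits =
    VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX.into();
let both: VkSubpassDescriptionFlagBits = VK_SUBPASS_DESCRIPTION_PER_VIEW_ATTRIBUTES_BIT_NVX
    | VK_SUBPASS_DESCRIPTION_PER_VIEW_POSITION_X_ONLY_BIT_NVX;
```

The same `.into()` conversion is relied on by the `Default` impl in the next file.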
diff --git a/vulkan-sys/src/enums/surfacetransformflagskhr.rs b/vulkan-sys/src/enums/surfacetransformflagskhr.rs
new file mode 100644
index 0000000..44abc2a
--- /dev/null
+++ b/vulkan-sys/src/enums/surfacetransformflagskhr.rs
@@ -0,0 +1,26 @@
+pub use VkSurfaceTransformFlagsKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkSurfaceTransformFlagsKHR {
+    VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR = 0x00000001,
+    VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR = 0x00000002,
+    VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR = 0x00000004,
+    VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR = 0x00000008,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_BIT_KHR = 0x00000010,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_90_BIT_KHR = 0x00000020,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_180_BIT_KHR = 0x00000040,
+    VK_SURFACE_TRANSFORM_HORIZONTAL_MIRROR_ROTATE_270_BIT_KHR = 0x00000080,
+    VK_SURFACE_TRANSFORM_INHERIT_BIT_KHR = 0x00000100,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkSurfaceTransformFlagBitsKHR(u32);
+SetupVkFlags!(VkSurfaceTransformFlagsKHR, VkSurfaceTransformFlagBitsKHR);
+
+impl Default for VkSurfaceTransformFlagBitsKHR {
+    fn default() -> Self {
+        VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR.into()
+    }
+}
diff --git a/vulkan-sys/src/enums/swapchaincreateflagskhr.rs b/vulkan-sys/src/enums/swapchaincreateflagskhr.rs
new file mode 100644
index 0000000..8b72c7c
--- /dev/null
+++ b/vulkan-sys/src/enums/swapchaincreateflagskhr.rs
@@ -0,0 +1,14 @@
+pub use VkSwapchainCreateFlagsKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkSwapchainCreateFlagsKHR {
+    VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR = 0x0000_0001,
+    VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR = 0x0000_0002,
+    VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR = 0x0000_0004,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkSwapchainCreateFlagBitsKHR(u32);
+SetupVkFlags!(VkSwapchainCreateFlagsKHR, VkSwapchainCreateFlagBitsKHR);
diff --git a/vulkan-sys/src/enums/systemallocationscope.rs b/vulkan-sys/src/enums/systemallocationscope.rs
new file mode 100644
index 0000000..f9383aa
--- /dev/null
+++ b/vulkan-sys/src/enums/systemallocationscope.rs
@@ -0,0 +1,11 @@
+pub use VkSystemAllocationScope::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkSystemAllocationScope {
+    VK_SYSTEM_ALLOCATION_SCOPE_COMMAND = 0,
+    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT = 1,
+    VK_SYSTEM_ALLOCATION_SCOPE_CACHE = 2,
+    VK_SYSTEM_ALLOCATION_SCOPE_DEVICE = 3,
+    VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE = 4,
+}
diff --git a/vulkan-sys/src/enums/vertexinputrate.rs b/vulkan-sys/src/enums/vertexinputrate.rs
new file mode 100644
index 0000000..2c30493
--- /dev/null
+++ b/vulkan-sys/src/enums/vertexinputrate.rs
@@ -0,0 +1,8 @@
+pub use VkVertexInputRate::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkVertexInputRate {
+    VK_VERTEX_INPUT_RATE_VERTEX = 0,
+    VK_VERTEX_INPUT_RATE_INSTANCE = 1,
+}
diff --git a/vulkan-sys/src/enums/waylandsurfacecreateflagskhr.rs b/vulkan-sys/src/enums/waylandsurfacecreateflagskhr.rs
new file mode 100644
index 0000000..87ed59f
--- /dev/null
+++ b/vulkan-sys/src/enums/waylandsurfacecreateflagskhr.rs
@@ -0,0 +1,15 @@
+pub use VkWaylandSurfaceCreateFlagsKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkWaylandSurfaceCreateFlagsKHR {
+    VK_WAYLAND_SURFACE_CREATE_NULL_BIT = 0,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkWaylandSurfaceCreateFlagBitsKHR(u32);
+SetupVkFlags!(
+    VkWaylandSurfaceCreateFlagsKHR,
+    VkWaylandSurfaceCreateFlagBitsKHR
+);
diff --git a/vulkan-sys/src/enums/win32surfacecreateflagskhr.rs b/vulkan-sys/src/enums/win32surfacecreateflagskhr.rs
new file mode 100644
index 0000000..aad9dc2
--- /dev/null
+++ b/vulkan-sys/src/enums/win32surfacecreateflagskhr.rs
@@ -0,0 +1,15 @@
+pub use VkWin32SurfaceCreateFlagsKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkWin32SurfaceCreateFlagsKHR {
+    VK_WIN32_SURFACE_CREATE_NULL_BIT = 0,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkWin32SurfaceCreateFlagBitsKHR(u32);
+SetupVkFlags!(
+    VkWin32SurfaceCreateFlagsKHR,
+    VkWin32SurfaceCreateFlagBitsKHR
+);
diff --git a/vulkan-sys/src/enums/xcbsurfacecreateflagskhr.rs b/vulkan-sys/src/enums/xcbsurfacecreateflagskhr.rs
new file mode 100644
index 0000000..11b0583
--- /dev/null
+++ b/vulkan-sys/src/enums/xcbsurfacecreateflagskhr.rs
@@ -0,0 +1,12 @@
+pub use VkXcbSurfaceCreateFlagsKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkXcbSurfaceCreateFlagsKHR {
+    VK_XCB_SURFACE_CREATE_NULL_BIT = 0,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkXcbSurfaceCreateFlagBitsKHR(u32);
+SetupVkFlags!(VkXcbSurfaceCreateFlagsKHR, VkXcbSurfaceCreateFlagBitsKHR);
diff --git a/vulkan-sys/src/enums/xlibsurfacecreateflagskhr.rs b/vulkan-sys/src/enums/xlibsurfacecreateflagskhr.rs
new file mode 100644
index 0000000..d44e25e
--- /dev/null
+++ b/vulkan-sys/src/enums/xlibsurfacecreateflagskhr.rs
@@ -0,0 +1,12 @@
+pub use VkXlibSurfaceCreateFlagsKHR::*;
+
+#[repr(u32)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum VkXlibSurfaceCreateFlagsKHR {
+    VK_XLIB_SURFACE_CREATE_NULL_BIT = 0,
+}
+
+#[repr(C)]
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+pub struct VkXlibSurfaceCreateFlagBitsKHR(u32);
+SetupVkFlags!(VkXlibSurfaceCreateFlagsKHR, VkXlibSurfaceCreateFlagBitsKHR);
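Of the surface-related flag types above, only VkSurfaceTransformFlagBitsKHR carries a `Default` impl, which makes the identity transform the zero-configuration choice when filling out swapchain create info. A small usage sketch under the same `SetupVkFlags!` assumption as before (the explicit `= 0` on `VK_XLIB_SURFACE_CREATE_NULL_BIT` was added here for consistency with the other placeholder flag enums):

```rust
// Identity is the sensible default pre-transform for a swapchain.
let pre_transform = VkSurfaceTransformFlagBitsKHR::default();
assert_eq!(pre_transform, VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR.into());
```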
diff --git a/vulkan-sys/src/functions/core/device.rs b/vulkan-sys/src/functions/core/device.rs
new file mode 100644
index 0000000..6c589b6
--- /dev/null
+++ b/vulkan-sys/src/functions/core/device.rs
@@ -0,0 +1,831 @@
+use crate::prelude::*;
+use library_loader::load_function_ptrs;
+
+use std::os::raw::c_void;
+
+load_function_ptrs!(DeviceFunctions, {
+    vkDestroyDevice(device: VkDevice, pAllocator: *const VkAllocationCallbacks) -> (),
+
+    vkGetDeviceQueue(
+        device: VkDevice,
+        queueFamilyIndex: u32,
+        queueIndex: u32,
+        pQueue: *mut VkQueue
+    ) -> (),
+
+    vkWaitForFences(
+        device: VkDevice,
+        fenceCount: u32,
+        pFences: *const VkFence,
+        waitAll: VkBool32,
+        timeout: u64
+    ) -> VkResult,
+
+    vkGetQueryPoolResults(
+        device: VkDevice,
+        queryPool: VkQueryPool,
+        firstQuery: u32,
+        queryCount: u32,
+        dataSize: usize,
+        pData: *mut c_void,
+        stride: VkDeviceSize,
+        flags: VkQueryResultFlagBits
+    ) -> VkResult,
+
+    vkDeviceWaitIdle(
+        device: VkDevice
+    ) -> VkResult,
+
+    // Queue
+    vkQueueSubmit(
+        queue: VkQueue,
+        submitCount: u32,
+        pSubmits: *const VkSubmitInfo,
+        fence: VkFence
+    ) -> VkResult,
+
+    vkQueueWaitIdle(queue: VkQueue) -> VkResult,
+
+    // buffer
+    vkCreateBuffer(
+        device: VkDevice,
+        pCreateInfo: *const VkBufferCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pBuffer: *mut VkBuffer
+    ) -> VkResult,
+
+    vkDestroyBuffer(
+        device: VkDevice,
+        buffer: VkBuffer,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    vkGetBufferMemoryRequirements(
+        device: VkDevice,
+        buffer: VkBuffer,
+        pMemoryRequirements: *mut VkMemoryRequirements
+    ) -> (),
+
+    // memory
+    vkAllocateMemory(
+        device: VkDevice,
+        pAllocateInfo: *const VkMemoryAllocateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pMemory: *mut VkDeviceMemory
+    ) -> VkResult,
+
+    vkFreeMemory(
+        device: VkDevice,
+        memory: VkDeviceMemory,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    vkMapMemory(
+        device: VkDevice,
+        memory: VkDeviceMemory,
+        offset: VkDeviceSize,
+        size: VkDeviceSize,
+        flags: VkMemoryMapFlags,
+        ppData: *mut *mut c_void
+    ) -> VkResult,
+
+    vkUnmapMemory(device: VkDevice, memory: VkDeviceMemory) -> (),
+
+    vkBindBufferMemory(
+        device: VkDevice,
+        buffer: VkBuffer,
+        memory: VkDeviceMemory,
+        memoryOffset: VkDeviceSize
+    ) -> VkResult,
+
+    // render pass
+    vkCreateRenderPass(
+        device: VkDevice,
+        pCreateInfo: *const VkRenderPassCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pRenderPass: *mut VkRenderPass
+    ) -> VkResult,
+
+    vkDestroyRenderPass(
+        device: VkDevice,
+        renderPass: VkRenderPass,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    // image
+    vkCreateImage(
+        device: VkDevice,
+        pCreateInfo: *const VkImageCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pImage: *mut VkImage
+    ) -> VkResult,
+
+    vkDestroyImage(
+        device: VkDevice,
+        image: VkImage,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    vkGetImageSubresourceLayout(
+        device: VkDevice,
+        image: VkImage,
+        pSubresource: *const VkImageSubresource,
+        pLayout: *mut VkSubresourceLayout
+    ) -> (),
+
+    vkCreateImageView(
+        device: VkDevice,
+        pCreateInfo: *const VkImageViewCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pView: *mut VkImageView
+    ) -> VkResult,
+
+    vkDestroyImageView(
+        device: VkDevice,
+        imageView: VkImageView,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    vkGetImageMemoryRequirements(
+        device: VkDevice,
+        image: VkImage,
+        pMemoryRequirements: *mut VkMemoryRequirements
+    ) -> (),
+
+    vkGetImageSparseMemoryRequirements(
+        device: VkDevice,
+        image: VkImage,
+        pSparseMemoryRequirementCount: *mut u32,
+        pSparseMemoryRequirements: *mut VkSparseImageMemoryRequirements
+    ) -> (),
+
+    vkBindImageMemory(
+        device: VkDevice,
+        image: VkImage,
+        memory: VkDeviceMemory,
+        memoryOffset: VkDeviceSize
+    ) -> VkResult,
+
+    vkCreateSampler(
+        device: VkDevice,
+        pCreateInfo: *const VkSamplerCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pSampler: *mut VkSampler
+    ) -> VkResult,
+
+    vkDestroySampler(
+        device: VkDevice,
+        sampler: VkSampler,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    // buffer view
+    vkCreateBufferView(
+        device: VkDevice,
+        pCreateInfo: *const VkBufferViewCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pView: *mut VkBufferView
+    ) -> VkResult,
+
+    vkDestroyBufferView(
+        device: VkDevice,
+        bufferView: VkBufferView,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    // fence
+    vkCreateFence(
+        device: VkDevice,
+        pCreateInfo: *const VkFenceCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pFence: *mut VkFence
+    ) -> VkResult,
+
+    vkDestroyFence(
+        device: VkDevice,
+        fence: VkFence,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    vkResetFences(device: VkDevice, fenceCount: u32, pFences: *const VkFence) -> VkResult,
+
+    // semaphore
+    vkCreateSemaphore(
+        device: VkDevice,
+        pCreateInfo: *const VkSemaphoreCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pSemaphore: *mut VkSemaphore
+    ) -> VkResult,
+
+    vkDestroySemaphore(
+        device: VkDevice,
+        semaphore: VkSemaphore,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    // shadermodule
+    vkCreateShaderModule(
+        device: VkDevice,
+        pCreateInfo: *const VkShaderModuleCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pShaderModule: *mut VkShaderModule
+    ) -> VkResult,
+
+    vkDestroyShaderModule(
+        device: VkDevice,
+        shaderModule: VkShaderModule,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    // descriptor pool
+    vkCreateDescriptorPool(
+        device: VkDevice,
+        pCreateInfo: *const VkDescriptorPoolCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pDescriptorPool: *mut VkDescriptorPool
+    ) -> VkResult,
+
+    vkDestroyDescriptorPool(
+        device: VkDevice,
+        descriptorPool: VkDescriptorPool,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    vkResetDescriptorPool(
+        device: VkDevice,
+        descriptorPool: VkDescriptorPool,
+        flags: VkDescriptorPoolResetFlags
+    ) -> VkResult,
+
+    // descriptor set layout
+    vkCreateDescriptorSetLayout(
+        device: VkDevice,
+        pCreateInfo: *const VkDescriptorSetLayoutCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pSetLayout: *mut VkDescriptorSetLayout
+    ) -> VkResult,
+
+    vkDestroyDescriptorSetLayout(
+        device: VkDevice,
+        descriptorSetLayout: VkDescriptorSetLayout,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    // descriptor set
+    vkAllocateDescriptorSets(
+        device: VkDevice,
+        pAllocateInfo: *const VkDescriptorSetAllocateInfo<'_>,
+        pDescriptorSets: *mut VkDescriptorSet
+    ) -> VkResult,
+
+    vkFreeDescriptorSets(
+        device: VkDevice,
+        descriptorPool: VkDescriptorPool,
+        descriptorSetCount: u32,
+        pDescriptorSets: *const VkDescriptorSet
+    ) -> VkResult,
+
+    vkUpdateDescriptorSets(
+        device: VkDevice,
+        descriptorWriteCount: u32,
+        pDescriptorWrites: *const VkWriteDescriptorSet,
+        descriptorCopyCount: u32,
+        pDescriptorCopies: *const VkCopyDescriptorSet
+    ) -> (),
+
+    // event
+    vkCreateEvent(
+        device: VkDevice,
+        pCreateInfo: *const VkEventCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pEvent: *mut VkEvent
+    ) -> VkResult,
+
+    vkDestroyEvent(
+        device: VkDevice,
+        event: VkEvent,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    vkGetEventStatus(device: VkDevice, event: VkEvent) -> VkResult,
+
+    vkSetEvent(device: VkDevice, event: VkEvent) -> VkResult,
+
+    vkResetEvent(device: VkDevice, event: VkEvent) -> VkResult,
+
+    // command pool
+    vkCreateCommandPool(
+        device: VkDevice,
+        pCreateInfo: *const VkCommandPoolCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pCommandPool: *mut VkCommandPool
+    ) -> VkResult,
+
+    vkDestroyCommandPool(
+        device: VkDevice,
+        commandPool: VkCommandPool,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    vkResetCommandPool(
+        device: VkDevice,
+        commandPool: VkCommandPool,
+        flags: VkCommandPoolResetFlags
+    ) -> VkResult,
+
+    vkTrimCommandPool(
+        device: VkDevice,
+        commandPool: VkCommandPool,
+        flags: VkCommandPoolTrimFlags
+    ) -> (),
+
+    // framebuffer
+    vkCreateFramebuffer(
+        device: VkDevice,
+        pCreateInfo: *const VkFramebufferCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pFramebuffer: *mut VkFramebuffer
+    ) -> VkResult,
+
+    vkDestroyFramebuffer(
+        device: VkDevice,
+        framebuffer: VkFramebuffer,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    // command buffer
+    vkAllocateCommandBuffers(
+        device: VkDevice,
+        pAllocateInfo: *const VkCommandBufferAllocateInfo,
+        pCommandBuffers: *mut VkCommandBuffer
+    ) -> VkResult,
+
+    vkFreeCommandBuffers(
+        device: VkDevice,
+        commandPool: VkCommandPool,
+        commandBufferCount: u32,
+        pCommandBuffers: *const VkCommandBuffer
+    ) -> (),
+
+    vkBeginCommandBuffer(
+        commandBuffer: VkCommandBuffer,
+        pBeginInfo: *const VkCommandBufferBeginInfo
+    ) -> VkResult,
+
+    vkEndCommandBuffer(commandBuffer: VkCommandBuffer) -> VkResult,
+
+    vkResetCommandBuffer(
+        commandBuffer: VkCommandBuffer,
+        flags: VkCommandBufferResetFlagBits
+    ) -> VkResult,
+
+    vkCmdBindPipeline(
+        commandBuffer: VkCommandBuffer,
+        pipelineBindPoint: VkPipelineBindPoint,
+        pipeline: VkPipeline
+    ) -> (),
+
+    vkCmdSetViewport(
+        commandBuffer: VkCommandBuffer,
+        firstViewport: u32,
+        viewportCount: u32,
+        pViewports: *const VkViewport
+    ) -> (),
+
+    vkCmdSetScissor(
+        commandBuffer: VkCommandBuffer,
+        firstScissor: u32,
+        scissorCount: u32,
+        pScissors: *const VkRect2D
+    ) -> (),
+
+    vkCmdSetLineWidth(commandBuffer: VkCommandBuffer, lineWidth: f32) -> (),
+
+    vkCmdSetDepthBias(
+        commandBuffer: VkCommandBuffer,
+        depthBiasConstantFactor: f32,
+        depthBiasClamp: f32,
+        depthBiasSlopeFactor: f32
+    ) -> (),
+
+    vkCmdSetBlendConstants(commandBuffer: VkCommandBuffer, blendConstants: *const f32) -> (),
+
+    vkCmdSetDepthBounds(
+        commandBuffer: VkCommandBuffer,
+        minDepthBounds: f32,
+        maxDepthBounds: f32
+    ) -> (),
+
+    vkCmdSetStencilCompareMask(
+        commandBuffer: VkCommandBuffer,
+        faceMask: VkStencilFaceFlags,
+        compareMask: u32
+    ) -> (),
+
+    vkCmdSetStencilWriteMask(
+        commandBuffer: VkCommandBuffer,
+        faceMask: VkStencilFaceFlags,
+        writeMask: u32
+    ) -> (),
+
+    vkCmdSetStencilReference(
+        commandBuffer: VkCommandBuffer,
+        faceMask: VkStencilFaceFlags,
+        reference: u32
+    ) -> (),
+
+    vkCmdBindDescriptorSets(
+        commandBuffer: VkCommandBuffer,
+        pipelineBindPoint: VkPipelineBindPoint,
+        layout: VkPipelineLayout,
+        firstSet: u32,
+        descriptorSetCount: u32,
+        pDescriptorSets: *const VkDescriptorSet,
+        dynamicOffsetCount: u32,
+        pDynamicOffsets: *const u32
+    ) -> (),
+
+    vkCmdBindIndexBuffer(
+        commandBuffer: VkCommandBuffer,
+        buffer: VkBuffer,
+        offset: VkDeviceSize,
+        indexType: VkIndexType
+    ) -> (),
+
+    vkCmdBindVertexBuffers(
+        commandBuffer: VkCommandBuffer,
+        firstBinding: u32,
+        bindingCount: u32,
+        pBuffers: *const VkBuffer,
+        pOffsets: *const VkDeviceSize
+    ) -> (),
+
+    vkCmdDraw(
+        commandBuffer: VkCommandBuffer,
+        vertexCount: u32,
+        instanceCount: u32,
+        firstVertex: u32,
+        firstInstance: u32
+    ) -> (),
+
+    vkCmdDrawIndexed(
+        commandBuffer: VkCommandBuffer,
+        indexCount: u32,
+        instanceCount: u32,
+        firstIndex: u32,
+        vertexOffset: i32,
+        firstInstance: u32
+    ) -> (),
+
+    vkCmdDrawIndirect(
+        commandBuffer: VkCommandBuffer,
+        buffer: VkBuffer,
+        offset: VkDeviceSize,
+        drawCount: u32,
+        stride: u32
+    ) -> (),
+
+    vkCmdDrawIndexedIndirect(
+        commandBuffer: VkCommandBuffer,
+        buffer: VkBuffer,
+        offset: VkDeviceSize,
+        drawCount: u32,
+        stride: u32
+    ) -> (),
+
+    vkCmdDispatch(commandBuffer: VkCommandBuffer, x: u32, y: u32, z: u32) -> (),
+
+    vkCmdDispatchIndirect(
+        commandBuffer: VkCommandBuffer,
+        buffer: VkBuffer,
+        offset: VkDeviceSize
+    ) -> (),
+
+    vkCmdCopyBuffer(
+        commandBuffer: VkCommandBuffer,
+        srcBuffer: VkBuffer,
+        dstBuffer: VkBuffer,
+        regionCount: u32,
+        pRegions: *const VkBufferCopy
+    ) -> (),
+
+    vkCmdCopyImage(
+        commandBuffer: VkCommandBuffer,
+        srcImage: VkImage,
+        srcImageLayout: VkImageLayout,
+        dstImage: VkImage,
+        dstImageLayout: VkImageLayout,
+        regionCount: u32,
+        pRegions: *const VkImageCopy
+    ) -> (),
+
+    vkCmdBlitImage(
+        commandBuffer: VkCommandBuffer,
+        srcImage: VkImage,
+        srcImageLayout: VkImageLayout,
+        dstImage: VkImage,
+        dstImageLayout: VkImageLayout,
+        regionCount: u32,
+        pRegions: *const VkImageBlit,
+        filter: VkFilter
+    ) -> (),
+
+    vkCmdCopyBufferToImage(
+        commandBuffer: VkCommandBuffer,
+        srcBuffer: VkBuffer,
+        dstImage: VkImage,
+        dstImageLayout: VkImageLayout,
+        regionCount: u32,
+        pRegions: *const VkBufferImageCopy
+    ) -> (),
+
+    vkCmdCopyImageToBuffer(
+        commandBuffer: VkCommandBuffer,
+        srcImage: VkImage,
+        srcImageLayout: VkImageLayout,
+        dstBuffer: VkBuffer,
+        regionCount: u32,
+        pRegions: *const VkBufferImageCopy
+    ) -> (),
+
+    vkCmdUpdateBuffer(
+        commandBuffer: VkCommandBuffer,
+        dstBuffer: VkBuffer,
+        dstOffset: VkDeviceSize,
+        dataSize: VkDeviceSize,
+        pData: *const u32
+    ) -> (),
+
+    vkCmdFillBuffer(
+        commandBuffer: VkCommandBuffer,
+        dstBuffer: VkBuffer,
+        dstOffset: VkDeviceSize,
+        size: VkDeviceSize,
+        data: u32
+    ) -> (),
+
+    vkCmdClearColorImage(
+        commandBuffer: VkCommandBuffer,
+        image: VkImage,
+        imageLayout: VkImageLayout,
+        pColor: *const VkClearColorValue,
+        rangeCount: u32,
+        pRanges: *const VkImageSubresourceRange
+    ) -> (),
+
+    vkCmdClearDepthStencilImage(
+        commandBuffer: VkCommandBuffer,
+        image: VkImage,
+        imageLayout: VkImageLayout,
+        pDepthStencil: *const VkClearDepthStencilValue,
+        rangeCount: u32,
+        pRanges: *const VkImageSubresourceRange
+    ) -> (),
+
+    vkCmdClearAttachments(
+        commandBuffer: VkCommandBuffer,
+        attachmentCount: u32,
+        pAttachments: *const VkClearAttachment,
+        rectCount: u32,
+        pRects: *const VkClearRect
+    ) -> (),
+
+    vkCmdResolveImage(
+        commandBuffer: VkCommandBuffer,
+        srcImage: VkImage,
+        srcImageLayout: VkImageLayout,
+        dstImage: VkImage,
+        dstImageLayout: VkImageLayout,
+        regionCount: u32,
+        pRegions: *const VkImageResolve
+    ) -> (),
+
+    vkCmdSetEvent(
+        commandBuffer: VkCommandBuffer,
+        event: VkEvent,
+        stageMask: VkPipelineStageFlags
+    ) -> (),
+
+    vkCmdResetEvent(
+        commandBuffer: VkCommandBuffer,
+        event: VkEvent,
+        stageMask: VkPipelineStageFlags
+    ) -> (),
+
+    vkCmdWaitEvents(
+        commandBuffer: VkCommandBuffer,
+        eventCount: u32,
+        pEvents: *const VkEvent,
+        srcStageMask: VkPipelineStageFlags,
+        dstStageMask: VkPipelineStageFlags,
+        memoryBarrierCount: u32,
+        pMemoryBarriers: *const VkMemoryBarrier,
+        bufferMemoryBarrierCount: u32,
+        pBufferMemoryBarriers: *const VkBufferMemoryBarrier,
+        imageMemoryBarrierCount: u32,
+        pImageMemoryBarriers: *const VkImageMemoryBarrier
+    ) -> (),
+
+    vkCmdPipelineBarrier(
+        commandBuffer: VkCommandBuffer,
+        srcStageMask: VkPipelineStageFlagBits,
+        dstStageMask: VkPipelineStageFlagBits,
+        dependencyFlags: VkDependencyFlagBits,
+        memoryBarrierCount: u32,
+        pMemoryBarriers: *const VkMemoryBarrier,
+        bufferMemoryBarrierCount: u32,
+        pBufferMemoryBarriers: *const VkBufferMemoryBarrier,
+        imageMemoryBarrierCount: u32,
+        pImageMemoryBarriers: *const VkImageMemoryBarrier
+    ) -> (),
+
+    vkCmdBeginQuery(
+        commandBuffer: VkCommandBuffer,
+        queryPool: VkQueryPool,
+        query: u32,
+        flags: VkQueryControlFlagBits
+    ) -> (),
+
+    vkCmdEndQuery(commandBuffer: VkCommandBuffer, queryPool: VkQueryPool, query: u32) -> (),
+
+    vkCmdResetQueryPool(
+        commandBuffer: VkCommandBuffer,
+        queryPool: VkQueryPool,
+        firstQuery: u32,
+        queryCount: u32
+    ) -> (),
+
+    vkCmdWriteTimestamp(
+        commandBuffer: VkCommandBuffer,
+        pipelineStage: VkPipelineStageFlagBits,
+        queryPool: VkQueryPool,
+        query: u32
+    ) -> (),
+
+    vkCmdCopyQueryPoolResults(
+        commandBuffer: VkCommandBuffer,
+        queryPool: VkQueryPool,
+        firstQuery: u32,
+        queryCount: u32,
+        dstBuffer: VkBuffer,
+        dstOffset: VkDeviceSize,
+        stride: VkDeviceSize,
+        flags: VkQueryResultFlags
+    ) -> (),
+
+    vkCmdPushConstants(
+        commandBuffer: VkCommandBuffer,
+        layout: VkPipelineLayout,
+        stageFlags: VkShaderStageFlagBits,
+        offset: u32,
+        size: u32,
+        pValues: *const c_void
+    ) -> (),
+
+    vkCmdBeginRenderPass(
+        commandBuffer: VkCommandBuffer,
+        pRenderPassBegin: *const VkRenderPassBeginInfo,
+        contents: VkSubpassContents
+    ) -> (),
+
+    vkCmdNextSubpass(commandBuffer: VkCommandBuffer, contents: VkSubpassContents) -> (),
+
+    vkCmdEndRenderPass(commandBuffer: VkCommandBuffer) -> (),
+
+    vkCmdExecuteCommands(
+        commandBuffer: VkCommandBuffer,
+        commandBufferCount: u32,
+        pCommandBuffers: *const VkCommandBuffer
+    ) -> (),
+
+    // query pool
+    vkCreateQueryPool(
+        device: VkDevice,
+        pCreateInfo: *const VkQueryPoolCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pQueryPool: *mut VkQueryPool
+    ) -> VkResult,
+
+    vkDestroyQueryPool(
+        device: VkDevice,
+        queryPool: VkQueryPool,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    // pipeline cache
+    vkCreatePipelineCache(
+        device: VkDevice,
+        pCreateInfo: *const VkPipelineCacheCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pPipelineCache: *mut VkPipelineCache
+    ) -> VkResult,
+
+    vkDestroyPipelineCache(
+        device: VkDevice,
+        pipelineCache: VkPipelineCache,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    vkGetPipelineCacheData(
+        device: VkDevice,
+        pipelineCache: VkPipelineCache,
+        pDataSize: *mut usize,
+        pData: *mut c_void
+    ) -> VkResult,
+
+    vkMergePipelineCaches(
+        device: VkDevice,
+        dstCache: VkPipelineCache,
+        srcCacheCount: u32,
+        pSrcCaches: *const VkPipelineCache
+    ) -> VkResult,
+
+    // pipeline layout
+    vkCreatePipelineLayout(
+        device: VkDevice,
+        pCreateInfo: *const VkPipelineLayoutCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pPipelineLayout: *mut VkPipelineLayout
+    ) -> VkResult,
+
+    vkDestroyPipelineLayout(
+        device: VkDevice,
+        pipelineLayout: VkPipelineLayout,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    // pipeline
+    vkCreateGraphicsPipelines(
+        device: VkDevice,
+        pipelineCache: VkPipelineCache,
+        createInfoCount: u32,
+        pCreateInfos: *const VkGraphicsPipelineCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pPipelines: *mut VkPipeline
+    ) -> VkResult,
+
+    vkCreateComputePipelines(
+        device: VkDevice,
+        pipelineCache: VkPipelineCache,
+        createInfoCount: u32,
+        pCreateInfos: *const VkComputePipelineCreateInfo,
+        pAllocator: *const VkAllocationCallbacks,
+        pPipelines: *mut VkPipeline
+    ) -> VkResult,
+
+    vkDestroyPipeline(
+        device: VkDevice,
+        pipeline: VkPipeline,
+        pAllocator: *const VkAllocationCallbacks
+    ) -> (),
+
+    // device address
+    vkGetBufferDeviceAddress(
+        device: VkDevice,
+        pInfo: *const VkBufferDeviceAddressInfo
+    ) -> VkDeviceAddress,
+
+    vkFlushMappedMemoryRanges(
+        device: VkDevice,
+        memoryRangeCount: u32,
+        pMemoryRanges: *const VkMappedMemoryRange
+    ) -> VkResult,
+
+    vkInvalidateMappedMemoryRanges(
+        device: VkDevice,
+        memoryRangeCount: u32,
+        pMemoryRanges: *const VkMappedMemoryRange
+    ) -> VkResult,
+
+    vkGetBufferMemoryRequirements2(
+        device: VkDevice,
+        pInfo: *const VkBufferMemoryRequirementsInfo2,
+        pMemoryRequirements: *mut VkMemoryRequirements2
+    ) -> (),
+
+    vkGetImageMemoryRequirements2(
+        device: VkDevice,
+        pInfo: *const VkImageMemoryRequirementsInfo2,
+        pMemoryRequirements: *mut VkMemoryRequirements2
+    ) -> (),
+
+    vkBindBufferMemory2(
+        device: VkDevice,
+        bindInfoCount: u32,
+        pBindInfos: *const VkBindBufferMemoryInfo
+    ) -> VkResult,
+
+    vkBindImageMemory2(
+        device: VkDevice,
+        bindInfoCount: u32,
+        pBindInfos: *const VkBindImageMemoryInfo
+    ) -> VkResult,
+});
+
+impl DeviceFunctions {
+    pub fn new(instance_functions: &InstanceFunctions, device: VkDevice) -> Self {
+        Self::load(|name| unsafe {
+            instance_functions.vkGetDeviceProcAddr(device, name.as_ptr()) as *const std::ffi::c_void
+        })
+    }
+}
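`DeviceFunctions::new` above resolves every entry through `vkGetDeviceProcAddr`, so calls dispatch directly to the device's driver rather than through the loader trampoline. (Two small corrections were made in the table: `rectCount` was mis-capitalized as `VkRectCount` in `vkCmdClearAttachments`, and `vkDestroyEvent` was missing the space in `-> ()`.) A hedged usage sketch; `instance_functions` and `device` are assumed to have been created earlier, and VkQueue is assumed to expose a `NULL_HANDLE` constant analogous to the `VkInstance::NULL_HANDLE` used in entry.rs below:

```rust
// Assumptions: `instance_functions: InstanceFunctions` and `device: VkDevice`
// already exist; VkQueue::NULL_HANDLE mirrors VkInstance::NULL_HANDLE.
let device_functions = DeviceFunctions::new(&instance_functions, device);

let mut queue = VkQueue::NULL_HANDLE;
unsafe {
    // Fetch queue 0 of family 0, then drain it.
    device_functions.vkGetDeviceQueue(device, 0, 0, &mut queue);
    device_functions.vkQueueWaitIdle(queue);
}
```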
diff --git a/vulkan-sys/src/functions/core/entry.rs b/vulkan-sys/src/functions/core/entry.rs
new file mode 100644
index 0000000..95879d5
--- /dev/null
+++ b/vulkan-sys/src/functions/core/entry.rs
@@ -0,0 +1,32 @@
+use crate::prelude::*;
+use library_loader::load_function_ptrs;
+
+use std::os::raw::c_char;
+
+load_function_ptrs!(EntryFunctions, {
+    vkCreateInstance(
+        pCreateInfo: *const VkInstanceCreateInfo<'_>,
+        pAllocator: *const VkAllocationCallbacks,
+        pInstance: *mut VkInstance
+    ) -> VkResult,
+
+    vkEnumerateInstanceExtensionProperties(
+        pLayerName: *const c_char,
+        pPropertyCount: *mut u32,
+        pProperties: *mut VkExtensionProperties
+    ) -> VkResult,
+
+    vkEnumerateInstanceLayerProperties(
+        pPropertyCount: *mut u32,
+        pProperties: *mut VkLayerProperties
+    ) -> VkResult,
+});
+
+impl EntryFunctions {
+    pub fn new(static_functions: &StaticFunctions) -> Self {
+        EntryFunctions::load(|name| unsafe {
+            static_functions.vkGetInstanceProcAddr(VkInstance::NULL_HANDLE, name.as_ptr())
+                as *const std::ffi::c_void
+        })
+    }
+}
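`vkEnumerateInstanceLayerProperties` above follows Vulkan's usual two-call idiom: query the count first, then fill a buffer sized to match. A minimal sketch, assuming `static_functions: StaticFunctions` has already been loaded (see statics.rs further below):

```rust
let entry_functions = EntryFunctions::new(&static_functions);

let mut count = 0u32;
let mut layers: Vec<VkLayerProperties> = Vec::new();
unsafe {
    // First call: ask how many layers there are.
    entry_functions.vkEnumerateInstanceLayerProperties(&mut count, std::ptr::null_mut());
    layers.reserve_exact(count as usize);
    // Second call: fill the buffer, then claim the initialized length.
    entry_functions.vkEnumerateInstanceLayerProperties(&mut count, layers.as_mut_ptr());
    layers.set_len(count as usize);
}
```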
pMemoryProperties: *mut VkPhysicalDeviceMemoryProperties2 + ) -> (), +}); + +impl InstanceFunctions { + pub fn new(static_functions: &StaticFunctions, instance: VkInstance) -> InstanceFunctions { + InstanceFunctions::load(|name| unsafe { + static_functions.vkGetInstanceProcAddr(instance, name.as_ptr()) + as *const std::ffi::c_void + }) + } +} diff --git a/vulkan-sys/src/functions/core/maintenance3.rs b/vulkan-sys/src/functions/core/maintenance3.rs new file mode 100644 index 0000000..04d9aa6 --- /dev/null +++ b/vulkan-sys/src/functions/core/maintenance3.rs @@ -0,0 +1,18 @@ +use crate::prelude::*; +use library_loader::load_function_ptrs; + +load_function_ptrs!(Maintenance3Functions, { + vkGetDescriptorSetLayoutSupport( + device: VkDevice, + pCreateInfo: *const VkDescriptorSetLayoutCreateInfo, + pSupport: *mut VkDescriptorSetLayoutSupport + ) -> (), +}); + +impl Maintenance3Functions { + pub fn new(instance_functions: &InstanceFunctions, device: VkDevice) -> Self { + Self::load(|name| unsafe { + instance_functions.vkGetDeviceProcAddr(device, name.as_ptr()) as *const std::ffi::c_void + }) + } +} diff --git a/vulkan-sys/src/functions/core/mod.rs b/vulkan-sys/src/functions/core/mod.rs new file mode 100644 index 0000000..580ab68 --- /dev/null +++ b/vulkan-sys/src/functions/core/mod.rs @@ -0,0 +1,7 @@ +pub mod device; +pub mod entry; +pub mod instance; +pub mod maintenance3; +pub mod statics; + +pub mod prelude; diff --git a/vulkan-sys/src/functions/core/prelude.rs b/vulkan-sys/src/functions/core/prelude.rs new file mode 100644 index 0000000..0285873 --- /dev/null +++ b/vulkan-sys/src/functions/core/prelude.rs @@ -0,0 +1,5 @@ +pub use super::device::*; +pub use super::entry::*; +pub use super::instance::*; +pub use super::maintenance3::*; +pub use super::statics::*; diff --git a/vulkan-sys/src/functions/core/statics.rs b/vulkan-sys/src/functions/core/statics.rs new file mode 100644 index 0000000..7228323 --- /dev/null +++ b/vulkan-sys/src/functions/core/statics.rs @@ -0,0 +1,17 @@ +use crate::prelude::*; +use library_loader::load_function_ptrs_from_lib; + +use std::os::raw::c_char; + +#[cfg(target_os = "linux")] +const VULKAN_LIB: &str = "libvulkan.so"; + +#[cfg(target_os = "macos")] +const VULKAN_LIB: &str = "libMoltenVK.dylib"; + +#[cfg(target_os = "windows")] +const VULKAN_LIB: &str = "vulkan-1.dll"; + +load_function_ptrs_from_lib!(StaticFunctions, VULKAN_LIB, { + vkGetInstanceProcAddr(Instance: VkInstance, pName: *const c_char) -> PFN_vkVoidFunction, +}); diff --git a/vulkan-sys/src/functions/ext/debug_report_callback.rs b/vulkan-sys/src/functions/ext/debug_report_callback.rs new file mode 100644 index 0000000..a9e44ec --- /dev/null +++ b/vulkan-sys/src/functions/ext/debug_report_callback.rs @@ -0,0 +1,43 @@ +use crate::prelude::*; +use library_loader::load_function_ptrs; + +use std::os::raw::c_char; +use std::os::raw::c_void; + +pub type PFN_vkDebugReportCallbackEXT = extern "system" fn( + VkDebugReportFlagsEXT, + VkDebugReportObjectTypeEXT, + u64, + usize, + i32, + *const c_char, + *const c_char, + *mut c_void, +) -> VkBool32; + +load_function_ptrs!(DebugReportCallbackFunctions, { + vkCreateDebugReportCallbackEXT( + instance: VkInstance, + createInfo: *const VkDebugReportCallbackCreateInfoEXT, + allocationCallbacks: *const VkAllocationCallbacks, + debugReportCallback: *mut VkDebugReportCallbackEXT + ) -> VkResult, + + vkDestroyDebugReportCallbackEXT( + instance: VkInstance, + debugReportCallback: VkDebugReportCallbackEXT, + allocationCallbacks: *const VkAllocationCallbacks + ) -> (), +}); + 
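For reference, a callback matching the `PFN_vkDebugReportCallbackEXT` signature above could look like the following sketch (`VK_FALSE` is assumed to be the crate's `VkBool32` false constant):

```rust
// Logs every report; returning VK_FALSE tells the loader not to abort the
// call that triggered the report.
extern "system" fn debug_report_callback(
    _flags: VkDebugReportFlagsEXT,
    _object_type: VkDebugReportObjectTypeEXT,
    _object: u64,
    _location: usize,
    _message_code: i32,
    _layer_prefix: *const c_char,
    message: *const c_char,
    _user_data: *mut c_void,
) -> VkBool32 {
    if !message.is_null() {
        let text = unsafe { std::ffi::CStr::from_ptr(message) };
        eprintln!("vk debug report: {}", text.to_string_lossy());
    }
    VK_FALSE
}
```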
+impl DebugReportCallbackFunctions { + pub fn new( + static_functions: &StaticFunctions, + instance: VkInstance, + ) -> DebugReportCallbackFunctions { + DebugReportCallbackFunctions::load(|name| unsafe { + static_functions.vkGetInstanceProcAddr(instance, name.as_ptr()) + as *const std::ffi::c_void + }) + } +} diff --git a/vulkan-sys/src/functions/ext/debug_utils_messenger.rs b/vulkan-sys/src/functions/ext/debug_utils_messenger.rs new file mode 100644 index 0000000..58dd6a1 --- /dev/null +++ b/vulkan-sys/src/functions/ext/debug_utils_messenger.rs @@ -0,0 +1,38 @@ +use crate::prelude::*; +use library_loader::load_function_ptrs; + +use std::os::raw::c_void; + +pub type PFN_vkDebugUtilsMessengerCallbackEXT = extern "system" fn( + VkDebugUtilsMessageSeverityFlagsEXT, + VkDebugUtilsMessageTypeFlagsEXT, + *const VkDebugUtilsMessengerCallbackDataEXT, + *mut c_void, +) -> VkBool32; + +load_function_ptrs!(DebugUtilsMessengerFunctions, { + vkCreateDebugUtilsMessengerEXT( + instance: VkInstance, + createInfo: *const VkDebugUtilsMessengerCreateInfoEXT, + allocationCallbacks: *const VkAllocationCallbacks, + debugUtilsMessenger: *mut VkDebugUtilsMessengerEXT + ) -> VkResult, + + vkDestroyDebugUtilsMessengerEXT( + instance: VkInstance, + debugUtilsMessenger: VkDebugUtilsMessengerEXT, + allocationCallbacks: *const VkAllocationCallbacks + ) -> (), +}); + +impl DebugUtilsMessengerFunctions { + pub fn new( + static_functions: &StaticFunctions, + instance: VkInstance, + ) -> DebugUtilsMessengerFunctions { + DebugUtilsMessengerFunctions::load(|name| unsafe { + static_functions.vkGetInstanceProcAddr(instance, name.as_ptr()) + as *const std::ffi::c_void + }) + } +} diff --git a/vulkan-sys/src/functions/ext/mod.rs b/vulkan-sys/src/functions/ext/mod.rs new file mode 100644 index 0000000..5c59037 --- /dev/null +++ b/vulkan-sys/src/functions/ext/mod.rs @@ -0,0 +1,4 @@ +pub mod debug_report_callback; +pub mod debug_utils_messenger; + +pub mod prelude; diff --git a/vulkan-sys/src/functions/ext/prelude.rs b/vulkan-sys/src/functions/ext/prelude.rs new file mode 100644 index 0000000..39a0edb --- /dev/null +++ b/vulkan-sys/src/functions/ext/prelude.rs @@ -0,0 +1,2 @@ +pub use super::debug_report_callback::*; +pub use super::debug_utils_messenger::*; diff --git a/vulkan-sys/src/functions/khr/acceleration_structure.rs b/vulkan-sys/src/functions/khr/acceleration_structure.rs new file mode 100644 index 0000000..2e8a3c5 --- /dev/null +++ b/vulkan-sys/src/functions/khr/acceleration_structure.rs @@ -0,0 +1,152 @@ +use crate::prelude::*; +use library_loader::load_function_ptrs; + +use std::os::raw::c_void; + +load_function_ptrs!(AccelerationStructureFunctions, { + vkBuildAccelerationStructuresKHR( + device: VkDevice, + deferredOperation: VkDeferredOperationKHR, + infoCount: u32, + pInfos: *const VkAccelerationStructureBuildGeometryInfoKHR, + ppBuildRangeInfos: *const *const VkAccelerationStructureBuildRangeInfoKHR + ) -> VkResult, + + vkCmdBuildAccelerationStructuresIndirectKHR( + commandBuffer: VkCommandBuffer, + infoCount: u32, + pInfo: *const VkAccelerationStructureBuildGeometryInfoKHR, + pIndirectDeviceAddresses: *const VkDeviceAddress, + pIndirectStrides: *const u32, + ppMaxPrimitiveCounts: *const *const u32 + ) -> (), + + vkCmdBuildAccelerationStructuresKHR( + commandBuffer: VkCommandBuffer, + infoCount: u32, + pInfos: *const VkAccelerationStructureBuildGeometryInfoKHR, + ppBuildRangeInfos: *const *const VkAccelerationStructureBuildRangeInfoKHR + ) -> (), + + vkCmdCopyAccelerationStructureKHR( + commandBuffer: 
VkCommandBuffer, + pInfo: *const VkCopyAccelerationStructureInfoKHR + ) -> (), + + vkCmdCopyAccelerationStructureToMemoryKHR( + commandBuffer: VkCommandBuffer, + pInfo: *const VkCopyAccelerationStructureToMemoryInfoKHR + ) -> (), + + vkCmdCopyMemoryToAccelerationStructureKHR( + commandBuffer: VkCommandBuffer, + pInfo: *const VkCopyMemoryToAccelerationStructureInfoKHR + ) -> (), + + vkCmdWriteAccelerationStructuresPropertiesKHR( + commandBuffer: VkCommandBuffer, + accelerationStructureCount: u32, + pAccelerationStructures: *const VkAccelerationStructureKHR, + queryType: VkQueryType, + queryPool: VkQueryPool, + firstQuery: u32 + ) -> (), + + vkCopyAccelerationStructureKHR( + device: VkDevice, + deferredOperation: VkDeferredOperationKHR, + pInfo: *const VkCopyAccelerationStructureInfoKHR + ) -> VkResult, + + vkCopyAccelerationStructureToMemoryKHR( + device: VkDevice, + deferredOperation: VkDeferredOperationKHR, + pInfo: *const VkCopyAccelerationStructureToMemoryInfoKHR + ) -> VkResult, + + vkCopyMemoryToAccelerationStructureKHR( + device: VkDevice, + deferredOperation: VkDeferredOperationKHR, + pInfo: *const VkCopyMemoryToAccelerationStructureInfoKHR + ) -> VkResult, + + vkCreateAccelerationStructureKHR( + device: VkDevice, + pCreateInfo: *const VkAccelerationStructureCreateInfoKHR, + pAllocator: *const VkAllocationCallbacks, + pAccelerationStructure: *mut VkAccelerationStructureKHR + ) -> VkResult, + + vkDestroyAccelerationStructureKHR( + device: VkDevice, + accelerationStructure: VkAccelerationStructureKHR, + pAllocator: *const VkAllocationCallbacks + ) -> (), + + vkGetAccelerationStructureBuildSizesKHR( + device: VkDevice, + buildType: VkAccelerationStructureBuildTypeKHR, + pBuildInfo: *const VkAccelerationStructureBuildGeometryInfoKHR, + pMaxPrimitiveCounts: *const u32, + pSizeInfo: *mut VkAccelerationStructureBuildSizesInfoKHR + ) -> (), + + vkGetAccelerationStructureDeviceAddressKHR( + device: VkDevice, + pInfo: *const VkAccelerationStructureDeviceAddressInfoKHR + ) -> VkDeviceAddress, + + vkGetDeviceAccelerationStructureCompatibilityKHR( + device: VkDevice, + pVersionInfo: *const VkAccelerationStructureVersionInfoKHR, + pCompatibility: *mut VkAccelerationStructureCompatibilityKHR + ) -> (), + + vkWriteAccelerationStructuresPropertiesKHR( + device: VkDevice, + accelerationStructureCount: u32, + pAccelerationStructures: *const VkAccelerationStructureKHR, + queryType: VkQueryType, + dataSize: isize, + pData: *mut c_void, + stride: isize + ) -> VkResult, +}); + +impl AccelerationStructureFunctions { + pub fn new(instance_functions: &InstanceFunctions, device: VkDevice) -> Self { + Self::load(|name| unsafe { + instance_functions.vkGetDeviceProcAddr(device, name.as_ptr()) as *const std::ffi::c_void + }) + } +} + +// vkBindAccelerationStructureMemoryKHR( +// device: VkDevice, +// bindInfoCount: u32, +// pBindInfos: *const VkBindAccelerationStructureMemoryInfoKHR +// ) -> VkResult, + +// vkCreateRayTracingPipelinesKHR( +// device: VkDevice, +// pipelineCache: VkPipelineCache, +// createInfoCount: u32, +// pCreateInfos: *const VkRayTracingPipelineCreateInfoKHR, +// pAllocator: *const VkAllocationCallbacks, +// pPipelines: *mut VkPipeline +// ) -> VkResult, + +// vkGetAccelerationStructureMemoryRequirementsKHR( +// device: VkDevice, +// pInfo: *const VkAccelerationStructureMemoryRequirementsInfoKHR, +// pMemoryRequirements: *mut VkMemoryRequirements2 +// ) -> (), + +// vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( +// device: VkDevice, +// pipeline: VkPipeline, +// firstGroup: u32, +// 
groupCount: u32, +// dataSize: isize, +// pData: *mut c_void +// ) -> VkResult, diff --git a/vulkan-sys/src/functions/khr/deferred_operations.rs b/vulkan-sys/src/functions/khr/deferred_operations.rs new file mode 100644 index 0000000..7cd8f20 --- /dev/null +++ b/vulkan-sys/src/functions/khr/deferred_operations.rs @@ -0,0 +1,39 @@ +use crate::prelude::*; +use library_loader::load_function_ptrs; + +load_function_ptrs!(DeferredOperationsFunctions, { + vkCreateDeferredOperationKHR( + device: VkDevice, + pAllocator: *const VkAllocationCallbacks, + pDeferredOperation: *mut VkDeferredOperationKHR + ) -> VkResult, + + vkDestroyDeferredOperationKHR( + device: VkDevice, + deferredOperation: VkDeferredOperationKHR, + pAllocator: *const VkAllocationCallbacks + ) -> (), + + vkGetDeferredOperationMaxConcurrencyKHR( + device: VkDevice, + deferredOperation: VkDeferredOperationKHR + ) -> u32, + + vkGetDeferredOperationResultKHR( + device: VkDevice, + deferredOperation: VkDeferredOperationKHR + ) -> VkResult, + + vkDeferredOperationJoinKHR( + device: VkDevice, + deferredOperation: VkDeferredOperationKHR + ) -> VkResult, +}); + +impl DeferredOperationsFunctions { + pub fn new(instance_functions: &InstanceFunctions, device: VkDevice) -> Self { + Self::load(|name| unsafe { + instance_functions.vkGetDeviceProcAddr(device, name.as_ptr()) as *const std::ffi::c_void + }) + } +} diff --git a/vulkan-sys/src/functions/khr/device_wsi.rs b/vulkan-sys/src/functions/khr/device_wsi.rs new file mode 100644 index 0000000..f7226f4 --- /dev/null +++ b/vulkan-sys/src/functions/khr/device_wsi.rs @@ -0,0 +1,43 @@ +use crate::prelude::*; +use library_loader::load_function_ptrs; + +load_function_ptrs!(DeviceWSIFunctions, { + vkQueuePresentKHR(queue: VkQueue, pPresentInfo: *const VkPresentInfoKHR) -> VkResult, + + vkCreateSwapchainKHR( + device: VkDevice, + pCreateInfo: *const VkSwapchainCreateInfoKHR, + pAllocator: *const VkAllocationCallbacks, + pSwapchain: *mut VkSwapchainKHR + ) -> VkResult, + + vkDestroySwapchainKHR( + device: VkDevice, + swapchain: VkSwapchainKHR, + pAllocator: *const VkAllocationCallbacks + ) -> (), + + vkGetSwapchainImagesKHR( + device: VkDevice, + swapchain: VkSwapchainKHR, + pSwapchainImageCount: *mut u32, + pSwapchainImages: *mut VkImage + ) -> VkResult, + + vkAcquireNextImageKHR( + device: VkDevice, + swapchain: VkSwapchainKHR, + timeout: u64, + semaphore: VkSemaphore, + fence: VkFence, + pImageIndex: *mut u32 + ) -> VkResult, +}); + +impl DeviceWSIFunctions { + pub fn new(instance_functions: &InstanceFunctions, device: VkDevice) -> Self { + Self::load(|name| unsafe { + instance_functions.vkGetDeviceProcAddr(device, name.as_ptr()) as *const std::ffi::c_void + }) + } +} diff --git a/vulkan-sys/src/functions/khr/instance_wsi.rs b/vulkan-sys/src/functions/khr/instance_wsi.rs new file mode 100644 index 0000000..4e00c6c --- /dev/null +++ b/vulkan-sys/src/functions/khr/instance_wsi.rs @@ -0,0 +1,176 @@ +use std::ffi::c_void; + +use crate::prelude::*; +use library_loader::load_function_ptrs; + +load_function_ptrs!(InstanceWSIFunctions, { + vkGetPhysicalDeviceSurfaceSupportKHR( + physicalDevice: VkPhysicalDevice, + queueFamilyIndex: u32, + surface: VkSurfaceKHR, + pSupported: *mut VkBool32 + ) -> VkResult, + + vkGetPhysicalDeviceSurfaceCapabilitiesKHR( + physicalDevice: VkPhysicalDevice, + surface: VkSurfaceKHR, + pSurfaceCapabilities: *mut VkSurfaceCapabilitiesKHR + ) -> VkResult, + + vkGetPhysicalDeviceSurfaceFormatsKHR( + physicalDevice: VkPhysicalDevice, + surface: VkSurfaceKHR, + pSurfaceFormatCount: *mut 
u32, + pSurfaceFormats: *mut VkSurfaceFormatKHR + ) -> VkResult, + + vkGetPhysicalDeviceSurfacePresentModesKHR( + physicalDevice: VkPhysicalDevice, + surface: VkSurfaceKHR, + pPresentModeCount: *mut u32, + pPresentModes: *mut VkPresentModeKHR + ) -> VkResult, + + vkDestroySurfaceKHR( + Instance: VkInstance, + surface: VkSurfaceKHR, + pAllocator: *const VkAllocationCallbacks + ) -> (), + + vkCreateXlibSurfaceKHR( + Instance: VkInstance, + pCreateInfo: *const VkXlibSurfaceCreateInfoKHR, + pAllocator: *const VkAllocationCallbacks, + pSurface: *mut VkSurfaceKHR + ) -> VkResult, + + vkCreateXcbSurfaceKHR( + Instance: VkInstance, + pCreateInfo: *const VkXcbSurfaceCreateInfoKHR, + pAllocator: *const VkAllocationCallbacks, + pSurface: *mut VkSurfaceKHR + ) -> VkResult, + + vkCreateWaylandSurfaceKHR( + Instance: VkInstance, + pCreateInfo: *const VkWaylandSurfaceCreateInfoKHR, + pAllocator: *const VkAllocationCallbacks, + pSurface: *mut VkSurfaceKHR + ) -> VkResult, + + // vkCreateMirSurfaceKHR( + // Instance: VkInstance, + // pCreateInfo: *const VkMirSurfaceCreateInfoKHR, + // pAllocator: *const VkAllocationCallbacks, + // pSurface: *mut VkSurfaceKHR + // ) -> VkResult, + + // vkCreateAndroidSurfaceKHR( + // Instance: VkInstance, + // pCreateInfo: *const VkAndroidSurfaceCreateInfoKHR, + // pAllocator: *const VkAllocationCallbacks, + // pSurface: *mut VkSurfaceKHR + // ) -> VkResult, + + vkCreateWin32SurfaceKHR( + Instance: VkInstance, + pCreateInfo: *const VkWin32SurfaceCreateInfoKHR, + pAllocator: *const VkAllocationCallbacks, + pSurface: *mut VkSurfaceKHR + ) -> VkResult, + + vkCreateMacOSSurfaceMVK( + Instance: VkInstance, + pCreateInfo: *const VkMacOSSurfaceCreateInfoMVK, + pAllocator: *const VkAllocationCallbacks, + pSurface: *mut VkSurfaceKHR + ) -> VkResult, + + vkGetPhysicalDeviceXlibPresentationSupportKHR( + physicalDevice: VkPhysicalDevice, + queueFamilyIndex: u32, + dpy: *mut c_void, + visualID: u32 + ) -> VkBool32, + + vkGetPhysicalDeviceXcbPresentationSupportKHR( + physicalDevice: VkPhysicalDevice, + queueFamilyIndex: u32, + connection: *mut c_void, + visual_id: u32 + ) -> VkBool32, + + vkGetPhysicalDeviceWaylandPresentationSupportKHR( + physicalDevice: VkPhysicalDevice, + queueFamilyIndex: u32, + display: *mut c_void + ) -> VkBool32, + + vkGetPhysicalDeviceMirPresentationSupportKHR( + physicalDevice: VkPhysicalDevice, + queueFamilyIndex: u32, + connection: *mut c_void + ) -> VkBool32, + + vkGetPhysicalDeviceWin32PresentationSupportKHR( + physicalDevice: VkPhysicalDevice, + queueFamilyIndex: u32 + ) -> VkBool32, + + vkGetPhysicalDeviceDisplayPropertiesKHR( + physicalDevice: VkPhysicalDevice, + pPropertyCount: *mut u32, + pProperties: *mut VkDisplayPropertiesKHR + ) -> VkResult, + + vkGetPhysicalDeviceDisplayPlanePropertiesKHR( + physicalDevice: VkPhysicalDevice, + pPropertyCount: *mut u32, + pProperties: *mut VkDisplayPlanePropertiesKHR + ) -> VkResult, + + vkGetDisplayPlaneSupportedDisplaysKHR( + physicalDevice: VkPhysicalDevice, + planeIndex: u32, + pDisplayCount: *mut u32, + pDisplays: *mut VkDisplayKHR + ) -> VkResult, + + vkGetDisplayModePropertiesKHR( + physicalDevice: VkPhysicalDevice, + display: VkDisplayKHR, + pPropertyCount: *mut u32, + pProperties: *mut VkDisplayModePropertiesKHR + ) -> VkResult, + + vkCreateDisplayModeKHR( + physicalDevice: VkPhysicalDevice, + display: VkDisplayKHR, + pCreateInfo: *const VkDisplayModeCreateInfoKHR, + pAllocator: *const VkAllocationCallbacks, + pMode: *mut VkDisplayModeKHR + ) -> VkResult, + + vkGetDisplayPlaneCapabilitiesKHR( + 
physicalDevice: VkPhysicalDevice, + mode: VkDisplayModeKHR, + planeIndex: u32, + pCapabilities: *mut VkDisplayPlaneCapabilitiesKHR + ) -> VkResult, + + vkCreateDisplayPlaneSurfaceKHR( + Instance: VkInstance, + pCreateInfo: *const VkDisplaySurfaceCreateInfoKHR, + pAllocator: *const VkAllocationCallbacks, + pSurface: *mut VkSurfaceKHR + ) -> VkResult, +}); + +impl InstanceWSIFunctions { + pub fn new(static_functions: &StaticFunctions, instance: VkInstance) -> InstanceWSIFunctions { + InstanceWSIFunctions::load(|name| unsafe { + static_functions.vkGetInstanceProcAddr(instance, name.as_ptr()) + as *const std::ffi::c_void + }) + } +} diff --git a/vulkan-sys/src/functions/khr/mod.rs b/vulkan-sys/src/functions/khr/mod.rs new file mode 100644 index 0000000..8d2ae64 --- /dev/null +++ b/vulkan-sys/src/functions/khr/mod.rs @@ -0,0 +1,8 @@ +pub mod acceleration_structure; +pub mod deferred_operations; +pub mod device_wsi; +pub mod instance_wsi; +pub mod physical_device_properties2; +pub mod ray_tracing_pipeline; + +pub mod prelude; diff --git a/vulkan-sys/src/functions/khr/physical_device_properties2.rs b/vulkan-sys/src/functions/khr/physical_device_properties2.rs new file mode 100644 index 0000000..dadfba0 --- /dev/null +++ b/vulkan-sys/src/functions/khr/physical_device_properties2.rs @@ -0,0 +1,55 @@ +use crate::prelude::*; +use library_loader::load_function_ptrs; + +load_function_ptrs!(PhysicalDeviceProperties2Functions, { + vkGetPhysicalDeviceProperties2KHR( + physicalDevice: VkPhysicalDevice, + pProperties: *mut VkPhysicalDeviceProperties2KHR + ) -> (), + + vkGetPhysicalDeviceFeatures2KHR( + physicalDevice: VkPhysicalDevice, + pFeatures: *mut VkPhysicalDeviceFeatures2KHR + ) -> (), + + vkGetPhysicalDeviceFormatProperties2KHR( + physicalDevice: VkPhysicalDevice, + pFormatProperties: *mut VkFormatProperties2KHR<'_> + ) -> (), + + vkGetPhysicalDeviceImageFormatProperties2KHR( + physicalDevice: VkPhysicalDevice, + pImageFormatInfo: *const VkPhysicalDeviceImageFormatInfo2KHR, + pImageFormatProperties: *mut VkImageFormatProperties2KHR<'_> + ) -> VkResult, + + vkGetPhysicalDeviceQueueFamilyProperties2KHR( + physicalDevice: VkPhysicalDevice, + pQueueFamilyPropertiesCount: *mut u32, + pQueueFamilyProperties: *mut VkQueueFamilyProperties2KHR + ) -> (), + + vkGetPhysicalDeviceMemoryProperties2KHR( + physicalDevice: VkPhysicalDevice, + pMemoryProperties: *mut VkPhysicalDeviceMemoryProperties2KHR + ) -> (), + + vkGetPhysicalDeviceSparseImageFormatProperties2KHR( + physicalDevice: VkPhysicalDevice, + pFormatInfo: *const VkPhysicalDeviceSparseImageFormatInfo2KHR, + pPropertyCount: *mut u32, + pProperties: *mut VkSparseImageFormatProperties2KHR + ) -> (), +}); + +impl PhysicalDeviceProperties2Functions { + pub fn new( + static_functions: &StaticFunctions, + instance: VkInstance, + ) -> PhysicalDeviceProperties2Functions { + PhysicalDeviceProperties2Functions::load(|name| unsafe { + static_functions.vkGetInstanceProcAddr(instance, name.as_ptr()) + as *const std::ffi::c_void + }) + } +} diff --git a/vulkan-sys/src/functions/khr/prelude.rs b/vulkan-sys/src/functions/khr/prelude.rs new file mode 100644 index 0000000..5a0ba10 --- /dev/null +++ b/vulkan-sys/src/functions/khr/prelude.rs @@ -0,0 +1,6 @@ +pub use super::acceleration_structure::*; +pub use super::deferred_operations::*; +pub use super::device_wsi::*; +pub use super::instance_wsi::*; +pub use super::physical_device_properties2::*; +pub use super::ray_tracing_pipeline::*; diff --git a/vulkan-sys/src/functions/khr/ray_tracing_pipeline.rs 
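The properties2/features2 entry points write through out-parameters; a sketch of a features query (the `sType` field layout and the `VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2` constant are assumptions about the crate's structs/enums modules):

```rust
// Zero the struct, stamp the correct sType, then let the driver fill it in.
unsafe fn query_features2(
    fns: &PhysicalDeviceProperties2Functions,
    physical_device: VkPhysicalDevice,
) -> VkPhysicalDeviceFeatures2KHR {
    let mut features: VkPhysicalDeviceFeatures2KHR = std::mem::zeroed();
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    (fns.vkGetPhysicalDeviceFeatures2KHR)(physical_device, &mut features);
    features
}
```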
b/vulkan-sys/src/functions/khr/ray_tracing_pipeline.rs new file mode 100644 index 0000000..295c617 --- /dev/null +++ b/vulkan-sys/src/functions/khr/ray_tracing_pipeline.rs @@ -0,0 +1,74 @@ +use crate::prelude::*; +use library_loader::load_function_ptrs; + +use std::os::raw::c_void; + +load_function_ptrs!(RayTracingPipelineFunctions, { + vkCmdSetRayTracingPipelineStackSizeKHR( + command_buffer: VkCommandBuffer, + pipelineStackSize: u32 + ) -> (), + + vkCmdTraceRaysIndirectKHR( + commandBuffer: VkCommandBuffer, + pRaygenShaderBindingTable: *const VkStridedDeviceAddressRegionKHR, + pMissShaderBindingTable: *const VkStridedDeviceAddressRegionKHR, + pHitShaderBindingTable: *const VkStridedDeviceAddressRegionKHR, + pCallableShaderBindingTable: *const VkStridedDeviceAddressRegionKHR, + indirectDeviceAddress: VkDeviceAddress + ) -> (), + + vkCmdTraceRaysKHR( + commandBuffer: VkCommandBuffer, + pRaygenShaderBindingTable: *const VkStridedDeviceAddressRegionKHR, + pMissShaderBindingTable: *const VkStridedDeviceAddressRegionKHR, + pHitShaderBindingTable: *const VkStridedDeviceAddressRegionKHR, + pCallableShaderBindingTable: *const VkStridedDeviceAddressRegionKHR, + width: u32, + height: u32, + depth: u32 + ) -> (), + + vkCreateRayTracingPipelinesKHR( + device: VkDevice, + deferredOperation: VkDeferredOperationKHR, + pipelineCache: VkPipelineCache, + createInfoCount: u32, + pCreateInfos: *const VkRayTracingPipelineCreateInfoKHR, + pAllocator: *const VkAllocationCallbacks, + pPipelines: *mut VkPipeline + ) -> VkResult, + + vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( + device: VkDevice, + pipeline: VkPipeline, + firstGroup: u32, + groupCount: u32, + dataSize: isize, + pData: *mut c_void + ) -> VkResult, + + vkGetRayTracingShaderGroupHandlesKHR( + device: VkDevice, + pipeline: VkPipeline, + firstGroup: u32, + groupCount: u32, + dataSize: isize, + pData: *mut c_void + ) -> VkResult, + + vkGetRayTracingShaderGroupStackSizeKHR( + device: VkDevice, + pipeline: VkPipeline, + group: u32, + groupShader: VkShaderGroupShaderKHR + ) -> VkDeviceSize, +}); + +impl RayTracingPipelineFunctions { + pub fn new(instance_functions: &InstanceFunctions, device: VkDevice) -> Self { + Self::load(|name| unsafe { + instance_functions.vkGetDeviceProcAddr(device, name.as_ptr()) as *const std::ffi::c_void + }) + } +} diff --git a/vulkan-sys/src/functions/mod.rs b/vulkan-sys/src/functions/mod.rs new file mode 100644 index 0000000..510c170 --- /dev/null +++ b/vulkan-sys/src/functions/mod.rs @@ -0,0 +1,5 @@ +pub mod core; +pub mod ext; +pub mod khr; + +pub mod prelude; diff --git a/vulkan-sys/src/functions/prelude.rs b/vulkan-sys/src/functions/prelude.rs new file mode 100644 index 0000000..2a4258b --- /dev/null +++ b/vulkan-sys/src/functions/prelude.rs @@ -0,0 +1,3 @@ +pub use super::core::prelude::*; +pub use super::ext::prelude::*; +pub use super::khr::prelude::*; diff --git a/vulkan-sys/src/lib.rs b/vulkan-sys/src/lib.rs new file mode 100644 index 0000000..f9369f4 --- /dev/null +++ b/vulkan-sys/src/lib.rs @@ -0,0 +1,30 @@ +#![allow(non_upper_case_globals)] +#![allow(non_snake_case)] +#![allow(non_camel_case_types)] +#![deny(rust_2018_idioms)] + +pub mod prelude; + +pub mod custom; +pub mod enums; +pub mod structs; +pub mod types; + +pub mod functions; + +pub fn VK_MAKE_VERSION(major: u32, minor: u32, patch: u32) -> u32 { + (major as u32) << 22 | (minor as u32) << 12 | (patch as u32) +} + +pub fn VK_GET_VERSION(version: u32) -> (u32, u32, u32) { + let major = version >> 22; + let minor = (version >> 12) & 0x03FF; + let patch 
= version & 0x0FFF; + + (major, minor, patch) +} + +#[test] +fn check_vk_version() { + assert_eq!((1, 2, 135), VK_GET_VERSION(VK_MAKE_VERSION(1, 2, 135))); +} diff --git a/vulkan-sys/src/prelude.rs b/vulkan-sys/src/prelude.rs new file mode 100644 index 0000000..a990366 --- /dev/null +++ b/vulkan-sys/src/prelude.rs @@ -0,0 +1,7 @@ +pub use crate::custom::prelude::*; +pub use crate::enums::prelude::*; +pub use crate::functions::prelude::*; +pub use crate::structs::prelude::*; +pub use crate::types::prelude::*; + +pub use crate::{VK_GET_VERSION, VK_MAKE_VERSION}; diff --git a/vulkan-sys/src/structs/amd/mod.rs b/vulkan-sys/src/structs/amd/mod.rs new file mode 100644 index 0000000..fac623e --- /dev/null +++ b/vulkan-sys/src/structs/amd/mod.rs @@ -0,0 +1,3 @@ +pub mod pipelinerasterizationstaterasterizationorderamd; + +pub mod prelude; diff --git a/vulkan-sys/src/structs/amd/pipelinerasterizationstaterasterizationorderamd.rs b/vulkan-sys/src/structs/amd/pipelinerasterizationstaterasterizationorderamd.rs new file mode 100644 index 0000000..aed4d49 --- /dev/null +++ b/vulkan-sys/src/structs/amd/pipelinerasterizationstaterasterizationorderamd.rs @@ -0,0 +1,21 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkPipelineRasterizationStateRasterizationOrderAMD { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub rasterizationOrder: VkRasterizationOrderAMD, +} + +impl VkPipelineRasterizationStateRasterizationOrderAMD { + pub fn new(rasterization_order: VkRasterizationOrderAMD) -> Self { + VkPipelineRasterizationStateRasterizationOrderAMD { + sType: VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_RASTERIZATION_ORDER_AMD, + pNext: ptr::null(), + rasterizationOrder: rasterization_order, + } + } +} diff --git a/vulkan-sys/src/structs/amd/prelude.rs b/vulkan-sys/src/structs/amd/prelude.rs new file mode 100644 index 0000000..bb1ca7e --- /dev/null +++ b/vulkan-sys/src/structs/amd/prelude.rs @@ -0,0 +1 @@ +pub use super::pipelinerasterizationstaterasterizationorderamd::*; diff --git a/vulkan-sys/src/structs/core/allocationcallback.rs b/vulkan-sys/src/structs/core/allocationcallback.rs new file mode 100644 index 0000000..e009c67 --- /dev/null +++ b/vulkan-sys/src/structs/core/allocationcallback.rs @@ -0,0 +1,39 @@ +use crate::prelude::*; + +use std::os::raw::c_void; + +pub type PFN_vkAllocationFunction = + extern "system" fn(*mut c_void, usize, usize, VkSystemAllocationScope) -> *mut c_void; + +pub type PFN_vkReallocationFunction = extern "system" fn( + *mut c_void, + *mut c_void, + usize, + usize, + VkSystemAllocationScope, +) -> *mut c_void; + +pub type PFN_vkFreeFunction = extern "system" fn(*mut c_void, *mut c_void); +pub type PFN_vkInternalAllocationNotification = extern "system" fn( + *mut c_void, + usize, + VkInternalAllocationType, + VkSystemAllocationScope, +) -> (); + +pub type PFN_vkInternalFreeNotification = extern "system" fn( + *mut c_void, + usize, + VkInternalAllocationType, + VkSystemAllocationScope, +) -> (); + +#[repr(C)] +pub struct VkAllocationCallbacks { + pub pUserData: *mut c_void, + pub pfnAllocation: PFN_vkAllocationFunction, + pub pfnReallocation: PFN_vkReallocationFunction, + pub pfnFree: PFN_vkFreeFunction, + pub pfnInternalAllocation: PFN_vkInternalAllocationNotification, + pub pfnInternalFree: PFN_vkInternalFreeNotification, +}
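A quick round-trip of the 10/10/12-bit version packing defined in lib.rs above; a patch value above 0xCF is exactly the case the corrected 0x0FFF mask is needed for:

```rust
// 231 == 0xE7: a narrower mask would corrupt it (0xE7 & 0xCF == 0xC7).
let v = VK_MAKE_VERSION(1, 3, 231);
assert_eq!(v, (1 << 22) | (3 << 12) | 231);
assert_eq!(VK_GET_VERSION(v), (1, 3, 231));
```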
diff --git a/vulkan-sys/src/structs/core/applicationinfo.rs b/vulkan-sys/src/structs/core/applicationinfo.rs new file mode 100644 index 0000000..cef272f --- /dev/null +++ b/vulkan-sys/src/structs/core/applicationinfo.rs @@ -0,0 +1,62 @@ +use crate::prelude::*; + +use std::ffi::CStr; +use std::fmt; +use std::marker::PhantomData; +use std::os::raw::{c_char, c_void}; +use std::ptr; + +#[repr(C)] +pub struct VkApplicationInfo<'a> { + lt: PhantomData<&'a ()>, + pub sType: VkStructureType, + pub pNext: *const c_void, + pub pApplicationName: *const c_char, + pub applicationVersion: u32, + pub pEngineName: *const c_char, + pub engineVersion: u32, + pub apiVersion: u32, +} + +impl<'a> VkApplicationInfo<'a> { + pub fn new( + application_name: &'a VkString, + application_version: u32, + engine_name: &'a VkString, + engine_version: u32, + api_version: u32, + ) -> Self { + VkApplicationInfo { + lt: PhantomData, + sType: VK_STRUCTURE_TYPE_APPLICATION_INFO, + pNext: ptr::null(), + pApplicationName: application_name.as_ptr(), + applicationVersion: application_version, + pEngineName: engine_name.as_ptr(), + engineVersion: engine_version, + apiVersion: api_version, + } + } +} + +impl<'a> fmt::Debug for VkApplicationInfo<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let application_name_c = unsafe { CStr::from_ptr(self.pApplicationName) }; + let application_name = application_name_c.to_str().unwrap_or(""); + + let engine_name_c = unsafe { CStr::from_ptr(self.pEngineName) }; + let engine_name = engine_name_c.to_str().unwrap_or(""); + + write!( + f, + "{{ sType: {:?}, pNext: {:?}, pApplicationName: {}, applicationVersion: {}, pEngineName: {}, engineVersion: {}, apiVersion: {} }}", + self.sType, + self.pNext, + application_name, + self.applicationVersion, + engine_name, + self.engineVersion, + self.apiVersion + ) + } +} diff --git a/vulkan-sys/src/structs/core/attachmentdescription.rs b/vulkan-sys/src/structs/core/attachmentdescription.rs new file mode 100644 index 0000000..f43004b --- /dev/null +++ b/vulkan-sys/src/structs/core/attachmentdescription.rs @@ -0,0 +1,45 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug, Clone)] +pub struct VkAttachmentDescription { + pub flags: VkAttachmentDescriptionFlagBits, + pub format: VkFormat, + pub samples: VkSampleCountFlagBits, + pub loadOp: VkAttachmentLoadOp, + pub storeOp: VkAttachmentStoreOp, + pub stencilLoadOp: VkAttachmentLoadOp, + pub stencilStoreOp: VkAttachmentStoreOp, + pub initialLayout: VkImageLayout, + pub finalLayout: VkImageLayout, +} + +impl VkAttachmentDescription { + pub fn new<T, U>( + flags: T, + format: VkFormat, + samples: U, + load_op: VkAttachmentLoadOp, + store_op: VkAttachmentStoreOp, + stencil_load_op: VkAttachmentLoadOp, + stencil_store_op: VkAttachmentStoreOp, + initial_layout: VkImageLayout, + final_layout: VkImageLayout, + ) -> Self + where + T: Into<VkAttachmentDescriptionFlagBits>, + U: Into<VkSampleCountFlagBits>, + { + VkAttachmentDescription { + flags: flags.into(), + format, + samples: samples.into(), + loadOp: load_op, + storeOp: store_op, + stencilLoadOp: stencil_load_op, + stencilStoreOp: stencil_store_op, + initialLayout: initial_layout, + finalLayout: final_layout, + } + } +} diff --git a/vulkan-sys/src/structs/core/attachmentreference.rs b/vulkan-sys/src/structs/core/attachmentreference.rs new file mode 100644 index 0000000..7e53971 --- /dev/null +++ b/vulkan-sys/src/structs/core/attachmentreference.rs @@ -0,0 +1,8 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug, Clone, Copy)] +pub struct VkAttachmentReference { + pub attachment: u32, + pub layout: VkImageLayout, +}
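A usage sketch for the builder above; the `VkString` constructor name is an assumption (the real API lives in vulkan-sys/src/custom/string.rs):

```rust
// The &VkString arguments must outlive the info struct (the 'a lifetime).
let app_name = VkString::new("demo_app");       // hypothetical constructor
let engine_name = VkString::new("demo_engine"); // hypothetical constructor

let app_info = VkApplicationInfo::new(
    &app_name,
    VK_MAKE_VERSION(0, 1, 0), // applicationVersion
    &engine_name,
    VK_MAKE_VERSION(0, 1, 0), // engineVersion
    VK_MAKE_VERSION(1, 2, 0), // apiVersion the application targets
);
```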
diff --git a/vulkan-sys/src/structs/core/base_in_structure.rs b/vulkan-sys/src/structs/core/base_in_structure.rs new file mode 100644 index 0000000..5daf521 --- /dev/null +++ b/vulkan-sys/src/structs/core/base_in_structure.rs @@ -0,0 +1,27 @@ +use crate::prelude::*; + +use std::os::raw::c_void; + +#[repr(C)] +pub struct VkBaseInStructure { + pub sType: VkStructureType, + pub pNext: *const c_void, +} + +impl VkBaseInStructure { + /// https://raw.githubusercontent.com/MaikKlein/ash/master/ash/src/vk.rs + pub(crate) unsafe fn ptr_chain_iter( + me: *mut Self, + ) -> impl Iterator<Item = *mut VkBaseInStructure> { + let ptr = me as *mut VkBaseInStructure; + (0..).scan(ptr, |p_ptr, _| { + if p_ptr.is_null() { + return None; + } + let n_ptr = (**p_ptr).pNext as *mut VkBaseInStructure; + let old = *p_ptr; + *p_ptr = n_ptr; + Some(old) + }) + } +} diff --git a/vulkan-sys/src/structs/core/base_out_structure.rs b/vulkan-sys/src/structs/core/base_out_structure.rs new file mode 100644 index 0000000..dbee57d --- /dev/null +++ b/vulkan-sys/src/structs/core/base_out_structure.rs @@ -0,0 +1,27 @@ +use crate::prelude::*; + +use std::os::raw::c_void; + +#[repr(C)] +pub struct VkBaseOutStructure { + pub sType: VkStructureType, + pub pNext: *mut c_void, +} + +impl VkBaseOutStructure { + /// https://raw.githubusercontent.com/MaikKlein/ash/master/ash/src/vk.rs + pub(crate) unsafe fn ptr_chain_iter( + me: *mut Self, + ) -> impl Iterator<Item = *mut VkBaseOutStructure> { + let ptr = me as *mut VkBaseOutStructure; + (0..).scan(ptr, |p_ptr, _| { + if p_ptr.is_null() { + return None; + } + let n_ptr = (**p_ptr).pNext as *mut VkBaseOutStructure; + let old = *p_ptr; + *p_ptr = n_ptr; + Some(old) + }) + } +}
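What the iterator above is for, as a crate-internal sketch (`ptr_chain_iter` is `pub(crate)`): walking a pNext chain and locating one extension struct by its sType. The lookup function name is illustrative:

```rust
// Scan a pNext chain, starting at `root`, for a struct with a given sType.
// The iterator yields `root` itself first, then each pNext successor until null.
unsafe fn find_in_chain(
    root: *mut VkBaseOutStructure,
    wanted: VkStructureType,
) -> Option<*mut VkBaseOutStructure> {
    VkBaseOutStructure::ptr_chain_iter(root).find(|&s| (*s).sType == wanted)
}
```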
diff --git a/vulkan-sys/src/structs/core/bind_buffer_memory_info.rs b/vulkan-sys/src/structs/core/bind_buffer_memory_info.rs new file mode 100644 index 0000000..913ff6e --- /dev/null +++ b/vulkan-sys/src/structs/core/bind_buffer_memory_info.rs @@ -0,0 +1,26 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkBindBufferMemoryInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub buffer: VkBuffer, + pub memory: VkDeviceMemory, + pub memoryOffset: VkDeviceSize, +} + +impl VkBindBufferMemoryInfo { + pub fn new(buffer: VkBuffer, memory: VkDeviceMemory, memory_offset: VkDeviceSize) -> Self { + VkBindBufferMemoryInfo { + sType: VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO, + pNext: ptr::null(), + buffer, + memory, + memoryOffset: memory_offset, + } + } +} diff --git a/vulkan-sys/src/structs/core/bind_image_memory_info.rs b/vulkan-sys/src/structs/core/bind_image_memory_info.rs new file mode 100644 index 0000000..889cb60 --- /dev/null +++ b/vulkan-sys/src/structs/core/bind_image_memory_info.rs @@ -0,0 +1,26 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkBindImageMemoryInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub image: VkImage, + pub memory: VkDeviceMemory, + pub memoryOffset: VkDeviceSize, +} + +impl VkBindImageMemoryInfo { + pub fn new(image: VkImage, memory: VkDeviceMemory, memory_offset: VkDeviceSize) -> Self { + VkBindImageMemoryInfo { + sType: VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, + pNext: ptr::null(), + image, + memory, + memoryOffset: memory_offset, + } + } +} diff --git a/vulkan-sys/src/structs/core/bindsparseinfo.rs b/vulkan-sys/src/structs/core/bindsparseinfo.rs new file mode 100644 index 0000000..3bcd14a --- /dev/null +++ b/vulkan-sys/src/structs/core/bindsparseinfo.rs @@ -0,0 +1,20 @@ +use crate::prelude::*; + +use std::os::raw::c_void; + +#[repr(C)] +#[derive(Debug)] +pub struct VkBindSparseInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub waitSemaphoreCount: u32, + pub pWaitSemaphores: *const VkSemaphore, + pub bufferBindCount: u32, + pub pBufferBinds: *const VkSparseBufferMemoryBindInfo, + pub imageOpaqueBindCount: u32, + pub pImageOpaqueBinds: *const VkSparseImageOpaqueMemoryBindInfo, + pub imageBindCount: u32, + pub pImageBinds: *const VkSparseImageMemoryBindInfo, + pub signalSemaphoreCount: u32, + pub pSignalSemaphores: *const VkSemaphore, +} diff --git a/vulkan-sys/src/structs/core/buffer_device_address_info.rs b/vulkan-sys/src/structs/core/buffer_device_address_info.rs new file mode 100644 index 0000000..e81277e --- /dev/null +++ b/vulkan-sys/src/structs/core/buffer_device_address_info.rs @@ -0,0 +1,24 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +pub type VkBufferDeviceAddressInfoKHR = VkBufferDeviceAddressInfo; +pub type VkBufferDeviceAddressInfoEXT = VkBufferDeviceAddressInfo; + +#[repr(C)] +pub struct VkBufferDeviceAddressInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub buffer: VkBuffer, +} + +impl VkBufferDeviceAddressInfo { + pub fn new(buffer: VkBuffer) -> Self { + VkBufferDeviceAddressInfo { + sType: VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, + pNext: ptr::null(), + buffer, + } + } +} diff --git a/vulkan-sys/src/structs/core/buffer_memory_requirements_info_2.rs b/vulkan-sys/src/structs/core/buffer_memory_requirements_info_2.rs new file mode 100644 index 0000000..8a02cf1 --- /dev/null +++ b/vulkan-sys/src/structs/core/buffer_memory_requirements_info_2.rs @@ -0,0 +1,22 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkBufferMemoryRequirementsInfo2 { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub buffer: VkBuffer, +} + +impl VkBufferMemoryRequirementsInfo2 { + pub fn new(buffer: VkBuffer) -> Self { + VkBufferMemoryRequirementsInfo2 { + sType: VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2, + pNext: ptr::null(), + buffer, + } + } +} diff --git a/vulkan-sys/src/structs/core/buffercopy.rs b/vulkan-sys/src/structs/core/buffercopy.rs new file mode 100644 index 0000000..0ac6fe9 --- /dev/null +++ b/vulkan-sys/src/structs/core/buffercopy.rs @@ -0,0 +1,9 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkBufferCopy { + pub srcOffset: VkDeviceSize, + pub dstOffset: VkDeviceSize, + pub size: VkDeviceSize, +} diff --git a/vulkan-sys/src/structs/core/buffercreateinfo.rs b/vulkan-sys/src/structs/core/buffercreateinfo.rs new file mode 100644 index 0000000..c5ca8b9 --- /dev/null +++ b/vulkan-sys/src/structs/core/buffercreateinfo.rs @@ -0,0 +1,45 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkBufferCreateInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkBufferCreateFlagBits, + pub size: VkDeviceSize, + pub usage: VkBufferUsageFlagBits, + pub sharingMode: VkSharingMode, + pub queueFamilyIndexCount: u32, + pub pQueueFamilyIndices: *const u32, +} + +impl VkBufferCreateInfo { + pub fn new<'a, 'b: 'a, T, U>( + flags: T, + size: VkDeviceSize, + usage: U, + sharing_mode: VkSharingMode, + queue_family_indices: &'b [u32], + ) -> VkBufferCreateInfo + where + T: Into<VkBufferCreateFlagBits>, + U: Into<VkBufferUsageFlagBits>, + { + VkBufferCreateInfo { + sType: VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, + pNext: ptr::null(), + flags: flags.into(), + size, + usage: usage.into(), + sharingMode: sharing_mode, + queueFamilyIndexCount: queue_family_indices.len() as u32, + pQueueFamilyIndices: queue_family_indices.as_ptr(), + } + } +} + +impl_pnext_in!(VkBufferCreateInfo, VkBufferDeviceAddressCreateInfoEXT); +impl_pnext_in!(VkBufferCreateInfo, VkExternalMemoryBufferCreateInfo);
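A usage sketch for the create-info builder above. The `VK_BUFFER_USAGE_*`/`VK_SHARING_MODE_*` constants are assumed from the crate's enums module, and `0u32` relies on the crate's `Into` conversions for flag-bits types, as used elsewhere in this patch:

```rust
// Create-info for a 1 KiB staging buffer: no flags, transfer-source usage,
// exclusive sharing (the queue-family slice is ignored in that mode).
let create_info = VkBufferCreateInfo::new(
    0u32,
    1024,
    VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
    VK_SHARING_MODE_EXCLUSIVE,
    &[],
);
```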
diff --git a/vulkan-sys/src/structs/core/bufferdeviceaddresscreateinfoext.rs b/vulkan-sys/src/structs/core/bufferdeviceaddresscreateinfoext.rs new file mode 100644 index 0000000..7d47af8 --- /dev/null +++ b/vulkan-sys/src/structs/core/bufferdeviceaddresscreateinfoext.rs @@ -0,0 +1,22 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkBufferDeviceAddressCreateInfoEXT { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub deviceAddress: VkDeviceSize, +} + +impl VkBufferDeviceAddressCreateInfoEXT { + pub fn new(device_address: VkDeviceSize) -> Self { + VkBufferDeviceAddressCreateInfoEXT { + sType: VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_CREATE_INFO_EXT, + pNext: ptr::null(), + deviceAddress: device_address, + } + } +} diff --git a/vulkan-sys/src/structs/core/bufferimagecopy.rs b/vulkan-sys/src/structs/core/bufferimagecopy.rs new file mode 100644 index 0000000..e8feb12 --- /dev/null +++ b/vulkan-sys/src/structs/core/bufferimagecopy.rs @@ -0,0 +1,12 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkBufferImageCopy { + pub bufferOffset: VkDeviceSize, + pub bufferRowLength: u32, + pub bufferImageHeight: u32, + pub imageSubresource: VkImageSubresourceLayers, + pub imageOffset: VkOffset3D, + pub imageExtent: VkExtent3D, +} diff --git a/vulkan-sys/src/structs/core/buffermemorybarrier.rs b/vulkan-sys/src/structs/core/buffermemorybarrier.rs new file mode 100644 index 0000000..fb499ff --- /dev/null +++ b/vulkan-sys/src/structs/core/buffermemorybarrier.rs @@ -0,0 +1,46 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkBufferMemoryBarrier { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub srcAccessMask: VkAccessFlagBits, + pub dstAccessMask: VkAccessFlagBits, + pub srcQueueFamilyIndex: u32, + pub dstQueueFamilyIndex: u32, + pub buffer: VkBuffer, + pub offset: VkDeviceSize, + pub size: VkDeviceSize, +} + +impl VkBufferMemoryBarrier { + pub fn new<S, T>( + src_access_mask: S, + dst_access_mask: T, + src_queue_family_index: u32, + dst_queue_family_index: u32, + buffer: VkBuffer, + offset: VkDeviceSize, + size: VkDeviceSize, + ) -> VkBufferMemoryBarrier + where + S: Into<VkAccessFlagBits>, + T: Into<VkAccessFlagBits>, + { + VkBufferMemoryBarrier { + sType: VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, + pNext: ptr::null(), + srcAccessMask: src_access_mask.into(), + dstAccessMask: dst_access_mask.into(), + srcQueueFamilyIndex: src_queue_family_index, + dstQueueFamilyIndex: dst_queue_family_index, + buffer, + offset, + size, + } + } +} diff --git a/vulkan-sys/src/structs/core/bufferviewcreateinfo.rs b/vulkan-sys/src/structs/core/bufferviewcreateinfo.rs new file mode 100644 index 0000000..79ea131 --- /dev/null +++ b/vulkan-sys/src/structs/core/bufferviewcreateinfo.rs @@ -0,0 +1,39 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkBufferViewCreateInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkBufferViewCreateFlagBits, + pub buffer: VkBuffer, + pub format: VkFormat, + pub offset: VkDeviceSize, + pub range: VkDeviceSize, +} + +impl VkBufferViewCreateInfo { + pub fn new<T>( + flags: T, + buffer: VkBuffer, + format: VkFormat, + offset: VkDeviceSize, + range: VkDeviceSize, + ) -> Self + where + T: Into<VkBufferViewCreateFlagBits>, + 
{ + VkBufferViewCreateInfo { + sType: VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO, + pNext: ptr::null(), + flags: flags.into(), + buffer, + format, + offset, + range, + } + } +} diff --git a/vulkan-sys/src/structs/core/clearattachment.rs b/vulkan-sys/src/structs/core/clearattachment.rs new file mode 100644 index 0000000..1bd7bc2 --- /dev/null +++ b/vulkan-sys/src/structs/core/clearattachment.rs @@ -0,0 +1,9 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkClearAttachment { + pub aspectMask: VkImageAspectFlagBits, + pub colorAttachment: u32, + pub clearValue: VkClearValue, +} diff --git a/vulkan-sys/src/structs/core/clearcolorvalue.rs b/vulkan-sys/src/structs/core/clearcolorvalue.rs new file mode 100644 index 0000000..68118ce --- /dev/null +++ b/vulkan-sys/src/structs/core/clearcolorvalue.rs @@ -0,0 +1,35 @@ +use std::mem; + +#[repr(C)] +#[derive(Debug, Clone)] +pub struct VkClearColorValue([u32; 4]); + +impl VkClearColorValue { + #[inline] + pub fn as_float32(&self) -> &[f32; 4] { + unsafe { mem::transmute(&self.0) } + } + + #[inline] + pub fn as_int32(&self) -> &[i32; 4] { + unsafe { mem::transmute(&self.0) } + } + + #[inline] + pub fn as_uint32(&self) -> &[u32; 4] { + &self.0 + } + + #[inline] + pub fn float32(val: [f32; 4]) -> VkClearColorValue { + VkClearColorValue(unsafe { mem::transmute(val) }) + } + #[inline] + pub fn int32(val: [i32; 4]) -> VkClearColorValue { + VkClearColorValue(unsafe { mem::transmute(val) }) + } + #[inline] + pub fn uint32(val: [u32; 4]) -> VkClearColorValue { + VkClearColorValue(val) + } +} diff --git a/vulkan-sys/src/structs/core/cleardepthstencilvalue.rs b/vulkan-sys/src/structs/core/cleardepthstencilvalue.rs new file mode 100644 index 0000000..fb5efaa --- /dev/null +++ b/vulkan-sys/src/structs/core/cleardepthstencilvalue.rs @@ -0,0 +1,6 @@ +#[repr(C)] +#[derive(Debug)] +pub struct VkClearDepthStencilValue { + pub depth: f32, + pub stencil: u32, +} diff --git a/vulkan-sys/src/structs/core/clearrect.rs b/vulkan-sys/src/structs/core/clearrect.rs new file mode 100644 index 0000000..9d4601b --- /dev/null +++ b/vulkan-sys/src/structs/core/clearrect.rs @@ -0,0 +1,9 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkClearRect { + pub VkRect: VkRect2D, + pub baseArrayLayer: u32, + pub layerCount: u32, +} diff --git a/vulkan-sys/src/structs/core/clearvalue.rs b/vulkan-sys/src/structs/core/clearvalue.rs new file mode 100644 index 0000000..f2dfe2e --- /dev/null +++ b/vulkan-sys/src/structs/core/clearvalue.rs @@ -0,0 +1,28 @@ +use crate::prelude::*; + +use std::mem; + +#[repr(C)] +#[derive(Debug, Clone)] +pub struct VkClearValue(VkClearColorValue); + +impl VkClearValue { + #[inline] + pub fn as_color(&self) -> &VkClearColorValue { + &self.0 + } + #[inline] + pub fn as_depth_stencil(&self) -> &VkClearDepthStencilValue { + unsafe { mem::transmute(&self.0) } + } + + #[inline] + pub fn color(val: VkClearColorValue) -> VkClearValue { + VkClearValue(val) + } + #[inline] + pub fn depth_stencil(val: VkClearDepthStencilValue) -> VkClearValue { + let val = (val, [0u32, 0u32]); + VkClearValue(unsafe { mem::transmute(val) }) + } +} diff --git a/vulkan-sys/src/structs/core/commandbufferallocateinfo.rs b/vulkan-sys/src/structs/core/commandbufferallocateinfo.rs new file mode 100644 index 0000000..aba0f14 --- /dev/null +++ b/vulkan-sys/src/structs/core/commandbufferallocateinfo.rs @@ -0,0 +1,30 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkCommandBufferAllocateInfo { + pub 
sType: VkStructureType, + pub pNext: *const c_void, + pub commandPool: VkCommandPool, + pub level: VkCommandBufferLevel, + pub commandBufferCount: u32, +} + +impl VkCommandBufferAllocateInfo { + pub fn new( + command_pool: VkCommandPool, + level: VkCommandBufferLevel, + command_buffer_count: u32, + ) -> Self { + VkCommandBufferAllocateInfo { + sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, + pNext: ptr::null(), + commandPool: command_pool, + level, + commandBufferCount: command_buffer_count, + } + } +} diff --git a/vulkan-sys/src/structs/core/commandbufferbegininfo.rs b/vulkan-sys/src/structs/core/commandbufferbegininfo.rs new file mode 100644 index 0000000..7297a9e --- /dev/null +++ b/vulkan-sys/src/structs/core/commandbufferbegininfo.rs @@ -0,0 +1,34 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkCommandBufferBeginInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkCommandBufferUsageFlagBits, + pub pInheritanceInfo: *const VkCommandBufferInheritanceInfo, +} + +impl VkCommandBufferBeginInfo { + pub fn new<T>(flags: T) -> Self + where + T: Into<VkCommandBufferUsageFlagBits>, + { + VkCommandBufferBeginInfo { + sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, + pNext: ptr::null(), + flags: flags.into(), + pInheritanceInfo: ptr::null(), + } + } + + pub fn set_inheritance_info<'a, 'b: 'a>( + &'a mut self, + inheritance_info: &'b VkCommandBufferInheritanceInfo, + ) { + self.pInheritanceInfo = inheritance_info as *const VkCommandBufferInheritanceInfo; + } +} diff --git a/vulkan-sys/src/structs/core/commandbufferinheritanceinfo.rs b/vulkan-sys/src/structs/core/commandbufferinheritanceinfo.rs new file mode 100644 index 0000000..1d764da --- /dev/null +++ b/vulkan-sys/src/structs/core/commandbufferinheritanceinfo.rs @@ -0,0 +1,46 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkCommandBufferInheritanceInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub renderPass: VkRenderPass, + pub subpass: u32, + pub framebuffer: VkFramebuffer, + pub occlusionQueryEnable: VkBool32, + pub queryFlagBits: VkQueryControlFlagBits, + pub pipelineStatistics: VkQueryPipelineStatisticFlagBits, +} + +impl VkCommandBufferInheritanceInfo { + pub fn new(renderpass: VkRenderPass, subpass: u32, framebuffer: VkFramebuffer) -> Self { + VkCommandBufferInheritanceInfo { + sType: VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO, + pNext: ptr::null(), + renderPass: renderpass, + subpass, + framebuffer, + occlusionQueryEnable: VK_FALSE, + queryFlagBits: 0u32.into(), + pipelineStatistics: 0u32.into(), + } + } + + pub fn set_query<T, U>( + &mut self, + occlusion_query_enable: bool, + query_flag: T, + pipeline_statistics: U, + ) where + T: Into<VkQueryControlFlagBits>, + U: Into<VkQueryPipelineStatisticFlagBits>, + { + self.occlusionQueryEnable = occlusion_query_enable.into(); + self.queryFlagBits = query_flag.into(); + self.pipelineStatistics = pipeline_statistics.into(); + } +}
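How the begin/inheritance pair composes for a secondary command buffer (the usage-flag constant is the standard Vulkan name, assumed to be defined in the enums module; `render_pass` and `framebuffer` are placeholders):

```rust
// A secondary command buffer that executes entirely inside a render pass
// must carry matching inheritance info.
let inheritance = VkCommandBufferInheritanceInfo::new(render_pass, 0, framebuffer);

let mut begin_info =
    VkCommandBufferBeginInfo::new(VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT);
begin_info.set_inheritance_info(&inheritance);
```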
diff --git a/vulkan-sys/src/structs/core/commandpoolcreateinfo.rs b/vulkan-sys/src/structs/core/commandpoolcreateinfo.rs new file mode 100644 index 0000000..0c94467 --- /dev/null +++ b/vulkan-sys/src/structs/core/commandpoolcreateinfo.rs @@ -0,0 +1,27 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkCommandPoolCreateInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkCommandPoolCreateFlagBits, + pub queueFamilyIndex: u32, +} + +impl VkCommandPoolCreateInfo { + pub fn new<T>(flags: T, queue_family_index: u32) -> Self + where + T: Into<VkCommandPoolCreateFlagBits>, + { + VkCommandPoolCreateInfo { + sType: VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, + pNext: ptr::null(), + flags: flags.into(), + queueFamilyIndex: queue_family_index, + } + } +} diff --git a/vulkan-sys/src/structs/core/componentmapping.rs b/vulkan-sys/src/structs/core/componentmapping.rs new file mode 100644 index 0000000..9350df5 --- /dev/null +++ b/vulkan-sys/src/structs/core/componentmapping.rs @@ -0,0 +1,21 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug, Clone)] +pub struct VkComponentMapping { + pub r: VkComponentSwizzle, + pub g: VkComponentSwizzle, + pub b: VkComponentSwizzle, + pub a: VkComponentSwizzle, +} + +impl Default for VkComponentMapping { + fn default() -> Self { + VkComponentMapping { + r: VK_COMPONENT_SWIZZLE_IDENTITY, + g: VK_COMPONENT_SWIZZLE_IDENTITY, + b: VK_COMPONENT_SWIZZLE_IDENTITY, + a: VK_COMPONENT_SWIZZLE_IDENTITY, + } + } +} diff --git a/vulkan-sys/src/structs/core/computepipelinecreateinfo.rs b/vulkan-sys/src/structs/core/computepipelinecreateinfo.rs new file mode 100644 index 0000000..0917409 --- /dev/null +++ b/vulkan-sys/src/structs/core/computepipelinecreateinfo.rs @@ -0,0 +1,42 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkComputePipelineCreateInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkPipelineCreateFlagBits, + pub stage: VkPipelineShaderStageCreateInfo, + pub layout: VkPipelineLayout, + pub basePipelineHandle: VkPipeline, + pub basePipelineIndex: i32, +} + +impl VkComputePipelineCreateInfo { + pub fn new<T>( + flags: T, + stage: VkPipelineShaderStageCreateInfo, + layout: VkPipelineLayout, + ) -> Self + where + T: Into<VkPipelineCreateFlagBits>, + { + VkComputePipelineCreateInfo { + sType: VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, + pNext: ptr::null(), + flags: flags.into(), + stage, + layout, + basePipelineHandle: VkPipeline::NULL_HANDLE, + basePipelineIndex: -1, + } + } + + pub fn set_base_pipeline(&mut self, pipeline: VkPipeline, index: i32) { + self.basePipelineHandle = pipeline; + self.basePipelineIndex = index; + } +} diff --git a/vulkan-sys/src/structs/core/copydescriptorset.rs b/vulkan-sys/src/structs/core/copydescriptorset.rs new file mode 100644 index 0000000..3feef93 --- /dev/null +++ b/vulkan-sys/src/structs/core/copydescriptorset.rs @@ -0,0 +1,17 @@ +use crate::prelude::*; + +use std::os::raw::c_void; + +#[repr(C)] +#[derive(Debug)] +pub struct VkCopyDescriptorSet { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub srcSet: VkDescriptorSet, + pub srcBinding: u32, + pub srcArrayElement: u32, + pub dstSet: VkDescriptorSet, + pub dstBinding: u32, + pub dstArrayElement: u32, + pub descriptorCount: u32, +}
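A sketch of filling the compute create-info (`shader_stage` and `pipeline_layout` are placeholders for a real shader-stage description and pipeline layout):

```rust
// Plain compute pipeline; basePipelineHandle stays NULL_HANDLE / -1 unless
// set_base_pipeline is called to derive from an existing pipeline.
let mut info = VkComputePipelineCreateInfo::new(0u32, shader_stage, pipeline_layout);
// info.set_base_pipeline(parent_pipeline, 0); // optional derivation
```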
diff --git a/vulkan-sys/src/structs/core/debugreportcallbackcreateinfoext.rs b/vulkan-sys/src/structs/core/debugreportcallbackcreateinfoext.rs new file mode 100644 index 0000000..a40cabb --- /dev/null +++ b/vulkan-sys/src/structs/core/debugreportcallbackcreateinfoext.rs @@ -0,0 +1,31 @@ +use crate::prelude::*; +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkDebugReportCallbackCreateInfoEXT { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkDebugReportFlagBitsEXT, + pub pfnCallback: PFN_vkDebugReportCallbackEXT, + pub pUserData: *mut c_void, +} + +impl VkDebugReportCallbackCreateInfoEXT { + pub fn new<T>(flags: T, callback: PFN_vkDebugReportCallbackEXT) -> Self + where + T: Into<VkDebugReportFlagBitsEXT>, + { + VkDebugReportCallbackCreateInfoEXT { + sType: VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT, + pNext: ptr::null(), + flags: flags.into(), + pfnCallback: callback, + pUserData: ptr::null_mut(), + } + } + + pub fn set_user_data<'a, 'b: 'a, T>(&'a mut self, user_data: &'b mut T) { + self.pUserData = user_data as *mut T as *mut c_void; + } +} diff --git a/vulkan-sys/src/structs/core/debugutilmessengercallbackdataext.rs b/vulkan-sys/src/structs/core/debugutilmessengercallbackdataext.rs new file mode 100644 index 0000000..893e2be --- /dev/null +++ b/vulkan-sys/src/structs/core/debugutilmessengercallbackdataext.rs @@ -0,0 +1,61 @@ +use crate::prelude::*; + +use super::super::raw_to_slice; + +use std::os::raw::{c_char, c_void}; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkDebugUtilsMessengerCallbackDataEXT { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkDebugUtilsMessengerCallbackDataFlagBitsEXT, + pub pMessageIdName: *const c_char, + pub messageIdNumber: i32, + pub pMessage: *const c_char, + pub queueLabelCount: u32, + pub pQueueLabels: *const VkDebugUtilsLabelEXT, + pub cmdBufLabelCount: u32, + pub pCmdBufLabels: *const VkDebugUtilsLabelEXT, + pub objectCount: u32, + pub pObjects: *const VkDebugUtilsObjectNameInfoEXT, +} + +impl VkDebugUtilsMessengerCallbackDataEXT { + pub fn new<'a, 'b: 'a, 'c: 'a, 'd: 'a, 'e: 'a, T>( + flags: T, + message_id_name: &VkString, + message_id_number: i32, + message: &'b VkString, + queue_labels: &'c [VkDebugUtilsLabelEXT], + cmd_buf_labels: &'d [VkDebugUtilsLabelEXT], + objects: &'e [VkDebugUtilsObjectNameInfoEXT], + ) -> Self + where + T: Into<VkDebugUtilsMessengerCallbackDataFlagBitsEXT>, + { + VkDebugUtilsMessengerCallbackDataEXT { + sType: VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CALLBACK_DATA_EXT, + pNext: ptr::null(), + flags: flags.into(), + pMessageIdName: message_id_name.as_ptr(), + messageIdNumber: message_id_number, + pMessage: message.as_ptr(), + queueLabelCount: queue_labels.len() as u32, + pQueueLabels: queue_labels.as_ptr(), + cmdBufLabelCount: cmd_buf_labels.len() as u32, + pCmdBufLabels: cmd_buf_labels.as_ptr(), + objectCount: objects.len() as u32, + pObjects: objects.as_ptr(), + } + } + + pub fn objects(&self) -> &[VkDebugUtilsObjectNameInfoEXT] { + raw_to_slice(self.pObjects, self.objectCount) + } + + pub fn message(&self) -> Result { + VkString::try_from(self.pMessage) + } +} diff --git a/vulkan-sys/src/structs/core/debugutilslabelext.rs b/vulkan-sys/src/structs/core/debugutilslabelext.rs new file mode 100644 index 0000000..0b139e5 --- /dev/null +++ b/vulkan-sys/src/structs/core/debugutilslabelext.rs @@ -0,0 +1,12 @@ +use crate::prelude::*; + +use std::os::raw::{c_char, c_void}; + +#[repr(C)] +#[derive(Debug)] +pub struct VkDebugUtilsLabelEXT { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub pLabelName: *const c_char, + pub color: [f32; 4], +}
diff --git a/vulkan-sys/src/structs/core/debugutilsmessengercreateinfoext.rs b/vulkan-sys/src/structs/core/debugutilsmessengercreateinfoext.rs new file mode 100644 index 0000000..02fb277 --- /dev/null +++ b/vulkan-sys/src/structs/core/debugutilsmessengercreateinfoext.rs @@ -0,0 +1,43 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkDebugUtilsMessengerCreateInfoEXT { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkDebugUtilsMessengerCreateFlagBitsEXT, + pub messageSeverity: VkDebugUtilsMessageSeverityFlagBitsEXT, + pub messageType: VkDebugUtilsMessageTypeFlagBitsEXT, + pub pfnUserCallback: PFN_vkDebugUtilsMessengerCallbackEXT, + pub pUserData: *mut c_void, +} + +impl VkDebugUtilsMessengerCreateInfoEXT { + pub fn new<T, U, V>( + flags: T, + message_severity: U, + message_type: V, + user_callback: PFN_vkDebugUtilsMessengerCallbackEXT, + ) -> Self + where + T: Into<VkDebugUtilsMessengerCreateFlagBitsEXT>, + U: Into<VkDebugUtilsMessageSeverityFlagBitsEXT>, + V: Into<VkDebugUtilsMessageTypeFlagBitsEXT>, + { + VkDebugUtilsMessengerCreateInfoEXT { + sType: VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT, + pNext: ptr::null(), + flags: flags.into(), + messageSeverity: message_severity.into(), + messageType: message_type.into(), + pfnUserCallback: user_callback, + pUserData: ptr::null_mut(), + } + } + + pub fn set_user_data<'a, 'b: 'a, W>(&'a mut self, user_data: &'b mut W) { + self.pUserData = user_data as *mut W as *mut c_void; + } +} diff --git a/vulkan-sys/src/structs/core/debugutilsobjectnameinfoext.rs b/vulkan-sys/src/structs/core/debugutilsobjectnameinfoext.rs new file mode 100644 index 0000000..cfe3c88 --- /dev/null +++ b/vulkan-sys/src/structs/core/debugutilsobjectnameinfoext.rs @@ -0,0 +1,34 @@ +use crate::prelude::*; + +use std::os::raw::{c_char, c_void}; +use std::ptr; + +#[repr(C)] +#[derive(Debug, Clone)] +pub struct VkDebugUtilsObjectNameInfoEXT { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub objectType: VkObjectType, + pub objectHandle: u64, + pub pObjectName: *const c_char, +} + +impl VkDebugUtilsObjectNameInfoEXT { + pub fn new<'a, 'b: 'a>( + object_type: VkObjectType, + object_handle: u64, + object_name: &'b VkString, + ) -> Self { + VkDebugUtilsObjectNameInfoEXT { + sType: VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT, + pNext: ptr::null(), + objectType: object_type, + objectHandle: object_handle, + pObjectName: object_name.as_ptr(), + } + } + + pub fn object_name(&self) -> Result { + VkString::try_from(self.pObjectName) + } +} diff --git a/vulkan-sys/src/structs/core/descriptorbufferinfo.rs b/vulkan-sys/src/structs/core/descriptorbufferinfo.rs new file mode 100644 index 0000000..a114616 --- /dev/null +++ b/vulkan-sys/src/structs/core/descriptorbufferinfo.rs @@ -0,0 +1,9 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug, Clone)] +pub struct VkDescriptorBufferInfo { + pub buffer: VkBuffer, + pub offset: VkDeviceSize, + pub range: VkDeviceSize, +} diff --git a/vulkan-sys/src/structs/core/descriptorimageinfo.rs b/vulkan-sys/src/structs/core/descriptorimageinfo.rs new file mode 100644 index 0000000..fee4b6a --- /dev/null +++ b/vulkan-sys/src/structs/core/descriptorimageinfo.rs @@ -0,0 +1,9 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug, Clone)] +pub struct VkDescriptorImageInfo { + pub sampler: VkSampler, + pub imageView: VkImageView, + pub imageLayout: VkImageLayout, +}
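Putting the messenger create-info together with a callback (the severity/type constants are the standard Vulkan names, assumed from the enums module; `VkString`'s Debug output is also assumed):

```rust
extern "system" fn messenger_callback(
    _severity: VkDebugUtilsMessageSeverityFlagsEXT,
    _types: VkDebugUtilsMessageTypeFlagsEXT,
    data: *const VkDebugUtilsMessengerCallbackDataEXT,
    _user_data: *mut std::os::raw::c_void,
) -> VkBool32 {
    let data = unsafe { &*data };
    if let Ok(message) = data.message() {
        eprintln!("vk debug utils: {:?}", message);
    }
    VK_FALSE // do not abort the triggering call
}

let create_info = VkDebugUtilsMessengerCreateInfoEXT::new(
    0u32,
    VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
    VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT,
    messenger_callback,
);
```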
pool_sizes.len() as u32, + pPoolSizes: pool_sizes.as_ptr(), + } + } +} diff --git a/vulkan-sys/src/structs/core/descriptorpoolsize.rs b/vulkan-sys/src/structs/core/descriptorpoolsize.rs new file mode 100644 index 0000000..19d7003 --- /dev/null +++ b/vulkan-sys/src/structs/core/descriptorpoolsize.rs @@ -0,0 +1,8 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkDescriptorPoolSize { + pub ty: VkDescriptorType, + pub descriptorCount: u32, +} diff --git a/vulkan-sys/src/structs/core/descriptorsetallocateinfo.rs b/vulkan-sys/src/structs/core/descriptorsetallocateinfo.rs new file mode 100644 index 0000000..cc3714d --- /dev/null +++ b/vulkan-sys/src/structs/core/descriptorsetallocateinfo.rs @@ -0,0 +1,37 @@ +use crate::prelude::*; + +use std::marker::PhantomData; +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkDescriptorSetAllocateInfo<'a> { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub descriptorPool: VkDescriptorPool, + pub descriptorSetCount: u32, + layouts_lt: PhantomData<&'a VkDescriptorSetLayout>, + pub pSetLayouts: *const VkDescriptorSetLayout, +} + +impl<'a> VkDescriptorSetAllocateInfo<'a> { + pub fn new( + descriptor_pool: VkDescriptorPool, + set_layouts: &'a [VkDescriptorSetLayout], + ) -> Self { + VkDescriptorSetAllocateInfo { + sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO, + pNext: ptr::null(), + descriptorPool: descriptor_pool, + descriptorSetCount: set_layouts.len() as u32, + layouts_lt: PhantomData, + pSetLayouts: set_layouts.as_ptr(), + } + } +} + +impl_pnext_in!( + VkDescriptorSetAllocateInfo<'_>, + VkDescriptorSetVariableDescriptorCountAllocateInfoEXT +); diff --git a/vulkan-sys/src/structs/core/descriptorsetlayoutbinding.rs b/vulkan-sys/src/structs/core/descriptorsetlayoutbinding.rs new file mode 100644 index 0000000..a2ceb6f --- /dev/null +++ b/vulkan-sys/src/structs/core/descriptorsetlayoutbinding.rs @@ -0,0 +1,54 @@ +use crate::prelude::*; + +use std::ptr; + +#[repr(C)] +#[derive(Debug, Clone)] +pub struct VkDescriptorSetLayoutBinding { + pub binding: u32, + pub descriptorType: VkDescriptorType, + pub descriptorCount: u32, + pub stageFlagBits: VkShaderStageFlagBits, + pub pImmutableSamplers: *const VkSampler, +} + +impl VkDescriptorSetLayoutBinding { + pub fn new(binding: u32, descriptor_type: VkDescriptorType, stage_flags: T) -> Self + where + T: Into, + { + VkDescriptorSetLayoutBinding { + binding, + descriptorType: descriptor_type, + descriptorCount: 1, + stageFlagBits: stage_flags.into(), + pImmutableSamplers: ptr::null(), + } + } + + pub fn new_array( + binding: u32, + count: u32, + descriptor_type: VkDescriptorType, + stage_flags: T, + ) -> Self + where + T: Into, + { + VkDescriptorSetLayoutBinding { + binding, + descriptorType: descriptor_type, + descriptorCount: count, + stageFlagBits: stage_flags.into(), + pImmutableSamplers: ptr::null(), + } + } + + pub fn set_immutable_samplers<'a, 'b: 'a>(&'a mut self, immutable_samplers: &'b [VkSampler]) { + self.pImmutableSamplers = if immutable_samplers.is_empty() { + ptr::null() + } else { + immutable_samplers.as_ptr() + }; + } +} diff --git a/vulkan-sys/src/structs/core/descriptorsetlayoutcreateinfo.rs b/vulkan-sys/src/structs/core/descriptorsetlayoutcreateinfo.rs new file mode 100644 index 0000000..53489ca --- /dev/null +++ b/vulkan-sys/src/structs/core/descriptorsetlayoutcreateinfo.rs @@ -0,0 +1,34 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct 
VkDescriptorSetLayoutCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkDescriptorSetLayoutCreateFlagBits,
+    pub bindingCount: u32,
+    pub pBindings: *const VkDescriptorSetLayoutBinding,
+}
+
+impl VkDescriptorSetLayoutCreateInfo {
+    pub fn new<T>(flags: T, bindings: &[VkDescriptorSetLayoutBinding]) -> Self
+    where
+        T: Into<VkDescriptorSetLayoutCreateFlagBits>,
+    {
+        VkDescriptorSetLayoutCreateInfo {
+            sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            bindingCount: bindings.len() as u32,
+            pBindings: bindings.as_ptr(),
+        }
+    }
+}
+
+impl_pnext_in!(
+    VkDescriptorSetLayoutCreateInfo,
+    VkDescriptorSetLayoutBindingFlagsCreateInfoEXT
+);
diff --git a/vulkan-sys/src/structs/core/descriptorsetlayoutsupport.rs b/vulkan-sys/src/structs/core/descriptorsetlayoutsupport.rs
new file mode 100644
index 0000000..cf7ef61
--- /dev/null
+++ b/vulkan-sys/src/structs/core/descriptorsetlayoutsupport.rs
@@ -0,0 +1,31 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+pub struct VkDescriptorSetLayoutSupport {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub supported: VkBool32,
+}
+
+impl VkDescriptorSetLayoutSupport {
+    pub fn new(supported: impl Into<VkBool32>) -> Self {
+        VkDescriptorSetLayoutSupport {
+            sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_SUPPORT,
+            pNext: ptr::null(),
+            supported: supported.into(),
+        }
+    }
+}
+
+impl Default for VkDescriptorSetLayoutSupport {
+    fn default() -> Self {
+        Self::new(VK_FALSE)
+    }
+}
+
+impl_pnext_in!(
+    VkDescriptorSetLayoutSupport,
+    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT
+);
diff --git a/vulkan-sys/src/structs/core/devicecreateinfo.rs b/vulkan-sys/src/structs/core/devicecreateinfo.rs
new file mode 100644
index 0000000..f1df1f9
--- /dev/null
+++ b/vulkan-sys/src/structs/core/devicecreateinfo.rs
@@ -0,0 +1,84 @@
+use crate::prelude::*;
+
+use core::slice;
+use std::marker::PhantomData;
+use std::os::raw::{c_char, c_void};
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkDeviceCreateInfo<'a> {
+    lt: PhantomData<&'a ()>,
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkDeviceCreateFlagBits,
+    pub queueCreateInfoCount: u32,
+    pub pQueueCreateInfos: *const VkDeviceQueueCreateInfo,
+    pub enabledLayerCount: u32,
+    pub ppEnabledLayerNames: *const *const c_char,
+    pub enabledExtensionCount: u32,
+    pub ppEnabledExtensionNames: *const *const c_char,
+    pub pEnabledFeatures: *const VkPhysicalDeviceFeatures,
+}
+
+impl<'a> VkDeviceCreateInfo<'a> {
+    pub fn new<T>(
+        flags: T,
+        queue_create_info: &'a [VkDeviceQueueCreateInfo],
+        enabled_extension_names: &'a VkNames,
+        enabled_features: &'a VkPhysicalDeviceFeatures,
+    ) -> Self
+    where
+        T: Into<VkDeviceCreateFlagBits>,
+    {
+        VkDeviceCreateInfo {
+            lt: PhantomData,
+            sType: VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            queueCreateInfoCount: queue_create_info.len() as u32,
+            pQueueCreateInfos: queue_create_info.as_ptr(),
+            enabledLayerCount: 0,
+            ppEnabledLayerNames: ptr::null(),
+            enabledExtensionCount: enabled_extension_names.c_names().len() as u32,
+            ppEnabledExtensionNames: enabled_extension_names.c_names().as_ptr(),
+            pEnabledFeatures: enabled_features as *const _,
+        }
+    }
+
+    pub fn extension_names(&self) -> Vec<VkString> {
+        let mut names = Vec::new();
+        let extensions: &[*const c_char] = unsafe {
+            slice::from_raw_parts(
+                self.ppEnabledExtensionNames,
+                self.enabledExtensionCount as usize,
+            )
+        };
+
+        for extension in extensions {
+            names.push(VkString::try_from(*extension).unwrap());
+        }
+
+        names
+    }
+}
+
+impl_pnext_in!(
+    VkDeviceCreateInfo<'_>,
+    VkPhysicalDeviceDescriptorIndexingFeaturesEXT
+);
+
+impl_pnext_in!(
+    VkDeviceCreateInfo<'_>,
+    VkPhysicalDeviceBufferDeviceAddressFeaturesEXT
+);
+
+impl_pnext_in!(
+    VkDeviceCreateInfo<'_>,
+    VkPhysicalDeviceAccelerationStructureFeaturesKHR
+);
+
+impl_pnext_in!(
+    VkDeviceCreateInfo<'_>,
+    VkPhysicalDeviceRayTracingFeaturesKHR
+);
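// Editor's note: a hedged sketch, not part of the patch. VkDeviceQueueCreateInfo
// is defined in the next file below; the u32 -> VkDeviceCreateFlagBits
// conversion is an assumption about this crate's helpers, not verified API.
fn device_create_info<'a>(
    queue_infos: &'a [VkDeviceQueueCreateInfo],
    extensions: &'a VkNames,
    features: &'a VkPhysicalDeviceFeatures,
) -> VkDeviceCreateInfo<'a> {
    // new() deliberately leaves the (deprecated) device layers empty and wires
    // the extension names through as raw C string pointers
    VkDeviceCreateInfo::new(0, queue_infos, extensions, features)
}

// e.g.: let queue_infos = [VkDeviceQueueCreateInfo::new(0, graphics_family, &[1.0])];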
diff --git a/vulkan-sys/src/structs/core/devicequeuecreateinfo.rs b/vulkan-sys/src/structs/core/devicequeuecreateinfo.rs
new file mode 100644
index 0000000..976855e
--- /dev/null
+++ b/vulkan-sys/src/structs/core/devicequeuecreateinfo.rs
@@ -0,0 +1,31 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug, Clone)]
+pub struct VkDeviceQueueCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkDeviceQueueCreateFlagBits,
+    pub queueFamilyIndex: u32,
+    pub queueCount: u32,
+    pub pQueuePriorities: *const f32,
+}
+
+impl VkDeviceQueueCreateInfo {
+    pub fn new<T>(flags: T, queue_family_index: u32, queue_priorities: &[f32]) -> Self
+    where
+        T: Into<VkDeviceQueueCreateFlagBits>,
+    {
+        VkDeviceQueueCreateInfo {
+            sType: VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            queueFamilyIndex: queue_family_index,
+            queueCount: queue_priorities.len() as u32,
+            pQueuePriorities: queue_priorities.as_ptr(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/dispatchindirectcommand.rs b/vulkan-sys/src/structs/core/dispatchindirectcommand.rs
new file mode 100644
index 0000000..71145f6
--- /dev/null
+++ b/vulkan-sys/src/structs/core/dispatchindirectcommand.rs
@@ -0,0 +1,7 @@
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkDispatchIndirectCommand {
+    pub x: u32,
+    pub y: u32,
+    pub z: u32,
+}
diff --git a/vulkan-sys/src/structs/core/displayplanecapabilities.rs b/vulkan-sys/src/structs/core/displayplanecapabilities.rs
new file mode 100644
index 0000000..5ce304c
--- /dev/null
+++ b/vulkan-sys/src/structs/core/displayplanecapabilities.rs
@@ -0,0 +1,15 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkDisplayPlaneCapabilitiesKHR {
+    pub supportedAlpha: VkDisplayPlaneAlphaFlagBitsKHR,
+    pub minSrcPosition: VkOffset2D,
+    pub maxSrcPosition: VkOffset2D,
+    pub minSrcExtent: VkExtent2D,
+    pub maxSrcExtent: VkExtent2D,
+    pub minDstPosition: VkOffset2D,
+    pub maxDstPosition: VkOffset2D,
+    pub minDstExtent: VkExtent2D,
+    pub maxDstExtent: VkExtent2D,
+}
diff --git a/vulkan-sys/src/structs/core/displayplaneproperties.rs b/vulkan-sys/src/structs/core/displayplaneproperties.rs
new file mode 100644
index 0000000..798475d
--- /dev/null
+++ b/vulkan-sys/src/structs/core/displayplaneproperties.rs
@@ -0,0 +1,8 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkDisplayPlanePropertiesKHR {
+    pub currentDisplay: VkDisplayKHR,
+    pub currentStackIndex: u32,
+}
diff --git a/vulkan-sys/src/structs/core/displayproperties.rs b/vulkan-sys/src/structs/core/displayproperties.rs
new file mode 100644
index 0000000..11acacd
--- /dev/null
+++ b/vulkan-sys/src/structs/core/displayproperties.rs
@@ -0,0 +1,21 @@
+use crate::prelude::*;
+
+use std::os::raw::c_char;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkDisplayPropertiesKHR {
+    pub display: VkDisplayKHR,
+    pub displayName: *const c_char,
+    pub physicalDimensions: VkExtent2D,
+    pub physicalResolution: VkExtent2D,
+    pub supportedTransforms: VkSurfaceTransformFlagBitsKHR,
+    pub planeReorderPossible: VkBool32,
+    pub persistentContent: VkBool32,
+}
+
+impl VkDisplayPropertiesKHR {
+ pub fn display_name(&self) -> Result { + VkString::try_from(self.displayName) + } +} diff --git a/vulkan-sys/src/structs/core/drawindexedindirectcommand.rs b/vulkan-sys/src/structs/core/drawindexedindirectcommand.rs new file mode 100644 index 0000000..3e455a0 --- /dev/null +++ b/vulkan-sys/src/structs/core/drawindexedindirectcommand.rs @@ -0,0 +1,9 @@ +#[repr(C)] +#[derive(Debug)] +pub struct VkDrawIndexedIndirectCommand { + pub indexCount: u32, + pub instanceCount: u32, + pub firstIndex: u32, + pub vertexOffset: i32, + pub firstInstance: u32, +} diff --git a/vulkan-sys/src/structs/core/drawindirectcommand.rs b/vulkan-sys/src/structs/core/drawindirectcommand.rs new file mode 100644 index 0000000..c18867e --- /dev/null +++ b/vulkan-sys/src/structs/core/drawindirectcommand.rs @@ -0,0 +1,8 @@ +#[repr(C)] +#[derive(Debug)] +pub struct VkDrawIndirectCommand { + pub vertexCount: u32, + pub instanceCount: u32, + pub firstVertex: u32, + pub firstInstance: u32, +} diff --git a/vulkan-sys/src/structs/core/eventcreateinfo.rs b/vulkan-sys/src/structs/core/eventcreateinfo.rs new file mode 100644 index 0000000..b06462d --- /dev/null +++ b/vulkan-sys/src/structs/core/eventcreateinfo.rs @@ -0,0 +1,25 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkEventCreateInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkEventCreateFlagBits, +} + +impl VkEventCreateInfo { + pub fn new(flags: T) -> Self + where + T: Into, + { + VkEventCreateInfo { + sType: VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, + pNext: ptr::null(), + flags: flags.into(), + } + } +} diff --git a/vulkan-sys/src/structs/core/extensionproperties.rs b/vulkan-sys/src/structs/core/extensionproperties.rs new file mode 100644 index 0000000..2b2cac9 --- /dev/null +++ b/vulkan-sys/src/structs/core/extensionproperties.rs @@ -0,0 +1,25 @@ +use crate::prelude::*; + +use std::os::raw::c_char; + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct VkExtensionProperties { + pub extensionName: [c_char; VK_MAX_EXTENSION_NAME_SIZE as usize], + pub specVersion: u32, +} + +impl VkExtensionProperties { + pub fn extension_name(&self) -> Result { + VkString::try_from(&self.extensionName as *const c_char) + } +} + +impl Default for VkExtensionProperties { + fn default() -> Self { + VkExtensionProperties { + extensionName: [0; VK_MAX_EXTENSION_NAME_SIZE as usize], + specVersion: 0, + } + } +} diff --git a/vulkan-sys/src/structs/core/extent2d.rs b/vulkan-sys/src/structs/core/extent2d.rs new file mode 100644 index 0000000..1f53425 --- /dev/null +++ b/vulkan-sys/src/structs/core/extent2d.rs @@ -0,0 +1,6 @@ +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)] +pub struct VkExtent2D { + pub width: u32, + pub height: u32, +} diff --git a/vulkan-sys/src/structs/core/extent3d.rs b/vulkan-sys/src/structs/core/extent3d.rs new file mode 100644 index 0000000..8568061 --- /dev/null +++ b/vulkan-sys/src/structs/core/extent3d.rs @@ -0,0 +1,7 @@ +#[repr(C)] +#[derive(Debug, Clone)] +pub struct VkExtent3D { + pub width: u32, + pub height: u32, + pub depth: u32, +} diff --git a/vulkan-sys/src/structs/core/externalmemorybuffercreateinfo.rs b/vulkan-sys/src/structs/core/externalmemorybuffercreateinfo.rs new file mode 100644 index 0000000..8ac0ac1 --- /dev/null +++ b/vulkan-sys/src/structs/core/externalmemorybuffercreateinfo.rs @@ -0,0 +1,25 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkExternalMemoryBufferCreateInfo { + 
pub sType: VkStructureType, + pub pNext: *const c_void, + pub handleTypes: VkExternalMemoryHandleTypeFlags, +} + +impl VkExternalMemoryBufferCreateInfo { + pub fn new(handle_types: T) -> Self + where + T: Into, + { + VkExternalMemoryBufferCreateInfo { + sType: VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO, + pNext: ptr::null(), + handleTypes: handle_types.into(), + } + } +} diff --git a/vulkan-sys/src/structs/core/fencecreateinfo.rs b/vulkan-sys/src/structs/core/fencecreateinfo.rs new file mode 100644 index 0000000..9a1519d --- /dev/null +++ b/vulkan-sys/src/structs/core/fencecreateinfo.rs @@ -0,0 +1,25 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkFenceCreateInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkFenceCreateFlagBits, +} + +impl VkFenceCreateInfo { + pub fn new(flags: T) -> Self + where + T: Into, + { + VkFenceCreateInfo { + sType: VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, + pNext: ptr::null(), + flags: flags.into(), + } + } +} diff --git a/vulkan-sys/src/structs/core/formatproperties.rs b/vulkan-sys/src/structs/core/formatproperties.rs new file mode 100644 index 0000000..1ecec3c --- /dev/null +++ b/vulkan-sys/src/structs/core/formatproperties.rs @@ -0,0 +1,9 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkFormatProperties { + pub linearTilingFeatures: VkFormatFeatureFlagBits, + pub optimalTilingFeatures: VkFormatFeatureFlagBits, + pub bufferFeatures: VkFormatFeatureFlagBits, +} diff --git a/vulkan-sys/src/structs/core/framebuffercreateinfo.rs b/vulkan-sys/src/structs/core/framebuffercreateinfo.rs new file mode 100644 index 0000000..d86c323 --- /dev/null +++ b/vulkan-sys/src/structs/core/framebuffercreateinfo.rs @@ -0,0 +1,44 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkFramebufferCreateInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkFramebufferCreateFlagBits, + pub renderPass: VkRenderPass, + pub attachmentCount: u32, + pub pAttachments: *const VkImageView, + pub width: u32, + pub height: u32, + pub layers: u32, +} + +impl VkFramebufferCreateInfo { + pub fn new( + flags: T, + renderpass: VkRenderPass, + attachments: &[VkImageView], + width: u32, + height: u32, + layers: u32, + ) -> Self + where + T: Into, + { + VkFramebufferCreateInfo { + sType: VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, + pNext: ptr::null(), + flags: flags.into(), + renderPass: renderpass, + attachmentCount: attachments.len() as u32, + pAttachments: attachments.as_ptr(), + width, + height, + layers, + } + } +} diff --git a/vulkan-sys/src/structs/core/graphicspipelinecreateinfo.rs b/vulkan-sys/src/structs/core/graphicspipelinecreateinfo.rs new file mode 100644 index 0000000..886fdf2 --- /dev/null +++ b/vulkan-sys/src/structs/core/graphicspipelinecreateinfo.rs @@ -0,0 +1,101 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkGraphicsPipelineCreateInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkPipelineCreateFlagBits, + pub stageCount: u32, + pub pStages: *const VkPipelineShaderStageCreateInfo, + pub pVertexInputState: *const VkPipelineVertexInputStateCreateInfo, + pub pInputAssemblyState: *const VkPipelineInputAssemblyStateCreateInfo, + pub pTessellationState: *const VkPipelineTessellationStateCreateInfo, + pub pViewportState: *const VkPipelineViewportStateCreateInfo, + pub 
pRasterizationState: *const VkPipelineRasterizationStateCreateInfo,
+    pub pMultisampleState: *const VkPipelineMultisampleStateCreateInfo,
+    pub pDepthStencilState: *const VkPipelineDepthStencilStateCreateInfo,
+    pub pColorBlendState: *const VkPipelineColorBlendStateCreateInfo,
+    pub pDynamicState: *const VkPipelineDynamicStateCreateInfo,
+    pub layout: VkPipelineLayout,
+    pub renderPass: VkRenderPass,
+    pub subpass: u32,
+    pub basePipelineHandle: VkPipeline,
+    pub basePipelineIndex: i32,
+}
+
+impl VkGraphicsPipelineCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        stages: &[VkPipelineShaderStageCreateInfo],
+        vertex_input: Option<&VkPipelineVertexInputStateCreateInfo>,
+        input_assembly: Option<&VkPipelineInputAssemblyStateCreateInfo>,
+        tesselation: Option<&VkPipelineTessellationStateCreateInfo>,
+        viewport: Option<&VkPipelineViewportStateCreateInfo>,
+        rasterization: &VkPipelineRasterizationStateCreateInfo,
+        multisample: Option<&VkPipelineMultisampleStateCreateInfo>,
+        depth_stencil: Option<&VkPipelineDepthStencilStateCreateInfo>,
+        color_blend: Option<&VkPipelineColorBlendStateCreateInfo>,
+        dynamic: Option<&VkPipelineDynamicStateCreateInfo>,
+        layout: VkPipelineLayout,
+        renderpass: VkRenderPass,
+        subpass: u32,
+    ) -> Self
+    where
+        T: Into<VkPipelineCreateFlagBits>,
+    {
+        VkGraphicsPipelineCreateInfo {
+            sType: VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            stageCount: stages.len() as u32,
+            pStages: stages.as_ptr(),
+            pVertexInputState: match vertex_input {
+                Some(state) => state as *const _,
+                None => ptr::null(),
+            },
+            pInputAssemblyState: match input_assembly {
+                Some(state) => state as *const _,
+                None => ptr::null(),
+            },
+            pTessellationState: match tesselation {
+                Some(state) => state as *const _,
+                None => ptr::null(),
+            },
+            pViewportState: match viewport {
+                Some(state) => state as *const _,
+                None => ptr::null(),
+            },
+            pRasterizationState: rasterization,
+            pMultisampleState: match multisample {
+                Some(state) => state as *const _,
+                None => ptr::null(),
+            },
+            pDepthStencilState: match depth_stencil {
+                Some(state) => state as *const _,
+                None => ptr::null(),
+            },
+            pColorBlendState: match color_blend {
+                Some(state) => state as *const _,
+                None => ptr::null(),
+            },
+            pDynamicState: match dynamic {
+                Some(state) => state as *const _,
+                None => ptr::null(),
+            },
+            layout,
+            renderPass: renderpass,
+            subpass,
+            basePipelineHandle: VkPipeline::NULL_HANDLE,
+            basePipelineIndex: -1,
+        }
+    }
+
+    pub fn set_base_pipeline(&mut self, pipeline: VkPipeline, index: i32) {
+        self.basePipelineHandle = pipeline;
+        self.basePipelineIndex = index;
+    }
+}
diff --git a/vulkan-sys/src/structs/core/image_memory_requirements_info_2.rs b/vulkan-sys/src/structs/core/image_memory_requirements_info_2.rs
new file mode 100644
index 0000000..d38da07
--- /dev/null
+++ b/vulkan-sys/src/structs/core/image_memory_requirements_info_2.rs
@@ -0,0 +1,22 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkImageMemoryRequirementsInfo2 {
+    pub sType: VkStructureType,
+    pub pNext: *mut c_void,
+    pub image: VkImage,
+}
+
+impl VkImageMemoryRequirementsInfo2 {
+    pub fn new(image: VkImage) -> Self {
+        VkImageMemoryRequirementsInfo2 {
+            sType: VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2,
+            pNext: ptr::null_mut(),
+            image,
+        }
+    }
+}
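// Editor's note: a hedged sketch, not part of the patch. It shows how the
// Option-typed arguments of VkGraphicsPipelineCreateInfo::new above map to
// nullable pointers; only the rasterization state is mandatory. All state
// structs and handles come in as parameters, and the u32 -> flag-bits
// conversion is assumed.
fn basic_pipeline_create_info(
    stages: &[VkPipelineShaderStageCreateInfo],
    vertex_input: &VkPipelineVertexInputStateCreateInfo,
    input_assembly: &VkPipelineInputAssemblyStateCreateInfo,
    viewport: &VkPipelineViewportStateCreateInfo,
    rasterization: &VkPipelineRasterizationStateCreateInfo,
    multisample: &VkPipelineMultisampleStateCreateInfo,
    color_blend: &VkPipelineColorBlendStateCreateInfo,
    layout: VkPipelineLayout,
    render_pass: VkRenderPass,
) -> VkGraphicsPipelineCreateInfo {
    VkGraphicsPipelineCreateInfo::new(
        0, // pipeline create flags
        stages,
        Some(vertex_input),
        Some(input_assembly),
        None, // no tessellation
        Some(viewport),
        rasterization, // required, hence not Option
        Some(multisample),
        None, // no depth/stencil attachment in this sketch
        Some(color_blend),
        None, // no dynamic state
        layout,
        render_pass,
        0, // subpass index
    )
}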
diff --git a/vulkan-sys/src/structs/core/imageblit.rs b/vulkan-sys/src/structs/core/imageblit.rs
new file mode 100644
index 0000000..76c272c
--- /dev/null
+++ b/vulkan-sys/src/structs/core/imageblit.rs
@@ -0,0 +1,10 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkImageBlit {
+    pub srcSubresource: VkImageSubresourceLayers,
+    pub srcOffsets: [VkOffset3D; 2],
+    pub dstSubresource: VkImageSubresourceLayers,
+    pub dstOffsets: [VkOffset3D; 2],
+}
diff --git a/vulkan-sys/src/structs/core/imagecopy.rs b/vulkan-sys/src/structs/core/imagecopy.rs
new file mode 100644
index 0000000..4b60950
--- /dev/null
+++ b/vulkan-sys/src/structs/core/imagecopy.rs
@@ -0,0 +1,11 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkImageCopy {
+    pub srcSubresource: VkImageSubresourceLayers,
+    pub srcOffset: VkOffset3D,
+    pub dstSubresource: VkImageSubresourceLayers,
+    pub dstOffset: VkOffset3D,
+    pub extent: VkExtent3D,
+}
diff --git a/vulkan-sys/src/structs/core/imagecreateinfo.rs b/vulkan-sys/src/structs/core/imagecreateinfo.rs
new file mode 100644
index 0000000..172f543
--- /dev/null
+++ b/vulkan-sys/src/structs/core/imagecreateinfo.rs
@@ -0,0 +1,64 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkImageCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkImageCreateFlagBits,
+    pub imageType: VkImageType,
+    pub format: VkFormat,
+    pub extent: VkExtent3D,
+    pub mipLevels: u32,
+    pub arrayLayers: u32,
+    pub samples: VkSampleCountFlagBits,
+    pub tiling: VkImageTiling,
+    pub usage: VkImageUsageFlagBits,
+    pub sharingMode: VkSharingMode,
+    pub queueFamilyIndexCount: u32,
+    pub pQueueFamilyIndices: *const u32,
+    pub initialLayout: VkImageLayout,
+}
+
+impl VkImageCreateInfo {
+    pub fn new<T, U, V>(
+        flags: T,
+        image_type: VkImageType,
+        format: VkFormat,
+        extent: VkExtent3D,
+        mip_levels: u32,
+        array_layers: u32,
+        samples: U,
+        tiling: VkImageTiling,
+        usage: V,
+        sharing_mode: VkSharingMode,
+        queue_family_indices: &[u32],
+        initial_layout: VkImageLayout,
+    ) -> Self
+    where
+        T: Into<VkImageCreateFlagBits>,
+        U: Into<VkSampleCountFlagBits>,
+        V: Into<VkImageUsageFlagBits>,
+    {
+        VkImageCreateInfo {
+            sType: VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            imageType: image_type,
+            format,
+            extent,
+            mipLevels: mip_levels,
+            arrayLayers: array_layers,
+            samples: samples.into(),
+            tiling,
+            usage: usage.into(),
+            sharingMode: sharing_mode,
+            queueFamilyIndexCount: queue_family_indices.len() as u32,
+            pQueueFamilyIndices: queue_family_indices.as_ptr(),
+            initialLayout: initial_layout,
+        }
+    }
+}
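// Editor's note: a hedged sketch, not part of the patch, describing a small 2D
// texture. The enum and bit constants follow standard Vulkan naming as bound
// elsewhere in this crate; the u32 -> flag-bits conversions are assumed.
fn texture_create_info(width: u32, height: u32) -> VkImageCreateInfo {
    VkImageCreateInfo::new(
        0, // no create flags
        VK_IMAGE_TYPE_2D,
        VK_FORMAT_R8G8B8A8_UNORM,
        VkExtent3D { width, height, depth: 1 },
        1, // mip levels
        1, // array layers
        VK_SAMPLE_COUNT_1_BIT,
        VK_IMAGE_TILING_OPTIMAL,
        VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
        VK_SHARING_MODE_EXCLUSIVE,
        &[], // queue family indices; unused with exclusive sharing
        VK_IMAGE_LAYOUT_UNDEFINED,
    )
}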
diff --git a/vulkan-sys/src/structs/core/imageformatproperties.rs b/vulkan-sys/src/structs/core/imageformatproperties.rs
new file mode 100644
index 0000000..752e009
--- /dev/null
+++ b/vulkan-sys/src/structs/core/imageformatproperties.rs
@@ -0,0 +1,11 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkImageFormatProperties {
+    pub maxExtent: VkExtent3D,
+    pub maxMipLevels: u32,
+    pub maxArrayLayers: u32,
+    pub sampleCounts: VkSampleCountFlagBits,
+    pub maxResourceSize: VkDeviceSize,
+}
diff --git a/vulkan-sys/src/structs/core/imagememorybarrier.rs b/vulkan-sys/src/structs/core/imagememorybarrier.rs
new file mode 100644
index 0000000..08c2af5
--- /dev/null
+++ b/vulkan-sys/src/structs/core/imagememorybarrier.rs
@@ -0,0 +1,49 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkImageMemoryBarrier {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub srcAccessMask: VkAccessFlagBits,
+    pub dstAccessMask: VkAccessFlagBits,
+    pub oldLayout: VkImageLayout,
+    pub newLayout: VkImageLayout,
+    pub srcQueueFamilyIndex: u32,
+    pub dstQueueFamilyIndex: u32,
+    pub image: VkImage,
+    pub subresourceRange: VkImageSubresourceRange,
+}
+
+impl VkImageMemoryBarrier {
+    pub fn new<S, T>(
+        src_access_mask: S,
+        dst_access_mask: T,
+        old_layout: VkImageLayout,
+        new_layout: VkImageLayout,
+        src_queue_family_index: u32,
+        dst_queue_family_index: u32,
+        image: VkImage,
+        subresource_range: VkImageSubresourceRange,
+    ) -> VkImageMemoryBarrier
+    where
+        S: Into<VkAccessFlagBits>,
+        T: Into<VkAccessFlagBits>,
+    {
+        VkImageMemoryBarrier {
+            sType: VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+            pNext: ptr::null(),
+            srcAccessMask: src_access_mask.into(),
+            dstAccessMask: dst_access_mask.into(),
+            oldLayout: old_layout,
+            newLayout: new_layout,
+            srcQueueFamilyIndex: src_queue_family_index,
+            dstQueueFamilyIndex: dst_queue_family_index,
+            image,
+            subresourceRange: subresource_range,
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/imageresolve.rs b/vulkan-sys/src/structs/core/imageresolve.rs
new file mode 100644
index 0000000..f0c04bd
--- /dev/null
+++ b/vulkan-sys/src/structs/core/imageresolve.rs
@@ -0,0 +1,11 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkImageResolve {
+    pub srcSubresource: VkImageSubresourceLayers,
+    pub srcOffset: VkOffset3D,
+    pub dstSubresource: VkImageSubresourceLayers,
+    pub dstOffset: VkOffset3D,
+    pub extent: VkExtent3D,
+}
diff --git a/vulkan-sys/src/structs/core/imagesubresource.rs b/vulkan-sys/src/structs/core/imagesubresource.rs
new file mode 100644
index 0000000..7ea84fa
--- /dev/null
+++ b/vulkan-sys/src/structs/core/imagesubresource.rs
@@ -0,0 +1,9 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkImageSubresource {
+    pub aspectMask: VkImageAspectFlagBits,
+    pub mipLevel: u32,
+    pub arrayLayer: u32,
+}
diff --git a/vulkan-sys/src/structs/core/imagesubresourcelayers.rs b/vulkan-sys/src/structs/core/imagesubresourcelayers.rs
new file mode 100644
index 0000000..4105380
--- /dev/null
+++ b/vulkan-sys/src/structs/core/imagesubresourcelayers.rs
@@ -0,0 +1,10 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkImageSubresourceLayers {
+    pub aspectMask: VkImageAspectFlagBits,
+    pub mipLevel: u32,
+    pub baseArrayLayer: u32,
+    pub layerCount: u32,
+}
diff --git a/vulkan-sys/src/structs/core/imagesubresourcerange.rs b/vulkan-sys/src/structs/core/imagesubresourcerange.rs
new file mode 100644
index 0000000..78b0cce
--- /dev/null
+++ b/vulkan-sys/src/structs/core/imagesubresourcerange.rs
@@ -0,0 +1,11 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug, Clone)]
+pub struct VkImageSubresourceRange {
+    pub aspectMask: VkImageAspectFlagBits,
+    pub baseMipLevel: u32,
+    pub levelCount: u32,
+    pub baseArrayLayer: u32,
+    pub layerCount: u32,
+}
diff --git a/vulkan-sys/src/structs/core/imageviewcreateinfo.rs b/vulkan-sys/src/structs/core/imageviewcreateinfo.rs
new file mode 100644
index 0000000..82293c3
--- /dev/null
+++ b/vulkan-sys/src/structs/core/imageviewcreateinfo.rs
@@ -0,0 +1,42 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkImageViewCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkImageViewCreateFlagBits,
+    pub image: VkImage,
+    pub viewType: VkImageViewType,
+    pub format: VkFormat,
+    pub components: VkComponentMapping,
+    pub subresourceRange: VkImageSubresourceRange,
+}
+
+impl VkImageViewCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        image: VkImage,
+        view_type: VkImageViewType,
+        format: VkFormat,
+        components: VkComponentMapping,
+        subresourceRange: VkImageSubresourceRange,
+    )
-> Self + where + T: Into, + { + VkImageViewCreateInfo { + sType: VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, + pNext: ptr::null(), + flags: flags.into(), + image, + viewType: view_type, + format, + components, + subresourceRange, + } + } +} diff --git a/vulkan-sys/src/structs/core/instancecreateinfo.rs b/vulkan-sys/src/structs/core/instancecreateinfo.rs new file mode 100644 index 0000000..1da8f14 --- /dev/null +++ b/vulkan-sys/src/structs/core/instancecreateinfo.rs @@ -0,0 +1,93 @@ +use crate::prelude::*; + +use std::ffi::CStr; +use std::fmt; +use std::marker::PhantomData; +use std::os::raw::{c_char, c_void}; +use std::ptr; +use std::slice; + +#[repr(C)] +pub struct VkInstanceCreateInfo<'a> { + lt: PhantomData<&'a ()>, + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkInstanceCreateFlagBits, + pub pApplicationInfo: *const VkApplicationInfo<'a>, + pub enabledLayerCount: u32, + pub ppEnabledLayerNames: *const *const c_char, + pub enabledExtensionCount: u32, + pub ppEnabledExtensionNames: *const *const c_char, +} + +impl<'a> VkInstanceCreateInfo<'a> { + pub fn new( + flags: T, + application_info: &VkApplicationInfo<'a>, + enabled_layer_names: &VkNames, + enabled_extension_names: &VkNames, + ) -> Self + where + T: Into, + { + VkInstanceCreateInfo { + lt: PhantomData, + sType: VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, + pNext: ptr::null(), + flags: flags.into(), + pApplicationInfo: application_info as *const _, + enabledLayerCount: enabled_layer_names.c_names().len() as u32, + ppEnabledLayerNames: enabled_layer_names.c_names().as_ptr(), + enabledExtensionCount: enabled_extension_names.c_names().len() as u32, + ppEnabledExtensionNames: enabled_extension_names.c_names().as_ptr(), + } + } + + pub fn extension_names(&self) -> Vec { + let mut names = Vec::new(); + let extensions: &[*const c_char] = unsafe { + slice::from_raw_parts( + self.ppEnabledExtensionNames, + self.enabledExtensionCount as usize, + ) + }; + + for extension in extensions { + names.push(VkString::try_from(*extension).unwrap()); + } + + names + } +} + +impl<'a> fmt::Debug for VkInstanceCreateInfo<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut enabled_layers_string = String::from("{{"); + let layers_slice: &[*const c_char] = unsafe { + slice::from_raw_parts(self.ppEnabledLayerNames, self.enabledLayerCount as usize) + }; + + for layer in layers_slice { + let cstr_layer = unsafe { CStr::from_ptr(*layer) }; + + if let Ok(layer) = cstr_layer.to_str() { + enabled_layers_string = format!("{} {},", enabled_layers_string, layer); + } + } + + enabled_layers_string = format!("{} }}", enabled_layers_string); + + let enabled_extensions_string = String::new(); + + write!( + f, + "{{ sType: {:?}, pNext: {:?}, flags: {:?}, pApplicationInfo: {:?}, enabledLayers: {}, enabledExtensions: {} }}", + self.sType, + self.pNext, + self.flags, + self.pApplicationInfo, + enabled_layers_string, + enabled_extensions_string + ) + } +} diff --git a/vulkan-sys/src/structs/core/iossurfacecreateinfomvk.rs b/vulkan-sys/src/structs/core/iossurfacecreateinfomvk.rs new file mode 100644 index 0000000..409251f --- /dev/null +++ b/vulkan-sys/src/structs/core/iossurfacecreateinfomvk.rs @@ -0,0 +1,27 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkIOSSurfaceCreateInfoMVK { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkIOSSurfaceCreateFlagBitsMVK, + pub pView: *const c_void, +} + +impl VkIOSSurfaceCreateInfoMVK { + pub fn 
new(flags: T, view: &U) -> Self + where + T: Into, + { + VkIOSSurfaceCreateInfoMVK { + sType: VK_STRUCTURE_TYPE_IOS_SURFACE_CREATE_INFO_MVK, + pNext: ptr::null(), + flags: flags.into(), + pView: view as *const U as *const c_void, + } + } +} diff --git a/vulkan-sys/src/structs/core/layerproperties.rs b/vulkan-sys/src/structs/core/layerproperties.rs new file mode 100644 index 0000000..d62e0c1 --- /dev/null +++ b/vulkan-sys/src/structs/core/layerproperties.rs @@ -0,0 +1,22 @@ +use crate::prelude::*; + + +use std::os::raw::c_char; + +#[repr(C)] +pub struct VkLayerProperties { + pub layerName: [c_char; VK_MAX_EXTENSION_NAME_SIZE as usize], + pub specVersion: u32, + pub implementationVersion: u32, + pub description: [c_char; VK_MAX_DESCRIPTION_SIZE as usize], +} + +impl VkLayerProperties { + pub fn layer_name(&self) -> Result { + VkString::try_from(&self.layerName as *const c_char) + } + + pub fn description(&self) -> Result { + VkString::try_from(&self.description as *const c_char) + } +} diff --git a/vulkan-sys/src/structs/core/macossurfacecreateinfomvk.rs b/vulkan-sys/src/structs/core/macossurfacecreateinfomvk.rs new file mode 100644 index 0000000..dc5b132 --- /dev/null +++ b/vulkan-sys/src/structs/core/macossurfacecreateinfomvk.rs @@ -0,0 +1,28 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkMacOSSurfaceCreateInfoMVK { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkMacOSSurfaceCreateFlagBitsMVK, + pub pView: *const c_void, +} + +impl VkMacOSSurfaceCreateInfoMVK { + // TODO: replace 'U' with the actual type of a macos view + pub fn new(flags: T, view: &U) -> Self + where + T: Into, + { + VkMacOSSurfaceCreateInfoMVK { + sType: VK_STRUCTURE_TYPE_MACOS_SURFACE_CREATE_INFO_MVK, + pNext: ptr::null(), + flags: flags.into(), + pView: view as *const U as *const c_void, + } + } +} diff --git a/vulkan-sys/src/structs/core/mappedmemoryrange.rs b/vulkan-sys/src/structs/core/mappedmemoryrange.rs new file mode 100644 index 0000000..9c3a8f3 --- /dev/null +++ b/vulkan-sys/src/structs/core/mappedmemoryrange.rs @@ -0,0 +1,26 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkMappedMemoryRange { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub memory: VkDeviceMemory, + pub offset: VkDeviceSize, + pub size: VkDeviceSize, +} + +impl VkMappedMemoryRange { + pub fn new(memory: VkDeviceMemory, offset: VkDeviceSize, size: VkDeviceSize) -> Self { + VkMappedMemoryRange { + sType: VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE, + pNext: ptr::null(), + memory, + offset, + size, + } + } +} diff --git a/vulkan-sys/src/structs/core/memoryallocateinfo.rs b/vulkan-sys/src/structs/core/memoryallocateinfo.rs new file mode 100644 index 0000000..f2b5cfb --- /dev/null +++ b/vulkan-sys/src/structs/core/memoryallocateinfo.rs @@ -0,0 +1,24 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkMemoryAllocateInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub allocationSize: VkDeviceSize, + pub memoryTypeIndex: u32, +} + +impl VkMemoryAllocateInfo { + pub fn new(allocation_size: VkDeviceSize, memory_type_index: u32) -> Self { + VkMemoryAllocateInfo { + sType: VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, + pNext: ptr::null(), + allocationSize: allocation_size, + memoryTypeIndex: memory_type_index, + } + } +} diff --git a/vulkan-sys/src/structs/core/memorybarrier.rs 
b/vulkan-sys/src/structs/core/memorybarrier.rs new file mode 100644 index 0000000..8b5d0fb --- /dev/null +++ b/vulkan-sys/src/structs/core/memorybarrier.rs @@ -0,0 +1,28 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkMemoryBarrier { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub srcAccessMask: VkAccessFlagBits, + pub dstAccessMask: VkAccessFlagBits, +} + +impl VkMemoryBarrier { + pub fn new(src_access_mask: S, dst_access_mask: T) -> VkMemoryBarrier + where + S: Into, + T: Into, + { + VkMemoryBarrier { + sType: VK_STRUCTURE_TYPE_MEMORY_BARRIER, + pNext: ptr::null(), + srcAccessMask: src_access_mask.into(), + dstAccessMask: dst_access_mask.into(), + } + } +} diff --git a/vulkan-sys/src/structs/core/memoryheap.rs b/vulkan-sys/src/structs/core/memoryheap.rs new file mode 100644 index 0000000..e587228 --- /dev/null +++ b/vulkan-sys/src/structs/core/memoryheap.rs @@ -0,0 +1,8 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug, Clone, Default, Copy)] +pub struct VkMemoryHeap { + pub size: VkDeviceSize, + pub flags: VkMemoryHeapFlagBits, +} diff --git a/vulkan-sys/src/structs/core/memoryrequirements.rs b/vulkan-sys/src/structs/core/memoryrequirements.rs new file mode 100644 index 0000000..f8c9b4b --- /dev/null +++ b/vulkan-sys/src/structs/core/memoryrequirements.rs @@ -0,0 +1,23 @@ +use crate::prelude::*; + +use std::cmp::Ordering; + +#[repr(C)] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct VkMemoryRequirements { + pub size: VkDeviceSize, + pub alignment: VkDeviceSize, + pub memoryTypeBits: VkMemoryPropertyFlagBits, +} + +impl Ord for VkMemoryRequirements { + fn cmp(&self, other: &Self) -> Ordering { + self.size.cmp(&other.size) + } +} + +impl PartialOrd for VkMemoryRequirements { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} diff --git a/vulkan-sys/src/structs/core/memorytype.rs b/vulkan-sys/src/structs/core/memorytype.rs new file mode 100644 index 0000000..91a89c1 --- /dev/null +++ b/vulkan-sys/src/structs/core/memorytype.rs @@ -0,0 +1,8 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug, Clone, Default, Copy)] +pub struct VkMemoryType { + pub propertyFlagBits: VkMemoryPropertyFlagBits, + pub heapIndex: u32, +} diff --git a/vulkan-sys/src/structs/core/mod.rs b/vulkan-sys/src/structs/core/mod.rs new file mode 100644 index 0000000..b77f1f4 --- /dev/null +++ b/vulkan-sys/src/structs/core/mod.rs @@ -0,0 +1,133 @@ +pub mod allocationcallback; +pub mod applicationinfo; +pub mod attachmentdescription; +pub mod attachmentreference; +pub mod base_in_structure; +pub mod base_out_structure; +pub mod bind_buffer_memory_info; +pub mod bind_image_memory_info; +pub mod bindsparseinfo; +pub mod buffer_device_address_info; +pub mod buffer_memory_requirements_info_2; +pub mod buffercopy; +pub mod buffercreateinfo; +pub mod bufferdeviceaddresscreateinfoext; +pub mod bufferimagecopy; +pub mod buffermemorybarrier; +pub mod bufferviewcreateinfo; +pub mod clearattachment; +pub mod clearcolorvalue; +pub mod cleardepthstencilvalue; +pub mod clearrect; +pub mod clearvalue; +pub mod commandbufferallocateinfo; +pub mod commandbufferbegininfo; +pub mod commandbufferinheritanceinfo; +pub mod commandpoolcreateinfo; +pub mod componentmapping; +pub mod computepipelinecreateinfo; +pub mod copydescriptorset; +pub mod debugreportcallbackcreateinfoext; +pub mod debugutilmessengercallbackdataext; +pub mod debugutilslabelext; +pub mod debugutilsmessengercreateinfoext; +pub mod 
debugutilsobjectnameinfoext; +pub mod descriptorbufferinfo; +pub mod descriptorimageinfo; +pub mod descriptorpoolcreateinfo; +pub mod descriptorpoolsize; +pub mod descriptorsetallocateinfo; +pub mod descriptorsetlayoutbinding; +pub mod descriptorsetlayoutcreateinfo; +pub mod descriptorsetlayoutsupport; +pub mod devicecreateinfo; +pub mod devicequeuecreateinfo; +pub mod dispatchindirectcommand; +pub mod displayplanecapabilities; +pub mod displayplaneproperties; +pub mod displayproperties; +pub mod drawindexedindirectcommand; +pub mod drawindirectcommand; +pub mod eventcreateinfo; +pub mod extensionproperties; +pub mod extent2d; +pub mod extent3d; +pub mod externalmemorybuffercreateinfo; +pub mod fencecreateinfo; +pub mod formatproperties; +pub mod framebuffercreateinfo; +pub mod graphicspipelinecreateinfo; +pub mod image_memory_requirements_info_2; +pub mod imageblit; +pub mod imagecopy; +pub mod imagecreateinfo; +pub mod imageformatproperties; +pub mod imagememorybarrier; +pub mod imageresolve; +pub mod imagesubresource; +pub mod imagesubresourcelayers; +pub mod imagesubresourcerange; +pub mod imageviewcreateinfo; +pub mod instancecreateinfo; +pub mod iossurfacecreateinfomvk; +pub mod layerproperties; +pub mod macossurfacecreateinfomvk; +pub mod mappedmemoryrange; +pub mod memoryallocateinfo; +pub mod memorybarrier; +pub mod memoryheap; +pub mod memoryrequirements; +pub mod memorytype; +pub mod mvkdisplayconfiguration; +pub mod mvkphysicaldevicemetalfeatures; +pub mod mvkswapchainperformance; +pub mod offset2d; +pub mod offset3d; +pub mod physicaldevicefeatures; +pub mod physicaldevicelimits; +pub mod physicaldevicemaintanence3properties; +pub mod physicaldevicememoryproperties; +pub mod physicaldeviceproperties; +pub mod physicaldevicesparseproperties; +pub mod pipelinecachecreateinfo; +pub mod pipelinecolorblendattachmentstate; +pub mod pipelinecolorblendstatecreateinfo; +pub mod pipelinedepthstencilstatecreateinfo; +pub mod pipelinedynamicstatecreateinfo; +pub mod pipelineinputassemblystatecreateinfo; +pub mod pipelinelayoutcreateinfo; +pub mod pipelinemultisamplestatecreateinfo; +pub mod pipelinerasterizationstatecreateinfo; +pub mod pipelineshaderstagecreateinfo; +pub mod pipelinetesselationstatecreateinfo; +pub mod pipelinevertexinputstatecreateinfo; +pub mod pipelineviewportstatecreateinfo; +pub mod pushconstantrange; +pub mod querypoolcreateinfo; +pub mod queuefamilyproperties; +pub mod rect2d; +pub mod renderpassbegininfo; +pub mod renderpasscreateinfo; +pub mod samplercreateinfo; +pub mod semaphorecreateinfo; +pub mod shadermodulecreateinfo; +pub mod sparsebuffermemorybindinfo; +pub mod sparseimageformatproperties; +pub mod sparseimagememorybind; +pub mod sparseimagememorybindinfo; +pub mod sparseimagememoryrequirements; +pub mod sparseimageopaquememorybindinfo; +pub mod sparsememorybind; +pub mod specializationinfo; +pub mod specializationmapentry; +pub mod stencilopstate; +pub mod submitinfo; +pub mod subpassdependency; +pub mod subpassdescription; +pub mod subresourcelayout; +pub mod vertexinputattributedescription; +pub mod vertexinputbindingdescription; +pub mod viewport; +pub mod writedescriptorset; + +pub mod prelude; diff --git a/vulkan-sys/src/structs/core/mvkdisplayconfiguration.rs b/vulkan-sys/src/structs/core/mvkdisplayconfiguration.rs new file mode 100644 index 0000000..bd15be9 --- /dev/null +++ b/vulkan-sys/src/structs/core/mvkdisplayconfiguration.rs @@ -0,0 +1,13 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkMVKDeviceConfiguration { + pub 
supportDisplayContentsScale: VkBool32, + pub imageFlipY: VkBool32, + pub shaderConversionFlipFragmentY: VkBool32, + pub shaderConversionFlipVertexY: VkBool32, + pub shaderConversionLogging: VkBool32, + pub performanceTracking: VkBool32, + pub performanceLoggingFrameCount: u32, +} diff --git a/vulkan-sys/src/structs/core/mvkphysicaldevicemetalfeatures.rs b/vulkan-sys/src/structs/core/mvkphysicaldevicemetalfeatures.rs new file mode 100644 index 0000000..379e3ca --- /dev/null +++ b/vulkan-sys/src/structs/core/mvkphysicaldevicemetalfeatures.rs @@ -0,0 +1,13 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkMVKPhysicalDeviceMetalFeatures { + pub depthClipMode: VkBool32, + pub indirectDrawing: VkBool32, + pub baseVertexInstanceDrawing: VkBool32, + pub maxVertexBufferCount: u32, + pub maxFragmentBufferCount: u32, + pub bufferAlignment: VkDeviceSize, + pub pushConstantsAlignment: VkDeviceSize, +} diff --git a/vulkan-sys/src/structs/core/mvkswapchainperformance.rs b/vulkan-sys/src/structs/core/mvkswapchainperformance.rs new file mode 100644 index 0000000..538f362 --- /dev/null +++ b/vulkan-sys/src/structs/core/mvkswapchainperformance.rs @@ -0,0 +1,9 @@ +use std::os::raw::c_double; + +#[repr(C)] +#[derive(Debug)] +pub struct VkMVKSwapchainPerformance { + pub lastFrameInterval: c_double, + pub averageFrameInterval: c_double, + pub averageFramesPerSecond: c_double, +} diff --git a/vulkan-sys/src/structs/core/offset2d.rs b/vulkan-sys/src/structs/core/offset2d.rs new file mode 100644 index 0000000..4b08028 --- /dev/null +++ b/vulkan-sys/src/structs/core/offset2d.rs @@ -0,0 +1,6 @@ +#[repr(C)] +#[derive(Debug, Copy, Clone, Default)] +pub struct VkOffset2D { + pub x: i32, + pub y: i32, +} diff --git a/vulkan-sys/src/structs/core/offset3d.rs b/vulkan-sys/src/structs/core/offset3d.rs new file mode 100644 index 0000000..65532d2 --- /dev/null +++ b/vulkan-sys/src/structs/core/offset3d.rs @@ -0,0 +1,7 @@ +#[repr(C)] +#[derive(Debug)] +pub struct VkOffset3D { + pub x: i32, + pub y: i32, + pub z: i32, +} diff --git a/vulkan-sys/src/structs/core/physicaldevicefeatures.rs b/vulkan-sys/src/structs/core/physicaldevicefeatures.rs new file mode 100644 index 0000000..b9e0b3c --- /dev/null +++ b/vulkan-sys/src/structs/core/physicaldevicefeatures.rs @@ -0,0 +1,131 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug, Clone, Copy, Default)] +pub struct VkPhysicalDeviceFeatures { + pub robustBufferAccess: VkBool32, + pub fullDrawIndexUint32: VkBool32, + pub imageCubeArray: VkBool32, + pub independentBlend: VkBool32, + pub geometryShader: VkBool32, + pub tessellationShader: VkBool32, + pub sampleRateShading: VkBool32, + pub dualSrcBlend: VkBool32, + pub logicOp: VkBool32, + pub multiDrawIndirect: VkBool32, + pub drawIndirectFirstInstance: VkBool32, + pub depthClamp: VkBool32, + pub depthBiasClamp: VkBool32, + pub fillModeNonSolid: VkBool32, + pub depthBounds: VkBool32, + pub wideLines: VkBool32, + pub largePoints: VkBool32, + pub alphaToOne: VkBool32, + pub multiViewport: VkBool32, + pub samplerAnisotropy: VkBool32, + pub textureCompressionETC2: VkBool32, + pub textureCompressionASTC_LDR: VkBool32, + pub textureCompressionBC: VkBool32, + pub occlusionQueryPrecise: VkBool32, + pub pipelineStatisticsQuery: VkBool32, + pub vertexPipelineStoresAndAtomics: VkBool32, + pub fragmentStoresAndAtomics: VkBool32, + pub shaderTessellationAndGeometryPointSize: VkBool32, + pub shaderImageGatherExtended: VkBool32, + pub shaderStorageImageExtendedFormats: VkBool32, + pub shaderStorageImageMultisample: VkBool32, + 
pub shaderStorageImageReadWithoutFormat: VkBool32,
+    pub shaderStorageImageWriteWithoutFormat: VkBool32,
+    pub shaderUniformBufferArrayDynamicIndexing: VkBool32,
+    pub shaderSampledImageArrayDynamicIndexing: VkBool32,
+    pub shaderStorageBufferArrayDynamicIndexing: VkBool32,
+    pub shaderStorageImageArrayDynamicIndexing: VkBool32,
+    pub shaderClipDistance: VkBool32,
+    pub shaderCullDistance: VkBool32,
+    pub shaderFloat64: VkBool32,
+    pub shaderInt64: VkBool32,
+    pub shaderInt16: VkBool32,
+    pub shaderResourceResidency: VkBool32,
+    pub shaderResourceMinLod: VkBool32,
+    pub sparseBinding: VkBool32,
+    pub sparseResidencyBuffer: VkBool32,
+    pub sparseResidencyImage2D: VkBool32,
+    pub sparseResidencyImage3D: VkBool32,
+    pub sparseResidency2Samples: VkBool32,
+    pub sparseResidency4Samples: VkBool32,
+    pub sparseResidency8Samples: VkBool32,
+    pub sparseResidency16Samples: VkBool32,
+    pub sparseResidencyAliased: VkBool32,
+    pub variableMultisampleRate: VkBool32,
+    pub inheritedQueries: VkBool32,
+}
+
+macro_rules! check_flag {
+    ($me: ident, $other: ident, $flag_name: ident) => {
+        if $me.$flag_name == VK_TRUE && $other.$flag_name == VK_FALSE {
+            return false;
+        }
+    };
+}
+
+impl VkPhysicalDeviceFeatures {
+    pub fn is_subset_of(&self, other: &Self) -> bool {
+        check_flag!(self, other, robustBufferAccess);
+        check_flag!(self, other, fullDrawIndexUint32);
+        check_flag!(self, other, imageCubeArray);
+        check_flag!(self, other, independentBlend);
+        check_flag!(self, other, geometryShader);
+        check_flag!(self, other, tessellationShader);
+        check_flag!(self, other, sampleRateShading);
+        check_flag!(self, other, dualSrcBlend);
+        check_flag!(self, other, logicOp);
+        check_flag!(self, other, multiDrawIndirect);
+        check_flag!(self, other, drawIndirectFirstInstance);
+        check_flag!(self, other, depthClamp);
+        check_flag!(self, other, depthBiasClamp);
+        check_flag!(self, other, fillModeNonSolid);
+        check_flag!(self, other, depthBounds);
+        check_flag!(self, other, wideLines);
+        check_flag!(self, other, largePoints);
+        check_flag!(self, other, alphaToOne);
+        check_flag!(self, other, multiViewport);
+        check_flag!(self, other, samplerAnisotropy);
+        check_flag!(self, other, textureCompressionETC2);
+        check_flag!(self, other, textureCompressionASTC_LDR);
+        check_flag!(self, other, textureCompressionBC);
+        check_flag!(self, other, occlusionQueryPrecise);
+        check_flag!(self, other, pipelineStatisticsQuery);
+        check_flag!(self, other, vertexPipelineStoresAndAtomics);
+        check_flag!(self, other, fragmentStoresAndAtomics);
+        check_flag!(self, other, shaderTessellationAndGeometryPointSize);
+        check_flag!(self, other, shaderImageGatherExtended);
+        check_flag!(self, other, shaderStorageImageExtendedFormats);
+        check_flag!(self, other, shaderStorageImageMultisample);
+        check_flag!(self, other, shaderStorageImageReadWithoutFormat);
+        check_flag!(self, other, shaderStorageImageWriteWithoutFormat);
+        check_flag!(self, other, shaderUniformBufferArrayDynamicIndexing);
+        check_flag!(self, other, shaderSampledImageArrayDynamicIndexing);
+        check_flag!(self, other, shaderStorageBufferArrayDynamicIndexing);
+        check_flag!(self, other, shaderStorageImageArrayDynamicIndexing);
+        check_flag!(self, other, shaderClipDistance);
+        check_flag!(self, other, shaderCullDistance);
+        check_flag!(self, other, shaderFloat64);
+        check_flag!(self, other, shaderInt64);
+        check_flag!(self, other, shaderInt16);
+        check_flag!(self, other, shaderResourceResidency);
+        check_flag!(self, other, shaderResourceMinLod);
+        check_flag!(self, other, sparseBinding);
+        check_flag!(self, other, sparseResidencyBuffer);
+        check_flag!(self, other, sparseResidencyImage2D);
+        check_flag!(self, other, sparseResidencyImage3D);
+        check_flag!(self, other, sparseResidency2Samples);
+        check_flag!(self, other, sparseResidency4Samples);
+        check_flag!(self, other, sparseResidency8Samples);
+        check_flag!(self, other, sparseResidency16Samples);
+        check_flag!(self, other, sparseResidencyAliased);
+        check_flag!(self, other, variableMultisampleRate);
+        check_flag!(self, other, inheritedQueries);
+
+        true
+    }
+}
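// Editor's note: a short usage sketch, not part of the patch; it uses only
// types defined in this file. is_subset_of() answers "is every feature I
// request also offered?", which is exactly the check needed when filtering
// physical devices.
fn pick_features(supported: &VkPhysicalDeviceFeatures) -> Option<VkPhysicalDeviceFeatures> {
    let mut requested = VkPhysicalDeviceFeatures::default();
    requested.samplerAnisotropy = VK_TRUE;
    requested.shaderFloat64 = VK_TRUE;

    // reject the device unless every requested flag is also set in `supported`
    requested.is_subset_of(supported).then(|| requested)
}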
diff --git a/vulkan-sys/src/structs/core/physicaldevicelimits.rs b/vulkan-sys/src/structs/core/physicaldevicelimits.rs
new file mode 100644
index 0000000..4640ad0
--- /dev/null
+++ b/vulkan-sys/src/structs/core/physicaldevicelimits.rs
@@ -0,0 +1,112 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug, Default)]
+pub struct VkPhysicalDeviceLimits {
+    pub maxImageDimension1D: u32,
+    pub maxImageDimension2D: u32,
+    pub maxImageDimension3D: u32,
+    pub maxImageDimensionCube: u32,
+    pub maxImageArrayLayers: u32,
+    pub maxTexelBufferElements: u32,
+    pub maxUniformBufferRange: u32,
+    pub maxStorageBufferRange: u32,
+    pub maxPushConstantsSize: u32,
+    pub maxMemoryAllocationCount: u32,
+    pub maxSamplerAllocationCount: u32,
+    pub bufferImageGranularity: VkDeviceSize,
+    pub sparseAddressSpaceSize: VkDeviceSize,
+    pub maxBoundDescriptorSets: u32,
+    pub maxPerStageDescriptorSamplers: u32,
+    pub maxPerStageDescriptorUniformBuffers: u32,
+    pub maxPerStageDescriptorStorageBuffers: u32,
+    pub maxPerStageDescriptorSampledImages: u32,
+    pub maxPerStageDescriptorStorageImages: u32,
+    pub maxPerStageDescriptorInputAttachments: u32,
+    pub maxPerStageResources: u32,
+    pub maxDescriptorSetSamplers: u32,
+    pub maxDescriptorSetUniformBuffers: u32,
+    pub maxDescriptorSetUniformBuffersDynamic: u32,
+    pub maxDescriptorSetStorageBuffers: u32,
+    pub maxDescriptorSetStorageBuffersDynamic: u32,
+    pub maxDescriptorSetSampledImages: u32,
+    pub maxDescriptorSetStorageImages: u32,
+    pub maxDescriptorSetInputAttachments: u32,
+    pub maxVertexInputAttributes: u32,
+    pub maxVertexInputBindings: u32,
+    pub maxVertexInputAttributeOffset: u32,
+    pub maxVertexInputBindingStride: u32,
+    pub maxVertexOutputComponents: u32,
+    pub maxTessellationGenerationLevel: u32,
+    pub maxTessellationPatchSize: u32,
+    pub maxTessellationControlPerVertexInputComponents: u32,
+    pub maxTessellationControlPerVertexOutputComponents: u32,
+    pub maxTessellationControlPerPatchOutputComponents: u32,
+    pub maxTessellationControlTotalOutputComponents: u32,
+    pub maxTessellationEvaluationInputComponents: u32,
+    pub maxTessellationEvaluationOutputComponents: u32,
+    pub maxGeometryShaderInvocations: u32,
+    pub maxGeometryInputComponents: u32,
+    pub maxGeometryOutputComponents: u32,
+    pub maxGeometryOutputVertices: u32,
+    pub maxGeometryTotalOutputComponents: u32,
+    pub maxFragmentInputComponents: u32,
+    pub maxFragmentOutputAttachments: u32,
+    pub maxFragmentDualSrcAttachments: u32,
+    pub maxFragmentCombinedOutputResources: u32,
+    pub maxComputeSharedMemorySize: u32,
+    pub maxComputeWorkGroupCount: [u32; 3],
+    pub maxComputeWorkGroupInvocations: u32,
+    pub maxComputeWorkGroupSize: [u32; 3],
+    pub subPixelPrecisionBits: u32,
+    pub subTexelPrecisionBits: u32,
+    pub mipmapPrecisionBits: u32,
+    pub maxDrawIndexedIndexValue: u32,
+    pub maxDrawIndirectCount: u32,
+    pub maxSamplerLodBias: f32,
+    pub maxSamplerAnisotropy: f32,
+    pub maxViewports: u32,
+    pub maxViewportDimensions: [u32; 2],
+    pub viewportBoundsRange: [f32; 2],
+    pub
viewportSubPixelBits: u32, + pub minMemoryMapAlignment: usize, + pub minTexelBufferOffsetAlignment: VkDeviceSize, + pub minUniformBufferOffsetAlignment: VkDeviceSize, + pub minStorageBufferOffsetAlignment: VkDeviceSize, + pub minTexelOffset: i32, + pub maxTexelOffset: u32, + pub minTexelGatherOffset: i32, + pub maxTexelGatherOffset: u32, + pub minInterpolationOffset: f32, + pub maxInterpolationOffset: f32, + pub subPixelInterpolationOffsetBits: u32, + pub maxFramebufferWidth: u32, + pub maxFramebufferHeight: u32, + pub maxFramebufferLayers: u32, + pub framebufferColorSampleCounts: VkSampleCountFlagBits, + pub framebufferDepthSampleCounts: VkSampleCountFlagBits, + pub framebufferStencilSampleCounts: VkSampleCountFlagBits, + pub framebufferNoAttachmentsSampleCounts: VkSampleCountFlagBits, + pub maxColorAttachments: u32, + pub sampledImageColorSampleCounts: VkSampleCountFlagBits, + pub sampledImageIntegerSampleCounts: VkSampleCountFlagBits, + pub sampledImageDepthSampleCounts: VkSampleCountFlagBits, + pub sampledImageStencilSampleCounts: VkSampleCountFlagBits, + pub storageImageSampleCounts: VkSampleCountFlagBits, + pub maxSampleMaskWords: u32, + pub timestampComputeAndGraphics: VkBool32, + pub timestampPeriod: f32, + pub maxClipDistances: u32, + pub maxCullDistances: u32, + pub maxCombinedClipAndCullDistances: u32, + pub discreteQueuePriorities: u32, + pub pointSizeRange: [f32; 2], + pub lineWidthRange: [f32; 2], + pub pointSizeGranularity: f32, + pub lineWidthGranularity: f32, + pub strictLines: VkBool32, + pub standardSampleLocations: VkBool32, + pub optimalBufferCopyOffsetAlignment: VkDeviceSize, + pub optimalBufferCopyRowPitchAlignment: VkDeviceSize, + pub nonCoherentAtomSize: VkDeviceSize, +} diff --git a/vulkan-sys/src/structs/core/physicaldevicemaintanence3properties.rs b/vulkan-sys/src/structs/core/physicaldevicemaintanence3properties.rs new file mode 100644 index 0000000..a88dc89 --- /dev/null +++ b/vulkan-sys/src/structs/core/physicaldevicemaintanence3properties.rs @@ -0,0 +1,22 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +pub struct VkPhysicalDeviceMaintenance3PropertiesKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub maxPerSetDescriptors: u32, + pub maxMemoryAllocationSize: VkDeviceSize, +} + +impl VkPhysicalDeviceMaintenance3PropertiesKHR { + pub fn new(max_per_set_descriptors: u32, max_memory_allocation_size: VkDeviceSize) -> Self { + VkPhysicalDeviceMaintenance3PropertiesKHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES, + pNext: ptr::null(), + maxPerSetDescriptors: max_per_set_descriptors, + maxMemoryAllocationSize: max_memory_allocation_size, + } + } +} diff --git a/vulkan-sys/src/structs/core/physicaldevicememoryproperties.rs b/vulkan-sys/src/structs/core/physicaldevicememoryproperties.rs new file mode 100644 index 0000000..d3dee75 --- /dev/null +++ b/vulkan-sys/src/structs/core/physicaldevicememoryproperties.rs @@ -0,0 +1,33 @@ +use crate::prelude::*; + +use super::super::raw_to_slice; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceMemoryProperties { + pub memoryTypeCount: u32, + pub memoryTypes: [VkMemoryType; VK_MAX_MEMORY_TYPES as usize], + pub memoryHeapCount: u32, + pub memoryHeaps: [VkMemoryHeap; VK_MAX_MEMORY_HEAPS as usize], +} + +impl VkPhysicalDeviceMemoryProperties { + pub fn memory_types(&self) -> &[VkMemoryType] { + raw_to_slice(self.memoryTypes.as_ptr(), self.memoryTypeCount) + } + + pub fn memory_heaps(&self) -> &[VkMemoryHeap] { + raw_to_slice(self.memoryHeaps.as_ptr(), 
self.memoryHeapCount) + } +} + +impl Default for VkPhysicalDeviceMemoryProperties { + fn default() -> Self { + VkPhysicalDeviceMemoryProperties { + memoryTypeCount: 0, + memoryTypes: [VkMemoryType::default(); VK_MAX_MEMORY_TYPES as usize], + memoryHeapCount: 0, + memoryHeaps: [VkMemoryHeap::default(); VK_MAX_MEMORY_HEAPS as usize], + } + } +} diff --git a/vulkan-sys/src/structs/core/physicaldeviceproperties.rs b/vulkan-sys/src/structs/core/physicaldeviceproperties.rs new file mode 100644 index 0000000..e03cddd --- /dev/null +++ b/vulkan-sys/src/structs/core/physicaldeviceproperties.rs @@ -0,0 +1,52 @@ +use crate::prelude::*; + +use std::ffi::CStr; +use std::fmt; + +use std::os::raw::c_char; + +#[repr(C)] +pub struct VkPhysicalDeviceProperties { + pub apiVersion: u32, + pub driverVersion: u32, + pub vendorID: u32, + pub deviceID: u32, + pub deviceType: VkPhysicalDeviceType, + pub deviceName: [c_char; VK_MAX_PHYSICAL_DEVICE_NAME_SIZE as usize], + pub pipelineCacheUUID: [u8; VK_UUID_SIZE as usize], + pub limits: VkPhysicalDeviceLimits, + pub sparseProperties: VkPhysicalDeviceSparseProperties, +} + +impl VkPhysicalDeviceProperties { + pub fn device_name(&self) -> String { + let device_name_cstr = unsafe { CStr::from_ptr(self.deviceName.as_ptr()) }; + device_name_cstr.to_str().unwrap().to_string() + } +} + +impl Default for VkPhysicalDeviceProperties { + fn default() -> Self { + VkPhysicalDeviceProperties { + apiVersion: 0, + driverVersion: 0, + vendorID: 0, + deviceID: 0, + deviceType: VK_PHYSICAL_DEVICE_TYPE_OTHER, + deviceName: [0; VK_MAX_PHYSICAL_DEVICE_NAME_SIZE as usize], + pipelineCacheUUID: [0; VK_UUID_SIZE as usize], + limits: VkPhysicalDeviceLimits::default(), + sparseProperties: VkPhysicalDeviceSparseProperties::default(), + } + } +} + +impl fmt::Debug for VkPhysicalDeviceProperties { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "VkPhysicalDeviceProperties {{ apiVersion: {:?}, driverVersion: {:?}, vendorID: {:?}, deviceID: {:?}, deviceType: {:?}, deviceName: {:?}, pipelineCacheUUID: {:?}, limits: {:?}, sparseProperties: {:?} }}", + self.apiVersion, self.driverVersion, self.vendorID, self.deviceID, self.deviceType, self.device_name(), self.pipelineCacheUUID, self.limits, self.sparseProperties + ) + } +} diff --git a/vulkan-sys/src/structs/core/physicaldevicesparseproperties.rs b/vulkan-sys/src/structs/core/physicaldevicesparseproperties.rs new file mode 100644 index 0000000..a2d1f64 --- /dev/null +++ b/vulkan-sys/src/structs/core/physicaldevicesparseproperties.rs @@ -0,0 +1,11 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug, Default)] +pub struct VkPhysicalDeviceSparseProperties { + pub residencyStandard2DBlockShape: VkBool32, + pub residencyStandard2DMultisampleBlockShape: VkBool32, + pub residencyStandard3DBlockShape: VkBool32, + pub residencyAlignedMipSize: VkBool32, + pub residencyNonResidentStrict: VkBool32, +} diff --git a/vulkan-sys/src/structs/core/pipelinecachecreateinfo.rs b/vulkan-sys/src/structs/core/pipelinecachecreateinfo.rs new file mode 100644 index 0000000..ab943d8 --- /dev/null +++ b/vulkan-sys/src/structs/core/pipelinecachecreateinfo.rs @@ -0,0 +1,35 @@ +use crate::prelude::*; + +use std::mem; +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPipelineCacheCreateInfo { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkPipelineCacheCreateFlagBits, + pub initialDataSize: usize, + pub pInitialData: *const c_void, +} + +impl VkPipelineCacheCreateInfo { + pub fn 
diff --git a/vulkan-sys/src/structs/core/physicaldeviceproperties.rs b/vulkan-sys/src/structs/core/physicaldeviceproperties.rs
new file mode 100644
index 0000000..e03cddd
--- /dev/null
+++ b/vulkan-sys/src/structs/core/physicaldeviceproperties.rs
@@ -0,0 +1,52 @@
+use crate::prelude::*;
+
+use std::ffi::CStr;
+use std::fmt;
+
+use std::os::raw::c_char;
+
+#[repr(C)]
+pub struct VkPhysicalDeviceProperties {
+    pub apiVersion: u32,
+    pub driverVersion: u32,
+    pub vendorID: u32,
+    pub deviceID: u32,
+    pub deviceType: VkPhysicalDeviceType,
+    pub deviceName: [c_char; VK_MAX_PHYSICAL_DEVICE_NAME_SIZE as usize],
+    pub pipelineCacheUUID: [u8; VK_UUID_SIZE as usize],
+    pub limits: VkPhysicalDeviceLimits,
+    pub sparseProperties: VkPhysicalDeviceSparseProperties,
+}
+
+impl VkPhysicalDeviceProperties {
+    pub fn device_name(&self) -> String {
+        let device_name_cstr = unsafe { CStr::from_ptr(self.deviceName.as_ptr()) };
+        device_name_cstr.to_str().unwrap().to_string()
+    }
+}
+
+impl Default for VkPhysicalDeviceProperties {
+    fn default() -> Self {
+        VkPhysicalDeviceProperties {
+            apiVersion: 0,
+            driverVersion: 0,
+            vendorID: 0,
+            deviceID: 0,
+            deviceType: VK_PHYSICAL_DEVICE_TYPE_OTHER,
+            deviceName: [0; VK_MAX_PHYSICAL_DEVICE_NAME_SIZE as usize],
+            pipelineCacheUUID: [0; VK_UUID_SIZE as usize],
+            limits: VkPhysicalDeviceLimits::default(),
+            sparseProperties: VkPhysicalDeviceSparseProperties::default(),
+        }
+    }
+}
+
+impl fmt::Debug for VkPhysicalDeviceProperties {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "VkPhysicalDeviceProperties {{ apiVersion: {:?}, driverVersion: {:?}, vendorID: {:?}, deviceID: {:?}, deviceType: {:?}, deviceName: {:?}, pipelineCacheUUID: {:?}, limits: {:?}, sparseProperties: {:?} }}",
+            self.apiVersion, self.driverVersion, self.vendorID, self.deviceID, self.deviceType, self.device_name(), self.pipelineCacheUUID, self.limits, self.sparseProperties
+        )
+    }
+}
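Since `apiVersion` is exposed as the raw packed Vulkan version, a caller has to split it manually: ignoring the variant bits, Vulkan packs it as (major << 22) | (minor << 12) | patch. A minimal sketch:

    // decode the packed apiVersion field into a printable version string
    fn api_version_string(properties: &VkPhysicalDeviceProperties) -> String {
        let v = properties.apiVersion;

        format!("{}.{}.{}", v >> 22, (v >> 12) & 0x3ff, v & 0xfff)
    }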
diff --git a/vulkan-sys/src/structs/core/physicaldevicesparseproperties.rs b/vulkan-sys/src/structs/core/physicaldevicesparseproperties.rs
new file mode 100644
index 0000000..a2d1f64
--- /dev/null
+++ b/vulkan-sys/src/structs/core/physicaldevicesparseproperties.rs
@@ -0,0 +1,11 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug, Default)]
+pub struct VkPhysicalDeviceSparseProperties {
+    pub residencyStandard2DBlockShape: VkBool32,
+    pub residencyStandard2DMultisampleBlockShape: VkBool32,
+    pub residencyStandard3DBlockShape: VkBool32,
+    pub residencyAlignedMipSize: VkBool32,
+    pub residencyNonResidentStrict: VkBool32,
+}
diff --git a/vulkan-sys/src/structs/core/pipelinecachecreateinfo.rs b/vulkan-sys/src/structs/core/pipelinecachecreateinfo.rs
new file mode 100644
index 0000000..ab943d8
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelinecachecreateinfo.rs
@@ -0,0 +1,35 @@
+use crate::prelude::*;
+
+use std::mem;
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineCacheCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineCacheCreateFlagBits,
+    pub initialDataSize: usize,
+    pub pInitialData: *const c_void,
+}
+
+impl VkPipelineCacheCreateInfo {
+    pub fn new<T>(flags: T) -> Self
+    where
+        T: Into<VkPipelineCacheCreateFlagBits>,
+    {
+        VkPipelineCacheCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            initialDataSize: 0,
+            pInitialData: ptr::null(),
+        }
+    }
+
+    pub fn set_data<T>(&mut self, data: &T) {
+        self.initialDataSize = mem::size_of::<T>();
+        self.pInitialData = data as *const T as *const c_void;
+    }
+}
diff --git a/vulkan-sys/src/structs/core/pipelinecolorblendattachmentstate.rs b/vulkan-sys/src/structs/core/pipelinecolorblendattachmentstate.rs
new file mode 100644
index 0000000..571b375
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelinecolorblendattachmentstate.rs
@@ -0,0 +1,32 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineColorBlendAttachmentState {
+    pub blendEnable: VkBool32,
+    pub srcColorBlendFactor: VkBlendFactor,
+    pub dstColorBlendFactor: VkBlendFactor,
+    pub colorBlendOp: VkBlendOp,
+    pub srcAlphaBlendFactor: VkBlendFactor,
+    pub dstAlphaBlendFactor: VkBlendFactor,
+    pub alphaBlendOp: VkBlendOp,
+    pub colorWriteMask: VkColorComponentFlagBits,
+}
+
+impl Default for VkPipelineColorBlendAttachmentState {
+    fn default() -> Self {
+        VkPipelineColorBlendAttachmentState {
+            blendEnable: VK_TRUE,
+            srcColorBlendFactor: VK_BLEND_FACTOR_SRC_ALPHA,
+            dstColorBlendFactor: VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
+            colorBlendOp: VK_BLEND_OP_ADD,
+            srcAlphaBlendFactor: VK_BLEND_FACTOR_ONE,
+            dstAlphaBlendFactor: VK_BLEND_FACTOR_ZERO,
+            alphaBlendOp: VK_BLEND_OP_ADD,
+            colorWriteMask: VK_COLOR_COMPONENT_R_BIT
+                | VK_COLOR_COMPONENT_G_BIT
+                | VK_COLOR_COMPONENT_B_BIT
+                | VK_COLOR_COMPONENT_A_BIT,
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/pipelinecolorblendstatecreateinfo.rs b/vulkan-sys/src/structs/core/pipelinecolorblendstatecreateinfo.rs
new file mode 100644
index 0000000..0f07594
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelinecolorblendstatecreateinfo.rs
@@ -0,0 +1,46 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineColorBlendStateCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineColorBlendStateCreateFlagBits,
+    pub logicOpEnable: VkBool32,
+    pub logicOp: VkLogicOp,
+    pub attachmentCount: u32,
+    pub pAttachments: *const VkPipelineColorBlendAttachmentState,
+    pub blendConstants: [f32; 4],
+}
+
+impl VkPipelineColorBlendStateCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        logic_op_enable: bool,
+        logic_op: VkLogicOp,
+        attachments: &[VkPipelineColorBlendAttachmentState],
+        blend_constants: [f32; 4],
+    ) -> VkPipelineColorBlendStateCreateInfo
+    where
+        T: Into<VkPipelineColorBlendStateCreateFlagBits>,
+    {
+        VkPipelineColorBlendStateCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            logicOpEnable: logic_op_enable.into(),
+            logicOp: logic_op,
+            attachmentCount: attachments.len() as u32,
+            pAttachments: attachments.as_ptr(),
+            blendConstants: blend_constants,
+        }
+    }
+
+    pub fn set_attachments(&mut self, attachments: &[VkPipelineColorBlendAttachmentState]) {
+        self.attachmentCount = attachments.len() as u32;
+        self.pAttachments = attachments.as_ptr();
+    }
+}
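The default attachment state above already encodes classic src-alpha / one-minus-src-alpha blending, so wiring it into the blend state is mostly a pointer-lifetime exercise. A sketch; the `VK_PIPELINE_COLOR_BLEND_STATE_CREATE_NULL_BIT` zero-flag constant is assumed by analogy with the shader-stage constants used later in this patch:

    let attachments = [VkPipelineColorBlendAttachmentState::default()];

    // `attachments` must outlive the create info: only a raw pointer is stored
    let color_blend_state = VkPipelineColorBlendStateCreateInfo::new(
        VK_PIPELINE_COLOR_BLEND_STATE_CREATE_NULL_BIT, // assumed constant name
        false,                // logic ops disabled ...
        VK_LOGIC_OP_NO_OP,    // ... so this value is ignored
        &attachments,
        [0.0, 0.0, 0.0, 0.0], // blend constants
    );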
diff --git a/vulkan-sys/src/structs/core/pipelinedepthstencilstatecreateinfo.rs b/vulkan-sys/src/structs/core/pipelinedepthstencilstatecreateinfo.rs
new file mode 100644
index 0000000..44f88a6
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelinedepthstencilstatecreateinfo.rs
@@ -0,0 +1,54 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineDepthStencilStateCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineDepthStencilStateCreateFlagBits,
+    pub depthTestEnable: VkBool32,
+    pub depthWriteEnable: VkBool32,
+    pub depthCompareOp: VkCompareOp,
+    pub depthBoundsTestEnable: VkBool32,
+    pub stencilTestEnable: VkBool32,
+    pub front: VkStencilOpState,
+    pub back: VkStencilOpState,
+    pub minDepthBounds: f32,
+    pub maxDepthBounds: f32,
+}
+
+impl VkPipelineDepthStencilStateCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        depth_test_enable: bool,
+        depth_write_enable: bool,
+        depth_compare_op: VkCompareOp,
+        depth_bounds_test_enable: bool,
+        stencil_test_enable: bool,
+        front: VkStencilOpState,
+        back: VkStencilOpState,
+        min_depth_bounds: f32,
+        max_depth_bounds: f32,
+    ) -> VkPipelineDepthStencilStateCreateInfo
+    where
+        T: Into<VkPipelineDepthStencilStateCreateFlagBits>,
+    {
+        VkPipelineDepthStencilStateCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            depthTestEnable: depth_test_enable.into(),
+            depthWriteEnable: depth_write_enable.into(),
+            depthCompareOp: depth_compare_op,
+            depthBoundsTestEnable: depth_bounds_test_enable.into(),
+            stencilTestEnable: stencil_test_enable.into(),
+            front,
+            back,
+            minDepthBounds: min_depth_bounds,
+            maxDepthBounds: max_depth_bounds,
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/pipelinedynamicstatecreateinfo.rs b/vulkan-sys/src/structs/core/pipelinedynamicstatecreateinfo.rs
new file mode 100644
index 0000000..2ee4cc4
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelinedynamicstatecreateinfo.rs
@@ -0,0 +1,29 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineDynamicStateCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineDynamicStateCreateFlagBits,
+    pub dynamicStateCount: u32,
+    pub pDynamicStates: *const VkDynamicState,
+}
+
+impl VkPipelineDynamicStateCreateInfo {
+    pub fn new<T>(flags: T, dynamic_states: &[VkDynamicState]) -> VkPipelineDynamicStateCreateInfo
+    where
+        T: Into<VkPipelineDynamicStateCreateFlagBits>,
+    {
+        VkPipelineDynamicStateCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            dynamicStateCount: dynamic_states.len() as u32,
+            pDynamicStates: dynamic_states.as_ptr(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/pipelineinputassemblystatecreateinfo.rs b/vulkan-sys/src/structs/core/pipelineinputassemblystatecreateinfo.rs
new file mode 100644
index 0000000..775c3e1
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelineinputassemblystatecreateinfo.rs
@@ -0,0 +1,33 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineInputAssemblyStateCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineInputAssemblyStateCreateFlagBits,
+    pub topology: VkPrimitiveTopology,
+    pub primitiveRestartEnable: VkBool32,
+}
+
+impl VkPipelineInputAssemblyStateCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        topology: VkPrimitiveTopology,
+        primitive_restart_enable: bool,
+    ) -> VkPipelineInputAssemblyStateCreateInfo
+    where
+        T: Into<VkPipelineInputAssemblyStateCreateFlagBits>,
+    {
+        VkPipelineInputAssemblyStateCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            topology,
+            primitiveRestartEnable: primitive_restart_enable.into(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/pipelinelayoutcreateinfo.rs b/vulkan-sys/src/structs/core/pipelinelayoutcreateinfo.rs
new file mode 100644
index 0000000..54f875b
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelinelayoutcreateinfo.rs
@@ -0,0 +1,37 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineLayoutCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineLayoutCreateFlagBits,
+    pub setLayoutCount: u32,
+    pub pSetLayouts: *const VkDescriptorSetLayout,
+    pub pushConstantRangeCount: u32,
+    pub pPushConstantRanges: *const VkPushConstantRange,
+}
+
+impl VkPipelineLayoutCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        set_layouts: &[VkDescriptorSetLayout],
+        push_constant_ranges: &[VkPushConstantRange],
+    ) -> Self
+    where
+        T: Into<VkPipelineLayoutCreateFlagBits>,
+    {
+        VkPipelineLayoutCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            setLayoutCount: set_layouts.len() as u32,
+            pSetLayouts: set_layouts.as_ptr(),
+            pushConstantRangeCount: push_constant_ranges.len() as u32,
+            pPushConstantRanges: push_constant_ranges.as_ptr(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/pipelinemultisamplestatecreateinfo.rs b/vulkan-sys/src/structs/core/pipelinemultisamplestatecreateinfo.rs
new file mode 100644
index 0000000..7d353e6
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelinemultisamplestatecreateinfo.rs
@@ -0,0 +1,55 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineMultisampleStateCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineMultisampleStateCreateFlagBits,
+    pub rasterizationSamples: VkSampleCountFlags,
+    pub sampleShadingEnable: VkBool32,
+    pub minSampleShading: f32,
+    pub pSampleMask: *const VkSampleMask,
+    pub alphaToCoverageEnable: VkBool32,
+    pub alphaToOneEnable: VkBool32,
+}
+
+impl VkPipelineMultisampleStateCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        rasterization_samples: VkSampleCountFlags,
+        sample_shading_enable: bool,
+        min_sample_shading: f32,
+        sample_masks: &[VkSampleMask],
+        alpha_to_coverage_enable: bool,
+        alpha_to_one_enable: bool,
+    ) -> VkPipelineMultisampleStateCreateInfo
+    where
+        T: Into<VkPipelineMultisampleStateCreateFlagBits>,
+    {
+        VkPipelineMultisampleStateCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            rasterizationSamples: rasterization_samples,
+            sampleShadingEnable: sample_shading_enable.into(),
+            minSampleShading: min_sample_shading,
+            pSampleMask: if sample_masks.is_empty() {
+                ptr::null()
+            } else {
+                // pSampleMask must hold ceil(rasterizationSamples / 32) mask words
+                let words = ((rasterization_samples as u32) + 31) / 32;
+
+                debug_assert!(sample_masks.len() as u32 == words);
+
+                sample_masks.as_ptr()
+            },
+
+            alphaToCoverageEnable: alpha_to_coverage_enable.into(),
+            alphaToOneEnable: alpha_to_one_enable.into(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/pipelinerasterizationstatecreateinfo.rs b/vulkan-sys/src/structs/core/pipelinerasterizationstatecreateinfo.rs
new file mode 100644
index 0000000..4c40060
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelinerasterizationstatecreateinfo.rs
@@ -0,0 +1,62 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineRasterizationStateCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineRasterizationStateCreateFlagBits,
+    pub depthClampEnable: VkBool32,
+    pub rasterizerDiscardEnable: VkBool32,
+    pub polygonMode: VkPolygonMode,
+    pub cullMode: VkCullModeFlags,
+    pub frontFace: VkFrontFace,
+    pub depthBiasEnable: VkBool32,
+    pub depthBiasConstantFactor: f32,
+    pub depthBiasClamp: f32,
+    pub depthBiasSlopeFactor: f32,
+    pub lineWidth: f32,
+}
+
+impl VkPipelineRasterizationStateCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        depth_clamp_enable: bool,
+        rasterization_discard_enable: bool,
+        polygon_mode: VkPolygonMode,
+        cull_mode: VkCullModeFlags,
+        front_face: VkFrontFace,
+        depth_bias_enable: bool,
+        depth_bias_constant_factor: f32,
+        depth_bias_clamp: f32,
+        depth_bias_slope_factor: f32,
+        line_width: f32,
+    ) -> VkPipelineRasterizationStateCreateInfo
+    where
+        T: Into<VkPipelineRasterizationStateCreateFlagBits>,
+    {
+        VkPipelineRasterizationStateCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            depthClampEnable: depth_clamp_enable.into(),
+            rasterizerDiscardEnable: rasterization_discard_enable.into(),
+            polygonMode: polygon_mode,
+            cullMode: cull_mode,
+            frontFace: front_face,
+            depthBiasEnable: depth_bias_enable.into(),
+            depthBiasConstantFactor: depth_bias_constant_factor,
+            depthBiasClamp: depth_bias_clamp,
+            depthBiasSlopeFactor: depth_bias_slope_factor,
+            lineWidth: line_width,
+        }
+    }
+}
+
+impl_pnext_in!(
+    VkPipelineRasterizationStateCreateInfo,
+    VkPipelineRasterizationStateRasterizationOrderAMD
+);
diff --git a/vulkan-sys/src/structs/core/pipelineshaderstagecreateinfo.rs b/vulkan-sys/src/structs/core/pipelineshaderstagecreateinfo.rs
new file mode 100644
index 0000000..19f853b
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelineshaderstagecreateinfo.rs
@@ -0,0 +1,153 @@
+use crate::prelude::*;
+
+use std::os::raw::{c_char, c_void};
+use std::ptr;
+
+const SHADER_ENTRY: &str = "main\0";
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineShaderStageCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineShaderStageCreateFlagBits,
+    pub stage: VkShaderStageFlags,
+    pub module: VkShaderModule,
+    pub pName: *const c_char,
+    pub pSpecializationInfo: *const VkSpecializationInfo,
+}
+
+impl VkPipelineShaderStageCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        stage: VkShaderStageFlags,
+        shader_module: VkShaderModule,
+        entry_function_name: &VkString,
+    ) -> VkPipelineShaderStageCreateInfo
+    where
+        T: Into<VkPipelineShaderStageCreateFlagBits>,
+    {
+        VkPipelineShaderStageCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            stage,
+            module: shader_module,
+            pName: entry_function_name.as_ptr(),
+            pSpecializationInfo: ptr::null(),
+        }
+    }
+
+    pub fn main<T>(
+        flags: T,
+        stage: VkShaderStageFlags,
+        shader_module: VkShaderModule,
+    ) -> VkPipelineShaderStageCreateInfo
+    where
+        T: Into<VkPipelineShaderStageCreateFlagBits>,
+    {
+        VkPipelineShaderStageCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            stage,
+            module: shader_module,
+            pName: SHADER_ENTRY.as_ptr() as *const c_char,
+            pSpecializationInfo: ptr::null(),
+        }
+    }
+
+    pub fn vertex(shader_module: VkShaderModule) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_VERTEX_BIT,
+            shader_module,
+        )
+    }
+
+    pub fn fragment(shader_module: VkShaderModule) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_FRAGMENT_BIT,
+            shader_module,
+        )
+    }
+
+    pub fn geometry(shader_module: VkShaderModule) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_GEOMETRY_BIT,
+            shader_module,
+        )
+    }
+
+    pub fn tesselation_control(shader_module: VkShaderModule) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+            shader_module,
+        )
+    }
+
+    pub fn tesselation_evaluation(
+        shader_module: VkShaderModule,
+    ) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+            shader_module,
+        )
+    }
+
+    pub fn compute(shader_module: VkShaderModule) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_COMPUTE_BIT,
+            shader_module,
+        )
+    }
+
+    pub fn ray_generation(shader_module: VkShaderModule) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_RAYGEN_BIT_KHR,
+            shader_module,
+        )
+    }
+
+    pub fn closest_hit(shader_module: VkShaderModule) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR,
+            shader_module,
+        )
+    }
+
+    pub fn miss(shader_module: VkShaderModule) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_MISS_BIT_KHR,
+            shader_module,
+        )
+    }
+
+    pub fn intersection(shader_module: VkShaderModule) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_INTERSECTION_BIT_KHR,
+            shader_module,
+        )
+    }
+
+    pub fn any_hit(shader_module: VkShaderModule) -> VkPipelineShaderStageCreateInfo {
+        Self::main(
+            VK_PIPELINE_SHADER_STAGE_CREATE_NULL_BIT,
+            VK_SHADER_STAGE_ANY_HIT_BIT_KHR,
+            shader_module,
+        )
+    }
+
+    pub fn set_specialization_info(&mut self, specialization_info: &VkSpecializationInfo) {
+        self.pSpecializationInfo = specialization_info as *const VkSpecializationInfo;
+    }
+}
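The stage-specific helpers make pipeline assembly terse: a plain rasterization pipeline only needs two entries. A sketch; `vertex_module` and `fragment_module` are assumed to be `VkShaderModule` handles created elsewhere:

    // one vertex and one fragment stage, both entered at "main"
    let stages = [
        VkPipelineShaderStageCreateInfo::vertex(vertex_module),
        VkPipelineShaderStageCreateInfo::fragment(fragment_module),
    ];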
diff --git a/vulkan-sys/src/structs/core/pipelinetesselationstatecreateinfo.rs b/vulkan-sys/src/structs/core/pipelinetesselationstatecreateinfo.rs
new file mode 100644
index 0000000..0081a56
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelinetesselationstatecreateinfo.rs
@@ -0,0 +1,27 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineTessellationStateCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineTessellationStateCreateFlagBits,
+    pub patchControlPoints: u32,
+}
+
+impl VkPipelineTessellationStateCreateInfo {
+    pub fn new<T>(flags: T, patch_control_points: u32) -> VkPipelineTessellationStateCreateInfo
+    where
+        T: Into<VkPipelineTessellationStateCreateFlagBits>,
+    {
+        VkPipelineTessellationStateCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            patchControlPoints: patch_control_points,
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/pipelinevertexinputstatecreateinfo.rs b/vulkan-sys/src/structs/core/pipelinevertexinputstatecreateinfo.rs
new file mode 100644
index 0000000..6ebc3bb
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelinevertexinputstatecreateinfo.rs
@@ -0,0 +1,37 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineVertexInputStateCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineVertexInputStateCreateFlagBits,
+    pub vertexBindingDescriptionCount: u32,
+    pub pVertexBindingDescriptions: *const VkVertexInputBindingDescription,
+    pub vertexAttributeDescriptionCount: u32,
+    pub pVertexAttributeDescriptions: *const VkVertexInputAttributeDescription,
+}
+
+impl VkPipelineVertexInputStateCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        vertex_binding_descriptions: &[VkVertexInputBindingDescription],
+        vertex_attribute_descriptions: &[VkVertexInputAttributeDescription],
+    ) -> VkPipelineVertexInputStateCreateInfo
+    where
+        T: Into<VkPipelineVertexInputStateCreateFlagBits>,
+    {
+        VkPipelineVertexInputStateCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            vertexBindingDescriptionCount: vertex_binding_descriptions.len() as u32,
+            pVertexBindingDescriptions: vertex_binding_descriptions.as_ptr(),
+            vertexAttributeDescriptionCount: vertex_attribute_descriptions.len() as u32,
+            pVertexAttributeDescriptions: vertex_attribute_descriptions.as_ptr(),
+        }
+    }
+}
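A sketch of describing one interleaved vertex buffer (vec3 position followed by vec2 uv). The `VK_PIPELINE_VERTEX_INPUT_STATE_CREATE_NULL_BIT` name is assumed by analogy with the shader-stage constants, and the format/rate constants are the standard Vulkan names this crate is expected to re-export:

    use std::mem::size_of;

    let bindings = [VkVertexInputBindingDescription {
        binding: 0,
        stride: (size_of::<f32>() * 5) as u32, // 3 floats position + 2 floats uv
        inputRate: VK_VERTEX_INPUT_RATE_VERTEX,
    }];

    let attributes = [
        VkVertexInputAttributeDescription {
            location: 0,
            binding: 0,
            format: VK_FORMAT_R32G32B32_SFLOAT,
            offset: 0,
        },
        VkVertexInputAttributeDescription {
            location: 1,
            binding: 0,
            format: VK_FORMAT_R32G32_SFLOAT,
            offset: (size_of::<f32>() * 3) as u32,
        },
    ];

    // both slices must outlive the create info, which keeps raw pointers to them
    let vertex_input = VkPipelineVertexInputStateCreateInfo::new(
        VK_PIPELINE_VERTEX_INPUT_STATE_CREATE_NULL_BIT, // assumed constant name
        &bindings,
        &attributes,
    );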
diff --git a/vulkan-sys/src/structs/core/pipelineviewportstatecreateinfo.rs b/vulkan-sys/src/structs/core/pipelineviewportstatecreateinfo.rs
new file mode 100644
index 0000000..89f803b
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pipelineviewportstatecreateinfo.rs
@@ -0,0 +1,37 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPipelineViewportStateCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkPipelineViewportStateCreateFlagBits,
+    pub viewportCount: u32,
+    pub pViewports: *const VkViewport,
+    pub scissorCount: u32,
+    pub pScissors: *const VkRect2D,
+}
+
+impl VkPipelineViewportStateCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        viewports: &[VkViewport],
+        scissors: &[VkRect2D],
+    ) -> VkPipelineViewportStateCreateInfo
+    where
+        T: Into<VkPipelineViewportStateCreateFlagBits>,
+    {
+        VkPipelineViewportStateCreateInfo {
+            sType: VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            viewportCount: viewports.len() as u32,
+            pViewports: viewports.as_ptr(),
+            scissorCount: scissors.len() as u32,
+            pScissors: scissors.as_ptr(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/prelude.rs b/vulkan-sys/src/structs/core/prelude.rs
new file mode 100644
index 0000000..f945043
--- /dev/null
+++ b/vulkan-sys/src/structs/core/prelude.rs
@@ -0,0 +1,131 @@
+pub use super::allocationcallback::*;
+pub use super::applicationinfo::*;
+pub use super::attachmentdescription::*;
+pub use super::attachmentreference::*;
+pub use super::base_in_structure::*;
+pub use super::base_out_structure::*;
+pub use super::bind_buffer_memory_info::*;
+pub use super::bind_image_memory_info::*;
+pub use super::bindsparseinfo::*;
+pub use super::buffer_device_address_info::*;
+pub use super::buffer_memory_requirements_info_2::*;
+pub use super::buffercopy::*;
+pub use super::buffercreateinfo::*;
+pub use super::bufferdeviceaddresscreateinfoext::*;
+pub use super::bufferimagecopy::*;
+pub use super::buffermemorybarrier::*;
+pub use super::bufferviewcreateinfo::*;
+pub use super::clearattachment::*;
+pub use super::clearcolorvalue::*;
+pub use super::cleardepthstencilvalue::*;
+pub use super::clearrect::*;
+pub use super::clearvalue::*;
+pub use super::commandbufferallocateinfo::*;
+pub use super::commandbufferbegininfo::*;
+pub use super::commandbufferinheritanceinfo::*;
+pub use super::commandpoolcreateinfo::*;
+pub use super::componentmapping::*;
+pub use super::computepipelinecreateinfo::*;
+pub use super::copydescriptorset::*;
+pub use super::debugreportcallbackcreateinfoext::*;
+pub use super::debugutilmessengercallbackdataext::*;
+pub use super::debugutilslabelext::*;
+pub use super::debugutilsmessengercreateinfoext::*;
+pub use super::debugutilsobjectnameinfoext::*;
+pub use super::descriptorbufferinfo::*;
+pub use super::descriptorimageinfo::*;
+pub use super::descriptorpoolcreateinfo::*;
+pub use super::descriptorpoolsize::*;
+pub use super::descriptorsetallocateinfo::*;
+pub use super::descriptorsetlayoutbinding::*;
+pub use super::descriptorsetlayoutcreateinfo::*;
+pub use super::descriptorsetlayoutsupport::*;
+pub use super::devicecreateinfo::*;
+pub use super::devicequeuecreateinfo::*;
+pub use super::dispatchindirectcommand::*;
+pub use super::displayplanecapabilities::*;
+pub use super::displayplaneproperties::*;
+pub use super::displayproperties::*;
+pub use super::drawindexedindirectcommand::*;
+pub use super::drawindirectcommand::*;
+pub use super::eventcreateinfo::*;
+pub use super::extensionproperties::*;
+pub use super::extent2d::*;
+pub use super::extent3d::*;
+pub use super::externalmemorybuffercreateinfo::*;
+pub use super::fencecreateinfo::*;
+pub use super::formatproperties::*;
+pub use super::framebuffercreateinfo::*;
+pub use super::graphicspipelinecreateinfo::*;
+pub use super::image_memory_requirements_info_2::*;
+pub use super::imageblit::*;
+pub use super::imagecopy::*;
+pub use super::imagecreateinfo::*;
+pub use super::imageformatproperties::*;
+pub use super::imagememorybarrier::*;
+pub use super::imageresolve::*;
+pub use super::imagesubresource::*;
+pub use super::imagesubresourcelayers::*;
+pub use super::imagesubresourcerange::*;
+pub use super::imageviewcreateinfo::*;
+pub use super::instancecreateinfo::*;
+pub use super::iossurfacecreateinfomvk::*;
+pub use super::layerproperties::*;
+pub use super::macossurfacecreateinfomvk::*;
+pub use super::mappedmemoryrange::*;
+pub use super::memoryallocateinfo::*;
+pub use super::memorybarrier::*;
+pub use super::memoryheap::*;
+pub use super::memoryrequirements::*;
+pub use super::memorytype::*;
+pub use super::mvkdisplayconfiguration::*;
+pub use super::mvkphysicaldevicemetalfeatures::*;
+pub use super::mvkswapchainperformance::*;
+pub use super::offset2d::*;
+pub use super::offset3d::*;
+pub use super::physicaldevicefeatures::*;
+pub use super::physicaldevicelimits::*;
+pub use super::physicaldevicemaintanence3properties::*;
+pub use super::physicaldevicememoryproperties::*;
+pub use super::physicaldeviceproperties::*;
+pub use super::physicaldevicesparseproperties::*;
+pub use super::pipelinecachecreateinfo::*;
+pub use super::pipelinecolorblendattachmentstate::*;
+pub use super::pipelinecolorblendstatecreateinfo::*;
+pub use super::pipelinedepthstencilstatecreateinfo::*;
+pub use super::pipelinedynamicstatecreateinfo::*;
+pub use super::pipelineinputassemblystatecreateinfo::*;
+pub use super::pipelinelayoutcreateinfo::*;
+pub use super::pipelinemultisamplestatecreateinfo::*;
+pub use super::pipelinerasterizationstatecreateinfo::*;
+pub use super::pipelineshaderstagecreateinfo::*;
+pub use super::pipelinetesselationstatecreateinfo::*;
+pub use super::pipelinevertexinputstatecreateinfo::*;
+pub use super::pipelineviewportstatecreateinfo::*;
+pub use super::pushconstantrange::*;
+pub use super::querypoolcreateinfo::*;
+pub use super::queuefamilyproperties::*;
+pub use super::rect2d::*;
+pub use super::renderpassbegininfo::*;
+pub use super::renderpasscreateinfo::*;
+pub use super::samplercreateinfo::*;
+pub use super::semaphorecreateinfo::*;
+pub use super::shadermodulecreateinfo::*;
+pub use super::sparsebuffermemorybindinfo::*;
+pub use super::sparseimageformatproperties::*;
+pub use super::sparseimagememorybind::*;
+pub use super::sparseimagememorybindinfo::*;
+pub use super::sparseimagememoryrequirements::*;
+pub use super::sparseimageopaquememorybindinfo::*;
+pub use super::sparsememorybind::*;
+pub use super::specializationinfo::*;
+pub use super::specializationmapentry::*;
+pub use super::stencilopstate::*;
+pub use super::submitinfo::*;
+pub use super::subpassdependency::*;
+pub use super::subpassdescription::*;
+pub use super::subresourcelayout::*;
+pub use super::vertexinputattributedescription::*;
+pub use super::vertexinputbindingdescription::*;
+pub use super::viewport::*;
+pub use super::writedescriptorset::*;
diff --git a/vulkan-sys/src/structs/core/pushconstantrange.rs b/vulkan-sys/src/structs/core/pushconstantrange.rs
new file mode 100644
index 0000000..e70e1f8
--- /dev/null
+++ b/vulkan-sys/src/structs/core/pushconstantrange.rs
@@ -0,0 +1,22 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug, Clone)]
+pub struct VkPushConstantRange {
+    pub stageFlagBits: VkShaderStageFlagBits,
+    pub offset: u32,
+    pub size: u32,
+}
+
+impl VkPushConstantRange {
+    pub fn new<T>(flags: T, offset: u32, size: u32) -> Self
+    where
+        T: Into<VkShaderStageFlagBits>,
+    {
+        VkPushConstantRange {
+            stageFlagBits: flags.into(),
+            offset,
+            size,
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/querypoolcreateinfo.rs b/vulkan-sys/src/structs/core/querypoolcreateinfo.rs
new file mode 100644
index 0000000..636760d
--- /dev/null
+++ b/vulkan-sys/src/structs/core/querypoolcreateinfo.rs
@@ -0,0 +1,37 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkQueryPoolCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkQueryPoolCreateFlagBits,
+    pub queryType: VkQueryType,
+    pub queryCount: u32,
+    pub pipelineStatistics: VkQueryPipelineStatisticFlagBits,
+}
+
+impl VkQueryPoolCreateInfo {
+    pub fn new<T, U>(
+        flags: T,
+        query_type: VkQueryType,
+        query_count: u32,
+        pipeline_statistics: U,
+    ) -> Self
+    where
+        T: Into<VkQueryPoolCreateFlagBits>,
+        U: Into<VkQueryPipelineStatisticFlagBits>,
+    {
+        VkQueryPoolCreateInfo {
+            sType: VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            queryType: query_type,
+            queryCount: query_count,
+            pipelineStatistics: pipeline_statistics.into(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/queuefamilyproperties.rs b/vulkan-sys/src/structs/core/queuefamilyproperties.rs
new file mode 100644
index 0000000..e703bdf
--- /dev/null
+++ b/vulkan-sys/src/structs/core/queuefamilyproperties.rs
@@ -0,0 +1,10 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkQueueFamilyProperties {
+    pub queueFlagBits: VkQueueFlagBits,
+    pub queueCount: u32,
+    pub timestampValidBits: u32,
+    pub minImageTransferGranularity: VkExtent3D,
+}
diff --git a/vulkan-sys/src/structs/core/rect2d.rs b/vulkan-sys/src/structs/core/rect2d.rs
new file mode 100644
index 0000000..0eb7d55
--- /dev/null
+++ b/vulkan-sys/src/structs/core/rect2d.rs
@@ -0,0 +1,8 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Default)]
+pub struct VkRect2D {
+    pub offset: VkOffset2D,
+    pub extent: VkExtent2D,
+}
diff --git a/vulkan-sys/src/structs/core/renderpassbegininfo.rs b/vulkan-sys/src/structs/core/renderpassbegininfo.rs
new file mode 100644
index 0000000..8a54bc0
--- /dev/null
+++ b/vulkan-sys/src/structs/core/renderpassbegininfo.rs
@@ -0,0 +1,35 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkRenderPassBeginInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub renderPass: VkRenderPass,
+    pub framebuffer: VkFramebuffer,
+    pub renderArea: VkRect2D,
+    pub clearValueCount: u32,
+    pub pClearValues: *const VkClearValue,
+}
+
+impl VkRenderPassBeginInfo {
+    pub fn new(
+        renderpass: VkRenderPass,
+        framebuffer: VkFramebuffer,
+        render_area: VkRect2D,
+        clear_values: &[VkClearValue],
+    ) -> Self {
+        VkRenderPassBeginInfo {
+            sType: VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+            pNext: ptr::null(),
+            renderPass: renderpass,
+            framebuffer,
+            renderArea: render_area,
+            clearValueCount: clear_values.len() as u32,
+            pClearValues: clear_values.as_ptr(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/renderpasscreateinfo.rs b/vulkan-sys/src/structs/core/renderpasscreateinfo.rs
new file mode 100644
index 0000000..0472a76
--- /dev/null
+++ b/vulkan-sys/src/structs/core/renderpasscreateinfo.rs
@@ -0,0 +1,42 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkRenderPassCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkRenderPassCreateFlagBits,
+    pub attachmentCount: u32,
+    pub pAttachments: *const VkAttachmentDescription,
+    pub subpassCount: u32,
+    pub pSubpasses: *const VkSubpassDescription,
+    pub dependencyCount: u32,
+    pub pDependencies: *const VkSubpassDependency,
+}
+
+impl VkRenderPassCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        attachments: &[VkAttachmentDescription],
+        subpasses: &[VkSubpassDescription],
+        dependencies: &[VkSubpassDependency],
+    ) -> Self
+    where
+        T: Into<VkRenderPassCreateFlagBits>,
+    {
+        VkRenderPassCreateInfo {
+            sType: VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            attachmentCount: attachments.len() as u32,
+            pAttachments: attachments.as_ptr(),
+            subpassCount: subpasses.len() as u32,
+            pSubpasses: subpasses.as_ptr(),
+            dependencyCount: dependencies.len() as u32,
+            pDependencies: dependencies.as_ptr(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/samplercreateinfo.rs b/vulkan-sys/src/structs/core/samplercreateinfo.rs
new file mode 100644
index 0000000..1f24534
--- /dev/null
+++ b/vulkan-sys/src/structs/core/samplercreateinfo.rs
@@ -0,0 +1,100 @@
+use crate::prelude::*;
+
+use std::hash::{Hash, Hasher};
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug, PartialEq)]
+pub struct VkSamplerCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkSamplerCreateFlagBits,
+    pub magFilter: VkFilter,
+    pub minFilter: VkFilter,
+    pub mipmapMode: VkSamplerMipmapMode,
+    pub addressModeU: VkSamplerAddressMode,
+    pub addressModeV: VkSamplerAddressMode,
+    pub addressModeW: VkSamplerAddressMode,
+    pub mipLodBias: f32,
+    pub anisotropyEnable: VkBool32,
+    pub maxAnisotropy: f32,
+    pub compareEnable: VkBool32,
+    pub compareOp: VkCompareOp,
+    pub minLod: f32,
+    pub maxLod: f32,
+    pub borderColor: VkBorderColor,
+    pub unnormalizedCoordinates: VkBool32,
+}
+
+impl VkSamplerCreateInfo {
+    pub fn new<T>(
+        flags: T,
+        mag_filter: VkFilter,
+        min_filter: VkFilter,
+        mipmap_mode: VkSamplerMipmapMode,
+        address_mode_u: VkSamplerAddressMode,
+        address_mode_v: VkSamplerAddressMode,
+        address_mode_w: VkSamplerAddressMode,
+        mip_lod_bias: f32,
+        anisotropy_enable: bool,
+        max_anisotropy: f32,
+        compare_enable: bool,
+        compare_op: VkCompareOp,
+        min_lod: f32,
+        max_lod: f32,
+        border_color: VkBorderColor,
+        unnormalized_coordinates: bool,
+    ) -> Self
+    where
+        T: Into<VkSamplerCreateFlagBits>,
+    {
+        VkSamplerCreateInfo {
+            sType: VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            magFilter: mag_filter,
+            minFilter: min_filter,
+            mipmapMode: mipmap_mode,
+            addressModeU: address_mode_u,
+            addressModeV: address_mode_v,
+            addressModeW: address_mode_w,
+            mipLodBias: mip_lod_bias,
+            anisotropyEnable: anisotropy_enable.into(),
+            maxAnisotropy: max_anisotropy,
+            compareEnable: compare_enable.into(),
+            compareOp: compare_op,
+            minLod: min_lod,
+            maxLod: max_lod,
+            borderColor: border_color,
+            unnormalizedCoordinates: unnormalized_coordinates.into(),
+        }
+    }
+}
+
+impl Eq for VkSamplerCreateInfo {}
+
+impl Hash for VkSamplerCreateInfo {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        // ignore pNext and structure type
+
+        self.flags.hash(state);
+        self.magFilter.hash(state);
+        self.minFilter.hash(state);
+        self.mipmapMode.hash(state);
+        self.addressModeU.hash(state);
+        self.addressModeV.hash(state);
+        self.addressModeW.hash(state);
+        self.anisotropyEnable.hash(state);
+        self.compareEnable.hash(state);
+        self.compareOp.hash(state);
+        self.borderColor.hash(state);
+        self.unnormalizedCoordinates.hash(state);
+
+        // hash the f32 fields via their native endian byte representation
+        self.mipLodBias.to_ne_bytes().hash(state);
+        self.maxAnisotropy.to_ne_bytes().hash(state);
+        self.minLod.to_ne_bytes().hash(state);
+        self.maxLod.to_ne_bytes().hash(state);
+    }
+}
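The manual `Eq`/`Hash` implementations exist because `f32` is neither `Eq` nor `Hash`; hashing the float fields through `to_ne_bytes()` sidesteps that while making identical bit patterns identical keys. That turns the create info into a usable lookup key, e.g. for a deduplicating sampler cache. A hypothetical sketch (the actual caching layer in the surrounding crates may look different):

    use std::collections::HashMap;

    // reuse an existing VkSampler when an identical description was seen before
    struct SamplerCache {
        samplers: HashMap<VkSamplerCreateInfo, VkSampler>,
    }

One caveat of byte-wise float hashing: 0.0 and -0.0 compare equal under the derived PartialEq but hash differently, so in the worst case a cache keyed this way stores a duplicate entry rather than reusing one.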
diff --git a/vulkan-sys/src/structs/core/semaphorecreateinfo.rs b/vulkan-sys/src/structs/core/semaphorecreateinfo.rs
new file mode 100644
index 0000000..a3d9400
--- /dev/null
+++ b/vulkan-sys/src/structs/core/semaphorecreateinfo.rs
@@ -0,0 +1,25 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSemaphoreCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkSemaphoreCreateFlagBits,
+}
+
+impl VkSemaphoreCreateInfo {
+    pub fn new<T>(flags: T) -> Self
+    where
+        T: Into<VkSemaphoreCreateFlagBits>,
+    {
+        VkSemaphoreCreateInfo {
+            sType: VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/shadermodulecreateinfo.rs b/vulkan-sys/src/structs/core/shadermodulecreateinfo.rs
new file mode 100644
index 0000000..e7a0a66
--- /dev/null
+++ b/vulkan-sys/src/structs/core/shadermodulecreateinfo.rs
@@ -0,0 +1,29 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkShaderModuleCreateInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkShaderModuleCreateFlagBits,
+    pub codeSize: usize,
+    pub pCode: *const u32,
+}
+
+impl VkShaderModuleCreateInfo {
+    pub fn new<T>(flags: T, code: &[u8]) -> Self
+    where
+        T: Into<VkShaderModuleCreateFlagBits>,
+    {
+        VkShaderModuleCreateInfo {
+            sType: VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            codeSize: code.len(),
+            pCode: code.as_ptr() as *const u32,
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/sparsebuffermemorybindinfo.rs b/vulkan-sys/src/structs/core/sparsebuffermemorybindinfo.rs
new file mode 100644
index 0000000..d7b21aa
--- /dev/null
+++ b/vulkan-sys/src/structs/core/sparsebuffermemorybindinfo.rs
@@ -0,0 +1,9 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSparseBufferMemoryBindInfo {
+    pub buffer: VkBuffer,
+    pub bindCount: u32,
+    pub pBinds: *const VkSparseMemoryBind,
+}
diff --git a/vulkan-sys/src/structs/core/sparseimageformatproperties.rs b/vulkan-sys/src/structs/core/sparseimageformatproperties.rs
new file mode 100644
index 0000000..8c80e2c
--- /dev/null
+++ b/vulkan-sys/src/structs/core/sparseimageformatproperties.rs
@@ -0,0 +1,9 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSparseImageFormatProperties {
+    pub aspectMask: VkImageAspectFlagBits,
+    pub imageGranularity: VkExtent3D,
+    pub flags: VkSparseImageFormatFlagBits,
+}
diff --git a/vulkan-sys/src/structs/core/sparseimagememorybind.rs b/vulkan-sys/src/structs/core/sparseimagememorybind.rs
new file mode 100644
index 0000000..2f56364
--- /dev/null
+++ b/vulkan-sys/src/structs/core/sparseimagememorybind.rs
@@ -0,0 +1,12 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSparseImageMemoryBind {
+    pub subresource: VkImageSubresource,
+    pub offset: VkOffset3D,
+    pub extent: VkExtent3D,
+    pub memory: VkDeviceMemory,
+    pub memoryOffset: VkDeviceSize,
+    pub flags: VkSparseMemoryBindFlagBits,
+}
diff --git a/vulkan-sys/src/structs/core/sparseimagememorybindinfo.rs b/vulkan-sys/src/structs/core/sparseimagememorybindinfo.rs
new file mode 100644
index 0000000..a047cf0
--- /dev/null
+++ b/vulkan-sys/src/structs/core/sparseimagememorybindinfo.rs
@@ -0,0 +1,9 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSparseImageMemoryBindInfo {
+    pub image: VkImage,
+    pub bindCount: u32,
+    pub pBinds: *const VkSparseImageMemoryBind,
+}
diff --git a/vulkan-sys/src/structs/core/sparseimagememoryrequirements.rs b/vulkan-sys/src/structs/core/sparseimagememoryrequirements.rs
new file mode 100644
index 0000000..51c20fa
--- /dev/null
+++ b/vulkan-sys/src/structs/core/sparseimagememoryrequirements.rs
@@ -0,0 +1,11 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSparseImageMemoryRequirements {
+    pub formatProperties: VkSparseImageFormatProperties,
+    pub imageMipTailFirstLod: u32,
+    pub imageMipTailSize: VkDeviceSize,
+    pub imageMipTailOffset: VkDeviceSize,
+    pub imageMipTailStride: VkDeviceSize,
+}
diff --git a/vulkan-sys/src/structs/core/sparseimageopaquememorybindinfo.rs b/vulkan-sys/src/structs/core/sparseimageopaquememorybindinfo.rs
new file mode 100644
index 0000000..207d4b1
--- /dev/null
+++ b/vulkan-sys/src/structs/core/sparseimageopaquememorybindinfo.rs
@@ -0,0 +1,9 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSparseImageOpaqueMemoryBindInfo {
+    pub image: VkImage,
+    pub bindCount: u32,
+    pub pBinds: *const VkSparseMemoryBind,
+}
diff --git a/vulkan-sys/src/structs/core/sparsememorybind.rs b/vulkan-sys/src/structs/core/sparsememorybind.rs
new file mode 100644
index 0000000..9020aec
--- /dev/null
+++ b/vulkan-sys/src/structs/core/sparsememorybind.rs
@@ -0,0 +1,11 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSparseMemoryBind {
+    pub resourceOffset: VkDeviceSize,
+    pub size: VkDeviceSize,
+    pub memory: VkDeviceMemory,
+    pub memoryOffset: VkDeviceSize,
+    pub flags: VkSparseMemoryBindFlagBits,
+}
diff --git a/vulkan-sys/src/structs/core/specializationinfo.rs b/vulkan-sys/src/structs/core/specializationinfo.rs
new file mode 100644
index 0000000..0d4a1cd
--- /dev/null
+++ b/vulkan-sys/src/structs/core/specializationinfo.rs
@@ -0,0 +1,43 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSpecializationInfo {
+    pub mapEntryCount: u32,
+    pub pMapEntries: *const VkSpecializationMapEntry,
+    pub dataSize: usize,
+    pub pData: *const c_void,
+}
+
+impl VkSpecializationInfo {
+    pub fn empty() -> Self {
+        VkSpecializationInfo {
+            mapEntryCount: 0,
+            pMapEntries: ptr::null(),
+            dataSize: 0,
+            pData: ptr::null(),
+        }
+    }
+
+    pub fn new<T>(data: &T, map_entries: &[VkSpecializationMapEntry]) -> Self {
+        VkSpecializationInfo {
+            mapEntryCount: map_entries.len() as u32,
+            pMapEntries: map_entries.as_ptr(),
+            dataSize: std::mem::size_of::<T>(),
+            pData: data as *const T as *const c_void,
+        }
+    }
+
+    pub fn set_map_entries(&mut self, map_entries: &[VkSpecializationMapEntry]) {
+        self.mapEntryCount = map_entries.len() as u32;
+        self.pMapEntries = map_entries.as_ptr();
+    }
+
+    pub fn set_data<T>(&mut self, data: &T) {
+        self.dataSize = std::mem::size_of::<T>();
+        self.pData = data as *const T as *const c_void;
+    }
+}
diff --git a/vulkan-sys/src/structs/core/specializationmapentry.rs b/vulkan-sys/src/structs/core/specializationmapentry.rs
new file mode 100644
index 0000000..6f8110f
--- /dev/null
+++ b/vulkan-sys/src/structs/core/specializationmapentry.rs
@@ -0,0 +1,7 @@
+#[repr(C)]
+#[derive(Debug, Clone)]
+pub struct VkSpecializationMapEntry {
+    pub constantID: u32,
+    pub offset: u32,
+    pub size: usize,
+}
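A sketch of specializing a single shader constant (`constant_id = 0`) to a concrete value; both the value and the map entries are only borrowed as raw pointers and must outlive the info:

    let value: u32 = 64;

    let map_entries = [VkSpecializationMapEntry {
        constantID: 0,
        offset: 0,
        size: std::mem::size_of::<u32>(),
    }];

    let specialization_info = VkSpecializationInfo::new(&value, &map_entries);

    // attach it to a shader stage built earlier, e.g.:
    // stage.set_specialization_info(&specialization_info);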
diff --git a/vulkan-sys/src/structs/core/stencilopstate.rs b/vulkan-sys/src/structs/core/stencilopstate.rs
new file mode 100644
index 0000000..01eefae
--- /dev/null
+++ b/vulkan-sys/src/structs/core/stencilopstate.rs
@@ -0,0 +1,13 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug, Clone)]
+pub struct VkStencilOpState {
+    pub failOp: VkStencilOp,
+    pub passOp: VkStencilOp,
+    pub depthFailOp: VkStencilOp,
+    pub compareOp: VkCompareOp,
+    pub compareMask: u32,
+    pub writeMask: u32,
+    pub reference: u32,
+}
diff --git a/vulkan-sys/src/structs/core/submitinfo.rs b/vulkan-sys/src/structs/core/submitinfo.rs
new file mode 100644
index 0000000..2674b21
--- /dev/null
+++ b/vulkan-sys/src/structs/core/submitinfo.rs
@@ -0,0 +1,39 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug, Clone)]
+pub struct VkSubmitInfo {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub waitSemaphoreCount: u32,
+    pub pWaitSemaphores: *const VkSemaphore,
+    pub pWaitDstStageMask: *const VkPipelineStageFlagBits,
+    pub commandBufferCount: u32,
+    pub pCommandBuffers: *const VkCommandBuffer,
+    pub signalSemaphoreCount: u32,
+    pub pSignalSemaphores: *const VkSemaphore,
+}
+
+impl VkSubmitInfo {
+    pub fn new(
+        wait_semaphores: &[VkSemaphore],
+        wait_stages: &[VkPipelineStageFlagBits],
+        command_buffers: &[VkCommandBuffer],
+        signal_semaphores: &[VkSemaphore],
+    ) -> Self {
+        VkSubmitInfo {
+            sType: VK_STRUCTURE_TYPE_SUBMIT_INFO,
+            pNext: ptr::null(),
+            waitSemaphoreCount: wait_semaphores.len() as u32,
+            pWaitSemaphores: wait_semaphores.as_ptr(),
+            pWaitDstStageMask: wait_stages.as_ptr(),
+            commandBufferCount: command_buffers.len() as u32,
+            pCommandBuffers: command_buffers.as_ptr(),
+            signalSemaphoreCount: signal_semaphores.len() as u32,
+            pSignalSemaphores: signal_semaphores.as_ptr(),
+        }
+    }
+}
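A typical render-then-present submit as a sketch; `image_available`, `render_finished` and `command_buffer` are assumed handles, and the stage constant is the standard Vulkan name. Note that the constructor does not check it, but `wait_semaphores` and `wait_stages` must have the same length, since entry i of pWaitDstStageMask belongs to entry i of pWaitSemaphores:

    let wait_semaphores = [image_available];
    let wait_stages = [VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT];
    let command_buffers = [command_buffer];
    let signal_semaphores = [render_finished];

    let submit_info = VkSubmitInfo::new(
        &wait_semaphores,
        &wait_stages,
        &command_buffers,
        &signal_semaphores,
    );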
diff --git a/vulkan-sys/src/structs/core/subpassdependency.rs b/vulkan-sys/src/structs/core/subpassdependency.rs
new file mode 100644
index 0000000..2b055bd
--- /dev/null
+++ b/vulkan-sys/src/structs/core/subpassdependency.rs
@@ -0,0 +1,42 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSubpassDependency {
+    pub srcSubpass: u32,
+    pub dstSubpass: u32,
+    pub srcStageMask: VkPipelineStageFlagBits,
+    pub dstStageMask: VkPipelineStageFlagBits,
+    pub srcAccessMask: VkAccessFlagBits,
+    pub dstAccessMask: VkAccessFlagBits,
+    pub dependencyFlagBits: VkDependencyFlagBits,
+}
+
+impl VkSubpassDependency {
+    pub fn new<S, T, U, V, W>(
+        src_subpass: u32,
+        dst_subpass: u32,
+        src_stage_mask: S,
+        dst_stage_mask: T,
+        src_access_mask: U,
+        dst_access_mask: V,
+        dependency_flags: W,
+    ) -> Self
+    where
+        S: Into<VkPipelineStageFlagBits>,
+        T: Into<VkPipelineStageFlagBits>,
+        U: Into<VkAccessFlagBits>,
+        V: Into<VkAccessFlagBits>,
+        W: Into<VkDependencyFlagBits>,
+    {
+        VkSubpassDependency {
+            srcSubpass: src_subpass,
+            dstSubpass: dst_subpass,
+            srcStageMask: src_stage_mask.into(),
+            dstStageMask: dst_stage_mask.into(),
+            srcAccessMask: src_access_mask.into(),
+            dstAccessMask: dst_access_mask.into(),
+            dependencyFlagBits: dependency_flags.into(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/core/subpassdescription.rs b/vulkan-sys/src/structs/core/subpassdescription.rs
new file mode 100644
index 0000000..8b51702
--- /dev/null
+++ b/vulkan-sys/src/structs/core/subpassdescription.rs
@@ -0,0 +1,57 @@
+use crate::prelude::*;
+
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSubpassDescription {
+    pub flags: VkSubpassDescriptionFlagBits,
+    pub pipelineBindPoint: VkPipelineBindPoint,
+    pub inputAttachmentCount: u32,
+    pub pInputAttachments: *const VkAttachmentReference,
+    pub colorAttachmentCount: u32,
+    pub pColorAttachments: *const VkAttachmentReference,
+    pub pResolveAttachments: *const VkAttachmentReference,
+    pub pDepthStencilAttachment: *const VkAttachmentReference,
+    pub preserveAttachmentCount: u32,
+    pub pPreserveAttachments: *const u32,
+}
+
+impl VkSubpassDescription {
+    pub fn new<T>(
+        flags: T,
+        input_attachments: &[VkAttachmentReference],
+        color_attachments: &[VkAttachmentReference],
+        resolve_attachments: &[VkAttachmentReference],
+        depth_stencil_attachment: Option<&VkAttachmentReference>,
+        preserve_attachments: &[u32],
+    ) -> Self
+    where
+        T: Into<VkSubpassDescriptionFlagBits>,
+    {
+        VkSubpassDescription {
+            flags: flags.into(),
+            // graphics is currently the only bind point supported here
+            pipelineBindPoint: VK_PIPELINE_BIND_POINT_GRAPHICS,
+            inputAttachmentCount: input_attachments.len() as u32,
+            pInputAttachments: input_attachments.as_ptr(),
+            colorAttachmentCount: color_attachments.len() as u32,
+            pColorAttachments: color_attachments.as_ptr(),
+            pResolveAttachments: if resolve_attachments.is_empty() {
+                ptr::null()
+            } else {
+                debug_assert!(
+                    resolve_attachments.len() == color_attachments.len(),
+                    "there must be as many resolve attachments as color attachments, if they are used"
+                );
+                resolve_attachments.as_ptr()
+            },
+            pDepthStencilAttachment: match depth_stencil_attachment {
+                Some(attachment) => attachment as *const _,
+                None => ptr::null(),
+            },
+            preserveAttachmentCount: preserve_attachments.len() as u32,
+            pPreserveAttachments: preserve_attachments.as_ptr(),
+        }
+    }
+}
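A sketch of a single forward-rendering subpass with one color and one depth attachment; the attachment indices refer to the render pass' attachment array, the layout constants are the standard Vulkan names, and the null-flag constant name is assumed:

    let color_refs = [VkAttachmentReference {
        attachment: 0,
        layout: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
    }];

    let depth_ref = VkAttachmentReference {
        attachment: 1,
        layout: VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
    };

    let subpass = VkSubpassDescription::new(
        VK_SUBPASS_DESCRIPTION_NULL_BIT, // assumed constant name
        &[],                             // input attachments
        &color_refs,
        &[],                             // resolve attachments (empty, or one per color attachment)
        Some(&depth_ref),
        &[],                             // preserve attachments
    );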
diff --git a/vulkan-sys/src/structs/core/subresourcelayout.rs b/vulkan-sys/src/structs/core/subresourcelayout.rs
new file mode 100644
index 0000000..62d2ed4
--- /dev/null
+++ b/vulkan-sys/src/structs/core/subresourcelayout.rs
@@ -0,0 +1,11 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug, Clone)]
+pub struct VkSubresourceLayout {
+    pub offset: VkDeviceSize,
+    pub size: VkDeviceSize,
+    pub rowPitch: VkDeviceSize,
+    pub arrayPitch: VkDeviceSize,
+    pub depthPitch: VkDeviceSize,
+}
diff --git a/vulkan-sys/src/structs/core/vertexinputattributedescription.rs b/vulkan-sys/src/structs/core/vertexinputattributedescription.rs
new file mode 100644
index 0000000..78a9bd8
--- /dev/null
+++ b/vulkan-sys/src/structs/core/vertexinputattributedescription.rs
@@ -0,0 +1,10 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkVertexInputAttributeDescription {
+    pub location: u32,
+    pub binding: u32,
+    pub format: VkFormat,
+    pub offset: u32,
+}
diff --git a/vulkan-sys/src/structs/core/vertexinputbindingdescription.rs b/vulkan-sys/src/structs/core/vertexinputbindingdescription.rs
new file mode 100644
index 0000000..a8adb17
--- /dev/null
+++ b/vulkan-sys/src/structs/core/vertexinputbindingdescription.rs
@@ -0,0 +1,9 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkVertexInputBindingDescription {
+    pub binding: u32,
+    pub stride: u32,
+    pub inputRate: VkVertexInputRate,
+}
diff --git a/vulkan-sys/src/structs/core/viewport.rs b/vulkan-sys/src/structs/core/viewport.rs
new file mode 100644
index 0000000..6c488dd
--- /dev/null
+++ b/vulkan-sys/src/structs/core/viewport.rs
@@ -0,0 +1,10 @@
+#[repr(C)]
+#[derive(Debug, Copy, Clone, Default)]
+pub struct VkViewport {
+    pub x: f32,
+    pub y: f32,
+    pub width: f32,
+    pub height: f32,
+    pub minDepth: f32,
+    pub maxDepth: f32,
+}
diff --git a/vulkan-sys/src/structs/core/writedescriptorset.rs b/vulkan-sys/src/structs/core/writedescriptorset.rs
new file mode 100644
index 0000000..c0a6e1b
--- /dev/null
+++ b/vulkan-sys/src/structs/core/writedescriptorset.rs
@@ -0,0 +1,64 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug, Clone)]
+pub struct VkWriteDescriptorSet {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub dstSet: VkDescriptorSet,
+    pub dstBinding: u32,
+    pub dstArrayElement: u32,
+    pub descriptorCount: u32,
+    pub descriptorType: VkDescriptorType,
+    pub pImageInfo: *const VkDescriptorImageInfo,
+    pub pBufferInfo: *const VkDescriptorBufferInfo,
+    pub pTexelBufferView: *const VkBufferView,
+}
+
+impl VkWriteDescriptorSet {
+    pub fn new(
+        set: VkDescriptorSet,
+        binding: u32,
+        array_element: u32,
+        descriptor_type: VkDescriptorType,
+    ) -> Self {
+        VkWriteDescriptorSet {
+            sType: VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+            pNext: ptr::null(),
+            dstSet: set,
+            dstBinding: binding,
+            dstArrayElement: array_element,
+            descriptorCount: 0,
+            descriptorType: descriptor_type,
+            pImageInfo: ptr::null(),
+            pBufferInfo: ptr::null(),
+            pTexelBufferView: ptr::null(),
+        }
+    }
+
+    pub fn set_image_infos<'a, 'b: 'a>(&'a mut self, image_infos: &'b [VkDescriptorImageInfo]) {
+        self.descriptorCount = image_infos.len() as u32;
+        self.pImageInfo = image_infos.as_ptr() as *const _;
+    }
+
+    pub fn set_buffer_infos<'a, 'b: 'a>(&'a mut self, buffer_infos: &'b [VkDescriptorBufferInfo]) {
+        self.descriptorCount = buffer_infos.len() as u32;
+        self.pBufferInfo = buffer_infos.as_ptr() as *const _;
+    }
+
+    pub fn set_texel_buffer_views<'a, 'b: 'a>(
+        &'a mut self,
+        texel_buffer_views: &'b [VkBufferView],
+    ) {
+        self.descriptorCount = texel_buffer_views.len() as u32;
+        self.pTexelBufferView = texel_buffer_views.as_ptr() as *const _;
+    }
+}
+
+impl_pnext_in!(
+    VkWriteDescriptorSet,
+    VkWriteDescriptorSetAccelerationStructureKHR
+);
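A sketch of pointing binding 0 of an already-allocated descriptor set at a uniform buffer; `descriptor_set` and `uniform_buffer` are assumed handles, and the `VkDescriptorBufferInfo` field names are assumed to mirror the C struct:

    let buffer_infos = [VkDescriptorBufferInfo {
        buffer: uniform_buffer,
        offset: 0,
        range: VK_WHOLE_SIZE,
    }];

    let mut write =
        VkWriteDescriptorSet::new(descriptor_set, 0, 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);

    // the lifetime bounds on set_buffer_infos keep `buffer_infos` borrowed
    // for as long as `write` holds the raw pointer
    write.set_buffer_infos(&buffer_infos);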
diff --git a/vulkan-sys/src/structs/ext/descriptorsetlayoutbindingflagscreateinfoext.rs b/vulkan-sys/src/structs/ext/descriptorsetlayoutbindingflagscreateinfoext.rs
new file mode 100644
index 0000000..c7feafc
--- /dev/null
+++ b/vulkan-sys/src/structs/ext/descriptorsetlayoutbindingflagscreateinfoext.rs
@@ -0,0 +1,24 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkDescriptorSetLayoutBindingFlagsCreateInfoEXT {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub bindingCount: u32,
+    pub pBindingFlags: *const VkDescriptorBindingFlagBitsEXT,
+}
+
+impl VkDescriptorSetLayoutBindingFlagsCreateInfoEXT {
+    pub fn new(binding_flags: &[VkDescriptorBindingFlagBitsEXT]) -> Self {
+        VkDescriptorSetLayoutBindingFlagsCreateInfoEXT {
+            sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
+            pNext: ptr::null(),
+            bindingCount: binding_flags.len() as u32,
+            pBindingFlags: binding_flags.as_ptr(),
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/ext/descriptorsetvariabledescriptorcountallocationinfoext.rs b/vulkan-sys/src/structs/ext/descriptorsetvariabledescriptorcountallocationinfoext.rs
new file mode 100644
index 0000000..3e87f18
--- /dev/null
+++ b/vulkan-sys/src/structs/ext/descriptorsetvariabledescriptorcountallocationinfoext.rs
@@ -0,0 +1,29 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkDescriptorSetVariableDescriptorCountAllocateInfoEXT {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub descriptorSetCount: u32,
+    pub pDescriptorCounts: *const u32,
+}
+
+impl VkDescriptorSetVariableDescriptorCountAllocateInfoEXT {
+    pub fn new(descriptor_counts: &[u32]) -> Self {
+        VkDescriptorSetVariableDescriptorCountAllocateInfoEXT {
+            sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO,
+            pNext: ptr::null(),
+            descriptorSetCount: descriptor_counts.len() as u32,
+            pDescriptorCounts: descriptor_counts.as_ptr(),
+        }
+    }
+
+    pub fn set_descriptor_counts(&mut self, descriptor_counts: &[u32]) {
+        self.descriptorSetCount = descriptor_counts.len() as u32;
+        self.pDescriptorCounts = descriptor_counts.as_ptr();
+    }
+}
diff --git a/vulkan-sys/src/structs/ext/descriptorsetvariabledescriptorcountlayoutsupportext.rs b/vulkan-sys/src/structs/ext/descriptorsetvariabledescriptorcountlayoutsupportext.rs
new file mode 100644
index 0000000..80da720
--- /dev/null
+++ b/vulkan-sys/src/structs/ext/descriptorsetvariabledescriptorcountlayoutsupportext.rs
@@ -0,0 +1,28 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkDescriptorSetVariableDescriptorCountLayoutSupportEXT {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub maxVariableDescriptorCount: u32,
+}
+
+impl VkDescriptorSetVariableDescriptorCountLayoutSupportEXT {
+    pub fn new(max_variable_descriptor_count: u32) -> Self {
+        VkDescriptorSetVariableDescriptorCountLayoutSupportEXT {
+            sType: VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_LAYOUT_SUPPORT,
+            pNext: ptr::null(),
+            maxVariableDescriptorCount: max_variable_descriptor_count,
+        }
+    }
+}
+
+impl Default for VkDescriptorSetVariableDescriptorCountLayoutSupportEXT {
+    fn default() -> Self {
+        VkDescriptorSetVariableDescriptorCountLayoutSupportEXT::new(0)
+    }
+}
diff --git a/vulkan-sys/src/structs/ext/mod.rs b/vulkan-sys/src/structs/ext/mod.rs
new file mode 100644
index 0000000..8b526d0
--- /dev/null
+++ b/vulkan-sys/src/structs/ext/mod.rs
@@ -0,0 +1,8 @@
+pub mod descriptorsetlayoutbindingflagscreateinfoext;
+pub mod descriptorsetvariabledescriptorcountallocationinfoext;
+pub mod descriptorsetvariabledescriptorcountlayoutsupportext;
+pub mod physicaldevicedescriptorindexingfeaturesext;
+pub mod physicaldevicedescriptorindexingpropertiesext;
+pub mod physicaldevicememorybudgetpropertiesext;
+
+pub mod prelude;
diff --git a/vulkan-sys/src/structs/ext/physicaldevicedescriptorindexingfeaturesext.rs b/vulkan-sys/src/structs/ext/physicaldevicedescriptorindexingfeaturesext.rs
new file mode 100644
index 0000000..018ddde
--- /dev/null
+++ b/vulkan-sys/src/structs/ext/physicaldevicedescriptorindexingfeaturesext.rs
@@ -0,0 +1,65 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+pub type VkPhysicalDeviceDescriptorIndexingFeatures = VkPhysicalDeviceDescriptorIndexingFeaturesEXT;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkPhysicalDeviceDescriptorIndexingFeaturesEXT {
+    pub sType: VkStructureType,
+    pub pNext: *mut c_void,
+    pub shaderInputAttachmentArrayDynamicIndexing: VkBool32,
+    pub shaderUniformTexelBufferArrayDynamicIndexing: VkBool32,
+    pub shaderStorageTexelBufferArrayDynamicIndexing: VkBool32,
+    pub shaderUniformBufferArrayNonUniformIndexing: VkBool32,
+    pub shaderSampledImageArrayNonUniformIndexing: VkBool32,
+    pub shaderStorageBufferArrayNonUniformIndexing: VkBool32,
+    pub shaderStorageImageArrayNonUniformIndexing: VkBool32,
+    pub shaderInputAttachmentArrayNonUniformIndexing: VkBool32,
+    pub shaderUniformTexelBufferArrayNonUniformIndexing: VkBool32,
+    pub shaderStorageTexelBufferArrayNonUniformIndexing: VkBool32,
+    pub descriptorBindingUniformBufferUpdateAfterBind: VkBool32,
+    pub descriptorBindingSampledImageUpdateAfterBind: VkBool32,
+    pub descriptorBindingStorageImageUpdateAfterBind: VkBool32,
+    pub descriptorBindingStorageBufferUpdateAfterBind: VkBool32,
+    pub descriptorBindingUniformTexelBufferUpdateAfterBind: VkBool32,
+    pub descriptorBindingStorageTexelBufferUpdateAfterBind: VkBool32,
+    pub descriptorBindingUpdateUnusedWhilePending: VkBool32,
+    pub descriptorBindingPartiallyBound: VkBool32,
+    pub descriptorBindingVariableDescriptorCount: VkBool32,
+    pub runtimeDescriptorArray: VkBool32,
+}
+
+impl Default for VkPhysicalDeviceDescriptorIndexingFeaturesEXT {
+    fn default() -> Self {
+        VkPhysicalDeviceDescriptorIndexingFeaturesEXT {
+            sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES,
+            pNext: ptr::null_mut(),
+            shaderInputAttachmentArrayDynamicIndexing: VK_FALSE,
+            shaderUniformTexelBufferArrayDynamicIndexing: VK_FALSE,
+            shaderStorageTexelBufferArrayDynamicIndexing: VK_FALSE,
+            shaderUniformBufferArrayNonUniformIndexing: VK_FALSE,
+            shaderSampledImageArrayNonUniformIndexing: VK_FALSE,
+            shaderStorageBufferArrayNonUniformIndexing: VK_FALSE,
+            shaderStorageImageArrayNonUniformIndexing: VK_FALSE,
+            shaderInputAttachmentArrayNonUniformIndexing: VK_FALSE,
+            shaderUniformTexelBufferArrayNonUniformIndexing: VK_FALSE,
+            shaderStorageTexelBufferArrayNonUniformIndexing: VK_FALSE,
+            descriptorBindingUniformBufferUpdateAfterBind: VK_FALSE,
+            descriptorBindingSampledImageUpdateAfterBind: VK_FALSE,
+            descriptorBindingStorageImageUpdateAfterBind: VK_FALSE,
+            descriptorBindingStorageBufferUpdateAfterBind: VK_FALSE,
+            descriptorBindingUniformTexelBufferUpdateAfterBind: VK_FALSE,
+            descriptorBindingStorageTexelBufferUpdateAfterBind: VK_FALSE,
+            descriptorBindingUpdateUnusedWhilePending: VK_FALSE,
+            descriptorBindingPartiallyBound: VK_FALSE,
+            descriptorBindingVariableDescriptorCount: VK_FALSE,
+            runtimeDescriptorArray: VK_FALSE,
+        }
+    }
+}
+
+unsafe impl Sync for VkPhysicalDeviceDescriptorIndexingFeaturesEXT {}
+unsafe impl Send for VkPhysicalDeviceDescriptorIndexingFeaturesEXT {}
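Every feature defaults to VK_FALSE, so enabling "bindless"-style indexing is a matter of flipping the needed fields and chaining the struct into device creation. A sketch:

    let mut indexing_features = VkPhysicalDeviceDescriptorIndexingFeaturesEXT::default();

    indexing_features.runtimeDescriptorArray = VK_TRUE;
    indexing_features.descriptorBindingPartiallyBound = VK_TRUE;
    indexing_features.shaderSampledImageArrayNonUniformIndexing = VK_TRUE;

    // the struct is then hooked into VkDeviceCreateInfo's pNext chain before
    // vkCreateDevice runs (how exactly depends on the crate's pNext helpers)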
a/vulkan-sys/src/structs/ext/physicaldevicedescriptorindexingpropertiesext.rs b/vulkan-sys/src/structs/ext/physicaldevicedescriptorindexingpropertiesext.rs new file mode 100644 index 0000000..759ce6c --- /dev/null +++ b/vulkan-sys/src/structs/ext/physicaldevicedescriptorindexingpropertiesext.rs @@ -0,0 +1,69 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceDescriptorIndexingPropertiesEXT { + pub sType: VkStructureType, + pub pNext: *mut c_void, + pub maxUpdateAfterBindDescriptorsInAllPools: u32, + pub shaderUniformBufferArrayNonUniformIndexingNative: VkBool32, + pub shaderSampledImageArrayNonUniformIndexingNative: VkBool32, + pub shaderStorageBufferArrayNonUniformIndexingNative: VkBool32, + pub shaderStorageImageArrayNonUniformIndexingNative: VkBool32, + pub shaderInputAttachmentArrayNonUniformIndexingNative: VkBool32, + pub robustBufferAccessUpdateAfterBind: VkBool32, + pub quadDivergentImplicitLod: VkBool32, + pub maxPerStageDescriptorUpdateAfterBindSamplers: u32, + pub maxPerStageDescriptorUpdateAfterBindUniformBuffers: u32, + pub maxPerStageDescriptorUpdateAfterBindStorageBuffers: u32, + pub maxPerStageDescriptorUpdateAfterBindSampledImages: u32, + pub maxPerStageDescriptorUpdateAfterBindStorageImages: u32, + pub maxPerStageDescriptorUpdateAfterBindInputAttachments: u32, + pub maxPerStageUpdateAfterBindResources: u32, + pub maxDescriptorSetUpdateAfterBindSamplers: u32, + pub maxDescriptorSetUpdateAfterBindUniformBuffers: u32, + pub maxDescriptorSetUpdateAfterBindUniformBuffersDynamic: u32, + pub maxDescriptorSetUpdateAfterBindStorageBuffers: u32, + pub maxDescriptorSetUpdateAfterBindStorageBuffersDynamic: u32, + pub maxDescriptorSetUpdateAfterBindSampledImages: u32, + pub maxDescriptorSetUpdateAfterBindStorageImages: u32, + pub maxDescriptorSetUpdateAfterBindInputAttachments: u32, +} + +impl Default for VkPhysicalDeviceDescriptorIndexingPropertiesEXT { + fn default() -> Self { + VkPhysicalDeviceDescriptorIndexingPropertiesEXT { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES, + pNext: ptr::null_mut(), + maxUpdateAfterBindDescriptorsInAllPools: 0, + shaderUniformBufferArrayNonUniformIndexingNative: VK_FALSE, + shaderSampledImageArrayNonUniformIndexingNative: VK_FALSE, + shaderStorageBufferArrayNonUniformIndexingNative: VK_FALSE, + shaderStorageImageArrayNonUniformIndexingNative: VK_FALSE, + shaderInputAttachmentArrayNonUniformIndexingNative: VK_FALSE, + robustBufferAccessUpdateAfterBind: VK_FALSE, + quadDivergentImplicitLod: VK_FALSE, + maxPerStageDescriptorUpdateAfterBindSamplers: 0, + maxPerStageDescriptorUpdateAfterBindUniformBuffers: 0, + maxPerStageDescriptorUpdateAfterBindStorageBuffers: 0, + maxPerStageDescriptorUpdateAfterBindSampledImages: 0, + maxPerStageDescriptorUpdateAfterBindStorageImages: 0, + maxPerStageDescriptorUpdateAfterBindInputAttachments: 0, + maxPerStageUpdateAfterBindResources: 0, + maxDescriptorSetUpdateAfterBindSamplers: 0, + maxDescriptorSetUpdateAfterBindUniformBuffers: 0, + maxDescriptorSetUpdateAfterBindUniformBuffersDynamic: 0, + maxDescriptorSetUpdateAfterBindStorageBuffers: 0, + maxDescriptorSetUpdateAfterBindStorageBuffersDynamic: 0, + maxDescriptorSetUpdateAfterBindSampledImages: 0, + maxDescriptorSetUpdateAfterBindStorageImages: 0, + maxDescriptorSetUpdateAfterBindInputAttachments: 0, + } + } +} + +unsafe impl Sync for VkPhysicalDeviceDescriptorIndexingPropertiesEXT {} +unsafe impl Send for VkPhysicalDeviceDescriptorIndexingPropertiesEXT {} 
diff --git a/vulkan-sys/src/structs/ext/physicaldevicememorybudgetpropertiesext.rs b/vulkan-sys/src/structs/ext/physicaldevicememorybudgetpropertiesext.rs new file mode 100644 index 0000000..b48b877 --- /dev/null +++ b/vulkan-sys/src/structs/ext/physicaldevicememorybudgetpropertiesext.rs @@ -0,0 +1,45 @@ +use crate::prelude::*; + +use super::super::raw_to_slice; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkPhysicalDeviceMemoryBudgetPropertiesEXT { + pub sType: VkStructureType, + pub pNext: *mut c_void, + pub heapBudget: [VkDeviceSize; VK_MAX_MEMORY_HEAPS as usize], + pub heapUsage: [VkDeviceSize; VK_MAX_MEMORY_HEAPS as usize], +} + +impl VkPhysicalDeviceMemoryBudgetPropertiesEXT { + pub fn new( + heap_budget: [VkDeviceSize; VK_MAX_MEMORY_HEAPS as usize], + heap_usage: [VkDeviceSize; VK_MAX_MEMORY_HEAPS as usize], + ) -> Self { + VkPhysicalDeviceMemoryBudgetPropertiesEXT { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT, + pNext: ptr::null_mut(), + heapBudget: heap_budget, + heapUsage: heap_usage, + } + } + + pub fn heap_budgets(&self, count: u32) -> &[VkDeviceSize] { + raw_to_slice(self.heapBudget.as_ptr(), count) + } + + pub fn heap_usages(&self, count: u32) -> &[VkDeviceSize] { + raw_to_slice(self.heapUsage.as_ptr(), count) + } +} + +impl Default for VkPhysicalDeviceMemoryBudgetPropertiesEXT { + fn default() -> Self { + Self::new( + [0; VK_MAX_MEMORY_HEAPS as usize], + [0; VK_MAX_MEMORY_HEAPS as usize], + ) + } +} diff --git a/vulkan-sys/src/structs/ext/prelude.rs b/vulkan-sys/src/structs/ext/prelude.rs new file mode 100644 index 0000000..c66597a --- /dev/null +++ b/vulkan-sys/src/structs/ext/prelude.rs @@ -0,0 +1,6 @@ +pub use super::descriptorsetlayoutbindingflagscreateinfoext::*; +pub use super::descriptorsetvariabledescriptorcountallocationinfoext::*; +pub use super::descriptorsetvariabledescriptorcountlayoutsupportext::*; +pub use super::physicaldevicedescriptorindexingfeaturesext::*; +pub use super::physicaldevicedescriptorindexingpropertiesext::*; +pub use super::physicaldevicememorybudgetpropertiesext::*; diff --git a/vulkan-sys/src/structs/khr/androidsurfacecreateinfokhr.rs b/vulkan-sys/src/structs/khr/androidsurfacecreateinfokhr.rs new file mode 100644 index 0000000..f673323 --- /dev/null +++ b/vulkan-sys/src/structs/khr/androidsurfacecreateinfokhr.rs @@ -0,0 +1,30 @@ +use crate::prelude::*; + +use std::marker::PhantomData; +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkAndroidSurfaceCreateInfoKHR<'a> { + lt: PhantomData<&'a ()>, + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkAndroidSurfaceCreateFlagBitsKHR, + pub window: *mut c_void, +} + +impl<'a> VkAndroidSurfaceCreateInfoKHR<'a> { + pub fn new<T, U>(flags: T, window: &mut U) -> Self + where + T: Into<VkAndroidSurfaceCreateFlagBitsKHR>, + { + VkAndroidSurfaceCreateInfoKHR { + lt: PhantomData, + sType: VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, + pNext: ptr::null(), + flags: flags.into(), + window: window as *mut U as *mut c_void, + } + } +} diff --git a/vulkan-sys/src/structs/khr/descriptorupdatetemplateentrykhr.rs b/vulkan-sys/src/structs/khr/descriptorupdatetemplateentrykhr.rs new file mode 100644 index 0000000..8f340f5 --- /dev/null +++ b/vulkan-sys/src/structs/khr/descriptorupdatetemplateentrykhr.rs @@ -0,0 +1,12 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkDescriptorUpdateTemplateEntryKHR { + pub dstBinding: u32, + pub dstArrayElement: u32, + pub descriptorCount: u32, + pub descriptorType: VkDescriptorType, + pub offset:
usize, + pub stride: usize, +} diff --git a/vulkan-sys/src/structs/khr/displaymodecreateinfokhr.rs b/vulkan-sys/src/structs/khr/displaymodecreateinfokhr.rs new file mode 100644 index 0000000..b216c81 --- /dev/null +++ b/vulkan-sys/src/structs/khr/displaymodecreateinfokhr.rs @@ -0,0 +1,12 @@ +use crate::prelude::*; + +use std::os::raw::c_void; + +#[repr(C)] +#[derive(Debug)] +pub struct VkDisplayModeCreateInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkDisplayModeCreateFlagBitsKHR, + pub parameters: VkDisplayModeParametersKHR, +} diff --git a/vulkan-sys/src/structs/khr/displaymodeparameterkhr.rs b/vulkan-sys/src/structs/khr/displaymodeparameterkhr.rs new file mode 100644 index 0000000..5c0b508 --- /dev/null +++ b/vulkan-sys/src/structs/khr/displaymodeparameterkhr.rs @@ -0,0 +1,8 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkDisplayModeParametersKHR { + pub visibleRegion: VkExtent2D, + pub refreshRate: u32, +} diff --git a/vulkan-sys/src/structs/khr/displaymodepropertieskhr.rs b/vulkan-sys/src/structs/khr/displaymodepropertieskhr.rs new file mode 100644 index 0000000..c710343 --- /dev/null +++ b/vulkan-sys/src/structs/khr/displaymodepropertieskhr.rs @@ -0,0 +1,8 @@ +use crate::prelude::*; + +#[repr(C)] +#[derive(Debug)] +pub struct VkDisplayModePropertiesKHR { + pub displayMode: VkDisplayModeKHR, + pub parameters: VkDisplayModeParametersKHR, +} diff --git a/vulkan-sys/src/structs/khr/displaypresentinfokhr.rs b/vulkan-sys/src/structs/khr/displaypresentinfokhr.rs new file mode 100644 index 0000000..b6f18e2 --- /dev/null +++ b/vulkan-sys/src/structs/khr/displaypresentinfokhr.rs @@ -0,0 +1,13 @@ +use crate::prelude::*; + +use std::os::raw::c_void; + +#[repr(C)] +#[derive(Debug)] +pub struct VkDisplayPresentInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub srcRect: VkRect2D, + pub dstRect: VkRect2D, + pub persistent: VkBool32, +} diff --git a/vulkan-sys/src/structs/khr/displaysurfacecreateinfokhr.rs b/vulkan-sys/src/structs/khr/displaysurfacecreateinfokhr.rs new file mode 100644 index 0000000..8301f6f --- /dev/null +++ b/vulkan-sys/src/structs/khr/displaysurfacecreateinfokhr.rs @@ -0,0 +1,18 @@ +use crate::prelude::*; + +use std::os::raw::c_void; + +#[repr(C)] +#[derive(Debug)] +pub struct VkDisplaySurfaceCreateInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkDisplaySurfaceCreateFlagBitsKHR, + pub displayMode: VkDisplayModeKHR, + pub planeIndex: u32, + pub planeStackIndex: u32, + pub transform: VkSurfaceTransformFlagBitsKHR, + pub globalAlpha: f32, + pub alphaMode: VkDisplayPlaneAlphaFlagBitsKHR, + pub imageExtent: VkExtent2D, +} diff --git a/vulkan-sys/src/structs/khr/formatproperties2khr.rs b/vulkan-sys/src/structs/khr/formatproperties2khr.rs new file mode 100644 index 0000000..944689a --- /dev/null +++ b/vulkan-sys/src/structs/khr/formatproperties2khr.rs @@ -0,0 +1,26 @@ +use crate::prelude::*; + +use std::marker::PhantomData; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkFormatProperties2KHR<'a> { + lt: PhantomData<&'a ()>, + pub sType: VkStructureType, + pub pNext: *const c_void, + pub formatProperties: VkFormatProperties, +} + +impl<'a> VkFormatProperties2KHR<'a> { + pub fn new(format_properties: VkFormatProperties) -> Self { + VkFormatProperties2KHR { + lt: PhantomData, + sType: VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, + pNext: ptr::null(), + formatProperties: format_properties, + } + } +} diff --git 
a/vulkan-sys/src/structs/khr/imageformatproperties2khr.rs b/vulkan-sys/src/structs/khr/imageformatproperties2khr.rs new file mode 100644 index 0000000..40228c1 --- /dev/null +++ b/vulkan-sys/src/structs/khr/imageformatproperties2khr.rs @@ -0,0 +1,26 @@ +use crate::prelude::*; + +use std::marker::PhantomData; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkImageFormatProperties2KHR<'a> { + lt: PhantomData<&'a ()>, + pub sType: VkStructureType, + pub pNext: *const c_void, + pub imageFormatProperties: VkImageFormatProperties, +} + +impl<'a> VkImageFormatProperties2KHR<'a> { + pub fn new(image_format_properties: VkImageFormatProperties) -> Self { + VkImageFormatProperties2KHR { + lt: PhantomData, + sType: VK_STRUCTURE_TYPE_IMAGE_FORMAT_PROPERTIES_2, + pNext: ptr::null(), + imageFormatProperties: image_format_properties, + } + } +} diff --git a/vulkan-sys/src/structs/khr/memoryrequirements2khr.rs b/vulkan-sys/src/structs/khr/memoryrequirements2khr.rs new file mode 100644 index 0000000..bd2ee21 --- /dev/null +++ b/vulkan-sys/src/structs/khr/memoryrequirements2khr.rs @@ -0,0 +1,13 @@ +use crate::prelude::*; + +use std::os::raw::c_void; + +#[repr(C)] +#[derive(Debug)] +pub struct VkMemoryRequirements2 { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub memoryRequirements: VkMemoryRequirements, +} + +pub type VkMemoryRequirements2KHR = VkMemoryRequirements2; diff --git a/vulkan-sys/src/structs/khr/mod.rs b/vulkan-sys/src/structs/khr/mod.rs new file mode 100644 index 0000000..cb68cf7 --- /dev/null +++ b/vulkan-sys/src/structs/khr/mod.rs @@ -0,0 +1,32 @@ +pub mod androidsurfacecreateinfokhr; +pub mod descriptorupdatetemplateentrykhr; +pub mod displaymodecreateinfokhr; +pub mod displaymodeparameterkhr; +pub mod displaymodepropertieskhr; +pub mod displaypresentinfokhr; +pub mod displaysurfacecreateinfokhr; +pub mod formatproperties2khr; +pub mod imageformatproperties2khr; +pub mod memoryrequirements2khr; +pub mod physical_device_buffer_device_address_features; +pub mod physicaldevicefeatures2khr; +pub mod physicaldeviceimageformatinfo2khr; +pub mod physicaldevicememoryproperties2khr; +pub mod physicaldeviceproperties2khr; +pub mod physicaldevicepushdescriptorpropertieskhr; +pub mod physicaldevicesparseimageformatinfo2khr; +pub mod pipeline_library_create_info; +pub mod presentinfokhr; +pub mod queuefamilyproperties2khr; +pub mod sparseimageformatproperties2khr; +pub mod surfacecapabilitieskhr; +pub mod surfaceformatkhr; +pub mod swapchaincreateinfokhr; +pub mod waylandsurfacecreateinfokhr; +pub mod win32surfacecreateinfokhr; +pub mod xcbsurfacecreateinfokhr; +pub mod xlibsurfacecreateinfokhr; + +pub mod ray_tracing; + +pub mod prelude; diff --git a/vulkan-sys/src/structs/khr/physical_device_buffer_device_address_features.rs b/vulkan-sys/src/structs/khr/physical_device_buffer_device_address_features.rs new file mode 100644 index 0000000..2704099 --- /dev/null +++ b/vulkan-sys/src/structs/khr/physical_device_buffer_device_address_features.rs @@ -0,0 +1,29 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceBufferDeviceAddressFeaturesEXT { + pub sType: VkStructureType, + pub pNext: *mut c_void, + pub bufferDeviceAddress: VkBool32, + pub bufferDeviceAddressCaptureReplay: VkBool32, + pub bufferDeviceAddressMultiDevice: VkBool32, +} + +impl Default for VkPhysicalDeviceBufferDeviceAddressFeaturesEXT { + fn default() -> Self { + VkPhysicalDeviceBufferDeviceAddressFeaturesEXT { 
+ sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES, + pNext: ptr::null_mut(), + bufferDeviceAddress: VK_FALSE, + bufferDeviceAddressCaptureReplay: VK_FALSE, + bufferDeviceAddressMultiDevice: VK_FALSE, + } + } +} + +unsafe impl Sync for VkPhysicalDeviceBufferDeviceAddressFeaturesEXT {} +unsafe impl Send for VkPhysicalDeviceBufferDeviceAddressFeaturesEXT {} diff --git a/vulkan-sys/src/structs/khr/physicaldevicefeatures2khr.rs b/vulkan-sys/src/structs/khr/physicaldevicefeatures2khr.rs new file mode 100644 index 0000000..a1d3f2c --- /dev/null +++ b/vulkan-sys/src/structs/khr/physicaldevicefeatures2khr.rs @@ -0,0 +1,52 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceFeatures2KHR { + pub sType: VkStructureType, + pub pNext: *mut c_void, + pub features: VkPhysicalDeviceFeatures, +} + +impl VkPhysicalDeviceFeatures2KHR { + pub fn new(features: VkPhysicalDeviceFeatures) -> Self { + VkPhysicalDeviceFeatures2KHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2, + pNext: ptr::null_mut(), + features, + } + } + + pub fn features(&self) -> VkPhysicalDeviceFeatures { + self.features + } +} + +impl Default for VkPhysicalDeviceFeatures2KHR { + fn default() -> Self { + VkPhysicalDeviceFeatures2KHR::new(VkPhysicalDeviceFeatures::default()) + } +} + +impl_pnext_out!( + VkPhysicalDeviceFeatures2KHR, + VkPhysicalDeviceDescriptorIndexingFeaturesEXT +); + +impl_pnext_out!( + VkPhysicalDeviceFeatures2KHR, + VkPhysicalDeviceRayTracingFeaturesKHR +); + +impl_pnext_out!( + VkPhysicalDeviceFeatures2KHR, + VkPhysicalDeviceBufferDeviceAddressFeaturesEXT +); + +impl_pnext_out!( + VkPhysicalDeviceFeatures2KHR, + VkPhysicalDeviceAccelerationStructureFeaturesKHR +); diff --git a/vulkan-sys/src/structs/khr/physicaldeviceimageformatinfo2khr.rs b/vulkan-sys/src/structs/khr/physicaldeviceimageformatinfo2khr.rs new file mode 100644 index 0000000..9e06937 --- /dev/null +++ b/vulkan-sys/src/structs/khr/physicaldeviceimageformatinfo2khr.rs @@ -0,0 +1,40 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceImageFormatInfo2KHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub format: VkFormat, + pub imageType: VkImageType, + pub tiling: VkImageTiling, + pub usage: VkImageUsageFlagBits, + pub flags: VkImageCreateFlagBits, +} + +impl VkPhysicalDeviceImageFormatInfo2KHR { + pub fn new<T, U>( + format: VkFormat, + image_type: VkImageType, + tiling: VkImageTiling, + usage: T, + flags: U, + ) -> Self + where + T: Into<VkImageUsageFlagBits>, + U: Into<VkImageCreateFlagBits>, + { + VkPhysicalDeviceImageFormatInfo2KHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_IMAGE_FORMAT_INFO_2, + pNext: ptr::null(), + format, + imageType: image_type, + tiling, + usage: usage.into(), + flags: flags.into(), + } + } +} diff --git a/vulkan-sys/src/structs/khr/physicaldevicememoryproperties2khr.rs b/vulkan-sys/src/structs/khr/physicaldevicememoryproperties2khr.rs new file mode 100644 index 0000000..a03f65f --- /dev/null +++ b/vulkan-sys/src/structs/khr/physicaldevicememoryproperties2khr.rs @@ -0,0 +1,35 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +pub type VkPhysicalDeviceMemoryProperties2 = VkPhysicalDeviceMemoryProperties2KHR; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceMemoryProperties2KHR { + pub sType: VkStructureType, + pub pNext: *mut c_void, + pub memoryProperties: VkPhysicalDeviceMemoryProperties, +} + +impl VkPhysicalDeviceMemoryProperties2KHR { + pub
fn new(memory_properties: VkPhysicalDeviceMemoryProperties) -> Self { + VkPhysicalDeviceMemoryProperties2KHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2, + pNext: ptr::null_mut(), + memoryProperties: memory_properties, + } + } +} + +impl Default for VkPhysicalDeviceMemoryProperties2KHR { + fn default() -> Self { + Self::new(VkPhysicalDeviceMemoryProperties::default()) + } +} + +impl_pnext_out!( + VkPhysicalDeviceMemoryProperties2KHR, + VkPhysicalDeviceMemoryBudgetPropertiesEXT +); diff --git a/vulkan-sys/src/structs/khr/physicaldeviceproperties2khr.rs b/vulkan-sys/src/structs/khr/physicaldeviceproperties2khr.rs new file mode 100644 index 0000000..6feb333 --- /dev/null +++ b/vulkan-sys/src/structs/khr/physicaldeviceproperties2khr.rs @@ -0,0 +1,45 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkPhysicalDeviceProperties2KHR { + pub sType: VkStructureType, + pub pNext: *mut c_void, + pub properties: VkPhysicalDeviceProperties, +} + +impl VkPhysicalDeviceProperties2KHR { + pub fn new(properties: VkPhysicalDeviceProperties) -> Self { + VkPhysicalDeviceProperties2KHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, + pNext: ptr::null_mut(), + properties, + } + } +} + +impl Default for VkPhysicalDeviceProperties2KHR { + fn default() -> Self { + Self::new(VkPhysicalDeviceProperties::default()) + } +} + +unsafe impl Sync for VkPhysicalDeviceProperties2KHR {} +unsafe impl Send for VkPhysicalDeviceProperties2KHR {} + +impl_pnext_out!( + VkPhysicalDeviceProperties2KHR, + VkPhysicalDeviceRayTracingPropertiesKHR +); + +impl_pnext_out!( + VkPhysicalDeviceProperties2KHR, + VkPhysicalDeviceDescriptorIndexingPropertiesEXT +); + +impl_pnext_out!( + VkPhysicalDeviceProperties2KHR, + VkPhysicalDeviceAccelerationStructurePropertiesKHR +); diff --git a/vulkan-sys/src/structs/khr/physicaldevicepushdescriptorpropertieskhr.rs b/vulkan-sys/src/structs/khr/physicaldevicepushdescriptorpropertieskhr.rs new file mode 100644 index 0000000..fc985b5 --- /dev/null +++ b/vulkan-sys/src/structs/khr/physicaldevicepushdescriptorpropertieskhr.rs @@ -0,0 +1,22 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDevicePushDescriptorPropertiesKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub maxPushDescriptors: u32, +} + +impl VkPhysicalDevicePushDescriptorPropertiesKHR { + pub fn new(max_push_descriptors: u32) -> Self { + VkPhysicalDevicePushDescriptorPropertiesKHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR, + pNext: ptr::null(), + maxPushDescriptors: max_push_descriptors, + } + } +} diff --git a/vulkan-sys/src/structs/khr/physicaldevicesparseimageformatinfo2khr.rs b/vulkan-sys/src/structs/khr/physicaldevicesparseimageformatinfo2khr.rs new file mode 100644 index 0000000..1c7dfce --- /dev/null +++ b/vulkan-sys/src/structs/khr/physicaldevicesparseimageformatinfo2khr.rs @@ -0,0 +1,40 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceSparseImageFormatInfo2KHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub format: VkFormat, + pub imageType: VkImageType, + pub samples: VkSampleCountFlagBits, + pub usage: VkImageUsageFlagBits, + pub tiling: VkImageTiling, +} + +impl VkPhysicalDeviceSparseImageFormatInfo2KHR { + pub fn new<T, U>( + format: VkFormat, + image_type: VkImageType, + samples: T, + usage: U, + tiling: VkImageTiling, + ) -> Self + where + T: Into<VkSampleCountFlagBits>, + U: Into<VkImageUsageFlagBits>, + { + VkPhysicalDeviceSparseImageFormatInfo2KHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SPARSE_IMAGE_FORMAT_INFO_2, + pNext: ptr::null(), + format, + imageType: image_type, + samples: samples.into(), + usage: usage.into(), + tiling, + } + } +} diff --git a/vulkan-sys/src/structs/khr/pipeline_library_create_info.rs b/vulkan-sys/src/structs/khr/pipeline_library_create_info.rs new file mode 100644 index 0000000..bf146a4 --- /dev/null +++ b/vulkan-sys/src/structs/khr/pipeline_library_create_info.rs @@ -0,0 +1,28 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPipelineLibraryCreateInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub libraryCount: u32, + pub pLibrary: *const VkPipeline, +} + +impl VkPipelineLibraryCreateInfoKHR { + pub fn new(libraries: &[VkPipeline]) -> Self { + VkPipelineLibraryCreateInfoKHR { + sType: VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR, + pNext: ptr::null(), + libraryCount: libraries.len() as u32, + pLibrary: if libraries.is_empty() { + ptr::null() + } else { + libraries.as_ptr() + }, + } + } +} diff --git a/vulkan-sys/src/structs/khr/prelude.rs b/vulkan-sys/src/structs/khr/prelude.rs new file mode 100644 index 0000000..1407022 --- /dev/null +++ b/vulkan-sys/src/structs/khr/prelude.rs @@ -0,0 +1,30 @@ +pub use super::androidsurfacecreateinfokhr::*; +pub use super::descriptorupdatetemplateentrykhr::*; +pub use super::displaymodecreateinfokhr::*; +pub use super::displaymodeparameterkhr::*; +pub use super::displaymodepropertieskhr::*; +pub use super::displaypresentinfokhr::*; +pub use super::displaysurfacecreateinfokhr::*; +pub use super::formatproperties2khr::*; +pub use super::imageformatproperties2khr::*; +pub use super::memoryrequirements2khr::*; +pub use super::physical_device_buffer_device_address_features::*; +pub use super::physicaldevicefeatures2khr::*; +pub use super::physicaldeviceimageformatinfo2khr::*; +pub use super::physicaldevicememoryproperties2khr::*; +pub use super::physicaldeviceproperties2khr::*; +pub use super::physicaldevicepushdescriptorpropertieskhr::*; +pub use super::physicaldevicesparseimageformatinfo2khr::*; +pub use super::pipeline_library_create_info::*; +pub use super::presentinfokhr::*; +pub use super::queuefamilyproperties2khr::*; +pub use super::sparseimageformatproperties2khr::*; +pub use super::surfacecapabilitieskhr::*; +pub use super::surfaceformatkhr::*; +pub use super::swapchaincreateinfokhr::*; +pub use super::waylandsurfacecreateinfokhr::*; +pub use super::win32surfacecreateinfokhr::*; +pub use super::xcbsurfacecreateinfokhr::*; +pub use super::xlibsurfacecreateinfokhr::*; + +pub use super::ray_tracing::prelude::*; diff --git a/vulkan-sys/src/structs/khr/presentinfokhr.rs b/vulkan-sys/src/structs/khr/presentinfokhr.rs new file mode 100644 index 0000000..5fd1345 --- /dev/null +++ b/vulkan-sys/src/structs/khr/presentinfokhr.rs @@ -0,0 +1,44 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPresentInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub waitSemaphoreCount: u32, + pub pWaitSemaphores: *const VkSemaphore, + pub swapchainCount: u32, + pub pSwapchains: *const VkSwapchainKHR, + pub pImageIndices: *const u32, + pub pResults: *mut VkResult, +} + +impl VkPresentInfoKHR { + pub fn new( + wait_semaphores: &[VkSemaphore], + swapchains: &[VkSwapchainKHR], + image_indices: &[u32], + results: &mut [VkResult], + ) ->
Self { + debug_assert!(image_indices.len() == swapchains.len()); + + VkPresentInfoKHR { + sType: VK_STRUCTURE_TYPE_PRESENT_INFO_KHR, + pNext: ptr::null(), + waitSemaphoreCount: wait_semaphores.len() as u32, + pWaitSemaphores: wait_semaphores.as_ptr(), + swapchainCount: swapchains.len() as u32, + pSwapchains: swapchains.as_ptr(), + pImageIndices: image_indices.as_ptr(), + pResults: if results.is_empty() { + ptr::null_mut() + } else { + debug_assert!(results.len() == swapchains.len()); + results.as_mut_ptr() + }, + } + } +}
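A sketch of a typical call into the constructor above, assuming `semaphore`, `swapchain` and the `image_index` from a preceding acquire are already at hand; with an empty results slice, pResults stays null, and the actual vkQueuePresentKHR dispatch is loaded elsewhere:

    let wait_semaphores = [semaphore];
    let swapchains = [swapchain];
    let image_indices = [image_index];
    let mut results: [VkResult; 0] = [];
    let present_info = VkPresentInfoKHR::new(
        &wait_semaphores,
        &swapchains,
        &image_indices,
        &mut results,
    );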
diff --git a/vulkan-sys/src/structs/khr/queuefamilyproperties2khr.rs b/vulkan-sys/src/structs/khr/queuefamilyproperties2khr.rs new file mode 100644 index 0000000..48f5edc --- /dev/null +++ b/vulkan-sys/src/structs/khr/queuefamilyproperties2khr.rs @@ -0,0 +1,22 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkQueueFamilyProperties2KHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub queueFamilyProperties: VkQueueFamilyProperties, +} + +impl VkQueueFamilyProperties2KHR { + pub fn new(queue_family_properties: VkQueueFamilyProperties) -> Self { + VkQueueFamilyProperties2KHR { + sType: VK_STRUCTURE_TYPE_QUEUE_FAMILY_PROPERTIES_2, + pNext: ptr::null(), + queueFamilyProperties: queue_family_properties, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/aabb_positions.rs b/vulkan-sys/src/structs/khr/ray_tracing/aabb_positions.rs new file mode 100644 index 0000000..439d1c5 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/aabb_positions.rs @@ -0,0 +1,36 @@ +#[repr(C)] +#[derive(Debug, Clone, PartialEq)] +pub struct VkAabbPositionsKHR { + pub minX: f32, + pub minY: f32, + pub minZ: f32, + pub maxX: f32, + pub maxY: f32, + pub maxZ: f32, +} + +impl VkAabbPositionsKHR { + pub fn new(min: [f32; 3], max: [f32; 3]) -> Self { + VkAabbPositionsKHR { + minX: min[0], + minY: min[1], + minZ: min[2], + maxX: max[0], + maxY: max[1], + maxZ: max[2], + } + } +} + +impl Default for VkAabbPositionsKHR { + fn default() -> Self { + VkAabbPositionsKHR { + minX: 0.0, + minY: 0.0, + minZ: 0.0, + maxX: 0.0, + maxY: 0.0, + maxZ: 0.0, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_geometry_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_geometry_info.rs new file mode 100644 index 0000000..1822b6e --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_geometry_info.rs @@ -0,0 +1,85 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkAccelerationStructureBuildGeometryInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub r#type: VkAccelerationStructureTypeKHR, + pub flags: VkBuildAccelerationStructureFlagBitsKHR, + pub mode: VkBuildAccelerationStructureModeKHR, + pub srcAccelerationStructure: VkAccelerationStructureKHR, + pub dstAccelerationStructure: VkAccelerationStructureKHR, + pub geometryCount: u32, + pub pGeometries: *const VkAccelerationStructureGeometryKHR, + pub ppGeometries: *const *const VkAccelerationStructureGeometryKHR, + pub scratchData: VkDeviceOrHostAddressKHR, +} + +impl VkAccelerationStructureBuildGeometryInfoKHR { + pub fn new( + r#type: VkAccelerationStructureTypeKHR, + flags: impl Into<VkBuildAccelerationStructureFlagBitsKHR>, + mode: VkBuildAccelerationStructureModeKHR, + src_acceleration_structure: VkAccelerationStructureKHR, + dst_acceleration_structure: VkAccelerationStructureKHR, + scratch_data: VkDeviceOrHostAddressKHR, + ) -> Self { VkAccelerationStructureBuildGeometryInfoKHR { + sType: VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_GEOMETRY_INFO_KHR, + pNext: ptr::null(), + r#type, + flags: flags.into(), + mode, + srcAccelerationStructure: src_acceleration_structure, + dstAccelerationStructure: dst_acceleration_structure, + geometryCount: 0, + pGeometries: ptr::null(), + ppGeometries: ptr::null(), + scratchData: scratch_data, + } + } + + pub fn minimal( + r#type: VkAccelerationStructureTypeKHR, + flags: impl Into<VkBuildAccelerationStructureFlagBitsKHR>, + mode: VkBuildAccelerationStructureModeKHR, + ) -> Self { + Self::new( + r#type, + flags, + mode, + VkAccelerationStructureKHR::NULL_HANDLE, + VkAccelerationStructureKHR::NULL_HANDLE, + VkDeviceOrHostAddressKHR::null(), + ) + } +} + +pub trait SetGeometry<T> { + fn set_geometry(&mut self, geometries: T); +} + +impl<'a> SetGeometry<&'a [&'a VkAccelerationStructureGeometryKHR]> + for VkAccelerationStructureBuildGeometryInfoKHR +{ + fn set_geometry(&mut self, geometries: &'a [&'a VkAccelerationStructureGeometryKHR]) { + debug_assert_eq!(self.pGeometries, ptr::null()); + + self.geometryCount = geometries.len() as u32; + self.ppGeometries = geometries.as_ptr() as *const *const _; + } +} + +impl<'a> SetGeometry<&'a [VkAccelerationStructureGeometryKHR]> + for VkAccelerationStructureBuildGeometryInfoKHR +{ + fn set_geometry(&mut self, geometries: &'a [VkAccelerationStructureGeometryKHR]) { + debug_assert_eq!(self.ppGeometries, ptr::null()); + + self.geometryCount = geometries.len() as u32; + self.pGeometries = geometries.as_ptr() as *const _; + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_range_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_range_info.rs new file mode 100644 index 0000000..2d14cce --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_range_info.rs @@ -0,0 +1,24 @@ +#[repr(C)] +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct VkAccelerationStructureBuildRangeInfoKHR { + pub primitiveCount: u32, + pub primitiveOffset: u32, + pub firstVertex: u32, + pub transformOffset: u32, +} + +impl VkAccelerationStructureBuildRangeInfoKHR { + pub fn new( + primitive_count: u32, + primitive_offset: u32, + first_vertex: u32, + transform_offset: u32, + ) -> Self { + VkAccelerationStructureBuildRangeInfoKHR { + primitiveCount: primitive_count, + primitiveOffset: primitive_offset, + firstVertex: first_vertex, + transformOffset: transform_offset, + } + } +}
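A sketch of the two-step pattern the SetGeometry trait above enables: construct the info with no geometry attached, then fill in exactly one of pGeometries / ppGeometries; the VK_* variant spellings are assumed to match this crate's khr enums:

    let mut build_info = VkAccelerationStructureBuildGeometryInfoKHR::minimal(
        VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR,           // assumed name
        VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR, // assumed name
        VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR,            // assumed name
    );
    // geometries: &[VkAccelerationStructureGeometryKHR], built beforehand;
    // set_geometry fills geometryCount plus one of the two pointers, and its
    // debug_asserts guard against setting both.
    build_info.set_geometry(geometries);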
diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_sizes_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_sizes_info.rs new file mode 100644 index 0000000..a1f6b90 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_build_sizes_info.rs @@ -0,0 +1,30 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug, Clone)] +pub struct VkAccelerationStructureBuildSizesInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub accelerationStructureSize: VkDeviceSize, + pub updateScratchSize: VkDeviceSize, + pub buildScratchSize: VkDeviceSize, +} + +impl VkAccelerationStructureBuildSizesInfoKHR { + pub fn new( + acceleration_structure_size: VkDeviceSize, + update_scratch_size: VkDeviceSize, + build_scratch_size: VkDeviceSize, + ) -> Self { + VkAccelerationStructureBuildSizesInfoKHR { + sType: VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_BUILD_SIZES_INFO_KHR, + pNext: ptr::null(), + accelerationStructureSize: acceleration_structure_size, + updateScratchSize: update_scratch_size, + buildScratchSize: build_scratch_size, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_create_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_create_info.rs new file mode 100644 index 0000000..57ad240 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_create_info.rs @@ -0,0 +1,38 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkAccelerationStructureCreateInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub createFlags: VkAccelerationStructureCreateFlagBitsKHR, + pub buffer: VkBuffer, + pub offset: VkDeviceSize, + pub size: VkDeviceSize, + pub r#type: VkAccelerationStructureTypeKHR, + pub deviceAddress: VkDeviceAddress, +} + +impl VkAccelerationStructureCreateInfoKHR { + pub fn new( + create_flags: impl Into<VkAccelerationStructureCreateFlagBitsKHR>, + buffer: VkBuffer, + offset: VkDeviceSize, + size: VkDeviceSize, + r#type: VkAccelerationStructureTypeKHR, + device_address: VkDeviceAddress, + ) -> Self { + VkAccelerationStructureCreateInfoKHR { + sType: VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR, + pNext: ptr::null(), + createFlags: create_flags.into(), + buffer, + offset, + size, + r#type, + deviceAddress: device_address, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_device_address_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_device_address_info.rs new file mode 100644 index 0000000..a5f950b --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_device_address_info.rs @@ -0,0 +1,21 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkAccelerationStructureDeviceAddressInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub accelerationStructure: VkAccelerationStructureKHR, +} + +impl VkAccelerationStructureDeviceAddressInfoKHR { + pub fn new(acceleration_structure: VkAccelerationStructureKHR) -> Self { + VkAccelerationStructureDeviceAddressInfoKHR { + sType: VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR, + pNext: ptr::null(), + accelerationStructure: acceleration_structure, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry.rs new file mode 100644 index 0000000..5da31b4 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry.rs @@ -0,0 +1,32 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkAccelerationStructureGeometryKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub geometryType: VkGeometryTypeKHR, + pub geometry: VkAccelerationStructureGeometryDataKHR, + pub flags: VkGeometryFlagBitsKHR, +} + +impl VkAccelerationStructureGeometryKHR { + pub fn new( + geometry_type: VkGeometryTypeKHR, + geometry: VkAccelerationStructureGeometryDataKHR, + flags: impl Into<VkGeometryFlagBitsKHR>, + ) -> Self { + VkAccelerationStructureGeometryKHR { + sType: VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_KHR, + pNext: ptr::null(), + geometryType: geometry_type, + geometry, + flags: flags.into(), + } + } +} + +unsafe impl Send for VkAccelerationStructureGeometryKHR {} +unsafe impl Sync for VkAccelerationStructureGeometryKHR {} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_aabbs_data.rs
b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_aabbs_data.rs new file mode 100644 index 0000000..b2d5380 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_aabbs_data.rs @@ -0,0 +1,24 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct VkAccelerationStructureGeometryAabbsDataKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub data: VkDeviceOrHostAddressConstKHR, + pub stride: VkDeviceSize, +} + +impl VkAccelerationStructureGeometryAabbsDataKHR { + pub fn new(data: VkDeviceOrHostAddressConstKHR, stride: VkDeviceSize) -> Self { + VkAccelerationStructureGeometryAabbsDataKHR { + sType: VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_AABBS_DATA_KHR, + pNext: ptr::null(), + data, + stride, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_data.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_data.rs new file mode 100644 index 0000000..1d5d0d4 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_data.rs @@ -0,0 +1,30 @@ +use crate::prelude::*; + +#[repr(C)] +pub union VkAccelerationStructureGeometryDataKHR { + pub triangles: VkAccelerationStructureGeometryTrianglesDataKHR, + pub aabbs: VkAccelerationStructureGeometryAabbsDataKHR, + pub instances: VkAccelerationStructureGeometryInstancesDataKHR, +} + +impl From<VkAccelerationStructureGeometryTrianglesDataKHR> + for VkAccelerationStructureGeometryDataKHR +{ + fn from(triangles: VkAccelerationStructureGeometryTrianglesDataKHR) -> Self { + VkAccelerationStructureGeometryDataKHR { triangles } + } +} + +impl From<VkAccelerationStructureGeometryAabbsDataKHR> for VkAccelerationStructureGeometryDataKHR { + fn from(aabbs: VkAccelerationStructureGeometryAabbsDataKHR) -> Self { + VkAccelerationStructureGeometryDataKHR { aabbs } + } +} + +impl From<VkAccelerationStructureGeometryInstancesDataKHR> + for VkAccelerationStructureGeometryDataKHR +{ + fn from(instances: VkAccelerationStructureGeometryInstancesDataKHR) -> Self { + VkAccelerationStructureGeometryDataKHR { instances } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_instances_data.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_instances_data.rs new file mode 100644 index 0000000..c477907 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_instances_data.rs @@ -0,0 +1,47 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct VkAccelerationStructureGeometryInstancesDataKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub arrayOfPointers: VkBool32, + pub data: VkDeviceOrHostAddressConstKHR, +} + +impl VkAccelerationStructureGeometryInstancesDataKHR { + #[inline] + fn new(array_of_pointers: impl Into<VkBool32>, data: VkDeviceOrHostAddressConstKHR) -> Self { + VkAccelerationStructureGeometryInstancesDataKHR { + sType: VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_INSTANCES_DATA_KHR, + pNext: ptr::null(), + arrayOfPointers: array_of_pointers.into(), + data, + } + } +} + +impl<'a> From<&'a [VkAccelerationStructureInstanceKHR]> + for VkAccelerationStructureGeometryInstancesDataKHR +{ + fn from(instances: &'a [VkAccelerationStructureInstanceKHR]) -> Self { + Self::new(false, VkDeviceOrHostAddressConstKHR::from(instances)) + } +} + +impl<'a> From<&'a [&'a VkAccelerationStructureInstanceKHR]> + for VkAccelerationStructureGeometryInstancesDataKHR +{ + fn from(instances: &'a [&'a VkAccelerationStructureInstanceKHR]) -> Self { + Self::new(true, VkDeviceOrHostAddressConstKHR::from(instances)) + } +} + +impl From<VkDeviceOrHostAddressConstKHR> for VkAccelerationStructureGeometryInstancesDataKHR { + fn from(address: VkDeviceOrHostAddressConstKHR) -> Self { + Self::new(false, address) + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_triangles_data.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_triangles_data.rs new file mode 100644 index 0000000..eba9c7e --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_geometry_triangles_data.rs @@ -0,0 +1,42 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Copy, Clone)] +pub struct VkAccelerationStructureGeometryTrianglesDataKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub vertexFormat: VkFormat, + pub vertexData: VkDeviceOrHostAddressConstKHR, + pub vertexStride: VkDeviceSize, + pub maxVertex: u32, + pub indexType: VkIndexType, + pub indexData: VkDeviceOrHostAddressConstKHR, + pub transformData: VkDeviceOrHostAddressConstKHR, +} + +impl VkAccelerationStructureGeometryTrianglesDataKHR { + pub fn new( + vertex_format: VkFormat, + vertex_data: VkDeviceOrHostAddressConstKHR, + vertex_stride: VkDeviceSize, + max_vertex: u32, + index_type: VkIndexType, + index_data: VkDeviceOrHostAddressConstKHR, + transform_data: VkDeviceOrHostAddressConstKHR, + ) -> Self { + VkAccelerationStructureGeometryTrianglesDataKHR { + sType: VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_GEOMETRY_TRIANGLES_DATA_KHR, + pNext: ptr::null(), + vertexFormat: vertex_format, + vertexData: vertex_data, + vertexStride: vertex_stride, + maxVertex: max_vertex, + indexType: index_type, + indexData: index_data, + transformData: transform_data, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_instance.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_instance.rs new file mode 100644 index 0000000..8fe91aa --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_instance.rs @@ -0,0 +1,92 @@ +use crate::prelude::*; +use std::fmt; + +#[repr(C)] +#[derive(Copy, Clone, PartialEq)] +pub struct VkAccelerationStructureInstanceKHR { + pub transform: VkTransformMatrixKHR, + instance_id_mask: u32, + instance_offset_flags: u32, + pub accelerationStructureReference: u64, +} + +impl VkAccelerationStructureInstanceKHR { + pub fn new( + transform: VkTransformMatrixKHR, + instance_custom_index: u32, + mask: u8, + instance_shader_binding_table_record_offset: u32, + flags: impl Into<VkGeometryInstanceFlagBitsKHR>, + acceleration_structure_reference: u64, + ) -> Self { + let instance_id = Self::u24_to_u32(instance_custom_index); + let instance_offset = Self::u24_to_u32(instance_shader_binding_table_record_offset); + let flags: u32 = flags.into().into(); + let flags = Self::u32_to_u8(flags); + let mask = Self::u32_to_u8(mask as u32); + + VkAccelerationStructureInstanceKHR { + transform, + instance_id_mask: (instance_id | mask), + instance_offset_flags: (instance_offset | flags), + accelerationStructureReference: acceleration_structure_reference, + } + } + + pub fn instance_id(&self) -> u32 { + Self::u32_to_u24(self.instance_id_mask) + } + + pub fn mask(&self) -> u32 { + Self::u8_to_u32(self.instance_id_mask) + } + + pub fn instance_offset(&self) -> u32 { + Self::u32_to_u24(self.instance_offset_flags) + } + + pub fn flags(&self) -> VkGeometryInstanceFlagBitsKHR { + Self::u8_to_u32(self.instance_offset_flags).into() + }
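+ // The two private u32 fields mirror the C bitfields of the spec struct:
+ // instance_id_mask packs instanceCustomIndex into bits 0..24 and the
+ // visibility mask into bits 24..32; instance_offset_flags likewise packs
+ // the SBT record offset (24 bits) and the instance flags (8 bits).
+ // For example, new(.., 5, 0xFF, ..) stores 0xFF00_0005 in instance_id_mask.
+ // The helpers below implement that packing: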
+ #[inline] + fn u32_to_u24(bits: u32) -> u32 { + bits & 0x00FF_FFFF + } + + #[inline] + fn u24_to_u32(bits: u32) -> u32 { + bits & 0x00FF_FFFF + } + + #[inline] + fn u32_to_u8(bits: u32) -> u32 { + (bits & 0x0000_00FF) << 24 + } + + #[inline] + fn u8_to_u32(bits: u32) -> u32 { + (bits & 0xFF00_0000) >> 24 + } +} + +impl fmt::Debug for VkAccelerationStructureInstanceKHR { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "VkAccelerationStructureInstanceKHR {{ transform: {:?}, instanceID: {}, mask: {}, instanceOffset: {}, flags: {:?}, accelerationStructureReference: {} }}", + self.transform, + self.instance_id(), + self.mask(), + self.instance_offset(), + self.flags(), + self.accelerationStructureReference + ) + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_version_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_version_info.rs new file mode 100644 index 0000000..ad10ba2 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/acceleration_structure_version_info.rs @@ -0,0 +1,26 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkAccelerationStructureVersionInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub versionData: *const u8, +} + +impl VkAccelerationStructureVersionInfoKHR { + pub fn new(version_data: &[u8]) -> Self { + debug_assert_eq!( + version_data.len() as u32, 2 * VK_UUID_SIZE, + "Spec says: versionData must be a valid pointer to an array of 2*VK_UUID_SIZE uint8_t values" + ); + + VkAccelerationStructureVersionInfoKHR { + sType: VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_VERSION_INFO_KHR, + pNext: ptr::null(), + versionData: version_data.as_ptr(), + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/copy_acceleration_structure_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/copy_acceleration_structure_info.rs new file mode 100644 index 0000000..a3bc280 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/copy_acceleration_structure_info.rs @@ -0,0 +1,29 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkCopyAccelerationStructureInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub src: VkAccelerationStructureKHR, + pub dst: VkAccelerationStructureKHR, + pub mode: VkCopyAccelerationStructureModeKHR, +} + +impl VkCopyAccelerationStructureInfoKHR { + pub fn new( + src: VkAccelerationStructureKHR, + dst: VkAccelerationStructureKHR, + mode: VkCopyAccelerationStructureModeKHR, + ) -> Self { + VkCopyAccelerationStructureInfoKHR { + sType: VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_INFO_KHR, + pNext: ptr::null(), + src, + dst, + mode, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/copy_acceleration_structure_to_memory_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/copy_acceleration_structure_to_memory_info.rs new file mode 100644 index 0000000..1473538 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/copy_acceleration_structure_to_memory_info.rs @@ -0,0 +1,29 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkCopyAccelerationStructureToMemoryInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub src: VkAccelerationStructureKHR, + pub dst: VkDeviceOrHostAddressKHR, + pub mode: VkCopyAccelerationStructureModeKHR, +} + +impl VkCopyAccelerationStructureToMemoryInfoKHR { + pub fn new( + src: VkAccelerationStructureKHR, + dst: VkDeviceOrHostAddressKHR, + mode:
VkCopyAccelerationStructureModeKHR, + ) -> Self { + VkCopyAccelerationStructureToMemoryInfoKHR { + sType: VK_STRUCTURE_TYPE_COPY_ACCELERATION_STRUCTURE_TO_MEMORY_INFO_KHR, + pNext: ptr::null(), + src, + dst, + mode, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/copy_memory_to_acceleration_structure_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/copy_memory_to_acceleration_structure_info.rs new file mode 100644 index 0000000..d2252d5 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/copy_memory_to_acceleration_structure_info.rs @@ -0,0 +1,29 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkCopyMemoryToAccelerationStructureInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub src: VkDeviceOrHostAddressConstKHR, + pub dst: VkAccelerationStructureKHR, + pub mode: VkCopyAccelerationStructureModeKHR, +} + +impl VkCopyMemoryToAccelerationStructureInfoKHR { + pub fn new( + src: VkDeviceOrHostAddressConstKHR, + dst: VkAccelerationStructureKHR, + mode: VkCopyAccelerationStructureModeKHR, + ) -> Self { + VkCopyMemoryToAccelerationStructureInfoKHR { + sType: VK_STRUCTURE_TYPE_COPY_MEMORY_TO_ACCELERATION_STRUCTURE_INFO_KHR, + pNext: ptr::null(), + src, + dst, + mode, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/device_or_host_address.rs b/vulkan-sys/src/structs/khr/ray_tracing/device_or_host_address.rs new file mode 100644 index 0000000..c5f0c43 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/device_or_host_address.rs @@ -0,0 +1,51 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Clone, Copy)] +pub union VkDeviceOrHostAddressKHR { + deviceAddress: VkDeviceAddress, + hostAddress: *mut c_void, +} + +impl VkDeviceOrHostAddressKHR { + pub fn null() -> Self { + VkDeviceOrHostAddressKHR { + hostAddress: ptr::null_mut(), + } + } + + pub fn device_address(&self) -> VkDeviceAddress { + unsafe { self.deviceAddress } + } + + pub fn host_address<T>(&mut self) -> &mut T { + unsafe { &mut *(self.hostAddress as *mut T) } + } +} + +impl<'a, T> From<&'a mut T> for VkDeviceOrHostAddressKHR { + fn from(host_address: &'a mut T) -> Self { + VkDeviceOrHostAddressKHR { + hostAddress: host_address as *mut T as *mut c_void, + } + } +} + +impl<'a, T> From<&'a mut [T]> for VkDeviceOrHostAddressKHR { + fn from(host_address: &'a mut [T]) -> Self { + VkDeviceOrHostAddressKHR { + hostAddress: host_address.as_mut_ptr() as *mut c_void, + } + } +} + +impl From<VkDeviceAddress> for VkDeviceOrHostAddressKHR { + fn from(device_address: VkDeviceAddress) -> Self { + VkDeviceOrHostAddressKHR { + deviceAddress: device_address, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/device_or_host_address_const.rs b/vulkan-sys/src/structs/khr/ray_tracing/device_or_host_address_const.rs new file mode 100644 index 0000000..580c33d --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/device_or_host_address_const.rs @@ -0,0 +1,59 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Clone, Copy)] +pub union VkDeviceOrHostAddressConstKHR { + deviceAddress: VkDeviceAddress, + hostAddress: *const c_void, +} + +impl VkDeviceOrHostAddressConstKHR { + pub fn null() -> Self { + VkDeviceOrHostAddressConstKHR { + hostAddress: ptr::null(), + } + } + + pub fn device_address(&self) -> VkDeviceAddress { + unsafe { self.deviceAddress } + } + + pub fn host_address<T>(&self) -> &T { + unsafe { &*(self.hostAddress as *const T) } + } +} + +impl From<VkDeviceAddress> for VkDeviceOrHostAddressConstKHR { + fn from(device_address: VkDeviceAddress) -> Self { + VkDeviceOrHostAddressConstKHR { + deviceAddress: device_address, + } + } +} + +impl<'a, T> From<&'a T> for VkDeviceOrHostAddressConstKHR { + fn from(host_address: &'a T) -> Self { + VkDeviceOrHostAddressConstKHR { + hostAddress: host_address as *const T as *const c_void, + } + } +} + +impl<'a, T> From<&'a [T]> for VkDeviceOrHostAddressConstKHR { + fn from(host_address: &'a [T]) -> Self { + VkDeviceOrHostAddressConstKHR { + hostAddress: host_address.as_ptr() as *const c_void, + } + } +} + +impl From<VkDeviceOrHostAddressKHR> for VkDeviceOrHostAddressConstKHR { + fn from(address: VkDeviceOrHostAddressKHR) -> Self { + VkDeviceOrHostAddressConstKHR { + deviceAddress: address.device_address(), + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/mod.rs b/vulkan-sys/src/structs/khr/ray_tracing/mod.rs new file mode 100644 index 0000000..45f708c --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/mod.rs @@ -0,0 +1,32 @@ +pub mod aabb_positions; +pub mod acceleration_structure_build_geometry_info; +pub mod acceleration_structure_build_range_info; +pub mod acceleration_structure_build_sizes_info; +pub mod acceleration_structure_create_info; +pub mod acceleration_structure_device_address_info; +pub mod acceleration_structure_geometry; +pub mod acceleration_structure_geometry_aabbs_data; +pub mod acceleration_structure_geometry_data; +pub mod acceleration_structure_geometry_instances_data; +pub mod acceleration_structure_geometry_triangles_data; +pub mod acceleration_structure_instance; +pub mod acceleration_structure_version_info; +pub mod copy_acceleration_structure_info; +pub mod copy_acceleration_structure_to_memory_info; +pub mod copy_memory_to_acceleration_structure_info; +pub mod device_or_host_address; +pub mod device_or_host_address_const; +pub mod physical_device_acceleration_structure_features; +pub mod physical_device_acceleration_structure_properties; +pub mod physical_device_ray_tracing_features; +pub mod physical_device_ray_tracing_properties; +pub mod ray_tracing_pipeline_create_info; +pub mod ray_tracing_pipeline_interface_create_info; +pub mod ray_tracing_shader_group_create_info; +pub mod strided_buffer_region; +pub mod strided_device_address_region; +pub mod trace_rays_indirect_command; +pub mod transform_matrix; +pub mod write_descriptor_set_acceleration_structure; + +pub mod prelude; diff --git a/vulkan-sys/src/structs/khr/ray_tracing/physical_device_acceleration_structure_features.rs b/vulkan-sys/src/structs/khr/ray_tracing/physical_device_acceleration_structure_features.rs new file mode 100644 index 0000000..ba2c8b5 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/physical_device_acceleration_structure_features.rs @@ -0,0 +1,33 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceAccelerationStructureFeaturesKHR { + pub sType: VkStructureType, + pub pNext: *mut c_void, + pub accelerationStructure: VkBool32, + pub accelerationStructureCaptureReplay: VkBool32, + pub accelerationStructureIndirectBuild: VkBool32, + pub accelerationStructureHostCommands: VkBool32, + pub descriptorBindingAccelerationStructureUpdateAfterBind: VkBool32, +} + +impl Default for VkPhysicalDeviceAccelerationStructureFeaturesKHR { + fn default() -> Self { + VkPhysicalDeviceAccelerationStructureFeaturesKHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR, + pNext: ptr::null_mut(), + accelerationStructure:
VK_FALSE, + accelerationStructureCaptureReplay: VK_FALSE, + accelerationStructureIndirectBuild: VK_FALSE, + accelerationStructureHostCommands: VK_FALSE, + descriptorBindingAccelerationStructureUpdateAfterBind: VK_FALSE, + } + } +} + +unsafe impl Sync for VkPhysicalDeviceAccelerationStructureFeaturesKHR {} +unsafe impl Send for VkPhysicalDeviceAccelerationStructureFeaturesKHR {} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/physical_device_acceleration_structure_properties.rs b/vulkan-sys/src/structs/khr/ray_tracing/physical_device_acceleration_structure_properties.rs new file mode 100644 index 0000000..f4d2165 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/physical_device_acceleration_structure_properties.rs @@ -0,0 +1,39 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceAccelerationStructurePropertiesKHR { + pub sType: VkStructureType, + pub pNext: *mut c_void, + pub maxGeometryCount: u64, + pub maxInstanceCount: u64, + pub maxPrimitiveCount: u64, + pub maxPerStageDescriptorAccelerationStructures: u32, + pub maxPerStageDescriptorUpdateAfterBindAccelerationStructures: u32, + pub maxDescriptorSetAccelerationStructures: u32, + pub maxDescriptorSetUpdateAfterBindAccelerationStructures: u32, + pub minAccelerationStructureScratchOffsetAlignment: u32, +} + +impl Default for VkPhysicalDeviceAccelerationStructurePropertiesKHR { + fn default() -> Self { + VkPhysicalDeviceAccelerationStructurePropertiesKHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_PROPERTIES_KHR, + pNext: ptr::null_mut(), + maxGeometryCount: 0, + maxInstanceCount: 0, + maxPrimitiveCount: 0, + maxPerStageDescriptorAccelerationStructures: 0, + maxPerStageDescriptorUpdateAfterBindAccelerationStructures: 0, + maxDescriptorSetAccelerationStructures: 0, + maxDescriptorSetUpdateAfterBindAccelerationStructures: 0, + minAccelerationStructureScratchOffsetAlignment: 0, + } + } +} + +unsafe impl Sync for VkPhysicalDeviceAccelerationStructurePropertiesKHR {} +unsafe impl Send for VkPhysicalDeviceAccelerationStructurePropertiesKHR {} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/physical_device_ray_tracing_features.rs b/vulkan-sys/src/structs/khr/ray_tracing/physical_device_ray_tracing_features.rs new file mode 100644 index 0000000..be66830 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/physical_device_ray_tracing_features.rs @@ -0,0 +1,33 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceRayTracingFeaturesKHR { + pub sType: VkStructureType, + pub pNext: *mut c_void, + pub rayTracingPipeline: VkBool32, + pub rayTracingPipelineShaderGroupHandleCaptureReplay: VkBool32, + pub rayTracingPipelineShaderGroupHandleCaptureReplayMixed: VkBool32, + pub rayTracingPipelineTraceRaysIndirect: VkBool32, + pub rayTraversalPrimitiveCulling: VkBool32, +} + +impl Default for VkPhysicalDeviceRayTracingFeaturesKHR { + fn default() -> Self { + VkPhysicalDeviceRayTracingFeaturesKHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_FEATURES_KHR, + pNext: ptr::null_mut(), + rayTracingPipeline: VK_FALSE, + rayTracingPipelineShaderGroupHandleCaptureReplay: VK_FALSE, + rayTracingPipelineShaderGroupHandleCaptureReplayMixed: VK_FALSE, + rayTracingPipelineTraceRaysIndirect: VK_FALSE, + rayTraversalPrimitiveCulling: VK_FALSE, + } + } +} + +unsafe impl Sync for VkPhysicalDeviceRayTracingFeaturesKHR {} +unsafe impl Send for 
VkPhysicalDeviceRayTracingFeaturesKHR {} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/physical_device_ray_tracing_properties.rs b/vulkan-sys/src/structs/khr/ray_tracing/physical_device_ray_tracing_properties.rs new file mode 100644 index 0000000..944e1c5 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/physical_device_ray_tracing_properties.rs @@ -0,0 +1,39 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +#[derive(Debug)] +pub struct VkPhysicalDeviceRayTracingPropertiesKHR { + pub sType: VkStructureType, + pub pNext: *mut c_void, + pub shaderGroupHandleSize: u32, + pub maxRayRecursionDepth: u32, + pub maxShaderGroupStride: u32, + pub shaderGroupBaseAlignment: u32, + pub shaderGroupHandleCaptureReplaySize: u32, + pub maxRayDispatchInvocationCount: u32, + pub shaderGroupHandleAlignment: u32, + pub maxRayHitAttributeSize: u32, +} + +impl Default for VkPhysicalDeviceRayTracingPropertiesKHR { + fn default() -> Self { + VkPhysicalDeviceRayTracingPropertiesKHR { + sType: VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PIPELINE_PROPERTIES_KHR, + pNext: ptr::null_mut(), + shaderGroupHandleSize: 0, + maxRayRecursionDepth: 0, + maxShaderGroupStride: 0, + shaderGroupBaseAlignment: 0, + shaderGroupHandleCaptureReplaySize: 0, + maxRayDispatchInvocationCount: 0, + shaderGroupHandleAlignment: 0, + maxRayHitAttributeSize: 0, + } + } +} + +unsafe impl Sync for VkPhysicalDeviceRayTracingPropertiesKHR {} +unsafe impl Send for VkPhysicalDeviceRayTracingPropertiesKHR {} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/prelude.rs b/vulkan-sys/src/structs/khr/ray_tracing/prelude.rs new file mode 100644 index 0000000..ea4a2fe --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/prelude.rs @@ -0,0 +1,30 @@ +pub use super::aabb_positions::*; +pub use super::acceleration_structure_build_geometry_info::*; +pub use super::acceleration_structure_build_range_info::*; +pub use super::acceleration_structure_build_sizes_info::*; +pub use super::acceleration_structure_create_info::*; +pub use super::acceleration_structure_device_address_info::*; +pub use super::acceleration_structure_geometry::*; +pub use super::acceleration_structure_geometry_aabbs_data::*; +pub use super::acceleration_structure_geometry_data::*; +pub use super::acceleration_structure_geometry_instances_data::*; +pub use super::acceleration_structure_geometry_triangles_data::*; +pub use super::acceleration_structure_instance::*; +pub use super::acceleration_structure_version_info::*; +pub use super::copy_acceleration_structure_info::*; +pub use super::copy_acceleration_structure_to_memory_info::*; +pub use super::copy_memory_to_acceleration_structure_info::*; +pub use super::device_or_host_address::*; +pub use super::device_or_host_address_const::*; +pub use super::physical_device_acceleration_structure_features::*; +pub use super::physical_device_acceleration_structure_properties::*; +pub use super::physical_device_ray_tracing_features::*; +pub use super::physical_device_ray_tracing_properties::*; +pub use super::ray_tracing_pipeline_create_info::*; +pub use super::ray_tracing_pipeline_interface_create_info::*; +pub use super::ray_tracing_shader_group_create_info::*; +pub use super::strided_buffer_region::*; +pub use super::strided_device_address_region::*; +pub use super::trace_rays_indirect_command::*; +pub use super::transform_matrix::*; +pub use super::write_descriptor_set_acceleration_structure::*; diff --git a/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_pipeline_create_info.rs 
b/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_pipeline_create_info.rs new file mode 100644 index 0000000..10ceb1e --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_pipeline_create_info.rs @@ -0,0 +1,57 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkRayTracingPipelineCreateInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub flags: VkPipelineCreateFlagBits, + pub stageCount: u32, + pub pStages: *const VkPipelineShaderStageCreateInfo, + pub groupCount: u32, + pub pGroups: *const VkRayTracingShaderGroupCreateInfoKHR, + pub maxPipelineRayRecursionDepth: u32, + pub pLibraryInfo: *const VkPipelineLibraryCreateInfoKHR, + pub pLibraryInterface: *const VkRayTracingPipelineInterfaceCreateInfoKHR, + pub pDynamicState: *const VkPipelineDynamicStateCreateInfo, + pub layout: VkPipelineLayout, + pub basePipelineHandle: VkPipeline, + pub basePipelineIndex: i32, +} + +impl VkRayTracingPipelineCreateInfoKHR { + pub fn new( + flags: impl Into<VkPipelineCreateFlagBits>, + stages: &[VkPipelineShaderStageCreateInfo], + groups: &[VkRayTracingShaderGroupCreateInfoKHR], + max_pipeline_ray_recursion_depth: u32, + libraries: &VkPipelineLibraryCreateInfoKHR, + library_interface: &VkRayTracingPipelineInterfaceCreateInfoKHR, + dynamic_state: &VkPipelineDynamicStateCreateInfo, + layout: VkPipelineLayout, + ) -> Self { + VkRayTracingPipelineCreateInfoKHR { + sType: VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_KHR, + pNext: ptr::null(), + flags: flags.into(), + stageCount: stages.len() as u32, + pStages: stages.as_ptr(), + groupCount: groups.len() as u32, + pGroups: groups.as_ptr(), + maxPipelineRayRecursionDepth: max_pipeline_ray_recursion_depth, + pLibraryInfo: libraries, + pLibraryInterface: library_interface, + pDynamicState: dynamic_state, + layout, + basePipelineHandle: VkPipeline::NULL_HANDLE, + basePipelineIndex: -1, + } + } + + pub fn set_base_pipeline(&mut self, pipeline: VkPipeline, index: i32) { + self.basePipelineHandle = pipeline; + self.basePipelineIndex = index; + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_pipeline_interface_create_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_pipeline_interface_create_info.rs new file mode 100644 index 0000000..a83f8e4 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_pipeline_interface_create_info.rs @@ -0,0 +1,26 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + +#[repr(C)] +pub struct VkRayTracingPipelineInterfaceCreateInfoKHR { + pub sType: VkStructureType, + pub pNext: *const c_void, + pub maxPipelineRayPayloadSize: u32, + pub maxPipelineRayHitAttributeSize: u32, +} + +impl VkRayTracingPipelineInterfaceCreateInfoKHR { + pub fn new( + max_pipeline_ray_payload_size: u32, + max_pipeline_ray_hit_attribute_size: u32, + ) -> Self { + VkRayTracingPipelineInterfaceCreateInfoKHR { + sType: VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_INTERFACE_CREATE_INFO_KHR, + pNext: ptr::null(), + maxPipelineRayPayloadSize: max_pipeline_ray_payload_size, + maxPipelineRayHitAttributeSize: max_pipeline_ray_hit_attribute_size, + } + } +} diff --git a/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_shader_group_create_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_shader_group_create_info.rs new file mode 100644 index 0000000..b36efc8 --- /dev/null +++ b/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_shader_group_create_info.rs @@ -0,0 +1,52 @@ +use crate::prelude::*; + +use std::os::raw::c_void; +use std::ptr; + 
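+// A general group (raygen, miss, callable) uses only generalShader; triangle
+// and procedural hit groups use closestHitShader / anyHitShader /
+// intersectionShader instead. Unused slots take the spec's sentinel
+// VK_SHADER_UNUSED_KHR (!0u32). Editorial sketch, with the group-type
+// variant name assumed from this crate's khr enums:
+//
+//     let raygen_group = VkRayTracingShaderGroupCreateInfoKHR::new(
+//         VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, // assumed name
+//         0,          // index of the raygen stage in pStages
+//         !0, !0, !0, // closest-hit / any-hit / intersection unused
+//     );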
diff --git a/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_shader_group_create_info.rs b/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_shader_group_create_info.rs
new file mode 100644
index 0000000..b36efc8
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/ray_tracing/ray_tracing_shader_group_create_info.rs
@@ -0,0 +1,41 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+pub struct VkRayTracingShaderGroupCreateInfoKHR {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub r#type: VkRayTracingShaderGroupTypeKHR,
+    pub generalShader: u32,
+    pub closestHitShader: u32,
+    pub anyHitShader: u32,
+    pub intersectionShader: u32,
+    pub pShaderGroupCaptureReplayHandle: *const c_void,
+}
+
+impl VkRayTracingShaderGroupCreateInfoKHR {
+    pub fn new(
+        r#type: VkRayTracingShaderGroupTypeKHR,
+        general_shader: u32,
+        closest_hit_shader: u32,
+        any_hit_shader: u32,
+        intersection_shader: u32,
+    ) -> Self {
+        VkRayTracingShaderGroupCreateInfoKHR {
+            sType: VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_KHR,
+            pNext: ptr::null(),
+            r#type,
+            generalShader: general_shader,
+            closestHitShader: closest_hit_shader,
+            anyHitShader: any_hit_shader,
+            intersectionShader: intersection_shader,
+            pShaderGroupCaptureReplayHandle: ptr::null(),
+        }
+    }
+
+    pub fn set_replay_handle<T>(&mut self, replay_handle: &T) {
+        self.pShaderGroupCaptureReplayHandle = replay_handle as *const T as *const _;
+    }
+}
diff --git a/vulkan-sys/src/structs/khr/ray_tracing/strided_buffer_region.rs b/vulkan-sys/src/structs/khr/ray_tracing/strided_buffer_region.rs
new file mode 100644
index 0000000..5559f4a
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/ray_tracing/strided_buffer_region.rs
@@ -0,0 +1,9 @@
+use crate::prelude::*;
+
+#[repr(C)]
+pub struct VkStridedBufferRegionKHR {
+    pub buffer: VkBuffer,
+    pub offset: VkDeviceSize,
+    pub stride: VkDeviceSize,
+    pub size: VkDeviceSize,
+}
diff --git a/vulkan-sys/src/structs/khr/ray_tracing/strided_device_address_region.rs b/vulkan-sys/src/structs/khr/ray_tracing/strided_device_address_region.rs
new file mode 100644
index 0000000..f798609
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/ray_tracing/strided_device_address_region.rs
@@ -0,0 +1,8 @@
+use crate::prelude::*;
+
+#[repr(C)]
+pub struct VkStridedDeviceAddressRegionKHR {
+    pub deviceAddress: VkDeviceAddress,
+    pub stride: VkDeviceSize,
+    pub size: VkDeviceSize,
+}
diff --git a/vulkan-sys/src/structs/khr/ray_tracing/trace_rays_indirect_command.rs b/vulkan-sys/src/structs/khr/ray_tracing/trace_rays_indirect_command.rs
new file mode 100644
index 0000000..b08e743
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/ray_tracing/trace_rays_indirect_command.rs
@@ -0,0 +1,6 @@
+#[repr(C)]
+pub struct VkTraceRaysIndirectCommandKHR {
+    pub width: u32,
+    pub height: u32,
+    pub depth: u32,
+}
diff --git a/vulkan-sys/src/structs/khr/ray_tracing/transform_matrix.rs b/vulkan-sys/src/structs/khr/ray_tracing/transform_matrix.rs
new file mode 100644
index 0000000..42534cd
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/ray_tracing/transform_matrix.rs
@@ -0,0 +1,13 @@
+#[repr(C)]
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub struct VkTransformMatrixKHR {
+    matrix: [[f32; 4]; 3],
+}
+
+impl From<[[f32; 4]; 4]> for VkTransformMatrixKHR {
+    fn from(matrix: [[f32; 4]; 4]) -> Self {
+        VkTransformMatrixKHR {
+            matrix: [matrix[0], matrix[1], matrix[2]],
+        }
+    }
+}
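Usage sketch (illustration, not part of the patched sources): a raygen group is of the "general" kind and fills the unused shader slots with VK_SHADER_UNUSED_KHR (declared in types/constants.rs later in this patch); the From impl on VkTransformMatrixKHR simply drops the fourth row of a 4x4 array. The exact variant name of the group-type enum is an assumption taken from the Vulkan spec.

fn example_group_and_transform() -> (VkRayTracingShaderGroupCreateInfoKHR, VkTransformMatrixKHR) {
    let raygen_group = VkRayTracingShaderGroupCreateInfoKHR::new(
        VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR, // assumed variant name
        0,                    // index of the raygen stage in pStages
        VK_SHADER_UNUSED_KHR, // no closest-hit shader in a general group
        VK_SHADER_UNUSED_KHR, // no any-hit shader
        VK_SHADER_UNUSED_KHR, // no intersection shader
    );

    // identity transform: the fourth row of the 4x4 input is discarded
    let transform: VkTransformMatrixKHR = [
        [1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, 1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ]
    .into();

    (raygen_group, transform)
}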
diff --git a/vulkan-sys/src/structs/khr/ray_tracing/write_descriptor_set_acceleration_structure.rs b/vulkan-sys/src/structs/khr/ray_tracing/write_descriptor_set_acceleration_structure.rs
new file mode 100644
index 0000000..abbf1ec
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/ray_tracing/write_descriptor_set_acceleration_structure.rs
@@ -0,0 +1,44 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug, PartialEq, Eq)]
+pub struct VkWriteDescriptorSetAccelerationStructureKHR {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub accelerationStructureCount: u32,
+    pub pAccelerationStructures: *const VkAccelerationStructureKHR,
+}
+
+impl VkWriteDescriptorSetAccelerationStructureKHR {
+    pub fn new(acceleration_structures: &[VkAccelerationStructureKHR]) -> Self {
+        VkWriteDescriptorSetAccelerationStructureKHR {
+            sType: VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR,
+            pNext: ptr::null(),
+            accelerationStructureCount: acceleration_structures.len() as u32,
+            pAccelerationStructures: if acceleration_structures.is_empty() {
+                ptr::null()
+            } else {
+                acceleration_structures.as_ptr()
+            },
+        }
+    }
+
+    pub fn set_acceleration_structures<'a>(
+        &'a mut self,
+        acceleration_structures: &'a [VkAccelerationStructureKHR],
+    ) {
+        debug_assert!(!acceleration_structures.is_empty());
+
+        self.accelerationStructureCount = acceleration_structures.len() as u32;
+        self.pAccelerationStructures = acceleration_structures.as_ptr();
+    }
+}
+
+impl Default for VkWriteDescriptorSetAccelerationStructureKHR {
+    fn default() -> Self {
+        Self::new(&[])
+    }
+}
diff --git a/vulkan-sys/src/structs/khr/sparseimageformatproperties2khr.rs b/vulkan-sys/src/structs/khr/sparseimageformatproperties2khr.rs
new file mode 100644
index 0000000..83000a3
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/sparseimageformatproperties2khr.rs
@@ -0,0 +1,22 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSparseImageFormatProperties2KHR {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub properties: VkSparseImageFormatProperties,
+}
+
+impl VkSparseImageFormatProperties2KHR {
+    pub fn new(properties: VkSparseImageFormatProperties) -> Self {
+        VkSparseImageFormatProperties2KHR {
+            sType: VK_STRUCTURE_TYPE_SPARSE_IMAGE_FORMAT_PROPERTIES_2,
+            pNext: ptr::null(),
+            properties,
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/khr/surfacecapabilitieskhr.rs b/vulkan-sys/src/structs/khr/surfacecapabilitieskhr.rs
new file mode 100644
index 0000000..378c8bf
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/surfacecapabilitieskhr.rs
@@ -0,0 +1,16 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug, Default)]
+pub struct VkSurfaceCapabilitiesKHR {
+    pub minImageCount: u32,
+    pub maxImageCount: u32,
+    pub currentExtent: VkExtent2D,
+    pub minImageExtent: VkExtent2D,
+    pub maxImageExtent: VkExtent2D,
+    pub maxImageArrayLayers: u32,
+    pub supportedTransforms: VkSurfaceTransformFlagBitsKHR,
+    pub currentTransform: VkSurfaceTransformFlagBitsKHR,
+    pub supportedCompositeAlpha: VkCompositeAlphaFlagBitsKHR,
+    pub supportedUsageFlagBits: VkImageUsageFlagBits,
+}
diff --git a/vulkan-sys/src/structs/khr/surfaceformatkhr.rs b/vulkan-sys/src/structs/khr/surfaceformatkhr.rs
new file mode 100644
index 0000000..8224a86
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/surfaceformatkhr.rs
@@ -0,0 +1,8 @@
+use crate::prelude::*;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkSurfaceFormatKHR {
+    pub format: VkFormat,
+    pub colorSpace: VkColorSpaceKHR,
+}
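Usage sketch (illustration, not part of the patched sources): VkSurfaceCapabilitiesKHR is plain data, but one detail is easy to get wrong when sizing a swapchain: maxImageCount == 0 means the surface imposes no upper limit, per the Vulkan specification.

fn choose_image_count(caps: &VkSurfaceCapabilitiesKHR, desired: u32) -> u32 {
    let at_least_min = desired.max(caps.minImageCount);

    if caps.maxImageCount == 0 {
        // 0 == unlimited
        at_least_min
    } else {
        at_least_min.min(caps.maxImageCount)
    }
}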
diff --git a/vulkan-sys/src/structs/khr/swapchaincreateinfokhr.rs b/vulkan-sys/src/structs/khr/swapchaincreateinfokhr.rs
new file mode 100644
index 0000000..2018c58
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/swapchaincreateinfokhr.rs
@@ -0,0 +1,81 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug, Clone)]
+pub struct VkSwapchainCreateInfoKHR {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkSwapchainCreateFlagBitsKHR,
+    pub surface: VkSurfaceKHR,
+    pub minImageCount: u32,
+    pub imageFormat: VkFormat,
+    pub imageColorSpace: VkColorSpaceKHR,
+    pub imageExtent: VkExtent2D,
+    pub imageArrayLayers: u32,
+    pub imageUsage: VkImageUsageFlagBits,
+    pub imageSharingMode: VkSharingMode,
+    pub queueFamilyIndexCount: u32,
+    pub pQueueFamilyIndices: *const u32,
+    pub preTransform: VkSurfaceTransformFlagBitsKHR,
+    pub compositeAlpha: VkCompositeAlphaFlagBitsKHR,
+    pub presentMode: VkPresentModeKHR,
+    pub clipped: VkBool32,
+    pub oldSwapchain: VkSwapchainKHR,
+}
+
+impl VkSwapchainCreateInfoKHR {
+    pub fn new<T, U, V, W, S>(
+        flags: T,
+        surface: VkSurfaceKHR,
+        min_image_count: u32,
+        format: VkFormat,
+        color_space: VkColorSpaceKHR,
+        extent: VkExtent2D,
+        array_layers: u32,
+        usage: U,
+        sharing_mode: VkSharingMode,
+        queue_family_indices: &[u32],
+        pre_transform: V,
+        composite_alpha: W,
+        present_mode: VkPresentModeKHR,
+        clipped: S,
+    ) -> Self
+    where
+        T: Into<VkSwapchainCreateFlagBitsKHR>,
+        U: Into<VkImageUsageFlagBits>,
+        V: Into<VkSurfaceTransformFlagBitsKHR>,
+        W: Into<VkCompositeAlphaFlagBitsKHR>,
+        S: Into<VkBool32>,
+    {
+        VkSwapchainCreateInfoKHR {
+            sType: VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            surface,
+            minImageCount: min_image_count,
+            imageFormat: format,
+            imageColorSpace: color_space,
+            imageExtent: extent,
+            imageArrayLayers: array_layers,
+            imageUsage: usage.into(),
+            imageSharingMode: sharing_mode,
+            queueFamilyIndexCount: queue_family_indices.len() as u32,
+            pQueueFamilyIndices: queue_family_indices.as_ptr(),
+            preTransform: pre_transform.into(),
+            compositeAlpha: composite_alpha.into(),
+            presentMode: present_mode,
+            clipped: clipped.into(),
+            oldSwapchain: VkSwapchainKHR::NULL_HANDLE,
+        }
+    }
+
+    pub fn set_old_swapchain(&mut self, swapchain: VkSwapchainKHR) {
+        self.oldSwapchain = swapchain;
+    }
+}
+
+unsafe impl Send for VkSwapchainCreateInfoKHR {}
+unsafe impl Sync for VkSwapchainCreateInfoKHR {}
diff --git a/vulkan-sys/src/structs/khr/waylandsurfacecreateinfokhr.rs b/vulkan-sys/src/structs/khr/waylandsurfacecreateinfokhr.rs
new file mode 100644
index 0000000..203454d
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/waylandsurfacecreateinfokhr.rs
@@ -0,0 +1,29 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkWaylandSurfaceCreateInfoKHR {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkWaylandSurfaceCreateFlagBitsKHR,
+    pub display: *mut c_void,
+    pub surface: *mut c_void,
+}
+
+impl VkWaylandSurfaceCreateInfoKHR {
+    pub fn new<T, U, V>(flags: T, display: &mut U, surface: &mut V) -> Self
+    where
+        T: Into<VkWaylandSurfaceCreateFlagBitsKHR>,
+    {
+        VkWaylandSurfaceCreateInfoKHR {
+            sType: VK_STRUCTURE_TYPE_WAYLAND_SURFACE_CREATE_INFO_KHR,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            display: display as *mut U as *mut c_void,
+            surface: surface as *mut V as *mut c_void,
+        }
+    }
+}
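Usage sketch (illustration, not part of the patched sources): on window resize, the old swapchain is handed to the new create info via set_old_swapchain so the driver may recycle its resources. Flag and enum values arrive as parameters because their concrete variant names live elsewhere in this patch; note the returned struct keeps a raw pointer into queue_family_indices, which therefore must outlive the create call.

fn recreate_swapchain_info(
    flags: VkSwapchainCreateFlagBitsKHR,
    surface: VkSurfaceKHR,
    caps: VkSurfaceCapabilitiesKHR,
    format: &VkSurfaceFormatKHR,
    usage: VkImageUsageFlagBits,
    sharing_mode: VkSharingMode,
    queue_family_indices: &[u32],
    composite_alpha: VkCompositeAlphaFlagBitsKHR,
    present_mode: VkPresentModeKHR,
    clipped: VkBool32,
    old_swapchain: VkSwapchainKHR,
) -> VkSwapchainCreateInfoKHR {
    let mut info = VkSwapchainCreateInfoKHR::new(
        flags,
        surface,
        caps.minImageCount,
        format.format,
        format.colorSpace,
        caps.currentExtent,
        1, // non-stereoscopic rendering
        usage,
        sharing_mode,
        queue_family_indices,
        caps.currentTransform,
        composite_alpha,
        present_mode,
        clipped,
    );
    info.set_old_swapchain(old_swapchain);
    info
}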
diff --git a/vulkan-sys/src/structs/khr/win32surfacecreateinfokhr.rs b/vulkan-sys/src/structs/khr/win32surfacecreateinfokhr.rs
new file mode 100644
index 0000000..cf277b8
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/win32surfacecreateinfokhr.rs
@@ -0,0 +1,29 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkWin32SurfaceCreateInfoKHR {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkWin32SurfaceCreateFlagBitsKHR,
+    pub hinstance: *mut c_void,
+    pub hwnd: *mut c_void,
+}
+
+impl VkWin32SurfaceCreateInfoKHR {
+    pub fn new<T, U, V>(flags: T, hinstance: &mut U, hwnd: &mut V) -> Self
+    where
+        T: Into<VkWin32SurfaceCreateFlagBitsKHR>,
+    {
+        VkWin32SurfaceCreateInfoKHR {
+            sType: VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            hinstance: hinstance as *mut U as *mut c_void,
+            hwnd: hwnd as *mut V as *mut c_void,
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/khr/xcbsurfacecreateinfokhr.rs b/vulkan-sys/src/structs/khr/xcbsurfacecreateinfokhr.rs
new file mode 100644
index 0000000..18010c3
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/xcbsurfacecreateinfokhr.rs
@@ -0,0 +1,29 @@
+use crate::prelude::*;
+
+use std::os::raw::c_void;
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkXcbSurfaceCreateInfoKHR {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkXcbSurfaceCreateFlagBitsKHR,
+    pub connection: *const c_void,
+    pub window: u32,
+}
+
+impl VkXcbSurfaceCreateInfoKHR {
+    pub fn new<T, U>(flags: T, connection: &mut U, window: u32) -> Self
+    where
+        T: Into<VkXcbSurfaceCreateFlagBitsKHR>,
+    {
+        VkXcbSurfaceCreateInfoKHR {
+            sType: VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            connection: connection as *mut U as *mut c_void,
+            window,
+        }
+    }
+}
diff --git a/vulkan-sys/src/structs/khr/xlibsurfacecreateinfokhr.rs b/vulkan-sys/src/structs/khr/xlibsurfacecreateinfokhr.rs
new file mode 100644
index 0000000..84ade21
--- /dev/null
+++ b/vulkan-sys/src/structs/khr/xlibsurfacecreateinfokhr.rs
@@ -0,0 +1,29 @@
+use crate::prelude::*;
+
+use std::os::raw::{c_ulong, c_void};
+use std::ptr;
+
+#[repr(C)]
+#[derive(Debug)]
+pub struct VkXlibSurfaceCreateInfoKHR {
+    pub sType: VkStructureType,
+    pub pNext: *const c_void,
+    pub flags: VkXlibSurfaceCreateFlagBitsKHR,
+    pub dpy: *mut c_void,
+    pub window: c_ulong,
+}
+
+impl VkXlibSurfaceCreateInfoKHR {
+    pub fn new<T, U>(flags: T, dpy: &mut U, window: c_ulong) -> Self
+    where
+        T: Into<VkXlibSurfaceCreateFlagBitsKHR>,
+    {
+        VkXlibSurfaceCreateInfoKHR {
+            sType: VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR,
+            pNext: ptr::null(),
+            flags: flags.into(),
+            dpy: dpy as *mut U as *mut c_void,
+            window,
+        }
+    }
+}
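Usage sketch (illustration, not part of the patched sources): with the generic parameters spelled out, the constructors accept any FFI display/window type by mutable reference and erase it to *mut c_void internally; the flag argument can be the bitflag type itself, since Into is reflexive.

use std::os::raw::c_ulong;

fn xlib_surface_info<D>(
    flags: VkXlibSurfaceCreateFlagBitsKHR,
    display: &mut D,
    window: c_ulong,
) -> VkXlibSurfaceCreateInfoKHR {
    // D is erased to *mut c_void inside `new`
    VkXlibSurfaceCreateInfoKHR::new(flags, display, window)
}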
diff --git a/vulkan-sys/src/structs/macros.rs b/vulkan-sys/src/structs/macros.rs
new file mode 100644
index 0000000..e6f5fa5
--- /dev/null
+++ b/vulkan-sys/src/structs/macros.rs
@@ -0,0 +1,37 @@
+macro_rules! impl_pnext_in {
+    ($implementor: ty, $struct_name: ident) => {
+        impl PNextIn<$struct_name> for $implementor {
+            fn chain(&mut self, p_next: &$struct_name) {
+                unsafe {
+                    let self_as_base_structure =
+                        self as *mut $implementor as *mut VkBaseInStructure;
+
+                    let last_next = VkBaseInStructure::ptr_chain_iter(self_as_base_structure)
+                        .last()
+                        .unwrap();
+
+                    (*last_next).pNext = p_next as *const $struct_name as *const _;
+                }
+            }
+        }
+    };
+}
+
+macro_rules! impl_pnext_out {
+    ($implementor: ty, $struct_name: ident) => {
+        impl PNextOut<$struct_name> for $implementor {
+            fn chain(&mut self, p_next: &mut $struct_name) {
+                unsafe {
+                    let self_as_base_structure =
+                        self as *mut $implementor as *mut VkBaseOutStructure;
+
+                    let last_next = VkBaseOutStructure::ptr_chain_iter(self_as_base_structure)
+                        .last()
+                        .unwrap();
+
+                    (*last_next).pNext = p_next as *mut $struct_name as *mut _;
+                }
+            }
+        }
+    };
+}
diff --git a/vulkan-sys/src/structs/mod.rs b/vulkan-sys/src/structs/mod.rs
new file mode 100644
index 0000000..2891649
--- /dev/null
+++ b/vulkan-sys/src/structs/mod.rs
@@ -0,0 +1,23 @@
+#[macro_use]
+mod macros;
+
+pub mod amd;
+pub mod core;
+pub mod ext;
+pub mod khr;
+
+pub mod prelude;
+
+pub trait PNextIn<T> {
+    fn chain(&mut self, p_next: &T);
+}
+
+pub trait PNextOut<T> {
+    fn chain(&mut self, p_next: &mut T);
+}
+
+use std::slice;
+
+fn raw_to_slice<'a, T: Clone>(pointer: *const T, size: u32) -> &'a [T] {
+    unsafe { slice::from_raw_parts(pointer, size as usize) }
+}
diff --git a/vulkan-sys/src/structs/prelude.rs b/vulkan-sys/src/structs/prelude.rs
new file mode 100644
index 0000000..576ec52
--- /dev/null
+++ b/vulkan-sys/src/structs/prelude.rs
@@ -0,0 +1,7 @@
+pub use super::PNextIn;
+pub use super::PNextOut;
+
+pub use super::amd::prelude::*;
+pub use super::core::prelude::*;
+pub use super::ext::prelude::*;
+pub use super::khr::prelude::*;
diff --git a/vulkan-sys/src/types/constants.rs b/vulkan-sys/src/types/constants.rs
new file mode 100644
index 0000000..e5440b7
--- /dev/null
+++ b/vulkan-sys/src/types/constants.rs
@@ -0,0 +1,14 @@
+pub const VK_LOD_CLAMP_NONE: f32 = 1000.0;
+pub const VK_REMAINING_MIP_LEVELS: u32 = 0xFFFF_FFFF;
+pub const VK_REMAINING_ARRAY_LAYERS: u32 = 0xFFFF_FFFF;
+pub const VK_WHOLE_SIZE: u64 = 0xFFFF_FFFF_FFFF_FFFF;
+pub const VK_ATTACHMENT_UNUSED: u32 = 0xFFFF_FFFF;
+pub const VK_QUEUE_FAMILY_IGNORED: u32 = 0xFFFF_FFFF;
+pub const VK_SUBPASS_EXTERNAL: u32 = 0xFFFF_FFFF;
+pub const VK_MAX_PHYSICAL_DEVICE_NAME_SIZE: u32 = 256;
+pub const VK_UUID_SIZE: u32 = 16;
+pub const VK_MAX_MEMORY_TYPES: u32 = 32;
+pub const VK_MAX_MEMORY_HEAPS: u32 = 16;
+pub const VK_MAX_EXTENSION_NAME_SIZE: u32 = 256;
+pub const VK_MAX_DESCRIPTION_SIZE: u32 = 256;
+pub const VK_SHADER_UNUSED_KHR: u32 = 0xFFFF_FFFF; // (~0)
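Usage sketch (illustration, not part of the patched sources): once an impl_pnext_out! invocation such as the hypothetical impl_pnext_out!(VkPhysicalDeviceProperties2, VkPhysicalDeviceRayTracingPropertiesKHR) exists, chain() walks to the tail of the current pNext list before appending, so several extension structs can be hooked in one after another. Both struct names below are assumptions.

// Sketch; both chain() impls are assumed to be generated elsewhere in the crate.
fn chain_properties(
    props2: &mut VkPhysicalDeviceProperties2,
    rt_props: &mut VkPhysicalDeviceRayTracingPropertiesKHR,
    as_props: &mut VkPhysicalDeviceAccelerationStructurePropertiesKHR,
) {
    props2.chain(rt_props); // appended at the list tail
    props2.chain(as_props); // walks past rt_props, then appends
}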
diff --git a/vulkan-sys/src/types/core.rs b/vulkan-sys/src/types/core.rs
new file mode 100644
index 0000000..575f4cb
--- /dev/null
+++ b/vulkan-sys/src/types/core.rs
@@ -0,0 +1,124 @@
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkBuffer(u64);
+SetupU64Conv!(VkBuffer);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkBufferView(u64);
+SetupU64Conv!(VkBufferView);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkCommandBuffer(usize);
+SetupUSizeConv!(VkCommandBuffer);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkCommandPool(u64);
+SetupU64Conv!(VkCommandPool);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDescriptorPool(u64);
+SetupU64Conv!(VkDescriptorPool);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDescriptorSet(u64);
+SetupU64Conv!(VkDescriptorSet);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDescriptorSetLayout(u64);
+SetupU64Conv!(VkDescriptorSetLayout);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDevice(usize);
+SetupUSizeConv!(VkDevice);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDeviceMemory(u64);
+SetupU64Conv!(VkDeviceMemory);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkEvent(u64);
+SetupU64Conv!(VkEvent);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkFence(u64);
+SetupU64Conv!(VkFence);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkFramebuffer(u64);
+SetupU64Conv!(VkFramebuffer);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkImage(u64);
+SetupU64Conv!(VkImage);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkImageView(u64);
+SetupU64Conv!(VkImageView);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkInstance(usize);
+SetupUSizeConv!(VkInstance);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkPhysicalDevice(usize);
+SetupUSizeConv!(VkPhysicalDevice);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkPipeline(u64);
+SetupU64Conv!(VkPipeline);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkPipelineCache(u64);
+SetupU64Conv!(VkPipelineCache);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkPipelineLayout(u64);
+SetupU64Conv!(VkPipelineLayout);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkQueryPool(u64);
+SetupU64Conv!(VkQueryPool);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkQueue(usize);
+SetupUSizeConv!(VkQueue);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkRenderPass(u64);
+SetupU64Conv!(VkRenderPass);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkSampler(u64);
+SetupU64Conv!(VkSampler);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkSemaphore(u64);
+SetupU64Conv!(VkSemaphore);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkShaderModule(u64);
+SetupU64Conv!(VkShaderModule);
diff --git a/vulkan-sys/src/types/ext.rs b/vulkan-sys/src/types/ext.rs
new file mode 100644
index 0000000..a8de8ed
--- /dev/null
+++ b/vulkan-sys/src/types/ext.rs
@@ -0,0 +1,9 @@
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDebugReportCallbackEXT(u64);
+SetupU64Conv!(VkDebugReportCallbackEXT);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDebugUtilsMessengerEXT(u64);
+SetupU64Conv!(VkDebugUtilsMessengerEXT);
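Usage sketch (illustration, not part of the patched sources): every handle is a #[repr(C)] newtype over the raw integer, so conversions stay explicit; NULL_HANDLE, raw() and the From impl come from the Setup*Conv macros defined in types/macros.rs below.

fn handle_round_trip() {
    let null = VkBuffer::NULL_HANDLE;
    assert_eq!(null.raw(), 0);

    // re-wrap a raw handle received over FFI
    let buffer = VkBuffer::from(0xdead_beef_u64);
    assert_eq!(buffer.raw(), 0xdead_beef);
}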
diff --git a/vulkan-sys/src/types/khr.rs b/vulkan-sys/src/types/khr.rs
new file mode 100644
index 0000000..3ffc1cb
--- /dev/null
+++ b/vulkan-sys/src/types/khr.rs
@@ -0,0 +1,34 @@
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDescriptorUpdateTemplateKHR(u64);
+SetupU64Conv!(VkDescriptorUpdateTemplateKHR);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDisplayKHR(u64);
+SetupU64Conv!(VkDisplayKHR);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDisplayModeKHR(u64);
+SetupU64Conv!(VkDisplayModeKHR);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkSurfaceKHR(u64);
+SetupU64Conv!(VkSurfaceKHR);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkSwapchainKHR(u64);
+SetupU64Conv!(VkSwapchainKHR);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkAccelerationStructureKHR(u64);
+SetupU64Conv!(VkAccelerationStructureKHR);
+
+#[repr(C)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct VkDeferredOperationKHR(u64);
+SetupU64Conv!(VkDeferredOperationKHR);
diff --git a/vulkan-sys/src/types/macros.rs b/vulkan-sys/src/types/macros.rs
new file mode 100644
index 0000000..ae0cb79
--- /dev/null
+++ b/vulkan-sys/src/types/macros.rs
@@ -0,0 +1,35 @@
+macro_rules! SetupU64Conv {
+    ($name: ident) => {
+        impl $name {
+            pub const NULL_HANDLE: $name = $name(0);
+
+            pub fn raw(&self) -> u64 {
+                self.0
+            }
+        }
+
+        impl From<u64> for $name {
+            fn from(v: u64) -> Self {
+                $name(v)
+            }
+        }
+    };
+}
+
+macro_rules! SetupUSizeConv {
+    ($name: ident) => {
+        impl $name {
+            pub const NULL_HANDLE: $name = $name(0);
+
+            pub fn raw(&self) -> usize {
+                self.0
+            }
+        }
+
+        impl From<usize> for $name {
+            fn from(v: usize) -> Self {
+                $name(v)
+            }
+        }
+    };
+}
diff --git a/vulkan-sys/src/types/mod.rs b/vulkan-sys/src/types/mod.rs
new file mode 100644
index 0000000..36e4668
--- /dev/null
+++ b/vulkan-sys/src/types/mod.rs
@@ -0,0 +1,12 @@
+#[macro_use]
+mod macros;
+
+pub mod constants;
+pub mod core;
+pub mod ext;
+pub mod khr;
+pub mod nv;
+pub mod types;
+pub mod voidfunction;
+
+pub mod prelude;
diff --git a/vulkan-sys/src/types/nv.rs b/vulkan-sys/src/types/nv.rs
new file mode 100644
index 0000000..e80bda7
--- /dev/null
+++ b/vulkan-sys/src/types/nv.rs
@@ -0,0 +1,3 @@
+use super::khr::VkAccelerationStructureKHR;
+
+pub type VkAccelerationStructureNV = VkAccelerationStructureKHR;
diff --git a/vulkan-sys/src/types/prelude.rs b/vulkan-sys/src/types/prelude.rs
new file mode 100644
index 0000000..ea4f32a
--- /dev/null
+++ b/vulkan-sys/src/types/prelude.rs
@@ -0,0 +1,7 @@
+pub use super::constants::*;
+pub use super::core::*;
+pub use super::ext::*;
+pub use super::khr::*;
+pub use super::nv::*;
+pub use super::types::*;
+pub use super::voidfunction::*;
diff --git a/vulkan-sys/src/types/types.rs b/vulkan-sys/src/types/types.rs
new file mode 100644
index 0000000..adc9a22
--- /dev/null
+++ b/vulkan-sys/src/types/types.rs
@@ -0,0 +1,3 @@
+pub type VkDeviceSize = u64;
+pub type VkSampleMask = u32;
+pub type VkDeviceAddress = u64;
diff --git a/vulkan-sys/src/types/voidfunction.rs b/vulkan-sys/src/types/voidfunction.rs
new file mode 100644
index 0000000..ed36b86
--- /dev/null
+++ b/vulkan-sys/src/types/voidfunction.rs
@@ -0,0 +1,4 @@
+pub type PFN_vkVoidFunction = extern "system" fn() -> ();
+
+// create dummy
+pub extern "system" fn vkVoidFunction() {}
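Usage sketch (illustration, not part of the patched sources): the dummy vkVoidFunction gives a function table a harmless non-null default when a loader lookup fails, which keeps the table type free of Option.

fn resolve(looked_up: Option<PFN_vkVoidFunction>) -> PFN_vkVoidFunction {
    // fall back to the dummy instead of storing a null pointer
    looked_up.unwrap_or(vkVoidFunction)
}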