pub use super::memory::Memory; use anyhow::Result; use vma_rs::prelude::*; use crate::prelude::*; use crate::sampler_manager::SamplerManager; use std::cmp::min; use std::fmt; use std::mem::{size_of, MaybeUninit}; use std::ptr; use std::sync::{Arc, Mutex}; use std::time::Duration; use core::ffi::c_void; Extensions!(DeviceExtensions, { (amd_rasterization_order, "VK_AMD_rasterization_order"), (maintenance3, "VK_KHR_maintenance3"), (descriptor_indexing, "VK_EXT_descriptor_indexing"), (memory_requirements2, "VK_KHR_get_memory_requirements2"), (swapchain, "VK_KHR_swapchain"), (memory_budget, "VK_EXT_memory_budget"), (memory_priority, "VK_EXT_memory_priority"), (debug_marker, "VK_EXT_debug_marker"), (ray_tracing_pipeline, "VK_KHR_ray_tracing_pipeline"), (buffer_device_address, "VK_KHR_buffer_device_address"), (deferred_host_operations, "VK_KHR_deferred_host_operations"), (pipeline_library, "VK_KHR_pipeline_library"), (acceleration_structure, "VK_KHR_acceleration_structure"), (spirv_1_4, "VK_KHR_spirv_1_4"), (shader_float_controls, "VK_KHR_shader_float_controls"), }); pub use vulkan_sys::prelude::VkPhysicalDeviceFeatures as DeviceFeatures; pub struct MemoryHeap { pub usage: VkDeviceSize, pub budget: VkDeviceSize, } pub struct Device { device_functions: DeviceFunctions, device_wsi_functions: DeviceWSIFunctions, maintenance3_functions: Maintenance3Functions, _acceleration_structure_functions: AccelerationStructureFunctions, _ray_tracing_pipeline_functions: RayTracingPipelineFunctions, deferred_operation_functions: DeferredOperationsFunctions, enabled_extensions: DeviceExtensions, physical_device: Arc, device: VkDevice, memory_allocator: Allocator, sampler_manager: Mutex, } impl Device { pub fn preinitialized( device: VkDevice, proc_addr: PFN_vkGetDeviceProcAddr, physical_device: Arc, extensions: &[VkString], ) -> Result> { let device_functions = DeviceFunctions::load(|name| { proc_addr(device, name.as_ptr()) as *const std::ffi::c_void }); let device_wsi_functions = DeviceWSIFunctions::load(|name| { proc_addr(device, name.as_ptr()) as *const std::ffi::c_void }); let maintenance3_functions = Maintenance3Functions::load(|name| { proc_addr(device, name.as_ptr()) as *const std::ffi::c_void }); let ray_tracing_functions = RayTracingPipelineFunctions::load(|name| { proc_addr(device, name.as_ptr()) as *const std::ffi::c_void }); let acceleration_structure_functions = AccelerationStructureFunctions::load(|name| { proc_addr(device, name.as_ptr()) as *const std::ffi::c_void }); let deferred_operation_functions = DeferredOperationsFunctions::load(|name| { proc_addr(device, name.as_ptr()) as *const std::ffi::c_void }); let vma_fns = VmaVulkanFunctions { vkGetPhysicalDeviceProperties: physical_device .instance() .instance_functions .vkGetPhysicalDeviceProperties, vkGetPhysicalDeviceMemoryProperties: physical_device .instance() .instance_functions .vkGetPhysicalDeviceMemoryProperties, vkAllocateMemory: device_functions.vkAllocateMemory, vkFreeMemory: device_functions.vkFreeMemory, vkMapMemory: device_functions.vkMapMemory, vkUnmapMemory: device_functions.vkUnmapMemory, vkFlushMappedMemoryRanges: device_functions.vkFlushMappedMemoryRanges, vkInvalidateMappedMemoryRanges: device_functions.vkInvalidateMappedMemoryRanges, vkBindBufferMemory: device_functions.vkBindBufferMemory, vkBindImageMemory: device_functions.vkBindImageMemory, vkGetBufferMemoryRequirements: device_functions.vkGetBufferMemoryRequirements, vkGetImageMemoryRequirements: device_functions.vkGetImageMemoryRequirements, vkCreateBuffer: device_functions.vkCreateBuffer, vkDestroyBuffer: device_functions.vkDestroyBuffer, vkCreateImage: device_functions.vkCreateImage, vkDestroyImage: device_functions.vkDestroyImage, vkCmdCopyBuffer: device_functions.vkCmdCopyBuffer, vkGetBufferMemoryRequirements2KHR: device_functions.vkGetBufferMemoryRequirements2, vkGetImageMemoryRequirements2KHR: device_functions.vkGetImageMemoryRequirements2, vkBindBufferMemory2KHR: device_functions.vkBindBufferMemory2, vkBindImageMemory2KHR: device_functions.vkBindImageMemory2, vkGetPhysicalDeviceMemoryProperties2KHR: physical_device .instance() .instance_functions .vkGetPhysicalDeviceMemoryProperties2, }; Self::verify_vma_vk_functions(&vma_fns)?; let memory_allocator = Allocator::builder() .set_vulkan_functions(vma_fns) .set_flags(VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT as u32) .build( physical_device.instance().vk_handle(), device, physical_device.vk_handle(), physical_device.instance().api_version(), )?; Ok(Arc::new(Device { memory_allocator, device_functions, device_wsi_functions, maintenance3_functions, deferred_operation_functions, _acceleration_structure_functions: acceleration_structure_functions, _ray_tracing_pipeline_functions: ray_tracing_functions, enabled_extensions: DeviceExtensions::from_list(extensions), physical_device, device, sampler_manager: SamplerManager::new(), })) } pub fn new( physical_device: Arc, mut extensions: DeviceExtensions, queue_infos: &[VkDeviceQueueCreateInfo], requested_device_features: DeviceFeatures, ) -> Result> { // buffer device address is required in the current library implementation extensions.buffer_device_address = true; let device_extensions = physical_device.extensions(); let mut checked_extensions = Vec::new(); let extension_list = extensions.as_list(); for extension in extension_list { for ext_prop in device_extensions { if *ext_prop == extension { checked_extensions.push(extension); break; } } } let names = VkNames::new(checked_extensions.as_slice()); println!("\nenabled device extensions ({}):", names.len()); for extension_name in names.iter() { println!("\t- {:?}", extension_name); } println!(); if !requested_device_features.is_subset_of(&physical_device.features()) { return Err(anyhow::Error::msg( "Requested features are not supported by the device", )); } let mut device_ci = VkDeviceCreateInfo::new( VK_DEVICE_CREATE_NULL_BIT, queue_infos, &names, &requested_device_features, ); let enabled_extensions = DeviceExtensions::from_list(&checked_extensions); if let Err(missing_extensions) = extensions.check_availability(&enabled_extensions) { for m in missing_extensions { println!("{}", m); } } if enabled_extensions.descriptor_indexing { device_ci.chain(physical_device.descriptor_indexing_features()); } // only required for khr ray tracing // ----- if enabled_extensions.buffer_device_address { device_ci.chain(physical_device.buffer_device_address_features()); } if enabled_extensions.acceleration_structure { device_ci.chain(physical_device.acceleration_structure_features()); } if enabled_extensions.ray_tracing_pipeline { device_ci.chain(physical_device.ray_tracing_features()); } // ----- let instance = physical_device.instance(); let device = instance.create_device(physical_device.vk_handle(), &device_ci)?; let device_functions = DeviceFunctions::new(&instance.instance_functions, device); let device_wsi_functions = DeviceWSIFunctions::new(&instance.instance_functions, device); let maintenance3_functions = Maintenance3Functions::new(&instance.instance_functions, device); let ray_tracing_functions = RayTracingPipelineFunctions::new(&instance.instance_functions, device); let acceleration_structure_functions = AccelerationStructureFunctions::new(&instance.instance_functions, device); let deferred_operation_functions = DeferredOperationsFunctions::new(&instance.instance_functions, device); let memory_allocator = Allocator::builder() .set_vulkan_functions(VmaVulkanFunctions { vkGetPhysicalDeviceProperties: physical_device .instance() .instance_functions .vkGetPhysicalDeviceProperties, vkGetPhysicalDeviceMemoryProperties: physical_device .instance() .instance_functions .vkGetPhysicalDeviceMemoryProperties, vkAllocateMemory: device_functions.vkAllocateMemory, vkFreeMemory: device_functions.vkFreeMemory, vkMapMemory: device_functions.vkMapMemory, vkUnmapMemory: device_functions.vkUnmapMemory, vkFlushMappedMemoryRanges: device_functions.vkFlushMappedMemoryRanges, vkInvalidateMappedMemoryRanges: device_functions.vkInvalidateMappedMemoryRanges, vkBindBufferMemory: device_functions.vkBindBufferMemory, vkBindImageMemory: device_functions.vkBindImageMemory, vkGetBufferMemoryRequirements: device_functions.vkGetBufferMemoryRequirements, vkGetImageMemoryRequirements: device_functions.vkGetImageMemoryRequirements, vkCreateBuffer: device_functions.vkCreateBuffer, vkDestroyBuffer: device_functions.vkDestroyBuffer, vkCreateImage: device_functions.vkCreateImage, vkDestroyImage: device_functions.vkDestroyImage, vkCmdCopyBuffer: device_functions.vkCmdCopyBuffer, vkGetBufferMemoryRequirements2KHR: device_functions.vkGetBufferMemoryRequirements2, vkGetImageMemoryRequirements2KHR: device_functions.vkGetImageMemoryRequirements2, vkBindBufferMemory2KHR: device_functions.vkBindBufferMemory2, vkBindImageMemory2KHR: device_functions.vkBindImageMemory2, vkGetPhysicalDeviceMemoryProperties2KHR: physical_device .instance() .instance_functions .vkGetPhysicalDeviceMemoryProperties2, }) .set_flags(VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT as u32) .build( physical_device.instance().vk_handle(), device, physical_device.vk_handle(), physical_device.instance().api_version(), )?; Ok(Arc::new(Device { memory_allocator, device_functions, device_wsi_functions, maintenance3_functions, deferred_operation_functions, _acceleration_structure_functions: acceleration_structure_functions, _ray_tracing_pipeline_functions: ray_tracing_functions, enabled_extensions, physical_device, device, sampler_manager: SamplerManager::new(), })) } fn verify_vma_vk_functions(fns: &VmaVulkanFunctions) -> Result<()> { macro_rules! test_vma_fn { ($([$var:ident: $pfn:ident],)*) => { $( if unsafe { std::mem::transmute::<$pfn, *const c_void>(fns.$var) } == ptr::null() { return Err(anyhow::anyhow!(format!("vma function {} is null ptr", stringify!($var)))); } )* }; } test_vma_fn!( [vkGetPhysicalDeviceProperties: PFN_vkGetPhysicalDeviceProperties], [vkGetPhysicalDeviceMemoryProperties: PFN_vkGetPhysicalDeviceMemoryProperties], [vkAllocateMemory: PFN_vkAllocateMemory], [vkFreeMemory: PFN_vkFreeMemory], [vkMapMemory: PFN_vkMapMemory], [vkUnmapMemory: PFN_vkUnmapMemory], [vkFlushMappedMemoryRanges: PFN_vkFlushMappedMemoryRanges], [vkInvalidateMappedMemoryRanges: PFN_vkInvalidateMappedMemoryRanges], [vkBindBufferMemory: PFN_vkBindBufferMemory], [vkBindImageMemory: PFN_vkBindImageMemory], [vkGetBufferMemoryRequirements: PFN_vkGetBufferMemoryRequirements], [vkGetImageMemoryRequirements: PFN_vkGetImageMemoryRequirements], [vkCreateBuffer: PFN_vkCreateBuffer], [vkDestroyBuffer: PFN_vkDestroyBuffer], [vkCreateImage: PFN_vkCreateImage], [vkDestroyImage: PFN_vkDestroyImage], [vkCmdCopyBuffer: PFN_vkCmdCopyBuffer], [vkGetBufferMemoryRequirements2KHR: PFN_vkGetBufferMemoryRequirements2], [vkGetImageMemoryRequirements2KHR: PFN_vkGetImageMemoryRequirements2], [vkBindBufferMemory2KHR: PFN_vkBindBufferMemory2], [vkBindImageMemory2KHR: PFN_vkBindImageMemory2], [ vkGetPhysicalDeviceMemoryProperties2KHR: PFN_vkGetPhysicalDeviceMemoryProperties2KHR ], ); Ok(()) } pub fn get_queue( self: &Arc, queue_family_index: u32, queue_index: u32, ) -> Arc> { Queue::new( self.clone(), self.get_device_queue(queue_family_index, queue_index), queue_family_index, queue_index, ) } pub fn physical_device(&self) -> &Arc { &self.physical_device } pub fn wait_for_fences( &self, fences: &[&Arc], wait_all: bool, timeout: Duration, ) -> Result<()> { let vkfences: Vec = fences.iter().map(|fence| fence.vk_handle()).collect(); self.device_wait_for_fences(vkfences.as_slice(), wait_all, timeout.as_nanos() as u64)?; Ok(()) } pub fn enabled_extensions(&self) -> &DeviceExtensions { &self.enabled_extensions } pub fn memory_budgets(&self) -> Vec { let phys_dev = self.physical_device(); let (budget, count) = phys_dev .instance() .physical_device_memory_budget(phys_dev.vk_handle()); let mut heaps = Vec::with_capacity(count as usize); let usages = budget.heap_usages(count); let budgets = budget.heap_budgets(count); for i in 0..count { heaps.push(MemoryHeap { usage: usages[i as usize], budget: budgets[i as usize], }) } heaps } pub fn max_supported_sample_count( &self, requested_sample_count: VkSampleCountFlags, ) -> VkSampleCountFlags { let dev_props = self.physical_device.properties(); let phys_counts = min( dev_props.limits.framebufferColorSampleCounts, dev_props.limits.framebufferDepthSampleCounts, ); let counts = min(phys_counts, requested_sample_count.into()); if (counts & VK_SAMPLE_COUNT_64_BIT) != 0 { VK_SAMPLE_COUNT_64_BIT } else if (counts & VK_SAMPLE_COUNT_32_BIT) != 0 { VK_SAMPLE_COUNT_32_BIT } else if (counts & VK_SAMPLE_COUNT_16_BIT) != 0 { VK_SAMPLE_COUNT_16_BIT } else if (counts & VK_SAMPLE_COUNT_8_BIT) != 0 { VK_SAMPLE_COUNT_8_BIT } else if (counts & VK_SAMPLE_COUNT_4_BIT) != 0 { VK_SAMPLE_COUNT_4_BIT } else if (counts & VK_SAMPLE_COUNT_2_BIT) != 0 { VK_SAMPLE_COUNT_2_BIT } else { VK_SAMPLE_COUNT_1_BIT } } pub fn memory_statistics(&self) -> VmaStats { self.memory_allocator.statistics() } } impl_vk_handle!(Device, VkDevice, device); impl fmt::Debug for Device { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "Device {{ device: {:#?}, physical_device: {:#?} }}", self.device, self.physical_device ) } } impl Drop for Device { fn drop(&mut self) { unsafe { self.sampler_manager .lock() .expect("failed to lock sampler manager at drop of device") .clear(self); } self.memory_allocator.destroy(); self.destroy_device(); } } impl Device { #[inline] pub fn device_proc_addr(&self, name: VkString) -> PFN_vkVoidFunction { self.physical_device .instance() .get_device_proc_addr(self.device, name) } #[inline] fn destroy_device(&self) { unsafe { self.device_functions .vkDestroyDevice(self.device, ptr::null()); } } #[inline] pub fn wait_idle(&self) -> Result<()> { let result = unsafe { self.device_functions.vkDeviceWaitIdle(self.device) }; match result { VK_SUCCESS => Ok(()), _ => Err(anyhow::Error::new(result)), } } #[inline] fn get_device_queue(&self, queue_family_index: u32, queue_index: u32) -> VkQueue { unsafe { let mut queue = MaybeUninit::uninit(); self.device_functions.vkGetDeviceQueue( self.device, queue_family_index, queue_index, queue.as_mut_ptr(), ); queue.assume_init() } } #[inline] fn device_wait_for_fences( &self, fences: &[VkFence], wait_all: impl Into, timeout: u64, ) -> Result<()> { unsafe { let result = self.device_functions.vkWaitForFences( self.device, fences.len() as u32, fences.as_ptr(), wait_all.into(), timeout, ); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn query_pool_results( &self, query_pool: VkQueryPool, first_query: u32, query_count: u32, data: &mut T, stride: VkDeviceSize, flags: impl Into, ) -> Result<()> { unsafe { let result = self.device_functions.vkGetQueryPoolResults( self.device, query_pool, first_query, query_count, size_of::(), data as *mut T as *mut c_void, stride, flags.into(), ); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn queue_submit( &self, queue: VkQueue, submits: &[VkSubmitInfo], fence: VkFence, ) -> Result<()> { unsafe { let result = self.device_functions.vkQueueSubmit( queue, submits.len() as u32, submits.as_ptr(), fence, ); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn queue_wait_idle(&self, queue: VkQueue) -> Result<()> { unsafe { let result = self.device_functions.vkQueueWaitIdle(queue); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn create_buffer(&self, create_info: &VkBufferCreateInfo) -> Result { unsafe { let mut buffer = MaybeUninit::uninit(); let result = self.device_functions.vkCreateBuffer( self.device, create_info, ptr::null(), buffer.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(buffer.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_buffer(&self, buffer: VkBuffer) { unsafe { self.device_functions .vkDestroyBuffer(self.device, buffer, ptr::null()) }; } #[inline] pub fn buffer_memory_requirements(&self, buffer: VkBuffer) -> VkMemoryRequirements { unsafe { let mut memory_requirements = MaybeUninit::uninit(); self.device_functions.vkGetBufferMemoryRequirements( self.device, buffer, memory_requirements.as_mut_ptr(), ); memory_requirements.assume_init() } } #[inline] pub fn get_buffer_device_address(&self, buffer: VkBuffer) -> Address { Address::from(unsafe { self.device_functions .vkGetBufferDeviceAddress(self.device, &VkBufferDeviceAddressInfo::new(buffer)) }) } pub(crate) fn allocator(&self) -> &Allocator { &self.memory_allocator } #[inline] pub fn allocate_memory(&self, allocate_info: &VkMemoryAllocateInfo) -> Result { unsafe { let mut memory = MaybeUninit::uninit(); let result = self.device_functions.vkAllocateMemory( self.device, allocate_info, ptr::null(), memory.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(memory.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn free_memory(&self, memory: VkDeviceMemory) { unsafe { self.device_functions .vkFreeMemory(self.device, memory, ptr::null()) }; } #[inline] pub fn unmap_memory(&self, memory: VkDeviceMemory) { unsafe { self.device_functions.vkUnmapMemory(self.device, memory) }; } #[inline] pub fn bind_buffer_memory( &self, buffer: VkBuffer, memory: VkDeviceMemory, offset: VkDeviceSize, ) -> Result<()> { unsafe { let result = self.device_functions .vkBindBufferMemory(self.device, buffer, memory, offset); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn create_render_pass(&self, create_info: &VkRenderPassCreateInfo) -> Result { unsafe { let mut render_pass = MaybeUninit::uninit(); let result = self.device_functions.vkCreateRenderPass( self.device, create_info, ptr::null(), render_pass.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(render_pass.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_render_pass(&self, render_pass: VkRenderPass) { unsafe { self.device_functions .vkDestroyRenderPass(self.device, render_pass, ptr::null()) }; } #[inline] pub fn create_image(&self, create_info: &VkImageCreateInfo) -> Result { unsafe { let mut image = MaybeUninit::uninit(); let result = self.device_functions.vkCreateImage( self.device, create_info, ptr::null(), image.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(image.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_image(&self, image: VkImage) { unsafe { self.device_functions .vkDestroyImage(self.device, image, ptr::null()) }; } #[inline] pub fn image_subresource_layout( &self, image: VkImage, subresource: &VkImageSubresource, ) -> VkSubresourceLayout { unsafe { let mut subresource_layout = MaybeUninit::uninit(); self.device_functions.vkGetImageSubresourceLayout( self.device, image, subresource, subresource_layout.as_mut_ptr(), ); subresource_layout.assume_init() } } #[inline] pub fn create_image_view(&self, create_info: &VkImageViewCreateInfo) -> Result { unsafe { let mut image_view = MaybeUninit::uninit(); let result = self.device_functions.vkCreateImageView( self.device, create_info, ptr::null(), image_view.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(image_view.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_image_view(&self, image_view: VkImageView) { unsafe { self.device_functions .vkDestroyImageView(self.device, image_view, ptr::null()) }; } #[inline] pub fn image_memory_requirements(&self, image: VkImage) -> VkMemoryRequirements { unsafe { let mut memory_requirements = MaybeUninit::uninit(); self.device_functions.vkGetImageMemoryRequirements( self.device, image, memory_requirements.as_mut_ptr(), ); memory_requirements.assume_init() } } #[inline] pub fn image_sparse_memory_requirements( &self, image: VkImage, ) -> Vec { let mut count: u32 = 0; unsafe { self.device_functions.vkGetImageSparseMemoryRequirements( self.device, image, &mut count, ptr::null_mut(), ) }; let mut sparse_memory_requirements = Vec::with_capacity(count as usize); unsafe { sparse_memory_requirements.set_len(count as usize) }; unsafe { self.device_functions.vkGetImageSparseMemoryRequirements( self.device, image, &mut count, sparse_memory_requirements.as_mut_ptr(), ) }; sparse_memory_requirements } #[inline] pub fn bind_image_memory( &self, image: VkImage, memory: VkDeviceMemory, offset: VkDeviceSize, ) -> Result<()> { unsafe { let result = self.device_functions .vkBindImageMemory(self.device, image, memory, offset); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub(crate) fn create_sampler_from_manager( &self, create_info: VkSamplerCreateInfo, ) -> Result> { self.sampler_manager .lock() .unwrap() .create_sampler(create_info, self) } #[inline] pub fn create_sampler(&self, create_info: &VkSamplerCreateInfo) -> Result { unsafe { let mut sampler = MaybeUninit::uninit(); let result = self.device_functions.vkCreateSampler( self.device, create_info, ptr::null(), sampler.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(sampler.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_sampler(&self, sampler: VkSampler) { unsafe { self.device_functions .vkDestroySampler(self.device, sampler, ptr::null()) }; } #[inline] pub fn create_buffer_view(&self, create_info: &VkBufferViewCreateInfo) -> Result { unsafe { let mut buffer_view = MaybeUninit::uninit(); let result = self.device_functions.vkCreateBufferView( self.device, create_info, ptr::null(), buffer_view.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(buffer_view.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_buffer_view(&self, buffer_view: VkBufferView) { unsafe { self.device_functions .vkDestroyBufferView(self.device, buffer_view, ptr::null()) }; } #[inline] pub fn create_fence(&self, create_info: &VkFenceCreateInfo) -> Result { unsafe { let mut fence = MaybeUninit::uninit(); let result = self.device_functions.vkCreateFence( self.device, create_info, ptr::null(), fence.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(fence.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_fence(&self, fence: VkFence) { unsafe { self.device_functions .vkDestroyFence(self.device, fence, ptr::null()) }; } #[inline] pub fn reset_fences(&self, fences: &[VkFence]) -> Result<()> { unsafe { let result = self.device_functions.vkResetFences( self.device, fences.len() as u32, fences.as_ptr(), ); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn create_semaphore(&self, create_info: &VkSemaphoreCreateInfo) -> Result { unsafe { let mut semaphore = MaybeUninit::uninit(); let result = self.device_functions.vkCreateSemaphore( self.device, create_info, ptr::null(), semaphore.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(semaphore.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_semaphore(&self, semaphore: VkSemaphore) { unsafe { self.device_functions .vkDestroySemaphore(self.device, semaphore, ptr::null()) }; } #[inline] pub fn create_shader_module( &self, create_info: &VkShaderModuleCreateInfo, ) -> Result { unsafe { let mut shader_module = MaybeUninit::uninit(); let result = self.device_functions.vkCreateShaderModule( self.device, create_info, ptr::null(), shader_module.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(shader_module.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_shader_module(&self, shader_module: VkShaderModule) { unsafe { self.device_functions .vkDestroyShaderModule(self.device, shader_module, ptr::null()) }; } #[inline] pub fn create_descriptor_pool( &self, create_info: &VkDescriptorPoolCreateInfo, ) -> Result { unsafe { let mut descriptor_pool = MaybeUninit::uninit(); let result = self.device_functions.vkCreateDescriptorPool( self.device, create_info, ptr::null(), descriptor_pool.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(descriptor_pool.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_descriptor_pool(&self, descriptor_pool: VkDescriptorPool) { unsafe { self.device_functions .vkDestroyDescriptorPool(self.device, descriptor_pool, ptr::null()) }; } #[inline] pub fn reset_descriptor_pool( &self, descriptor_pool: VkDescriptorPool, flags: T, ) -> Result<()> where T: Into, { unsafe { let result = self.device_functions.vkResetDescriptorPool( self.device, descriptor_pool, flags.into(), ); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn create_descriptor_set_layout( &self, create_info: &VkDescriptorSetLayoutCreateInfo, ) -> Result { unsafe { let mut descriptor_set_layout = MaybeUninit::uninit(); let result = self.device_functions.vkCreateDescriptorSetLayout( self.device, create_info, ptr::null(), descriptor_set_layout.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(descriptor_set_layout.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_descriptor_set_layout(&self, descriptor_set_layout: VkDescriptorSetLayout) { unsafe { self.device_functions.vkDestroyDescriptorSetLayout( self.device, descriptor_set_layout, ptr::null(), ) }; } #[inline] pub fn allocate_descriptor_sets<'a>( &self, allocate_info: &VkDescriptorSetAllocateInfo<'a>, ) -> Result> { unsafe { let count = allocate_info.descriptorSetCount as usize; let mut descriptor_sets = vec![VkDescriptorSet::NULL_HANDLE; count]; let result = self.device_functions.vkAllocateDescriptorSets( self.device, allocate_info, descriptor_sets.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(descriptor_sets) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn free_descriptor_sets( &self, descriptor_pool: VkDescriptorPool, descriptor_sets: &[VkDescriptorSet], ) -> Result<()> { unsafe { let result = self.device_functions.vkFreeDescriptorSets( self.device, descriptor_pool, descriptor_sets.len() as u32, descriptor_sets.as_ptr(), ); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn update_descriptor_sets( &self, writes: &[VkWriteDescriptorSet], copies: &[VkCopyDescriptorSet], ) { unsafe { self.device_functions.vkUpdateDescriptorSets( self.device, writes.len() as u32, writes.as_ptr(), copies.len() as u32, copies.as_ptr(), ); } } #[inline] pub fn create_event(&self, create_info: &VkEventCreateInfo) -> Result { unsafe { let mut event = MaybeUninit::uninit(); let result = self.device_functions.vkCreateEvent( self.device, create_info, ptr::null(), event.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(event.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_event(&self, event: VkEvent) { unsafe { self.device_functions .vkDestroyEvent(self.device, event, ptr::null()) }; } #[inline] pub fn event_status(&self, event: VkEvent) -> Result<()> { unsafe { let result = self.device_functions.vkGetEventStatus(self.device, event); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn set_event(&self, event: VkEvent) -> Result<()> { unsafe { let result = self.device_functions.vkSetEvent(self.device, event); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn reset_event(&self, event: VkEvent) -> Result<()> { unsafe { let result = self.device_functions.vkResetEvent(self.device, event); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn create_command_pool( &self, create_info: &VkCommandPoolCreateInfo, ) -> Result { unsafe { let mut command_pool = MaybeUninit::uninit(); let result = self.device_functions.vkCreateCommandPool( self.device, create_info, ptr::null(), command_pool.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(command_pool.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_command_pool(&self, command_pool: VkCommandPool) { unsafe { self.device_functions .vkDestroyCommandPool(self.device, command_pool, ptr::null()) }; } #[inline] pub fn reset_command_pool( &self, command_pool: VkCommandPool, flags: impl Into, ) -> Result<()> { unsafe { let result = self.device_functions .vkResetCommandPool(self.device, command_pool, flags.into()); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn trim_command_pool(&self, command_pool: VkCommandPool, flags: T) where T: Into, { unsafe { self.device_functions .vkTrimCommandPool(self.device, command_pool, flags.into()); } } #[inline] pub fn create_framebuffer( &self, create_info: &VkFramebufferCreateInfo, ) -> Result { unsafe { let mut framebuffer = MaybeUninit::uninit(); let result = self.device_functions.vkCreateFramebuffer( self.device, create_info, ptr::null(), framebuffer.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(framebuffer.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_framebuffer(&self, framebuffer: VkFramebuffer) { unsafe { self.device_functions .vkDestroyFramebuffer(self.device, framebuffer, ptr::null()) }; } #[inline] pub fn allocate_command_buffers( &self, allocate_info: &VkCommandBufferAllocateInfo, ) -> Result> { unsafe { let count = allocate_info.commandBufferCount as usize; let mut command_buffers = Vec::with_capacity(count); command_buffers.set_len(count); let result = self.device_functions.vkAllocateCommandBuffers( self.device, allocate_info, command_buffers.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(command_buffers) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn free_command_buffers( &self, command_pool: VkCommandPool, command_buffers: &[VkCommandBuffer], ) { unsafe { self.device_functions.vkFreeCommandBuffers( self.device, command_pool, command_buffers.len() as u32, command_buffers.as_ptr(), ) } } #[inline] pub fn create_query_pool(&self, create_info: &VkQueryPoolCreateInfo) -> Result { unsafe { let mut query_pool = MaybeUninit::uninit(); let result = self.device_functions.vkCreateQueryPool( self.device, create_info, ptr::null(), query_pool.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(query_pool.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_query_pool(&self, query_pool: VkQueryPool) { unsafe { self.device_functions .vkDestroyQueryPool(self.device, query_pool, ptr::null()) }; } #[inline] pub fn create_pipeline_cache( &self, create_info: &VkPipelineCacheCreateInfo, ) -> Result { unsafe { let mut pipeline_cache = MaybeUninit::uninit(); let result = self.device_functions.vkCreatePipelineCache( self.device, create_info, ptr::null(), pipeline_cache.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(pipeline_cache.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_pipeline_cache(&self, pipeline_cache: VkPipelineCache) { unsafe { self.device_functions .vkDestroyPipelineCache(self.device, pipeline_cache, ptr::null()) }; } #[inline] pub fn pipeline_cache_data(&self, pipeline_cache: VkPipelineCache) -> Result { let mut count = 0; let result = unsafe { self.device_functions.vkGetPipelineCacheData( self.device, pipeline_cache, &mut count, ptr::null_mut(), ) }; if result != VK_SUCCESS || count != size_of::() { return Err(anyhow::Error::new(result)); } unsafe { let mut data = MaybeUninit::::uninit(); let result = self.device_functions.vkGetPipelineCacheData( self.device, pipeline_cache, &mut count, data.as_mut_ptr() as *mut c_void, ); if result == VK_SUCCESS { Ok(data.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn merge_pipeline_cache( &self, sources: &[VkPipelineCache], destination: VkPipelineCache, ) -> Result<()> { unsafe { let result = self.device_functions.vkMergePipelineCaches( self.device, destination, sources.len() as u32, sources.as_ptr(), ); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn create_pipeline_layout( &self, create_info: &VkPipelineLayoutCreateInfo, ) -> Result { unsafe { let mut pipeline_layout = MaybeUninit::uninit(); let result = self.device_functions.vkCreatePipelineLayout( self.device, create_info, ptr::null(), pipeline_layout.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(pipeline_layout.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_pipeline_layout(&self, pipeline_layout: VkPipelineLayout) { unsafe { self.device_functions .vkDestroyPipelineLayout(self.device, pipeline_layout, ptr::null()) }; } #[inline] pub fn create_graphics_pipelines( &self, pipeline_cache: Option, create_infos: &[VkGraphicsPipelineCreateInfo], ) -> Result> { unsafe { let count = create_infos.len() as usize; let mut pipelines = Vec::with_capacity(count); pipelines.set_len(count); let result = self.device_functions.vkCreateGraphicsPipelines( self.device, match pipeline_cache { Some(cache) => cache, None => VkPipelineCache::NULL_HANDLE, }, create_infos.len() as u32, create_infos.as_ptr(), ptr::null(), pipelines.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(pipelines) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn create_compute_pipelines( &self, pipeline_cache: Option, create_infos: &[VkComputePipelineCreateInfo], ) -> Result> { unsafe { let count = create_infos.len() as usize; let mut pipelines = Vec::with_capacity(count); pipelines.set_len(count); let result = self.device_functions.vkCreateComputePipelines( self.device, match pipeline_cache { Some(cache) => cache, None => VkPipelineCache::NULL_HANDLE, }, create_infos.len() as u32, create_infos.as_ptr(), ptr::null(), pipelines.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(pipelines) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_pipeline(&self, pipeline: VkPipeline) { unsafe { self.device_functions .vkDestroyPipeline(self.device, pipeline, ptr::null()) }; } #[inline] pub fn queue_present( &self, queue: VkQueue, present_info: &VkPresentInfoKHR, ) -> Result> { unsafe { let result = self .device_wsi_functions .vkQueuePresentKHR(queue, present_info); if result == VK_SUCCESS { Ok(OutOfDate::Ok(())) } else if result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR { Ok(OutOfDate::OutOfDate) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn create_swapchain( &self, create_info: &VkSwapchainCreateInfoKHR, ) -> Result { unsafe { let mut swapchain = MaybeUninit::uninit(); let result = self.device_wsi_functions.vkCreateSwapchainKHR( self.device, create_info, ptr::null(), swapchain.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(swapchain.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_swapchain(&self, swapchain: VkSwapchainKHR) { unsafe { self.device_wsi_functions .vkDestroySwapchainKHR(self.device, swapchain, ptr::null()) }; } #[inline] pub fn swapchain_images(&self, swapchain: VkSwapchainKHR) -> Result> { let mut count = 0; let result = unsafe { self.device_wsi_functions.vkGetSwapchainImagesKHR( self.device, swapchain, &mut count, ptr::null_mut(), ) }; if result != VK_SUCCESS { return Err(anyhow::Error::new(result)); } let mut images = Vec::with_capacity(count as usize); unsafe { images.set_len(count as usize) }; let result = unsafe { self.device_wsi_functions.vkGetSwapchainImagesKHR( self.device, swapchain, &mut count, images.as_mut_ptr(), ) }; if result == VK_SUCCESS { Ok(images) } else { Err(anyhow::Error::new(result)) } } #[inline] pub fn acquire_next_image( &self, swapchain: VkSwapchainKHR, timeout: u64, semaphore: Option, fence: Option, ) -> Result> { unsafe { let mut image_index = 0; let result = self.device_wsi_functions.vkAcquireNextImageKHR( self.device, swapchain, timeout, match semaphore { Some(sem) => sem, None => VkSemaphore::NULL_HANDLE, }, match fence { Some(fence) => fence, None => VkFence::NULL_HANDLE, }, &mut image_index, ); match result { VK_SUCCESS => Ok(OutOfDate::Ok(image_index)), VK_ERROR_OUT_OF_DATE_KHR | VK_SUBOPTIMAL_KHR => Ok(OutOfDate::OutOfDate), VK_TIMEOUT | VK_NOT_READY => Ok(OutOfDate::TimeOut), _ => Err(anyhow::Error::new(result)), } } } } // command buffer functions impl Device { #[inline] pub fn begin_command_buffer( &self, command_buffer: VkCommandBuffer, begin_info: &VkCommandBufferBeginInfo, ) -> Result<()> { unsafe { if std::mem::transmute::( self.device_functions.vkBeginCommandBuffer, ) == ptr::null() { return Err(anyhow::anyhow!("vkBeginCommandBuffer is null")); } let result = self .device_functions .vkBeginCommandBuffer(command_buffer, begin_info); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn end_command_buffer(&self, command_buffer: VkCommandBuffer) -> Result<()> { unsafe { let result = self.device_functions.vkEndCommandBuffer(command_buffer); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn reset_command_buffer( &self, command_buffer: VkCommandBuffer, flags: impl Into, ) -> Result<()> { unsafe { let result = self .device_functions .vkResetCommandBuffer(command_buffer, flags.into()); if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn cmd_bind_pipeline( &self, command_buffer: VkCommandBuffer, pipeline_bind_point: VkPipelineBindPoint, pipeline: VkPipeline, ) { unsafe { self.device_functions .vkCmdBindPipeline(command_buffer, pipeline_bind_point, pipeline); } } #[inline] pub fn cmd_resolve_image( &self, command_buffer: VkCommandBuffer, src_image: VkImage, src_image_layout: VkImageLayout, dst_image: VkImage, dst_image_layout: VkImageLayout, regions: &[VkImageResolve], ) { unsafe { self.device_functions.vkCmdResolveImage( command_buffer, src_image, src_image_layout, dst_image, dst_image_layout, regions.len() as u32, regions.as_ptr(), ) } } #[inline] pub fn cmd_set_viewport( &self, command_buffer: VkCommandBuffer, first: u32, viewports: &[VkViewport], ) { unsafe { self.device_functions.vkCmdSetViewport( command_buffer, first, viewports.len() as u32, viewports.as_ptr(), ) } } #[inline] pub fn cmd_set_scissor( &self, command_buffer: VkCommandBuffer, first: u32, scissors: &[VkRect2D], ) { unsafe { self.device_functions.vkCmdSetScissor( command_buffer, first, scissors.len() as u32, scissors.as_ptr(), ) } } #[inline] pub fn cmd_set_depth_bias( &self, command_buffer: VkCommandBuffer, depth_bias_constant_factor: f32, depth_bias_clamp: f32, depth_bias_slope_factor: f32, ) { unsafe { self.device_functions.vkCmdSetDepthBias( command_buffer, depth_bias_constant_factor, depth_bias_clamp, depth_bias_slope_factor, ) } } #[inline] pub fn cmd_bind_descriptor_sets( &self, command_buffer: VkCommandBuffer, pipeline_bind_point: VkPipelineBindPoint, pipeline_layout: VkPipelineLayout, first_set: u32, descriptor_sets: &[VkDescriptorSet], dynamic_offsets: &[u32], ) { unsafe { self.device_functions.vkCmdBindDescriptorSets( command_buffer, pipeline_bind_point, pipeline_layout, first_set, descriptor_sets.len() as u32, descriptor_sets.as_ptr(), dynamic_offsets.len() as u32, dynamic_offsets.as_ptr(), ) } } #[inline] pub fn cmd_bind_index_buffer( &self, command_buffer: VkCommandBuffer, buffer: VkBuffer, offset: VkDeviceSize, index_type: VkIndexType, ) { unsafe { self.device_functions .vkCmdBindIndexBuffer(command_buffer, buffer, offset, index_type) } } #[inline] pub fn cmd_bind_vertex_buffers( &self, command_buffer: VkCommandBuffer, first_binding: u32, buffers: &[VkBuffer], offsets: &[VkDeviceSize], ) { // sanity check debug_assert!(buffers.len() == offsets.len()); unsafe { self.device_functions.vkCmdBindVertexBuffers( command_buffer, first_binding, buffers.len() as u32, buffers.as_ptr(), offsets.as_ptr(), ) } } #[inline] pub fn cmd_draw( &self, command_buffer: VkCommandBuffer, vertex_count: u32, instance_count: u32, first_vertex: u32, first_instance: u32, ) { unsafe { self.device_functions.vkCmdDraw( command_buffer, vertex_count, instance_count, first_vertex, first_instance, ) } } #[inline] pub fn cmd_draw_indexed( &self, command_buffer: VkCommandBuffer, index_count: u32, instance_count: u32, first_index: u32, vertex_offset: i32, first_instance: u32, ) { unsafe { self.device_functions.vkCmdDrawIndexed( command_buffer, index_count, instance_count, first_index, vertex_offset, first_instance, ); } } #[inline] pub fn cmd_dispatch(&self, command_buffer: VkCommandBuffer, x: u32, y: u32, z: u32) { unsafe { self.device_functions.vkCmdDispatch(command_buffer, x, y, z) } } #[inline] pub fn cmd_begin_render_pass( &self, command_buffer: VkCommandBuffer, render_pass_begin: &VkRenderPassBeginInfo, contents: VkSubpassContents, ) { unsafe { self.device_functions .vkCmdBeginRenderPass(command_buffer, render_pass_begin, contents) } } #[inline] pub fn cmd_next_subpass(&self, command_buffer: VkCommandBuffer, contents: VkSubpassContents) { unsafe { self.device_functions .vkCmdNextSubpass(command_buffer, contents) } } #[inline] pub fn cmd_end_render_pass(&self, command_buffer: VkCommandBuffer) { unsafe { self.device_functions.vkCmdEndRenderPass(command_buffer) } } #[inline] pub fn cmd_execute_commands( &self, command_buffer: VkCommandBuffer, command_buffers: &[VkCommandBuffer], ) { unsafe { self.device_functions.vkCmdExecuteCommands( command_buffer, command_buffers.len() as u32, command_buffers.as_ptr(), ) } } #[inline] pub fn cmd_pipeline_barrier( &self, command_buffer: VkCommandBuffer, src_stage_mask: impl Into, dst_stage_mask: impl Into, dependency_flags: impl Into, memory_barriers: &[VkMemoryBarrier], buffer_memory_barriers: &[VkBufferMemoryBarrier], image_memory_barriers: &[VkImageMemoryBarrier], ) { unsafe { self.device_functions.vkCmdPipelineBarrier( command_buffer, src_stage_mask.into(), dst_stage_mask.into(), dependency_flags.into(), memory_barriers.len() as u32, memory_barriers.as_ptr(), buffer_memory_barriers.len() as u32, buffer_memory_barriers.as_ptr(), image_memory_barriers.len() as u32, image_memory_barriers.as_ptr(), ) } } #[inline] pub fn cmd_copy_buffer( &self, command_buffer: VkCommandBuffer, src_buffer: VkBuffer, dst_buffer: VkBuffer, regions: &[VkBufferCopy], ) { unsafe { self.device_functions.vkCmdCopyBuffer( command_buffer, src_buffer, dst_buffer, regions.len() as u32, regions.as_ptr(), ) } } #[inline] pub fn cmd_copy_image( &self, command_buffer: VkCommandBuffer, src_image: VkImage, src_image_layout: VkImageLayout, dst_image: VkImage, dst_image_layout: VkImageLayout, regions: &[VkImageCopy], ) { unsafe { self.device_functions.vkCmdCopyImage( command_buffer, src_image, src_image_layout, dst_image, dst_image_layout, regions.len() as u32, regions.as_ptr(), ) } } #[inline] pub fn cmd_blit_image( &self, command_buffer: VkCommandBuffer, src_image: VkImage, src_image_layout: VkImageLayout, dst_image: VkImage, dst_image_layout: VkImageLayout, regions: &[VkImageBlit], filter: VkFilter, ) { unsafe { self.device_functions.vkCmdBlitImage( command_buffer, src_image, src_image_layout, dst_image, dst_image_layout, regions.len() as u32, regions.as_ptr(), filter, ) } } #[inline] pub fn cmd_copy_buffer_to_image( &self, command_buffer: VkCommandBuffer, src_buffer: VkBuffer, dst_image: VkImage, dst_image_layout: VkImageLayout, regions: &[VkBufferImageCopy], ) { unsafe { self.device_functions.vkCmdCopyBufferToImage( command_buffer, src_buffer, dst_image, dst_image_layout, regions.len() as u32, regions.as_ptr(), ) } } #[inline] pub fn cmd_copy_image_to_buffer( &self, command_buffer: VkCommandBuffer, src_image: VkImage, src_image_layout: VkImageLayout, dst_buffer: VkBuffer, regions: &[VkBufferImageCopy], ) { unsafe { self.device_functions.vkCmdCopyImageToBuffer( command_buffer, src_image, src_image_layout, dst_buffer, regions.len() as u32, regions.as_ptr(), ) } } #[inline] pub fn cmd_push_constants( &self, command_buffer: VkCommandBuffer, pipeline_layout: VkPipelineLayout, stage_flags: impl Into, offset: u32, data: &T, ) { unsafe { self.device_functions.vkCmdPushConstants( command_buffer, pipeline_layout, stage_flags.into(), offset, size_of::() as u32, data as *const T as *const c_void, ) } } #[inline] pub fn cmd_begin_query( &self, command_buffer: VkCommandBuffer, query_pool: VkQueryPool, query: u32, flags: impl Into, ) { unsafe { self.device_functions .vkCmdBeginQuery(command_buffer, query_pool, query, flags.into()) } } #[inline] pub fn cmd_end_query( &self, command_buffer: VkCommandBuffer, query_pool: VkQueryPool, query: u32, ) { unsafe { self.device_functions .vkCmdEndQuery(command_buffer, query_pool, query) } } #[inline] pub fn cmd_reset_query_pool( &self, command_buffer: VkCommandBuffer, query_pool: VkQueryPool, first_query: u32, query_count: u32, ) { unsafe { self.device_functions.vkCmdResetQueryPool( command_buffer, query_pool, first_query, query_count, ) } } #[inline] pub fn cmd_write_timestamp( &self, command_buffer: VkCommandBuffer, pipeline_stage: impl Into, query_pool: VkQueryPool, query: u32, ) { unsafe { self.device_functions.vkCmdWriteTimestamp( command_buffer, pipeline_stage.into(), query_pool, query, ) } } #[inline] pub fn cmd_clear_color_image( &self, command_buffer: VkCommandBuffer, image: VkImage, image_layout: VkImageLayout, clear_color: VkClearColorValue, ranges: &[VkImageSubresourceRange], ) { unsafe { self.device_functions.vkCmdClearColorImage( command_buffer, image, image_layout, &clear_color, ranges.len() as u32, ranges.as_ptr(), ) } } #[inline] pub fn descriptor_set_layout_support( &self, create_info: &VkDescriptorSetLayoutCreateInfo, support: &mut VkDescriptorSetLayoutSupport, ) { unsafe { self.maintenance3_functions.vkGetDescriptorSetLayoutSupport( self.device, create_info, support, ); } } } // khr ray tracing pipeline & acceleration structure impl Device { #[inline] pub fn build_acceleration_structures( &self, deferred_operation: Option, infos: &[VkAccelerationStructureBuildGeometryInfoKHR], range_infos: &[&VkAccelerationStructureBuildRangeInfoKHR], ) -> Result<()> { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); let result = unsafe { self._acceleration_structure_functions .vkBuildAccelerationStructuresKHR( self.device, match deferred_operation { Some(deferred_operation) => deferred_operation, None => VkDeferredOperationKHR::NULL_HANDLE, }, infos.len() as u32, infos.as_ptr(), range_infos.as_ptr() as *const *const _, ) }; if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } #[inline] pub fn cmd_build_acceleration_structure_indirect( &self, command_buffer: VkCommandBuffer, infos: &[VkAccelerationStructureBuildGeometryInfoKHR], device_addresses: &[VkDeviceAddress], strides: &[u32], max_primitive_counts: &[&u32], ) { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); let count = infos.len(); debug_assert_eq!(infos.len(), count); debug_assert_eq!(device_addresses.len(), count); debug_assert_eq!(strides.len(), count); debug_assert_eq!(max_primitive_counts.len(), count); unsafe { self._acceleration_structure_functions .vkCmdBuildAccelerationStructuresIndirectKHR( command_buffer, count as u32, infos.as_ptr(), device_addresses.as_ptr(), strides.as_ptr(), max_primitive_counts.as_ptr() as *const *const u32, ) } } #[inline] pub fn cmd_build_acceleration_structures( &self, command_buffer: VkCommandBuffer, infos: &[VkAccelerationStructureBuildGeometryInfoKHR], range_infos: &[&[VkAccelerationStructureBuildRangeInfoKHR]], ) { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); let range_info_ptr = range_infos .iter() .map(|slice| slice.as_ptr()) .collect::>(); unsafe { self._acceleration_structure_functions .vkCmdBuildAccelerationStructuresKHR( command_buffer, infos.len() as u32, infos.as_ptr(), range_info_ptr.as_ptr(), ) } } #[inline] pub fn cmd_copy_acceleration_structure( &self, command_buffer: VkCommandBuffer, info: &VkCopyAccelerationStructureInfoKHR, ) { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); unsafe { self._acceleration_structure_functions .vkCmdCopyAccelerationStructureKHR(command_buffer, info) } } #[inline] pub fn cmd_copy_acceleration_structure_to_memory( &self, command_buffer: VkCommandBuffer, info: &VkCopyAccelerationStructureToMemoryInfoKHR, ) { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); unsafe { self._acceleration_structure_functions .vkCmdCopyAccelerationStructureToMemoryKHR(command_buffer, info) } } #[inline] pub fn cmd_copy_memory_to_acceleration_structure( &self, command_buffer: VkCommandBuffer, info: &VkCopyMemoryToAccelerationStructureInfoKHR, ) { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); unsafe { self._acceleration_structure_functions .vkCmdCopyMemoryToAccelerationStructureKHR(command_buffer, info) } } #[inline] pub fn cmd_trace_rays_indirect( &self, command_buffer: VkCommandBuffer, raygen_shader_binding_table: &VkStridedDeviceAddressRegionKHR, miss_shader_binding_table: &VkStridedDeviceAddressRegionKHR, hit_shader_binding_table: &VkStridedDeviceAddressRegionKHR, callable_shader_binding_table: &VkStridedDeviceAddressRegionKHR, device_address: VkDeviceAddress, ) { debug_assert_eq!( self.physical_device .ray_tracing_features() .rayTracingPipeline, VK_TRUE ); unsafe { self._ray_tracing_pipeline_functions .vkCmdTraceRaysIndirectKHR( command_buffer, raygen_shader_binding_table, miss_shader_binding_table, hit_shader_binding_table, callable_shader_binding_table, device_address, ) } } #[inline] pub fn cmd_trace_rays( &self, command_buffer: VkCommandBuffer, raygen_shader_binding_table: &VkStridedDeviceAddressRegionKHR, miss_shader_binding_table: &VkStridedDeviceAddressRegionKHR, hit_shader_binding_table: &VkStridedDeviceAddressRegionKHR, callable_shader_binding_table: &VkStridedDeviceAddressRegionKHR, width: u32, height: u32, depth: u32, ) { debug_assert_eq!( self.physical_device .ray_tracing_features() .rayTracingPipeline, VK_TRUE ); unsafe { self._ray_tracing_pipeline_functions.vkCmdTraceRaysKHR( command_buffer, raygen_shader_binding_table, miss_shader_binding_table, hit_shader_binding_table, callable_shader_binding_table, width, height, depth, ) } } #[inline] pub fn cmd_write_acceleration_structure_properties( &self, command_buffer: VkCommandBuffer, acceleration_structures: &[VkAccelerationStructureKHR], query_type: VkQueryType, query_pool: VkQueryPool, first_query: u32, ) { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); unsafe { self._acceleration_structure_functions .vkCmdWriteAccelerationStructuresPropertiesKHR( command_buffer, acceleration_structures.len() as u32, acceleration_structures.as_ptr(), query_type, query_pool, first_query, ) } } #[inline] pub fn create_acceleration_structure( &self, create_info: &VkAccelerationStructureCreateInfoKHR, allocator: Option<&VkAllocationCallbacks>, ) -> Result { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); unsafe { let mut handle = MaybeUninit::uninit(); let result = self ._acceleration_structure_functions .vkCreateAccelerationStructureKHR( self.device, create_info, match allocator { Some(alloc) => alloc, None => ptr::null(), }, handle.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(handle.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn create_ray_tracing_pipelines( &self, deferred_operation: Option, pipeline_cache: Option, pipeline_create_infos: &[VkRayTracingPipelineCreateInfoKHR], allocator: Option<&VkAllocationCallbacks>, ) -> Result> { debug_assert_eq!( self.physical_device .ray_tracing_features() .rayTracingPipeline, VK_TRUE ); unsafe { let count = pipeline_create_infos.len() as usize; let mut pipelines = Vec::with_capacity(count); pipelines.set_len(count); let result = self ._ray_tracing_pipeline_functions .vkCreateRayTracingPipelinesKHR( self.device, match deferred_operation { Some(deferred_operation) => deferred_operation, None => VkDeferredOperationKHR::NULL_HANDLE, }, match pipeline_cache { Some(cache) => cache, None => VkPipelineCache::NULL_HANDLE, }, pipeline_create_infos.len() as u32, pipeline_create_infos.as_ptr(), match allocator { Some(alloc) => alloc, None => ptr::null(), }, pipelines.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(pipelines) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn copy_acceleration_structure( &self, deferred_operation: Option, info: &VkCopyAccelerationStructureInfoKHR, ) -> Result<()> { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); let result = unsafe { self._acceleration_structure_functions .vkCopyAccelerationStructureKHR( self.device, match deferred_operation { Some(deferred_operation) => deferred_operation, None => VkDeferredOperationKHR::NULL_HANDLE, }, info, ) }; if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } #[inline] pub fn copy_acceleration_structure_to_memory( &self, deferred_operation: Option, info: &VkCopyAccelerationStructureToMemoryInfoKHR, ) -> Result<()> { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); let result = unsafe { self._acceleration_structure_functions .vkCopyAccelerationStructureToMemoryKHR( self.device, match deferred_operation { Some(deferred_operation) => deferred_operation, None => VkDeferredOperationKHR::NULL_HANDLE, }, info, ) }; if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } #[inline] pub fn destroy_acceleration_structure( &self, acceleration_structure: VkAccelerationStructureKHR, allocator: Option<&VkAllocationCallbacks>, ) { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); unsafe { self._acceleration_structure_functions .vkDestroyAccelerationStructureKHR( self.device, acceleration_structure, match allocator { Some(alloc) => alloc, None => ptr::null(), }, ) } } #[inline] pub fn get_acceleration_structure_device_address( &self, info: &VkAccelerationStructureDeviceAddressInfoKHR, ) -> VkDeviceAddress { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); unsafe { self._acceleration_structure_functions .vkGetAccelerationStructureDeviceAddressKHR(self.device, info) } } #[inline] pub fn get_device_acceleration_structure_compatibility( &self, version: &VkAccelerationStructureVersionInfoKHR, ) -> VkAccelerationStructureCompatibilityKHR { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); unsafe { let mut compatibility = MaybeUninit::zeroed(); self._acceleration_structure_functions .vkGetDeviceAccelerationStructureCompatibilityKHR( self.device, version, compatibility.as_mut_ptr(), ); compatibility.assume_init() } } #[inline] pub fn get_ray_tracing_capture_replay_shader_group_handles( &self, pipeline: VkPipeline, first_group: u32, group_count: u32, data: &mut [T], ) -> Result<()> { debug_assert_eq!( self.physical_device .ray_tracing_features() .rayTracingPipeline, VK_TRUE ); let result = unsafe { self._ray_tracing_pipeline_functions .vkGetRayTracingCaptureReplayShaderGroupHandlesKHR( self.device, pipeline, first_group, group_count, (data.len() * size_of::()) as isize, data.as_mut_ptr() as *mut c_void, ) }; if result == VK_SUCCESS { Ok(()) } else { Err(anyhow::Error::new(result)) } } pub fn get_acceleration_structure_build_sizes( &self, build_type: VkAccelerationStructureBuildTypeKHR, build_info: &VkAccelerationStructureBuildGeometryInfoKHR, max_primitive_counts: &u32, ) -> VkAccelerationStructureBuildSizesInfoKHR { debug_assert_eq!( self.physical_device .acceleration_structure_features() .accelerationStructure, VK_TRUE ); unsafe { let mut res = VkAccelerationStructureBuildSizesInfoKHR::new(0, 0, 0); self._acceleration_structure_functions .vkGetAccelerationStructureBuildSizesKHR( self.device, build_type, build_info, max_primitive_counts, &mut res, ); res } } pub fn get_ray_tracing_shader_group_handles( &self, pipeline: VkPipeline, first_group: u32, group_count: u32, shader_group_handle_size: u32, ) -> Result> { debug_assert_eq!( self.physical_device .ray_tracing_features() .rayTracingPipeline, VK_TRUE ); unsafe { let mut data = vec![255; (group_count * shader_group_handle_size) as usize]; let result = self ._ray_tracing_pipeline_functions .vkGetRayTracingShaderGroupHandlesKHR( self.device, pipeline, first_group, group_count, data.len() as isize, data.as_mut_ptr() as *mut c_void, ); if result == VK_SUCCESS { Ok(data) } else { Err(anyhow::Error::new(result)) } } } } // deferred operations impl Device { #[inline] pub fn create_deferred_operation( &self, allocator: Option<&VkAllocationCallbacks>, ) -> Result { unsafe { let mut handle = MaybeUninit::uninit(); let result = self .deferred_operation_functions .vkCreateDeferredOperationKHR( self.device, match allocator { Some(alloc) => alloc, None => ptr::null(), }, handle.as_mut_ptr(), ); if result == VK_SUCCESS { Ok(handle.assume_init()) } else { Err(anyhow::Error::new(result)) } } } #[inline] pub fn destroy_deferred_operation( &self, deferred_operation: VkDeferredOperationKHR, allocator: Option<&VkAllocationCallbacks>, ) { unsafe { self.deferred_operation_functions .vkDestroyDeferredOperationKHR( self.device, deferred_operation, match allocator { Some(alloc) => alloc, None => ptr::null(), }, ) } } #[inline] pub fn get_deferred_operation_max_concurrency( &self, deferred_operation: VkDeferredOperationKHR, ) -> u32 { unsafe { self.deferred_operation_functions .vkGetDeferredOperationMaxConcurrencyKHR(self.device, deferred_operation) } } #[inline] pub fn get_deferred_operation_result( &self, deferred_operation: VkDeferredOperationKHR, ) -> VkResult { unsafe { self.deferred_operation_functions .vkGetDeferredOperationResultKHR(self.device, deferred_operation) } } #[inline] pub fn deferred_operation_join(&self, deferred_operation: VkDeferredOperationKHR) -> VkResult { unsafe { self.deferred_operation_functions .vkDeferredOperationJoinKHR(self.device, deferred_operation) } } }