// vulkan_lib/vulkan-rs/src/commandbuffer.rs

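//! Command buffer allocation and recording.
//!
//! A [`CommandBuffer`] owns the [`CommandPool`] it is allocated from; recording
//! goes through [`CommandBufferRecorder`], which calls `vkEndCommandBuffer`
//! when dropped and keeps every resource referenced by a recorded command
//! alive until the next [`CommandBuffer::begin`].
//!
//! Minimal sketch (assumes `device: Arc<Device>` and `queue: Arc<Mutex<Queue>>`
//! were created elsewhere, and a `VkCommandBufferBeginInfo::new(flags)`
//! constructor as used elsewhere in this crate):
//!
//! ```ignore
//! let command_buffer = CommandBuffer::new_primary()
//!     .set_flags(VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT)
//!     .build(device.clone(), queue.clone())?;
//! {
//!     let recorder = command_buffer
//!         .begin(VkCommandBufferBeginInfo::new(VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT))?;
//!     recorder.memory_barrier(
//!         VK_ACCESS_TRANSFER_WRITE_BIT,
//!         VK_PIPELINE_STAGE_TRANSFER_BIT,
//!         VK_ACCESS_SHADER_READ_BIT,
//!         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
//!     );
//! } // recorder drops here -> vkEndCommandBuffer
//! ```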

use super::{
commandpool::{CommandPool, CommandPoolBuilder},
pipeline::PipelineType,
};
use crate::prelude::*;
use anyhow::Result;
use std::any::Any;
use std::sync::{
atomic::{AtomicUsize, Ordering::SeqCst},
Arc, Mutex, MutexGuard,
};
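/// Occlusion-query and pipeline-statistics settings forwarded to
/// `VkCommandBufferInheritanceInfo::set_query` by
/// [`CommandBuffer::inheritance_info`].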
pub struct QueryEnable {
pub query_flags: VkQueryControlFlagBits,
pub pipeline_statistics: VkQueryPipelineStatisticFlagBits,
}
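/// Builder for a [`CommandBuffer`], obtained via [`CommandBuffer::new_primary`]
/// or [`CommandBuffer::new_secondary`]. `build` creates a dedicated
/// [`CommandPool`] on the queue's family and allocates a single buffer from it.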
pub struct CommandBufferBuilder {
buffer_level: VkCommandBufferLevel,
pool_builder: CommandPoolBuilder,
}
impl CommandBufferBuilder {
pub fn set_flags(mut self, flags: impl Into<VkCommandPoolCreateFlagBits>) -> Self {
self.pool_builder = self.pool_builder.set_flags(flags);
self
}
pub fn build(
self,
device: Arc<Device>,
queue: Arc<Mutex<Queue>>,
) -> Result<Arc<CommandBuffer>> {
let command_pool = self
.pool_builder
.set_queue_family_index(
queue
.lock()
.map_err(|_| anyhow::Error::msg("Failed locking vulkan queue"))?
.family_index(),
)
.build(device.clone())?;
let command_buffer_ci =
VkCommandBufferAllocateInfo::new(command_pool.vk_handle(), self.buffer_level, 1);
let command_buffer = device.allocate_command_buffers(&command_buffer_ci)?[0];
Ok(Arc::new(CommandBuffer {
device,
pool: command_pool,
buffer: command_buffer,
calls: Arc::new(AtomicUsize::new(0)),
stored_handles: Mutex::new(Vec::new()),
}))
}
}
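/// A `VkCommandBuffer` bundled with the pool it was allocated from.
///
/// `calls` counts the commands recorded since the last `begin`, and
/// `stored_handles` keeps every resource referenced by those commands alive.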
#[derive(Debug)]
pub struct CommandBuffer {
device: Arc<Device>,
pool: Arc<CommandPool>,
buffer: VkCommandBuffer,
calls: Arc<AtomicUsize>,
stored_handles: Mutex<Vec<Arc<dyn Any + Send + Sync>>>,
}
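/// RAII recording scope returned by [`CommandBuffer::begin`].
///
/// Holds the `stored_handles` lock for the whole recording and ends the
/// command buffer when dropped.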
#[derive(Debug)]
pub struct CommandBufferRecorder<'a> {
device: Arc<Device>,
sub_pass: u32,
pipeline: Option<Arc<Pipeline>>,
calls: Arc<AtomicUsize>,
buffer: VkCommandBuffer,
handles_lock: MutexGuard<'a, Vec<Arc<dyn Any + Send + Sync>>>,
}
impl_vk_handle!(CommandBuffer, VkCommandBuffer, buffer);
impl CommandBuffer {
pub fn new_primary() -> CommandBufferBuilder {
CommandBufferBuilder {
buffer_level: VK_COMMAND_BUFFER_LEVEL_PRIMARY,
pool_builder: CommandPool::builder(),
}
}
pub fn new_secondary() -> CommandBufferBuilder {
CommandBufferBuilder {
buffer_level: VK_COMMAND_BUFFER_LEVEL_SECONDARY,
pool_builder: CommandPool::builder(),
}
}
pub fn reset(&self, flags: impl Into<VkCommandBufferResetFlagBits>) -> Result<()> {
self.device.reset_command_buffer(self.buffer, flags)
}
pub fn calls(&self) -> usize {
self.calls.load(SeqCst)
}
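    /// Starts recording: clears the stored resource handles, resets the call
    /// counter, and returns a recorder that ends the buffer on drop.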
pub fn begin(&self, begin_info: VkCommandBufferBeginInfo) -> Result<CommandBufferRecorder<'_>> {
self.device.begin_command_buffer(self.buffer, &begin_info)?;
        let mut handles_lock = self
            .stored_handles
            .lock()
            .map_err(|_| anyhow::Error::msg("Failed locking stored handles"))?;
handles_lock.clear();
self.calls.store(0, SeqCst);
Ok(CommandBufferRecorder {
device: self.device.clone(),
sub_pass: 0,
pipeline: None,
calls: self.calls.clone(),
buffer: self.buffer,
handles_lock,
})
}
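    /// Heuristic mapping from an access mask to the single pipeline stage it
    /// is assumed to occur in; used by the `*_auto_stage` and
    /// `set_image_layout` helpers. Note that `VK_ACCESS_SHADER_READ_BIT` is
    /// conservatively mapped to the fragment-shader stage, and unsupported
    /// combinations panic via `unimplemented!`.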
pub fn access_to_stage(access_mask: impl Into<VkAccessFlagBits>) -> VkPipelineStageFlags {
let access_mask = access_mask.into();
if access_mask == 0 {
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
} else if access_mask == VK_ACCESS_HOST_WRITE_BIT {
VK_PIPELINE_STAGE_HOST_BIT
} else if access_mask == VK_ACCESS_TRANSFER_WRITE_BIT
|| access_mask == VK_ACCESS_TRANSFER_READ_BIT
{
VK_PIPELINE_STAGE_TRANSFER_BIT
} else if access_mask == VK_ACCESS_SHADER_READ_BIT {
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT
} else if access_mask == VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
|| access_mask == VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
|| access_mask
== VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT
{
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT
} else if access_mask == VK_ACCESS_MEMORY_READ_BIT {
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT
} else if access_mask == VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT {
VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT
} else if access_mask
== VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
| VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT
{
VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT
} else {
unimplemented!("access mask not supported {:?}", access_mask)
}
}
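    /// Builds the `VkCommandBufferInheritanceInfo` required to begin a
    /// secondary command buffer; `None` arguments become null handles and
    /// subpass 0.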
pub fn inheritance_info(
render_pass: Option<&Arc<RenderPass>>,
sub_pass: Option<u32>,
framebuffer: Option<&Arc<Framebuffer>>,
query_enable: Option<QueryEnable>,
) -> VkCommandBufferInheritanceInfo {
let mut info = VkCommandBufferInheritanceInfo::new(
match render_pass {
Some(render_pass) => render_pass.vk_handle(),
None => VkRenderPass::NULL_HANDLE,
},
sub_pass.unwrap_or(0),
match framebuffer {
Some(framebuffer) => framebuffer.vk_handle(),
None => VkFramebuffer::NULL_HANDLE,
},
);
if let Some(query) = query_enable {
info.set_query(true, query.query_flags, query.pipeline_statistics);
}
info
}
}
impl<'a> CommandBufferRecorder<'a> {
pub fn pipeline_barrier(
&self,
src_stage_mask: impl Into<VkPipelineStageFlagBits>,
dst_stage_mask: impl Into<VkPipelineStageFlagBits>,
dependency_flags: impl Into<VkDependencyFlagBits>,
memory_barriers: &[VkMemoryBarrier],
buffer_memory_barriers: &[VkBufferMemoryBarrier],
image_memory_barriers: &[VkImageMemoryBarrier],
) {
self.calls.fetch_add(1, SeqCst);
self.device.cmd_pipeline_barrier(
self.buffer,
src_stage_mask,
dst_stage_mask,
dependency_flags,
memory_barriers,
buffer_memory_barriers,
image_memory_barriers,
)
}
pub fn memory_barrier(
&self,
src_access_mask: impl Into<VkAccessFlagBits>,
src_stage: VkPipelineStageFlags,
dst_access_mask: impl Into<VkAccessFlagBits>,
dst_stage: VkPipelineStageFlags,
) {
self.pipeline_barrier(
src_stage,
dst_stage,
0,
&[VkMemoryBarrier::new(src_access_mask, dst_access_mask)],
&[],
&[],
);
}
pub fn buffer_barrier<T: ReprC + Send + Sync + 'static>(
&mut self,
buffer: &Arc<Buffer<T>>,
src_access_mask: impl Into<VkAccessFlagBits>,
src_stage: impl Into<VkPipelineStageFlagBits>,
dst_access_mask: impl Into<VkAccessFlagBits>,
dst_stage: impl Into<VkPipelineStageFlagBits>,
) {
self.handles_lock.push(buffer.clone());
self.pipeline_barrier(
src_stage,
dst_stage,
0,
&[],
&[VkBufferMemoryBarrier::new(
src_access_mask,
dst_access_mask,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
buffer.vk_handle(),
0,
buffer.byte_size(),
)],
&[],
);
}
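    /// Records an image memory barrier transitioning `image` between layouts
    /// with explicit source and destination stages; the access masks are
    /// derived from the layouts and the image's tracked layout is updated
    /// afterwards.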
pub fn image_barrier(
&mut self,
image: &Arc<Image>,
old_image_layout: VkImageLayout,
src_stage: impl Into<VkPipelineStageFlagBits>,
new_image_layout: VkImageLayout,
dst_stage: impl Into<VkPipelineStageFlagBits>,
) {
let src_access_mask = Image::src_layout_to_access(old_image_layout);
let dst_access_mask = Image::dst_layout_to_access(new_image_layout);
self.handles_lock.push(image.clone());
self.pipeline_barrier(
src_stage,
dst_stage,
0,
&[],
&[],
&[VkImageMemoryBarrier::new(
src_access_mask,
dst_access_mask,
old_image_layout,
new_image_layout,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
image.vk_handle(),
image.full_resource_range(),
)],
);
image.set_image_layout(new_image_layout);
}
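    /// Like [`Self::image_barrier`], but also derives both stages from the
    /// access masks via [`CommandBuffer::access_to_stage`].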
pub fn image_barrier_auto_stage(
&mut self,
image: &Arc<Image>,
old_image_layout: VkImageLayout,
new_image_layout: VkImageLayout,
) {
let src_access_mask = Image::src_layout_to_access(old_image_layout);
let dst_access_mask = Image::dst_layout_to_access(new_image_layout);
self.handles_lock.push(image.clone());
self.pipeline_barrier(
CommandBuffer::access_to_stage(src_access_mask),
CommandBuffer::access_to_stage(dst_access_mask),
0,
&[],
&[],
&[VkImageMemoryBarrier::new(
src_access_mask,
dst_access_mask,
old_image_layout,
new_image_layout,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
image.vk_handle(),
image.full_resource_range(),
)],
);
image.set_image_layout(new_image_layout);
}
pub fn begin_render_pass(
&mut self,
renderpass_begin_info: VkRenderPassBeginInfo,
subpass_contents: VkSubpassContents,
) {
self.sub_pass = 0;
self.device
.cmd_begin_render_pass(self.buffer, &renderpass_begin_info, subpass_contents);
}
pub fn begin_render_pass_full(
&mut self,
render_pass: &Arc<RenderPass>,
framebuffer: &Arc<Framebuffer>,
clear_values: &[VkClearValue],
subpass_contents: VkSubpassContents,
) {
self.handles_lock.push(render_pass.clone());
self.handles_lock.push(framebuffer.clone());
self.sub_pass = 0;
let render_pass_begin_info = VkRenderPassBeginInfo::new(
render_pass.vk_handle(),
framebuffer.vk_handle(),
VkRect2D {
offset: VkOffset2D { x: 0, y: 0 },
extent: VkExtent2D {
width: framebuffer.width(),
height: framebuffer.height(),
},
},
clear_values,
);
self.device
.cmd_begin_render_pass(self.buffer, &render_pass_begin_info, subpass_contents);
}
pub fn next_subpass(&mut self, subpass_contents: VkSubpassContents) {
self.sub_pass += 1;
self.device.cmd_next_subpass(self.buffer, subpass_contents);
}
pub fn end_render_pass(&self) {
self.device.cmd_end_render_pass(self.buffer);
}
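    /// Binds `pipeline` at the bind point matching its type and remembers it
    /// for subsequent descriptor-set and push-constant calls. For graphics
    /// pipelines, the recorder's current subpass must match the pipeline's
    /// subpass (checked by a debug assertion).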
pub fn bind_pipeline(&mut self, pipeline: &Arc<Pipeline>) -> Result<()> {
self.handles_lock.push(pipeline.clone());
match pipeline.pipeline_type() {
PipelineType::Graphics => {
debug_assert_eq!(self.sub_pass, pipeline.sub_pass());
self.device.cmd_bind_pipeline(
self.buffer,
VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline.vk_handle(),
);
}
PipelineType::Compute => self.device.cmd_bind_pipeline(
self.buffer,
VK_PIPELINE_BIND_POINT_COMPUTE,
pipeline.vk_handle(),
),
PipelineType::RayTracing => self.device.cmd_bind_pipeline(
self.buffer,
VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR,
pipeline.vk_handle(),
),
}
self.pipeline = Some(pipeline.clone());
Ok(())
}
pub fn execute_commands(&self, command_buffers: &[&impl VkHandle<VkCommandBuffer>]) {
self.calls.fetch_add(1, SeqCst);
let buffers: Vec<VkCommandBuffer> =
command_buffers.iter().map(|cb| cb.vk_handle()).collect();
self.device
.cmd_execute_commands(self.buffer, buffers.as_slice());
}
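    /// Binds `descriptor_sets` starting at set 0 with no dynamic offsets,
    /// using the layout of the most recently bound pipeline. Panics if no
    /// pipeline has been bound.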
pub fn bind_descriptor_sets_minimal(&mut self, descriptor_sets: &[&Arc<DescriptorSet>]) {
self.calls.fetch_add(1, SeqCst);
let (pipeline_bind_point, vk_layout) = {
let pipeline = match &self.pipeline {
Some(pipeline) => pipeline,
None => panic!("no pipeline in command buffer"),
};
let pipe_type = match pipeline.pipeline_type() {
PipelineType::Graphics => VK_PIPELINE_BIND_POINT_GRAPHICS,
PipelineType::Compute => VK_PIPELINE_BIND_POINT_COMPUTE,
PipelineType::RayTracing => VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR,
};
(pipe_type, pipeline.pipeline_layout().vk_handle())
};
let vk_descriptor_sets: Vec<VkDescriptorSet> = descriptor_sets
.iter()
.map(|ds: &&Arc<DescriptorSet>| {
self.handles_lock.push((*ds).clone());
ds.vk_handle()
})
.collect();
self.device.cmd_bind_descriptor_sets(
self.buffer,
pipeline_bind_point,
vk_layout,
0,
vk_descriptor_sets.as_slice(),
&[],
);
}
pub fn bind_vertex_buffer<T: ReprC + Send + Sync + 'static>(
&mut self,
buffer: &Arc<Buffer<T>>,
) {
self.calls.fetch_add(1, SeqCst);
self.handles_lock.push(buffer.clone());
self.device
.cmd_bind_vertex_buffers(self.buffer, 0, &[buffer.vk_handle()], &[0]);
}
pub fn bind_vertex_buffers_minimal<T: ReprC + Send + Sync + 'static>(
&mut self,
buffers: &[&Arc<Buffer<T>>],
) {
self.calls.fetch_add(1, SeqCst);
let vk_buffers: Vec<VkBuffer> = buffers
.iter()
.map(|b: &&Arc<Buffer<T>>| {
self.handles_lock.push((*b).clone());
b.vk_handle()
})
.collect();
let offsets = vec![0; vk_buffers.len()];
self.device.cmd_bind_vertex_buffers(
self.buffer,
0,
vk_buffers.as_slice(),
offsets.as_slice(),
);
}
pub fn bind_index_buffer<T: ReprC + Send + Sync + 'static>(
&mut self,
buffer: &Arc<Buffer<T>>,
offset: VkDeviceSize,
index_type: VkIndexType,
) {
self.calls.fetch_add(1, SeqCst);
self.handles_lock.push(buffer.clone());
self.device
.cmd_bind_index_buffer(self.buffer, buffer.vk_handle(), offset, index_type);
}
pub fn set_viewport(&self, viewports: &[VkViewport]) {
self.device.cmd_set_viewport(self.buffer, 0, viewports);
}
pub fn set_scissor(&self, scissors: &[VkRect2D]) {
self.device.cmd_set_scissor(self.buffer, 0, scissors);
}
pub fn draw(
&self,
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
) {
self.calls.fetch_add(1, SeqCst);
self.device.cmd_draw(
self.buffer,
vertex_count,
instance_count,
first_vertex,
first_instance,
);
}
pub fn draw_complete_single_instance(&self, vertex_count: u32) {
self.calls.fetch_add(1, SeqCst);
self.device.cmd_draw(self.buffer, vertex_count, 1, 0, 0);
}
pub fn draw_indexed(
&self,
index_count: u32,
instance_count: u32,
first_index: u32,
vertex_offset: i32,
first_instance: u32,
) {
self.calls.fetch_add(1, SeqCst);
self.device.cmd_draw_indexed(
self.buffer,
index_count,
instance_count,
first_index,
vertex_offset,
first_instance,
);
}
pub fn draw_indexed_complete_single_instance(&self, index_count: u32) {
self.calls.fetch_add(1, SeqCst);
self.device
.cmd_draw_indexed(self.buffer, index_count, 1, 0, 0, 0);
}
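    /// Pushes `data` at offset 0 against the current pipeline's layout.
    /// Panics if no pipeline has been bound; `U` must match the layout's
    /// push-constant range.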
pub fn push_constants<U>(&self, stage_flags: impl Into<VkShaderStageFlagBits>, data: &U) {
self.calls.fetch_add(1, SeqCst);
let pipeline = match &self.pipeline {
Some(pipeline) => pipeline,
None => panic!("no pipeline in command buffer"),
};
let layout = pipeline.pipeline_layout();
self.device
.cmd_push_constants(self.buffer, layout.vk_handle(), stage_flags, 0, data);
}
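    /// Transitions `subresource_range` of `image` from its currently tracked
    /// layout to `new_image_layout`, deriving access masks and stages
    /// automatically, then updates the tracked layout.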
pub fn set_image_layout(
&mut self,
image: &Arc<Image>,
new_image_layout: VkImageLayout,
subresource_range: VkImageSubresourceRange,
) {
let src_access = Image::src_layout_to_access(image.image_layout());
let dst_access = Image::dst_layout_to_access(new_image_layout);
self.handles_lock.push(image.clone());
self.pipeline_barrier(
CommandBuffer::access_to_stage(src_access),
CommandBuffer::access_to_stage(dst_access),
0,
&[],
&[],
&[VkImageMemoryBarrier::new(
src_access,
dst_access,
image.image_layout(),
new_image_layout,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
image.vk_handle(),
subresource_range,
)],
);
image.set_image_layout(new_image_layout);
}
pub fn set_full_image_layout(&mut self, image: &Arc<Image>, new_image_layout: VkImageLayout) {
let src_access = Image::src_layout_to_access(image.image_layout());
let dst_access = Image::dst_layout_to_access(new_image_layout);
self.handles_lock.push(image.clone());
self.pipeline_barrier(
CommandBuffer::access_to_stage(src_access),
CommandBuffer::access_to_stage(dst_access),
0,
&[],
&[],
&[VkImageMemoryBarrier::new(
src_access,
dst_access,
image.image_layout(),
new_image_layout,
VK_QUEUE_FAMILY_IGNORED,
VK_QUEUE_FAMILY_IGNORED,
image.vk_handle(),
image.full_resource_range(),
)],
);
image.set_image_layout(new_image_layout);
}
    // TODO: the wrappers below are placeholders that panic via `unimplemented!`.
pub fn set_line_width(&self) {
unimplemented!();
}
pub fn set_depth_bias(&self) {
unimplemented!();
}
pub fn set_blend_constants(&self) {
unimplemented!();
}
pub fn set_depth_bounds(&self) {
unimplemented!();
}
pub fn set_stencil_compare_mask(&self) {
unimplemented!();
}
pub fn set_stencil_write_mask(&self) {
unimplemented!();
}
pub fn set_stencil_reference(&self) {
unimplemented!();
}
pub fn draw_indirect(&self) {
unimplemented!();
}
pub fn draw_indexed_indirect(&self) {
unimplemented!();
}
pub fn dispatch(&self, x: u32, y: u32, z: u32) {
self.calls.fetch_add(1, SeqCst);
self.device.cmd_dispatch(self.buffer, x, y, z);
}
pub fn dispatch_indirect(&self) {
unimplemented!();
}
pub fn copy_buffer<T: ReprC + Send + Sync + 'static, U: ReprC + Send + Sync + 'static>(
&mut self,
src_buffer: &Arc<Buffer<T>>,
dst_buffer: &Arc<Buffer<U>>,
regions: &[VkBufferCopy],
) {
self.calls.fetch_add(1, SeqCst);
self.handles_lock.push(src_buffer.clone());
self.handles_lock.push(dst_buffer.clone());
self.device.cmd_copy_buffer(
self.buffer,
src_buffer.vk_handle(),
dst_buffer.vk_handle(),
regions,
);
}
pub fn copy_image(
&mut self,
src_image: &Arc<Image>,
dst_image: &Arc<Image>,
src_layout: VkImageLayout,
dst_layout: VkImageLayout,
regions: &[VkImageCopy],
) {
self.calls.fetch_add(1, SeqCst);
self.handles_lock.push(src_image.clone());
self.handles_lock.push(dst_image.clone());
self.device.cmd_copy_image(
self.buffer,
src_image.vk_handle(),
src_layout,
dst_image.vk_handle(),
dst_layout,
regions,
);
}
pub fn blit_complete(
&mut self,
src_image: &Arc<Image>,
dst_image: &Arc<Image>,
filter: VkFilter,
) {
self.handles_lock.push(src_image.clone());
self.handles_lock.push(dst_image.clone());
let image_blit = VkImageBlit {
srcSubresource: src_image.full_resource_layers(),
srcOffsets: [
VkOffset3D { x: 0, y: 0, z: 0 },
VkOffset3D {
x: src_image.width() as i32,
y: src_image.height() as i32,
z: 1,
},
],
dstSubresource: dst_image.full_resource_layers(),
dstOffsets: [
VkOffset3D { x: 0, y: 0, z: 0 },
VkOffset3D {
x: dst_image.width() as i32,
y: dst_image.height() as i32,
z: 1,
},
],
};
self.blit_image(
src_image,
dst_image,
src_image.image_layout(),
dst_image.image_layout(),
&[image_blit],
filter,
);
}
pub fn blit_image(
&mut self,
src_image: &Arc<Image>,
dst_image: &Arc<Image>,
src_layout: VkImageLayout,
dst_layout: VkImageLayout,
regions: &[VkImageBlit],
filter: VkFilter,
) {
self.calls.fetch_add(1, SeqCst);
self.handles_lock.push(src_image.clone());
self.handles_lock.push(dst_image.clone());
self.device.cmd_blit_image(
self.buffer,
src_image.vk_handle(),
src_layout,
dst_image.vk_handle(),
dst_layout,
regions,
filter,
);
}
pub fn copy_buffer_to_image<T: ReprC + Send + Sync + 'static>(
&mut self,
src_buffer: &Arc<Buffer<T>>,
dst_image: &Arc<Image>,
image_layout: VkImageLayout,
regions: &[VkBufferImageCopy],
) {
self.calls.fetch_add(1, SeqCst);
self.handles_lock.push(src_buffer.clone());
self.handles_lock.push(dst_image.clone());
self.device.cmd_copy_buffer_to_image(
self.buffer,
src_buffer.vk_handle(),
dst_image.vk_handle(),
image_layout,
regions,
);
}
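    // Typical staging upload, sketched under the assumption that `recorder`
    // is an active `CommandBufferRecorder`, `staging: Arc<Buffer<u8>>` holds
    // the pixel data, `texture: Arc<Image>` is the target, and
    // `region: VkBufferImageCopy` has been filled out:
    //
    //     recorder.set_full_image_layout(&texture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
    //     recorder.copy_buffer_to_image(&staging, &texture, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &[region]);
    //     recorder.set_full_image_layout(&texture, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);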
pub fn copy_image_to_buffer<T: ReprC + Send + Sync + 'static>(
&mut self,
src_image: &Arc<Image>,
image_layout: VkImageLayout,
dst_buffer: &Arc<Buffer<T>>,
regions: &[VkBufferImageCopy],
) {
self.calls.fetch_add(1, SeqCst);
self.handles_lock.push(src_image.clone());
self.handles_lock.push(dst_buffer.clone());
self.device.cmd_copy_image_to_buffer(
self.buffer,
src_image.vk_handle(),
image_layout,
dst_buffer.vk_handle(),
regions,
)
}
pub fn update_buffer(&self) {
unimplemented!();
}
pub fn fill_buffer(&self) {
unimplemented!();
}
pub fn clear_color_image(&mut self, image: &Arc<Image>, clear_color: VkClearColorValue) {
self.calls.fetch_add(1, SeqCst);
self.handles_lock.push(image.clone());
self.device.cmd_clear_color_image(
self.buffer,
image.vk_handle(),
image.image_layout(),
clear_color,
&[image.full_resource_range()],
);
}
pub fn clear_depth_stencil_image(&self) {
unimplemented!();
}
pub fn clear_attachments(&self) {
unimplemented!();
}
pub fn resolve_image(
&mut self,
src_image: &Arc<Image>,
dst_image: &Arc<Image>,
regions: &[VkImageResolve],
) {
self.calls.fetch_add(1, SeqCst);
self.handles_lock.push(src_image.clone());
self.handles_lock.push(dst_image.clone());
self.device.cmd_resolve_image(
self.buffer,
src_image.vk_handle(),
src_image.image_layout(),
dst_image.vk_handle(),
dst_image.image_layout(),
regions,
);
}
pub fn set_event(&self) {
unimplemented!();
}
pub fn reset_event(&self) {
unimplemented!();
}
pub fn wait_events(&self) {
unimplemented!();
}
pub fn begin_query(&self) {
unimplemented!();
}
pub fn end_query(&self) {
unimplemented!();
}
pub fn reset_query_pool(&self) {
unimplemented!();
}
pub fn write_timestamp(
&mut self,
query_pool: &Arc<QueryPool>,
query: u32,
pipeline_stage: impl Into<VkPipelineStageFlagBits>,
) {
self.calls.fetch_add(1, SeqCst);
self.handles_lock.push(query_pool.clone());
self.device
.cmd_write_timestamp(self.buffer, pipeline_stage, query_pool.vk_handle(), query);
}
pub fn copy_query_pool_results(&self) {
unimplemented!();
}
}
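// Acceleration-structure and ray-tracing commands
// (VK_KHR_acceleration_structure / VK_KHR_ray_tracing_pipeline).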
impl<'a> CommandBufferRecorder<'a> {
pub fn build_acceleration_structure_indirect(
&mut self,
infos: &[VkAccelerationStructureBuildGeometryInfoKHR],
indirect_buffers: &[Arc<Buffer<impl ReprC + Send + Sync + 'static>>],
indirect_strides: &[u32],
max_primitive_counts: &[&u32],
) {
let mut device_addresses: Vec<VkDeviceAddress> = Vec::with_capacity(indirect_buffers.len());
for indirect_buffer in indirect_buffers.iter() {
self.handles_lock.push(indirect_buffer.clone());
device_addresses.push(indirect_buffer.device_address().into());
}
self.device.cmd_build_acceleration_structure_indirect(
self.buffer,
infos,
&device_addresses,
indirect_strides,
max_primitive_counts,
);
}
pub fn build_acceleration_structures(
&self,
infos: &[VkAccelerationStructureBuildGeometryInfoKHR],
range_infos: &[&[VkAccelerationStructureBuildRangeInfoKHR]],
) {
self.device
.cmd_build_acceleration_structures(self.buffer, infos, range_infos);
}
pub fn copy_acceleration_structure(
&mut self,
src: &Arc<AccelerationStructure>,
dst: &Arc<AccelerationStructure>,
mode: VkCopyAccelerationStructureModeKHR,
) {
self.handles_lock.push(src.clone());
self.handles_lock.push(dst.clone());
let info = VkCopyAccelerationStructureInfoKHR::new(src.vk_handle(), dst.vk_handle(), mode);
self.device
.cmd_copy_acceleration_structure(self.buffer, &info);
}
pub fn copy_acceleration_structure_to_memory(
&mut self,
src: &Arc<AccelerationStructure>,
dst: VkDeviceOrHostAddressKHR,
mode: VkCopyAccelerationStructureModeKHR,
) {
self.handles_lock.push(src.clone());
let info = VkCopyAccelerationStructureToMemoryInfoKHR::new(src.vk_handle(), dst, mode);
self.device
.cmd_copy_acceleration_structure_to_memory(self.buffer, &info);
}
pub fn copy_memory_to_acceleration_structure(
&mut self,
src: VkDeviceOrHostAddressConstKHR,
dst: &Arc<AccelerationStructure>,
mode: VkCopyAccelerationStructureModeKHR,
) {
self.handles_lock.push(dst.clone());
let info = VkCopyMemoryToAccelerationStructureInfoKHR::new(src, dst.vk_handle(), mode);
self.device
.cmd_copy_memory_to_acceleration_structure(self.buffer, &info);
}
pub fn trace_rays_indirect(
&mut self,
sbt: ShaderBindingTable,
buffer: Arc<Buffer<impl ReprC + Send + Sync + 'static>>,
) {
self.handles_lock.push(buffer.clone());
self.device.cmd_trace_rays_indirect(
self.buffer,
sbt.raygen_shader_binding_table(),
sbt.miss_shader_binding_table(),
sbt.hit_shader_binding_table(),
sbt.callable_shader_binding_table(),
buffer.device_address().into(),
)
}
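    /// Launches a `width` x `height` x `depth` ray dispatch using the region
    /// entries of `sbt`; a ray-tracing pipeline must be bound beforehand.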
pub fn trace_rays(&self, sbt: &ShaderBindingTable, width: u32, height: u32, depth: u32) {
self.device.cmd_trace_rays(
self.buffer,
sbt.raygen_shader_binding_table(),
sbt.miss_shader_binding_table(),
sbt.hit_shader_binding_table(),
sbt.callable_shader_binding_table(),
width,
height,
depth,
)
}
pub fn write_acceleration_structure_properties(
&mut self,
acceleration_structures: &[&Arc<AccelerationStructure>],
query_type: VkQueryType,
query_pool: &Arc<QueryPool>,
first_query: u32,
) {
self.handles_lock.push(query_pool.clone());
let as_handles: Vec<VkAccelerationStructureKHR> = acceleration_structures
.iter()
.map(|a| {
self.handles_lock.push((*a).clone());
a.vk_handle()
})
.collect();
self.device.cmd_write_acceleration_structure_properties(
self.buffer,
&as_handles,
query_type,
query_pool.vk_handle(),
first_query,
)
}
}
impl VulkanDevice for CommandBuffer {
fn device(&self) -> &Arc<Device> {
&self.device
}
}
impl Drop for CommandBuffer {
fn drop(&mut self) {
self.device
.free_command_buffers(self.pool.vk_handle(), &[self.buffer]);
}
}
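// NOTE: `vkEndCommandBuffer` can fail, and `Drop` cannot propagate errors,
// so a failure while ending the recording panics here.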
impl<'a> Drop for CommandBufferRecorder<'a> {
fn drop(&mut self) {
self.device.end_command_buffer(self.buffer).unwrap()
}
}
// ==========================================================================================
// ======================================== FFI =============================================
// ==========================================================================================
// use crate::{ffi::*, handle_ffi_result};
// #[no_mangle]
// pub extern "C" fn allocate_primary_buffer(
// flags: VkCommandPoolCreateFlagBits,
// device: *const Device,
// queue: *const Queue,
// ) -> *const CommandBuffer {
// handle_ffi_result!(CommandBuffer::new_primary()
// .set_flags(flags)
// .build(unsafe { Arc::from_raw(device) }, unsafe {
// Arc::from_raw(queue)
// }))
// }
// #[no_mangle]
// pub extern "C" fn allocate_secondary_buffer(
// flags: VkCommandPoolCreateFlagBits,
// device: *const Device,
// queue: *const Queue,
// ) -> *const CommandBuffer {
// handle_ffi_result!(CommandBuffer::new_secondary()
// .set_flags(flags)
// .build(unsafe { Arc::from_raw(device) }, unsafe {
// Arc::from_raw(queue)
// }))
// }