Initial commit

This commit is contained in:
hodasemi 2023-01-14 13:03:01 +01:00
commit 7070ff726b
470 changed files with 59921 additions and 0 deletions

2
.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
Cargo.lock
target/

7
.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,7 @@
{
"workbench.colorCustomizations": {
"activityBar.background": "#521B23",
"titleBar.activeBackground": "#722631",
"titleBar.activeForeground": "#FEFBFB"
}
}

8
Cargo.toml Normal file
View file

@ -0,0 +1,8 @@
[workspace]
members = [
"assetpath",
"vma-rs",
"vulkan-rs",
"vulkan-sys",
"library_loader",
]

7
assetpath/.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,7 @@
{
"workbench.colorCustomizations": {
"activityBar.background": "#1B2F41",
"titleBar.activeBackground": "#26425B",
"titleBar.activeForeground": "#F7FAFC"
}
}

10
assetpath/Cargo.toml Normal file
View file

@ -0,0 +1,10 @@
[package]
name = "assetpath"
version = "0.1.0"
authors = ["hodasemi <michaelh.95@t-online.de>"]
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
serde = { version = "1.0.152", features = ["derive"] }

133
assetpath/src/lib.rs Normal file
View file

@ -0,0 +1,133 @@
use serde::{Deserialize, Serialize};
use std::{fmt::Display, path::Path, str::FromStr};
/// A file path split into an optional, runtime-installed `prefix`
/// (e.g. an asset root directory) and a stored `path` relative to it.
#[derive(Default, Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)]
pub struct AssetPath {
    // Never serialized: the prefix is environment-specific and is expected
    // to be re-attached (set_prefix / assume_prefix_free) after deserializing.
    #[serde(skip)]
    prefix: Option<String>,
    path: String,
}
impl AssetPath {
    /// Normalizes a prefix so it always ends with a `/` separator
    /// (one is appended only when missing).
    fn check_prefix(prefix: &str) -> String {
        if prefix.ends_with('/') {
            prefix.to_string()
        } else {
            format!("{}/", prefix)
        }
    }
    /// Marks this path as intentionally prefix-free by installing an empty
    /// prefix, so `full_path` works without a real prefix.
    ///
    /// # Panics
    /// Panics if a prefix was already set.
    pub fn assume_prefix_free(&mut self) {
        assert!(self.prefix.is_none(), "Prefix already set!");
        self.prefix = Some(String::new());
    }
    /// Installs the (normalized) prefix.
    ///
    /// # Panics
    /// Panics if a prefix was already set.
    pub fn set_prefix(&mut self, prefix: &str) {
        assert!(self.prefix.is_none(), "Prefix already set!");
        self.prefix = Some(Self::check_prefix(prefix));
    }
    /// Returns `true` only for a non-empty prefix: an empty prefix
    /// (from `assume_prefix_free`) does not count as a real prefix.
    pub fn has_prefix(&self) -> bool {
        self.prefix.as_deref().map_or(false, |p| !p.is_empty())
    }
    /// The prefix, if one was set (possibly empty).
    pub fn prefix(&self) -> Option<&str> {
        // as_deref replaces the former as_ref().map(|s| s.as_str()).
        self.prefix.as_deref()
    }
    /// Replaces the path component; the prefix is untouched.
    pub fn set_path(&mut self, path: impl ToString) {
        self.path = path.to_string();
    }
    /// Concatenation of prefix and path.
    ///
    /// # Panics
    /// Panics if no prefix has been set.
    pub fn full_path(&self) -> String {
        // as_deref avoids cloning the prefix String just to format it.
        let prefix = self.prefix.as_deref().expect("Prefix must be set!");
        format!("{}{}", prefix, self.path)
    }
    /// The raw path component without any prefix applied.
    pub fn path_without_prefix(&self) -> &str {
        &self.path
    }
    /// `true` when the path component is empty (the prefix is ignored).
    pub fn is_empty(&self) -> bool {
        self.path.is_empty()
    }
    /// Whether the full (prefixed) path exists on the filesystem.
    ///
    /// # Panics
    /// Panics if no prefix has been set (via `full_path`).
    pub fn exists(&self) -> bool {
        Path::new(&self.full_path()).exists()
    }
}
impl Display for AssetPath {
    /// Shows only the path component; any prefix is deliberately omitted.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.path)
    }
}
impl FromStr for AssetPath {
    type Err = String;
    /// Infallible in practice: every string is a valid, prefix-less path.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(s.into())
    }
}
impl From<(&str, &str)> for AssetPath {
fn from((prefix, path): (&str, &str)) -> Self {
Self {
prefix: Some(Self::check_prefix(prefix)),
path: path.to_string(),
}
}
}
impl From<(String, &str)> for AssetPath {
fn from((prefix, path): (String, &str)) -> Self {
Self {
prefix: Some(Self::check_prefix(&prefix)),
path: path.to_string(),
}
}
}
impl From<(&str, String)> for AssetPath {
fn from((prefix, path): (&str, String)) -> Self {
Self {
prefix: Some(Self::check_prefix(prefix)),
path,
}
}
}
impl From<(String, String)> for AssetPath {
fn from((prefix, path): (String, String)) -> Self {
Self {
prefix: Some(Self::check_prefix(&prefix)),
path,
}
}
}
impl From<&str> for AssetPath {
    /// Prefix-less path from a borrowed string; delegates to the owned variant.
    fn from(path: &str) -> Self {
        Self::from(path.to_string())
    }
}
impl From<String> for AssetPath {
    /// Prefix-less path taking ownership of the string.
    fn from(path: String) -> Self {
        Self { prefix: None, path }
    }
}

7
library_loader/.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,7 @@
{
"workbench.colorCustomizations": {
"activityBar.background": "#382B16",
"titleBar.activeBackground": "#4F3D1F",
"titleBar.activeForeground": "#FDFBF9"
}
}

View file

@ -0,0 +1,8 @@
[package]
name = "library_loader"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]

View file

@ -0,0 +1 @@
pub mod macros;

View file

@ -0,0 +1,115 @@
/// Generates a struct of `extern "system"` function pointers resolved through
/// a caller-supplied lookup closure (e.g. `vkGetInstanceProcAddr`).
///
/// For each `name(args) -> ret` entry it emits:
/// * `PFN_name` — a function-pointer type alias,
/// * a public field `name` on `$struct_name`,
/// * an inline `unsafe fn name(...)` wrapper that calls through the pointer.
#[macro_export]
macro_rules! load_function_ptrs {
    ($struct_name: ident, { $($name: ident($($param_n: ident: $param_ty: ty),*) -> $ret: ty,)+ }) => (
        paste::item! {
            $(
                #[allow(non_camel_case_types)]
                pub type [<PFN_ $name>] = extern "system" fn($($param_ty),*) -> $ret;
            )+
            pub struct $struct_name {
                $(
                    pub $name: [<PFN_ $name>],
                )+
            }
            impl $struct_name {
                /// Resolves every entry via `f`; a null result is only logged,
                /// loading continues instead of aborting.
                pub fn load<F>(mut f: F) -> $struct_name
                    where F: FnMut(&std::ffi::CStr) -> *const std::os::raw::c_void
                {
                    $struct_name {
                        $(
                            $name: unsafe {
                                let dummy: *const std::ffi::c_void = std::ptr::null();
                                // SAFETY: the concat!-built literal is NUL-terminated by construction.
                                let name = std::ffi::CStr::from_bytes_with_nul_unchecked(concat!(stringify!($name), "\0").as_bytes());
                                let val = f(name);
                                if val.is_null() {
                                    println!("failed loading {}", stringify!($name));
                                    // NOTE(review): transmuting a null pointer into a
                                    // function pointer is undefined behavior in Rust
                                    // (fn pointers are non-null) — confirm; consider
                                    // Option<PFN_...> or a panicking stub instead.
                                    std::mem::transmute(dummy)
                                } else {
                                    std::mem::transmute(val)
                                }
                            },
                        )+
                    }
                }
                $(
                    /// Calls the loaded pointer; the caller must uphold the
                    /// underlying API's contract for this entry point.
                    #[inline]
                    pub unsafe fn $name(&self $(, $param_n: $param_ty)*) -> $ret {
                        let ptr = self.$name;
                        ptr($($param_n),*)
                    }
                )+
            }
        }
    )
}
/// Like `load_function_ptrs!`, but resolves the pointers from a dynamic
/// library (`$library`) opened via `shared_library` at load time.
///
/// The generated struct keeps the opened `DynamicLibrary` alive in `_lib`
/// so the resolved symbols stay valid for the struct's lifetime.
#[macro_export]
macro_rules! load_function_ptrs_from_lib {
    ($struct_name: ident, $library: expr, { $($name: ident($($param_n: ident: $param_ty: ty),*) -> $ret: ty,)+ }) => (
        paste::item! {
            $(
                #[allow(non_camel_case_types)]
                pub type [<PFN_ $name>] = extern "system" fn($($param_ty),*) -> $ret;
            )+
            pub struct $struct_name {
                // Keeps the shared library loaded; dropping it would
                // invalidate every function pointer below.
                pub _lib: Option<shared_library::dynamic_library::DynamicLibrary>,
                $(
                    pub $name: [<PFN_ $name>],
                )+
            }
            impl $struct_name {
                /// Opens `$library` and resolves every entry point.
                ///
                /// # Errors
                /// Fails if the library cannot be opened. A symbol that fails
                /// to resolve panics (see `unwrap_or_else` below).
                pub fn load() -> anyhow::Result<$struct_name> {
                    let lib = match shared_library::dynamic_library::DynamicLibrary::open(Some(std::path::Path::new($library))) {
                        Ok(lib) => lib,
                        Err(err) => {
                            return Err(anyhow::Error::msg(format!(
                                "Failed loading library ({}): {}",
                                $library,
                                err
                            )))
                        }
                    };
                    Ok($struct_name {
                        $(
                            $name: unsafe {
                                // Panicking fallback stub, installed when the symbol is null.
                                extern "system" fn $name($(_: $param_ty),*) { panic!("function pointer `{}` not loaded", stringify!($name)) }
                                // SAFETY: the concat!-built literal is NUL-terminated by construction.
                                let name = std::ffi::CStr::from_bytes_with_nul_unchecked(concat!(stringify!($name), "\0").as_bytes());
                                let val: *const std::os::raw::c_void = {
                                    let str_name = name.to_str().expect("can't convert CStr");
                                    lib.symbol(str_name)
                                        .unwrap_or_else(|_| panic!("failed getting {}", str_name))
                                };
                                if val.is_null() {
                                    println!("failed loading {}", stringify!($name));
                                    // NOTE(review): the stub returns `()` regardless of $ret;
                                    // transmuting it to a pointer with a different signature is
                                    // only sound if that pointer is never called — confirm.
                                    std::mem::transmute($name as *const ())
                                } else {
                                    std::mem::transmute(val)
                                }
                            },
                        )+
                        _lib: Some(lib),
                    })
                }
                $(
                    /// Calls the resolved pointer for this entry point.
                    #[inline]
                    pub unsafe fn $name(&self $(, $param_n: $param_ty)*) -> $ret {
                        let ptr = self.$name;
                        ptr($($param_n),*)
                    }
                )+
            }
        }
    )
}

15
vma-rs/Cargo.toml Normal file
View file

@ -0,0 +1,15 @@
[package]
name = "vma-rs"
version = "0.1.0"
authors = ["hodasemi <michaelh.95@t-online.de>"]
edition = "2021"
build = "build.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
vulkan-sys = { path = "../vulkan-sys" }
anyhow = { version = "1.0.68", features = ["backtrace"] }
[build-dependencies]
cc = "1.0.78"

76
vma-rs/build.rs Normal file
View file

@ -0,0 +1,76 @@
use cc;
use std::env;
/// Build script: compiles the bundled Vulkan Memory Allocator C++ sources
/// into the static library `vma_cpp`.
fn main() {
    let mut build = cc::Build::new();
    build.include("vma_source");
    // We want to use our own loader, instead of requiring us to link
    // in vulkan.dll/.dylib in addition. This is especially important
    // for MoltenVK, where there is no default installation path, unlike
    // Linux (pkg-config) and Windows (VULKAN_SDK environment variable).
    build.define("VMA_STATIC_VULKAN_FUNCTIONS", "0");
    build.define("VMA_VULKAN_VERSION", "1002000");
    build.define("VMA_DYNAMIC_VULKAN_FUNCTIONS", "0");
    // TODO: Add some configuration options under crate features
    //#define VMA_HEAVY_ASSERT(expr) assert(expr)
    //#define VMA_USE_STL_CONTAINERS 1
    //#define VMA_DEDICATED_ALLOCATION 0
    //#define VMA_DEBUG_MARGIN 16
    //#define VMA_DEBUG_DETECT_CORRUPTION 1
    //#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
    //#define VMA_RECORDING_ENABLED 0
    //#define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY 256
    let source_files = ["vma_lib/vma_lib.cpp"];
    for source_file in &source_files {
        build.file(source_file);
    }
    build.cpp(true);
    // Cargo guarantees TARGET for build scripts; a missing value is a bug.
    let target = env::var("TARGET").expect("cargo always sets TARGET for build scripts");
    // don't assume vulkan headers to be installed, thus import them
    build.include(".");
    // The same GCC/Clang-style flag set was previously copy-pasted three
    // times (darwin / linux / windows-gnu). Apply it once for every
    // GCC-compatible target and only vary which C++ runtime is linked.
    let is_darwin = target.contains("darwin");
    let is_gnu_compatible = is_darwin
        || target.contains("linux")
        || (target.contains("windows") && target.contains("gnu"));
    if is_gnu_compatible {
        build
            .flag("-std=c++11")
            .flag("-Wno-missing-field-initializers")
            .flag("-Wno-unused-variable")
            .flag("-Wno-unused-parameter")
            .flag("-Wno-unused-private-field")
            .flag("-Wno-reorder")
            .flag("-Wno-type-limits");
        if is_darwin {
            // macOS uses libc++ (and needs -stdlib=c++ at compile time too).
            build.cpp_link_stdlib("c++").cpp_set_stdlib("c++");
        } else {
            // Linux and windows-gnu link against libstdc++.
            build.cpp_link_stdlib("stdc++");
        }
    }
    build.compile("vma_cpp");
}

470
vma-rs/src/allocation.rs Normal file
View file

@ -0,0 +1,470 @@
use crate::vma_bindings::*;
use anyhow::Result;
use vulkan_sys::prelude::*;
use crate::allocator_pool::AllocatorPool;
use std::mem::MaybeUninit;
use std::ptr;
use std::slice;
/// Builder for a single VMA allocation; collects the fields of
/// `VmaAllocationCreateInfo` before one of the [`Build`] impls performs
/// the actual allocation.
#[derive(Debug, Clone)]
pub struct AllocationBuilder {
    allocator: VmaAllocator,
    flags: VmaAllocationCreateFlags,
    usage: VmaMemoryUsage,
    required_flags: VkMemoryPropertyFlagBits,
    preferred_flags: VkMemoryPropertyFlagBits,
    // Bitmask of acceptable memory type indices; starts at 0 (per VMA docs
    // 0 is treated as "no restriction" — confirm against the VMA reference).
    memory_type_bits: u32,
    priority: f32,
    // Optional custom pool to allocate from instead of the default pools.
    pool: Option<AllocatorPool>,
}
/// Finalizes an [`AllocationBuilder`]. `T` selects what the memory is
/// allocated for: raw requirements, an existing buffer/image, or a
/// create-info that also creates the Vulkan object itself.
pub trait Build<T> {
    fn build(self, argument: T) -> Result<Allocation>;
}
impl AllocationBuilder {
    /// Sets the VMA allocation-create flags.
    pub fn set_flags(self, flags: impl Into<VmaAllocationCreateFlags>) -> Self {
        Self { flags: flags.into(), ..self }
    }
    /// Sets the intended memory usage class.
    pub fn set_usage(self, usage: VmaMemoryUsage) -> Self {
        Self { usage, ..self }
    }
    /// Sets the allocation priority.
    pub fn priority(self, priority: f32) -> Self {
        Self { priority, ..self }
    }
    /// Memory property flags the allocation must have.
    pub fn set_required_flags(
        self,
        required_flags: impl Into<VkMemoryPropertyFlagBits>,
    ) -> Self {
        Self { required_flags: required_flags.into(), ..self }
    }
    /// Memory property flags the allocation should preferably have.
    pub fn set_preferred_flags(
        self,
        preferred_flags: impl Into<VkMemoryPropertyFlagBits>,
    ) -> Self {
        Self { preferred_flags: preferred_flags.into(), ..self }
    }
    /// Restricts the bitmask of acceptable memory type indices.
    pub fn set_memory_type_bits(self, memory_type_bits: u32) -> Self {
        Self { memory_type_bits, ..self }
    }
    /// Allocates from the given custom pool instead of the default ones.
    pub fn set_pool(self, pool: AllocatorPool) -> Self {
        Self { pool: Some(pool), ..self }
    }
    /// Assembles the C create-info struct from the collected settings.
    fn vma_allocation_create_info(&self) -> VmaAllocationCreateInfo {
        let mut info = VmaAllocationCreateInfo::new(
            self.flags,
            self.usage,
            self.required_flags,
            self.preferred_flags,
            self.memory_type_bits,
            self.priority,
        );
        if let Some(pool) = &self.pool {
            info.set_pool(pool.pool());
        }
        info
    }
}
/// Allocates raw memory matching explicit `VkMemoryRequirements`;
/// no buffer/image is created or bound.
impl Build<&VkMemoryRequirements> for AllocationBuilder {
    fn build(self, memory_requirements: &VkMemoryRequirements) -> Result<Allocation> {
        let create_info = self.vma_allocation_create_info();
        let mut allocation = MaybeUninit::uninit();
        let mut allocation_info = MaybeUninit::uninit();
        // SAFETY(review): assumes vmaAllocateMemory initializes both out
        // parameters on VK_SUCCESS; they are only read in that branch.
        let result = unsafe {
            vmaAllocateMemory(
                self.allocator,
                memory_requirements,
                &create_info,
                allocation.as_mut_ptr(),
                allocation_info.as_mut_ptr(),
            )
        };
        if result == VK_SUCCESS {
            Ok(Allocation::new(
                self.allocator,
                unsafe { allocation.assume_init() },
                AllocationType::MemoryOnly,
                unsafe { allocation_info.assume_init() },
            ))
        } else {
            // NOTE(review): message says "Buffer" although this path is
            // generic memory allocation — consider rewording.
            Err(anyhow::Error::msg(format!(
                "Failed allocating memory for Buffer: {:?}",
                result
            )))
        }
    }
}
/// Allocates memory suited for an existing `VkBuffer`, then binds it.
impl Build<VkBuffer> for AllocationBuilder {
    fn build(self, buffer: VkBuffer) -> Result<Allocation> {
        let create_info = self.vma_allocation_create_info();
        let mut allocation = MaybeUninit::uninit();
        let mut allocation_info = MaybeUninit::uninit();
        let result = unsafe {
            vmaAllocateMemoryForBuffer(
                self.allocator,
                buffer,
                &create_info,
                allocation.as_mut_ptr(),
                allocation_info.as_mut_ptr(),
            )
        };
        if result == VK_SUCCESS {
            // Type is MemoryOnly so Drop frees memory without destroying the
            // caller-owned buffer; binding happens explicitly below.
            let mut allocation = Allocation::new(
                self.allocator,
                unsafe { allocation.assume_init() },
                AllocationType::MemoryOnly,
                unsafe { allocation_info.assume_init() },
            );
            allocation.bind_buffer_memory(buffer)?;
            Ok(allocation)
        } else {
            Err(anyhow::Error::msg(format!(
                "Failed allocating memory for Buffer: {:?}",
                result
            )))
        }
    }
}
/// Allocates memory suited for an existing `VkImage`, then binds it.
impl Build<VkImage> for AllocationBuilder {
    fn build(self, image: VkImage) -> Result<Allocation> {
        let create_info = self.vma_allocation_create_info();
        let mut allocation = MaybeUninit::uninit();
        let mut allocation_info = MaybeUninit::uninit();
        let result = unsafe {
            vmaAllocateMemoryForImage(
                self.allocator,
                image,
                &create_info,
                allocation.as_mut_ptr(),
                allocation_info.as_mut_ptr(),
            )
        };
        if result == VK_SUCCESS {
            // MemoryOnly: the caller keeps ownership of the image itself.
            let mut allocation = Allocation::new(
                self.allocator,
                unsafe { allocation.assume_init() },
                AllocationType::MemoryOnly,
                unsafe { allocation_info.assume_init() },
            );
            allocation.bind_image_memory(image)?;
            Ok(allocation)
        } else {
            Err(anyhow::Error::msg(format!(
                "Failed allocating memory for Image: {:?}",
                result
            )))
        }
    }
}
/// Creates a `VkImage` AND its backing memory in one call; the resulting
/// allocation owns the image and destroys it on drop.
impl Build<&VkImageCreateInfo> for AllocationBuilder {
    fn build(self, image_create_info: &VkImageCreateInfo) -> Result<Allocation> {
        let create_info = self.vma_allocation_create_info();
        let mut image = MaybeUninit::uninit();
        let mut allocation = MaybeUninit::uninit();
        let mut allocation_info = MaybeUninit::uninit();
        let result = unsafe {
            vmaCreateImage(
                self.allocator,
                image_create_info,
                &create_info,
                image.as_mut_ptr(),
                allocation.as_mut_ptr(),
                allocation_info.as_mut_ptr(),
            )
        };
        if result == VK_SUCCESS {
            Ok(Allocation {
                allocator: self.allocator,
                allocation: unsafe { allocation.assume_init() },
                allocation_type: AllocationType::Image(unsafe { image.assume_init() }),
                allocation_info: unsafe { allocation_info.assume_init() },
            })
        } else {
            Err(anyhow::Error::msg(format!(
                "Failed creating Image and allocating memory for Image: {:?}",
                result
            )))
        }
    }
}
/// Creates a `VkBuffer` AND its backing memory in one call; the resulting
/// allocation owns the buffer and destroys it on drop.
impl Build<&VkBufferCreateInfo> for AllocationBuilder {
    fn build(self, buffer_create_info: &VkBufferCreateInfo) -> Result<Allocation> {
        let create_info = self.vma_allocation_create_info();
        let mut buffer = MaybeUninit::uninit();
        let mut allocation = MaybeUninit::uninit();
        let mut allocation_info = MaybeUninit::uninit();
        let result = unsafe {
            vmaCreateBuffer(
                self.allocator,
                buffer_create_info,
                &create_info,
                buffer.as_mut_ptr(),
                allocation.as_mut_ptr(),
                allocation_info.as_mut_ptr(),
            )
        };
        if result == VK_SUCCESS {
            Ok(Allocation {
                allocator: self.allocator,
                allocation: unsafe { allocation.assume_init() },
                allocation_type: AllocationType::Buffer(unsafe { buffer.assume_init() }),
                allocation_info: unsafe { allocation_info.assume_init() },
            })
        } else {
            Err(anyhow::Error::msg(format!(
                "Failed creating Buffer and allocating memory for Buffer: {:?}",
                result
            )))
        }
    }
}
impl AllocationBuilder {
    /// Fresh builder with neutral defaults; reached via `Allocator::allocate`.
    pub(crate) fn new(allocator: VmaAllocator) -> Self {
        Self {
            allocator,
            pool: None,
            flags: VmaAllocationCreateFlags::default(),
            usage: VmaMemoryUsage::VMA_MEMORY_USAGE_UNKNOWN,
            required_flags: VkMemoryPropertyFlagBits::default(),
            preferred_flags: VkMemoryPropertyFlagBits::default(),
            memory_type_bits: 0,
            priority: 0.0,
        }
    }
}
/// What the allocated memory is tied to; decides how `Drop` releases it.
#[derive(Debug, Clone)]
enum AllocationType {
    Buffer(VkBuffer),
    Image(VkImage),
    MemoryOnly,
}
/// An owned VMA allocation, optionally together with the buffer or image it
/// was created with; freed (and the object destroyed) on drop.
///
/// NOTE(review): deriving `Clone` on a type whose `Drop` frees raw handles
/// makes a double free possible when two clones are dropped — verify the
/// intended usage or consider removing `Clone` / reference-counting.
#[derive(Debug, Clone)]
pub struct Allocation {
    allocator: VmaAllocator,
    allocation: VmaAllocation,
    allocation_type: AllocationType,
    allocation_info: VmaAllocationInfo,
}
// SAFETY(review): the fields are raw VMA/Vulkan handles (plain pointers);
// this assumes callers uphold VMA's external synchronization rules — confirm.
unsafe impl Send for Allocation {}
unsafe impl Sync for Allocation {}
impl Allocation {
    /// Wraps raw VMA handles into an RAII allocation object.
    fn new(
        allocator: VmaAllocator,
        allocation: VmaAllocation,
        allocation_type: AllocationType,
        allocation_info: VmaAllocationInfo,
    ) -> Self {
        Self {
            allocator,
            allocation,
            allocation_type,
            allocation_info,
        }
    }
    /// Index of the Vulkan memory type this allocation came from.
    pub fn memory_type_index(&self) -> u32 {
        self.allocation_info.memoryType
    }
    /// The underlying `VkDeviceMemory` handle (owned by VMA; do not free).
    pub fn device_memory(&self) -> VkDeviceMemory {
        self.allocation_info.deviceMemory
    }
    /// Byte offset of this allocation within its `VkDeviceMemory` block.
    pub fn offset(&self) -> VkDeviceSize {
        self.allocation_info.offset
    }
    /// Size of the allocation in bytes.
    pub fn size(&self) -> VkDeviceSize {
        self.allocation_info.size
    }
    /// Maps the memory and exposes it as a typed slice of `length` elements;
    /// the returned guard unmaps on drop.
    ///
    /// # Errors
    /// Fails when `vmaMapMemory` returns anything but `VK_SUCCESS`.
    pub fn map<T: Clone>(&self, length: VkDeviceSize) -> Result<VkMappedMemory<'_, T>> {
        let mut data = MaybeUninit::uninit();
        let result = unsafe { vmaMapMemory(self.allocator, self.allocation, data.as_mut_ptr()) };
        if result == VK_SUCCESS {
            // SAFETY: `data` is only initialized by vmaMapMemory on success,
            // so it must not be read before this check. (The previous version
            // called assume_init() unconditionally — UB on the error path.)
            let slice = unsafe {
                slice::from_raw_parts_mut(data.assume_init() as *mut T, length as usize)
            };
            let mut mapped_memory = VkMappedMemory::new(slice);
            let allocation = self.allocation;
            let allocator = self.allocator;
            mapped_memory.set_unmap(move || unsafe { vmaUnmapMemory(allocator, allocation) });
            Ok(mapped_memory)
        } else {
            Err(anyhow::Error::msg(format!(
                "Mapping VkMemory failed: {:?}",
                result
            )))
        }
    }
    /// The buffer this allocation was created with.
    ///
    /// # Panics
    /// Panics if the allocation holds an image or bare memory.
    pub fn buffer(&self) -> VkBuffer {
        match self.allocation_type {
            AllocationType::Buffer(buffer) => buffer,
            // Messages were previously swapped between buffer() and image().
            AllocationType::Image(_) => panic!("Allocation is a VkImage, not a VkBuffer"),
            AllocationType::MemoryOnly => panic!("Allocation is memory only"),
        }
    }
    /// The image this allocation was created with.
    ///
    /// # Panics
    /// Panics if the allocation holds a buffer or bare memory.
    pub fn image(&self) -> VkImage {
        match self.allocation_type {
            AllocationType::Buffer(_) => panic!("Allocation is a VkBuffer, not a VkImage"),
            AllocationType::Image(image) => image,
            AllocationType::MemoryOnly => panic!("Allocation is memory only"),
        }
    }
    /// Binds a caller-owned buffer to this (memory-only) allocation.
    ///
    /// # Errors
    /// Fails when `vmaBindBufferMemory` does not return `VK_SUCCESS`.
    pub fn bind_buffer_memory(&mut self, buffer: VkBuffer) -> Result<()> {
        // Debug-only guard: binding twice indicates a logic error upstream.
        #[cfg(debug_assertions)]
        {
            match self.allocation_type {
                AllocationType::Buffer(_) => panic!("allocation already bound to buffer"),
                AllocationType::Image(_) => panic!("allocation already bound to image"),
                AllocationType::MemoryOnly => (),
            }
        }
        let result = unsafe { vmaBindBufferMemory(self.allocator, self.allocation, buffer) };
        if result == VK_SUCCESS {
            Ok(())
        } else {
            Err(anyhow::Error::msg(format!(
                "Failed binding Buffer to memory: {:?}",
                result
            )))
        }
    }
    /// Binds a caller-owned image to this (memory-only) allocation.
    ///
    /// # Errors
    /// Fails when `vmaBindImageMemory` does not return `VK_SUCCESS`.
    pub fn bind_image_memory(&mut self, image: VkImage) -> Result<()> {
        // Debug-only guard: binding twice indicates a logic error upstream.
        #[cfg(debug_assertions)]
        {
            match self.allocation_type {
                AllocationType::Buffer(_) => panic!("allocation already bound to buffer"),
                AllocationType::Image(_) => panic!("allocation already bound to image"),
                AllocationType::MemoryOnly => (),
            }
        }
        let result = unsafe { vmaBindImageMemory(self.allocator, self.allocation, image) };
        if result == VK_SUCCESS {
            Ok(())
        } else {
            Err(anyhow::Error::msg(format!(
                "Failed binding Image to memory: {:?}",
                result
            )))
        }
    }
}
impl Drop for Allocation {
    /// Frees the memory and, when this allocation owns the Vulkan object
    /// (created through vmaCreateBuffer/vmaCreateImage), destroys it too.
    ///
    /// NOTE(review): `Allocation` derives `Clone`; dropping both the original
    /// and a clone runs this destructor twice on the same raw handles —
    /// confirm clones are never dropped independently.
    fn drop(&mut self) {
        match self.allocation_type {
            AllocationType::Buffer(buffer) => unsafe {
                vmaDestroyBuffer(self.allocator, buffer, self.allocation)
            },
            AllocationType::Image(image) => unsafe {
                vmaDestroyImage(self.allocator, image, self.allocation)
            },
            AllocationType::MemoryOnly => unsafe { vmaFreeMemory(self.allocator, self.allocation) },
        }
    }
}
impl VmaAllocationCreateInfo {
    /// Convenience constructor for the C struct; `pool` and `pUserData`
    /// start as null and can be set afterwards.
    pub fn new(
        flags: impl Into<VmaAllocationCreateFlags>,
        usage: VmaMemoryUsage,
        required_flags: VkMemoryPropertyFlagBits,
        preferred_flags: VkMemoryPropertyFlagBits,
        memory_type_bits: u32,
        priority: f32,
    ) -> Self {
        VmaAllocationCreateInfo {
            flags: flags.into(),
            usage,
            requiredFlags: required_flags,
            preferredFlags: preferred_flags,
            memoryTypeBits: memory_type_bits,
            pool: ptr::null_mut(),
            pUserData: ptr::null_mut(),
            priority,
        }
    }
    /// Targets a custom VMA pool instead of the default pools.
    pub fn set_pool(&mut self, pool: VmaPool) {
        self.pool = pool;
    }
    // pub fn set_user_data<T>(&mut self, data: &mut T) {
    //     self.pUserData = data as *mut T as *mut _
    // }
}

171
vma-rs/src/allocator.rs Normal file
View file

@ -0,0 +1,171 @@
use crate::vma_bindings::*;
use anyhow::Result;
use vulkan_sys::prelude::*;
use crate::allocation::AllocationBuilder;
use std::mem::MaybeUninit;
use std::ptr;
/// Builder for the global VMA [`Allocator`]; mirrors the optional fields of
/// `VmaAllocatorCreateInfo` and keeps them alive until `build` is called
/// (the create-info holds raw pointers into these fields).
pub struct AllocatorBuilder {
    flags: VmaAllocatorCreateFlags,
    // 0 is passed through unchanged; presumably VMA treats 0 as
    // "use default block size" — confirm against the VMA docs.
    preferred_large_heap_block_size: VkDeviceSize,
    allocation_callbacks: Option<VkAllocationCallbacks>,
    device_memory_callbacks: Option<VmaDeviceMemoryCallbacks>,
    frame_in_use_count: u32,
    // Empty means "no limits": a null pointer is handed to VMA in build().
    heap_size_limits: Vec<VkDeviceSize>,
    vulkan_functions: Option<VmaVulkanFunctions>,
    record_settings: Option<VmaRecordSettings>,
}
impl AllocatorBuilder {
    /// Sets the `VmaAllocatorCreateFlags`.
    pub fn set_flags(mut self, flags: VmaAllocatorCreateFlags) -> Self {
        // `flags` already has the target type; the former `.into()` was a no-op.
        self.flags = flags;
        self
    }
    /// Overrides the preferred size of large heap blocks (0 = VMA default).
    pub fn set_preferred_large_heap_block_size(mut self, size: VkDeviceSize) -> Self {
        self.preferred_large_heap_block_size = size;
        self
    }
    /// Installs host allocation callbacks.
    pub fn set_allocation_callbacks(mut self, allocation_callbacks: VkAllocationCallbacks) -> Self {
        self.allocation_callbacks = Some(allocation_callbacks);
        self
    }
    /// Installs callbacks invoked around device memory allocation/free.
    pub fn set_device_memory_callbacks(
        mut self,
        device_memory_callbacks: VmaDeviceMemoryCallbacks,
    ) -> Self {
        self.device_memory_callbacks = Some(device_memory_callbacks);
        self
    }
    /// Sets `frameInUseCount` forwarded to VMA.
    pub fn set_frame_in_use_count(mut self, use_count: u32) -> Self {
        self.frame_in_use_count = use_count;
        self
    }
    /// Sets per-heap size limits; an empty vector is forwarded as null.
    pub fn set_heap_size_limits(mut self, heap_size_limits: Vec<VkDeviceSize>) -> Self {
        self.heap_size_limits = heap_size_limits;
        self
    }
    /// Supplies the Vulkan function pointers VMA should call through.
    pub fn set_vulkan_functions(mut self, vulkan_functions: VmaVulkanFunctions) -> Self {
        self.vulkan_functions = Some(vulkan_functions);
        self
    }
    /// Enables VMA's recording feature.
    pub fn set_record_settings(mut self, record_settings: VmaRecordSettings) -> Self {
        self.record_settings = Some(record_settings);
        self
    }
    /// Creates the `VmaAllocator`.
    ///
    /// All optional fields are passed as raw pointers into `self`, which
    /// stays alive until after `vmaCreateAllocator` returns, keeping the
    /// pointers valid for the duration of the call.
    ///
    /// # Errors
    /// Fails when `vmaCreateAllocator` returns anything but `VK_SUCCESS`.
    pub fn build(
        self,
        instance: VkInstance,
        device: VkDevice,
        physical_device: VkPhysicalDevice,
        vulkan_api_version: u32,
    ) -> Result<Allocator> {
        let allocator_create_info = VmaAllocatorCreateInfo {
            flags: self.flags,
            physicalDevice: physical_device,
            instance,
            device,
            vulkanApiVersion: vulkan_api_version,
            preferredLargeHeapBlockSize: self.preferred_large_heap_block_size,
            pAllocationCallbacks: match &self.allocation_callbacks {
                Some(callbacks) => callbacks as *const _,
                None => ptr::null(),
            },
            pDeviceMemoryCallbacks: match &self.device_memory_callbacks {
                Some(callbacks) => callbacks as *const _,
                None => ptr::null(),
            },
            frameInUseCount: self.frame_in_use_count,
            pHeapSizeLimit: if self.heap_size_limits.is_empty() {
                ptr::null()
            } else {
                self.heap_size_limits.as_ptr()
            },
            pVulkanFunctions: match &self.vulkan_functions {
                Some(functions) => functions as *const _,
                None => ptr::null(),
            },
            pRecordSettings: match &self.record_settings {
                Some(settings) => settings as *const _,
                None => ptr::null(),
            },
        };
        let mut allocator = MaybeUninit::uninit();
        // SAFETY(review): assumes vmaCreateAllocator initializes `allocator`
        // only on success; it is read exclusively in the success branch.
        let result = unsafe { vmaCreateAllocator(&allocator_create_info, allocator.as_mut_ptr()) };
        if result == VK_SUCCESS {
            Ok(Allocator {
                allocator: unsafe { allocator.assume_init() },
            })
        } else {
            Err(anyhow::Error::msg(format!(
                "Failed creating memory allocator: {:?}",
                result
            )))
        }
    }
}
impl Default for AllocatorBuilder {
    /// All-neutral starting configuration: no flags, no callbacks, no heap
    /// limits, no custom function table, and 0 ("default") block size.
    fn default() -> Self {
        AllocatorBuilder {
            flags: 0,
            preferred_large_heap_block_size: 0,
            allocation_callbacks: None,
            device_memory_callbacks: None,
            frame_in_use_count: 0,
            heap_size_limits: Vec::new(),
            vulkan_functions: None,
            record_settings: None,
        }
    }
}
/// Thin handle around a raw `VmaAllocator`; cloning copies the handle.
///
/// NOTE(review): there is no `Drop` impl, so `vmaDestroyAllocator` is never
/// called — confirm whether the allocator is meant to live for the whole
/// process or a destructor is missing.
#[derive(Debug, Clone)]
pub struct Allocator {
    allocator: VmaAllocator,
}
impl Allocator {
    /// Starts building a single allocation from this allocator.
    pub fn allocate(&self) -> AllocationBuilder {
        AllocationBuilder::new(self.allocator)
    }
    /// Aggregated memory statistics computed by VMA.
    pub fn statistics(&self) -> VmaStats {
        let mut stats = MaybeUninit::uninit();
        // SAFETY(review): assumes vmaCalculateStats fully initializes
        // `stats` before returning — per the VMA API contract.
        unsafe {
            vmaCalculateStats(self.allocator, stats.as_mut_ptr());
            stats.assume_init()
        }
    }
}
// SAFETY(review): `VmaAllocator` is a raw handle; this assumes callers
// uphold VMA's external synchronization requirements — confirm.
unsafe impl Send for Allocator {}
unsafe impl Sync for Allocator {}
impl Allocator {
    /// Entry point: `Allocator::builder().build(...)`.
    pub fn builder() -> AllocatorBuilder {
        AllocatorBuilder::default()
    }
}

View file

@ -0,0 +1,15 @@
use crate::vma_bindings::*;
/// Wrapper around a VMA custom-pool handle.
#[derive(Debug, Clone)]
pub struct AllocatorPool {
    pool: VmaPool,
}
// SAFETY(review): `VmaPool` is a raw handle; this assumes pool usage follows
// VMA's external synchronization rules — confirm.
unsafe impl Send for AllocatorPool {}
unsafe impl Sync for AllocatorPool {}
impl AllocatorPool {
    /// The raw handle, for embedding into `VmaAllocationCreateInfo`.
    pub(crate) fn pool(&self) -> VmaPool {
        self.pool
    }
}

7
vma-rs/src/lib.rs Normal file
View file

@ -0,0 +1,7 @@
// Raw FFI bindings to the Vulkan Memory Allocator C API (crate-private).
mod vma_bindings;
pub mod allocation;
pub mod allocator;
pub mod allocator_pool;
pub mod prelude;

6
vma-rs/src/prelude.rs Normal file
View file

@ -0,0 +1,6 @@
// Convenience re-exports: `use vma_rs::prelude::*` pulls in the allocation
// and allocator APIs plus the binding types callers commonly need.
pub use crate::allocation::*;
pub use crate::allocator::*;
pub use crate::vma_bindings::{
    VmaAllocatorCreateFlagBits::*, VmaMemoryUsage, VmaMemoryUsage::*, VmaStats, VmaVulkanFunctions,
};

2202
vma-rs/src/vma_bindings.rs Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,2 @@
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"

File diff suppressed because it is too large Load diff

183
vma-rs/vulkan/vk_icd.h Normal file
View file

@ -0,0 +1,183 @@
//
// File: vk_icd.h
//
/*
* Copyright (c) 2015-2016 The Khronos Group Inc.
* Copyright (c) 2015-2016 Valve Corporation
* Copyright (c) 2015-2016 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
#ifndef VKICD_H
#define VKICD_H
#include "vulkan.h"
#include <stdbool.h>
// Loader-ICD version negotiation API. Versions add the following features:
// Version 0 - Initial. Doesn't support vk_icdGetInstanceProcAddr
// or vk_icdNegotiateLoaderICDInterfaceVersion.
// Version 1 - Add support for vk_icdGetInstanceProcAddr.
// Version 2 - Add Loader/ICD Interface version negotiation
// via vk_icdNegotiateLoaderICDInterfaceVersion.
// Version 3 - Add ICD creation/destruction of KHR_surface objects.
// Version 4 - Add unknown physical device extension querying via
// vk_icdGetPhysicalDeviceProcAddr.
// Version 5 - Tells ICDs that the loader is now paying attention to the
// application version of Vulkan passed into the ApplicationInfo
// structure during vkCreateInstance. This will tell the ICD
// that if the loader is older, it should automatically fail a
// call for any API version > 1.0. Otherwise, the loader will
// manually determine if it can support the expected version.
#define CURRENT_LOADER_ICD_INTERFACE_VERSION 5
#define MIN_SUPPORTED_LOADER_ICD_INTERFACE_VERSION 0
#define MIN_PHYS_DEV_EXTENSION_ICD_INTERFACE_VERSION 4
typedef VkResult(VKAPI_PTR *PFN_vkNegotiateLoaderICDInterfaceVersion)(uint32_t *pVersion);
// This is defined in vk_layer.h which will be found by the loader, but if an ICD is building against this
// file directly, it won't be found.
#ifndef PFN_GetPhysicalDeviceProcAddr
typedef PFN_vkVoidFunction(VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char *pName);
#endif
/*
* The ICD must reserve space for a pointer for the loader's dispatch
* table, at the start of <each object>.
* The ICD must initialize this variable using the SET_LOADER_MAGIC_VALUE macro.
*/
#define ICD_LOADER_MAGIC 0x01CDC0DE
typedef union {
    uintptr_t loaderMagic;
    void *loaderData;
} VK_LOADER_DATA;
/* Stamps the loader magic value into a freshly created dispatchable object. */
static inline void set_loader_magic_value(void *pNewObject) {
    VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject;
    loader_info->loaderMagic = ICD_LOADER_MAGIC;
}
/* Checks whether an object carries the loader magic; only the low 32 bits
 * are compared since uintptr_t may be wider than the magic constant. */
static inline bool valid_loader_magic_value(void *pNewObject) {
    const VK_LOADER_DATA *loader_info = (VK_LOADER_DATA *)pNewObject;
    return (loader_info->loaderMagic & 0xffffffff) == ICD_LOADER_MAGIC;
}
/*
* Windows and Linux ICDs will treat VkSurfaceKHR as a pointer to a struct that
* contains the platform-specific connection and surface information.
*/
typedef enum {
VK_ICD_WSI_PLATFORM_MIR,
VK_ICD_WSI_PLATFORM_WAYLAND,
VK_ICD_WSI_PLATFORM_WIN32,
VK_ICD_WSI_PLATFORM_XCB,
VK_ICD_WSI_PLATFORM_XLIB,
VK_ICD_WSI_PLATFORM_ANDROID,
VK_ICD_WSI_PLATFORM_MACOS,
VK_ICD_WSI_PLATFORM_IOS,
VK_ICD_WSI_PLATFORM_DISPLAY,
VK_ICD_WSI_PLATFORM_HEADLESS,
VK_ICD_WSI_PLATFORM_METAL,
} VkIcdWsiPlatform;
typedef struct {
VkIcdWsiPlatform platform;
} VkIcdSurfaceBase;
#ifdef VK_USE_PLATFORM_MIR_KHR
typedef struct {
VkIcdSurfaceBase base;
MirConnection *connection;
MirSurface *mirSurface;
} VkIcdSurfaceMir;
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
typedef struct {
VkIcdSurfaceBase base;
struct wl_display *display;
struct wl_surface *surface;
} VkIcdSurfaceWayland;
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
typedef struct {
VkIcdSurfaceBase base;
HINSTANCE hinstance;
HWND hwnd;
} VkIcdSurfaceWin32;
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
typedef struct {
VkIcdSurfaceBase base;
xcb_connection_t *connection;
xcb_window_t window;
} VkIcdSurfaceXcb;
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
typedef struct {
VkIcdSurfaceBase base;
Display *dpy;
Window window;
} VkIcdSurfaceXlib;
#endif // VK_USE_PLATFORM_XLIB_KHR
#ifdef VK_USE_PLATFORM_ANDROID_KHR
typedef struct {
VkIcdSurfaceBase base;
struct ANativeWindow *window;
} VkIcdSurfaceAndroid;
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_MACOS_MVK
typedef struct {
VkIcdSurfaceBase base;
const void *pView;
} VkIcdSurfaceMacOS;
#endif // VK_USE_PLATFORM_MACOS_MVK
#ifdef VK_USE_PLATFORM_IOS_MVK
typedef struct {
VkIcdSurfaceBase base;
const void *pView;
} VkIcdSurfaceIOS;
#endif // VK_USE_PLATFORM_IOS_MVK
typedef struct {
VkIcdSurfaceBase base;
VkDisplayModeKHR displayMode;
uint32_t planeIndex;
uint32_t planeStackIndex;
VkSurfaceTransformFlagBitsKHR transform;
float globalAlpha;
VkDisplayPlaneAlphaFlagBitsKHR alphaMode;
VkExtent2D imageExtent;
} VkIcdSurfaceDisplay;
typedef struct {
VkIcdSurfaceBase base;
} VkIcdSurfaceHeadless;
#ifdef VK_USE_PLATFORM_METAL_EXT
typedef struct {
VkIcdSurfaceBase base;
const CAMetalLayer *pLayer;
} VkIcdSurfaceMetal;
#endif // VK_USE_PLATFORM_METAL_EXT
#endif // VKICD_H

202
vma-rs/vulkan/vk_layer.h Normal file
View file

@ -0,0 +1,202 @@
//
// File: vk_layer.h
//
/*
* Copyright (c) 2015-2017 The Khronos Group Inc.
* Copyright (c) 2015-2017 Valve Corporation
* Copyright (c) 2015-2017 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/* Need to define dispatch table
* Core struct can then have ptr to dispatch table at the top
* Along with object ptrs for current and next OBJ
*/
#pragma once
#include "vulkan.h"
#if defined(__GNUC__) && __GNUC__ >= 4
#define VK_LAYER_EXPORT __attribute__((visibility("default")))
#elif defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590)
#define VK_LAYER_EXPORT __attribute__((visibility("default")))
#else
#define VK_LAYER_EXPORT
#endif
#define MAX_NUM_UNKNOWN_EXTS 250
// Loader-Layer version negotiation API. Versions add the following features:
// Versions 0/1 - Initial. Doesn't support vk_layerGetPhysicalDeviceProcAddr
// or vk_icdNegotiateLoaderLayerInterfaceVersion.
// Version 2 - Add support for vk_layerGetPhysicalDeviceProcAddr and
// vk_icdNegotiateLoaderLayerInterfaceVersion.
#define CURRENT_LOADER_LAYER_INTERFACE_VERSION 2
#define MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION 1
#define VK_CURRENT_CHAIN_VERSION 1
// Typedef for use in the interfaces below
typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName);
// Version negotiation values
typedef enum VkNegotiateLayerStructType {
LAYER_NEGOTIATE_UNINTIALIZED = 0,
LAYER_NEGOTIATE_INTERFACE_STRUCT = 1,
} VkNegotiateLayerStructType;
// Version negotiation structures
typedef struct VkNegotiateLayerInterface {
VkNegotiateLayerStructType sType;
void *pNext;
uint32_t loaderLayerInterfaceVersion;
PFN_vkGetInstanceProcAddr pfnGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr pfnGetDeviceProcAddr;
PFN_GetPhysicalDeviceProcAddr pfnGetPhysicalDeviceProcAddr;
} VkNegotiateLayerInterface;
// Version negotiation functions
typedef VkResult (VKAPI_PTR *PFN_vkNegotiateLoaderLayerInterfaceVersion)(VkNegotiateLayerInterface *pVersionStruct);
// Function prototype for unknown physical device extension command
typedef VkResult(VKAPI_PTR *PFN_PhysDevExt)(VkPhysicalDevice phys_device);
// ------------------------------------------------------------------------------------------------
// CreateInstance and CreateDevice support structures
/* Sub type of structure for instance and device loader ext of CreateInfo.
* When sType == VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO
* or sType == VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO
* then VkLayerFunction indicates struct type pointed to by pNext
*/
// Discriminator for the loader create-info structures below: selects which
// union member of VkLayerInstanceCreateInfo / VkLayerDeviceCreateInfo is valid.
typedef enum VkLayerFunction_ {
VK_LAYER_LINK_INFO = 0, // union carries pLayerInfo (the layer chain link)
VK_LOADER_DATA_CALLBACK = 1, // union carries the set-loader-data callback
VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK = 2 // union carries layerDevice create/destroy callbacks (instance create-info only)
} VkLayerFunction;
// One node of the singly linked list describing the instance-level layer
// chain; each node exposes the next layer's proc-addr functions.
typedef struct VkLayerInstanceLink_ {
struct VkLayerInstanceLink_ *pNext; // next (lower) link in the chain
PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr; // GIPA of the next element in the chain
PFN_GetPhysicalDeviceProcAddr pfnNextGetPhysicalDeviceProcAddr; // physical-device GPA of the next element
} VkLayerInstanceLink;
/*
* When creating the device chain the loader needs to pass
* down information about it's device structure needed at
* the end of the chain. Passing the data via the
* VkLayerDeviceInfo avoids issues with finding the
* exact instance being used.
*/
// Loader bookkeeping passed down the device chain (see the explanatory
// comment immediately above this struct).
typedef struct VkLayerDeviceInfo_ {
void *device_info; // opaque loader-owned device data
PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr;
} VkLayerDeviceInfo;
typedef VkResult (VKAPI_PTR *PFN_vkSetInstanceLoaderData)(VkInstance instance,
void *object);
typedef VkResult (VKAPI_PTR *PFN_vkSetDeviceLoaderData)(VkDevice device,
void *object);
typedef VkResult (VKAPI_PTR *PFN_vkLayerCreateDevice)(VkInstance instance, VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkDevice *pDevice, PFN_vkGetInstanceProcAddr layerGIPA, PFN_vkGetDeviceProcAddr *nextGDPA);
typedef void (VKAPI_PTR *PFN_vkLayerDestroyDevice)(VkDevice physicalDevice, const VkAllocationCallbacks *pAllocator, PFN_vkDestroyDevice destroyFunction);
// Loader-to-layer structure chained into VkInstanceCreateInfo::pNext during
// vkCreateInstance; `function` selects which union member is valid.
typedef struct {
VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_INSTANCE_CREATE_INFO
const void *pNext;
VkLayerFunction function; // discriminator for the union below
union {
VkLayerInstanceLink *pLayerInfo; // valid when function == VK_LAYER_LINK_INFO
PFN_vkSetInstanceLoaderData pfnSetInstanceLoaderData; // valid when function == VK_LOADER_DATA_CALLBACK
struct {
PFN_vkLayerCreateDevice pfnLayerCreateDevice;
PFN_vkLayerDestroyDevice pfnLayerDestroyDevice;
} layerDevice; // valid when function == VK_LOADER_LAYER_CREATE_DEVICE_CALLBACK
} u;
} VkLayerInstanceCreateInfo;
// One node of the singly linked list describing the device-level layer chain.
typedef struct VkLayerDeviceLink_ {
struct VkLayerDeviceLink_ *pNext; // next (lower) link in the chain
PFN_vkGetInstanceProcAddr pfnNextGetInstanceProcAddr;
PFN_vkGetDeviceProcAddr pfnNextGetDeviceProcAddr;
} VkLayerDeviceLink;
// Loader-to-layer structure chained into VkDeviceCreateInfo::pNext during
// vkCreateDevice; `function` selects which union member is valid.
typedef struct {
VkStructureType sType; // VK_STRUCTURE_TYPE_LOADER_DEVICE_CREATE_INFO
const void *pNext;
VkLayerFunction function; // discriminator for the union below
union {
VkLayerDeviceLink *pLayerInfo; // valid when function == VK_LAYER_LINK_INFO
PFN_vkSetDeviceLoaderData pfnSetDeviceLoaderData; // valid when function == VK_LOADER_DATA_CALLBACK
} u;
} VkLayerDeviceCreateInfo;
#ifdef __cplusplus
extern "C" {
#endif
VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct);
// Identifies which pre-instance call a VkChainHeader belongs to
// (the three vkEnumerateInstance* entry points that exist before an instance).
typedef enum VkChainType {
VK_CHAIN_TYPE_UNKNOWN = 0,
VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES = 1,
VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES = 2,
VK_CHAIN_TYPE_ENUMERATE_INSTANCE_VERSION = 3,
} VkChainType;
// Common header shared by the three pre-instance chain structures below.
typedef struct VkChainHeader {
VkChainType type; // which vkEnumerateInstance* chain this is
uint32_t version; // chain version (see VK_CURRENT_CHAIN_VERSION above)
uint32_t size; // size of the full chain structure in bytes
} VkChainHeader;
// Chain element letting layers intercept vkEnumerateInstanceExtensionProperties
// before an instance exists; pfnNextLayer invokes the next element via pNextLink.
typedef struct VkEnumerateInstanceExtensionPropertiesChain {
VkChainHeader header;
VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceExtensionPropertiesChain *, const char *, uint32_t *,
VkExtensionProperties *);
const struct VkEnumerateInstanceExtensionPropertiesChain *pNextLink; // next chain element, passed as first arg to pfnNextLayer
#if defined(__cplusplus)
// C++ convenience wrapper: forward the call to the next link in the chain.
inline VkResult CallDown(const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties) const {
return pfnNextLayer(pNextLink, pLayerName, pPropertyCount, pProperties);
}
#endif
} VkEnumerateInstanceExtensionPropertiesChain;
// Chain element letting layers intercept vkEnumerateInstanceLayerProperties
// before an instance exists; same pattern as the extension-properties chain above.
typedef struct VkEnumerateInstanceLayerPropertiesChain {
VkChainHeader header;
VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceLayerPropertiesChain *, uint32_t *, VkLayerProperties *);
const struct VkEnumerateInstanceLayerPropertiesChain *pNextLink; // next chain element, passed as first arg to pfnNextLayer
#if defined(__cplusplus)
// C++ convenience wrapper: forward the call to the next link in the chain.
inline VkResult CallDown(uint32_t *pPropertyCount, VkLayerProperties *pProperties) const {
return pfnNextLayer(pNextLink, pPropertyCount, pProperties);
}
#endif
} VkEnumerateInstanceLayerPropertiesChain;
// Chain element letting layers intercept vkEnumerateInstanceVersion
// before an instance exists; same pattern as the chains above.
typedef struct VkEnumerateInstanceVersionChain {
VkChainHeader header;
VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceVersionChain *, uint32_t *);
const struct VkEnumerateInstanceVersionChain *pNextLink; // next chain element, passed as first arg to pfnNextLayer
#if defined(__cplusplus)
// C++ convenience wrapper: forward the call to the next link in the chain.
inline VkResult CallDown(uint32_t *pApiVersion) const {
return pfnNextLayer(pNextLink, pApiVersion);
}
#endif
} VkEnumerateInstanceVersionChain;
#ifdef __cplusplus
}
#endif

View file

@ -0,0 +1,92 @@
//
// File: vk_platform.h
//
/*
** Copyright (c) 2014-2017 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#ifndef VK_PLATFORM_H_
#define VK_PLATFORM_H_
#ifdef __cplusplus
extern "C"
{
#endif // __cplusplus
/*
***************************************************************************************************
* Platform-specific directives and type declarations
***************************************************************************************************
*/
/* Platform-specific calling convention macros.
*
* Platforms should define these so that Vulkan clients call Vulkan commands
* with the same calling conventions that the Vulkan implementation expects.
*
* VKAPI_ATTR - Placed before the return type in function declarations.
* Useful for C++11 and GCC/Clang-style function attribute syntax.
* VKAPI_CALL - Placed after the return type in function declarations.
* Useful for MSVC-style calling convention syntax.
* VKAPI_PTR - Placed between the '(' and '*' in function pointer types.
*
* Function declaration: VKAPI_ATTR void VKAPI_CALL vkCommand(void);
* Function pointer type: typedef void (VKAPI_PTR *PFN_vkCommand)(void);
*/
#if defined(_WIN32)
// On Windows, Vulkan commands use the stdcall convention
#define VKAPI_ATTR
#define VKAPI_CALL __stdcall
#define VKAPI_PTR VKAPI_CALL
#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH < 7
#error "Vulkan isn't supported for the 'armeabi' NDK ABI"
#elif defined(__ANDROID__) && defined(__ARM_ARCH) && __ARM_ARCH >= 7 && defined(__ARM_32BIT_STATE)
// On Android 32-bit ARM targets, Vulkan functions use the "hardfloat"
// calling convention, i.e. float parameters are passed in registers. This
// is true even if the rest of the application passes floats on the stack,
// as it does by default when compiling for the armeabi-v7a NDK ABI.
#define VKAPI_ATTR __attribute__((pcs("aapcs-vfp")))
#define VKAPI_CALL
#define VKAPI_PTR VKAPI_ATTR
#else
// On other platforms, use the default calling convention
#define VKAPI_ATTR
#define VKAPI_CALL
#define VKAPI_PTR
#endif
#include <stddef.h>
#if !defined(VK_NO_STDINT_H)
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef signed __int8 int8_t;
typedef unsigned __int8 uint8_t;
typedef signed __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef signed __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
#else
#include <stdint.h>
#endif
#endif // !defined(VK_NO_STDINT_H)
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
#endif

View file

@ -0,0 +1,69 @@
//
// File: vk_sdk_platform.h
//
/*
* Copyright (c) 2015-2016 The Khronos Group Inc.
* Copyright (c) 2015-2016 Valve Corporation
* Copyright (c) 2015-2016 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef VK_SDK_PLATFORM_H
#define VK_SDK_PLATFORM_H
#if defined(_WIN32)
#define NOMINMAX
#ifndef __cplusplus
#undef inline
#define inline __inline
#endif // __cplusplus
#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/)
// C99:
// Microsoft didn't implement C99 in Visual Studio; but started adding it with
// VS2013. However, VS2013 still didn't have snprintf(). The following is a
// work-around (Note: The _CRT_SECURE_NO_WARNINGS macro must be set in the
// "CMakeLists.txt" file).
// NOTE: This is fixed in Visual Studio 2015.
#define snprintf _snprintf
#endif
#define strdup _strdup
#endif // _WIN32
// Check for noexcept support using clang, with fallback to Windows or GCC version numbers
#ifndef NOEXCEPT
#if defined(__clang__)
#if __has_feature(cxx_noexcept)
#define HAS_NOEXCEPT
#endif
#else
#if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46
#define HAS_NOEXCEPT
#else
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026 && defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS
#define HAS_NOEXCEPT
#endif
#endif
#endif
#ifdef HAS_NOEXCEPT
#define NOEXCEPT noexcept
#else
#define NOEXCEPT
#endif
#endif
#endif // VK_SDK_PLATFORM_H

86
vma-rs/vulkan/vulkan.h Normal file
View file

@ -0,0 +1,86 @@
#ifndef VULKAN_H_
#define VULKAN_H_ 1
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#include "vk_platform.h"
#include "vulkan_core.h"
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "vulkan_android.h"
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
#include <zircon/types.h>
#include "vulkan_fuchsia.h"
#endif
#ifdef VK_USE_PLATFORM_IOS_MVK
#include "vulkan_ios.h"
#endif
#ifdef VK_USE_PLATFORM_MACOS_MVK
#include "vulkan_macos.h"
#endif
#ifdef VK_USE_PLATFORM_METAL_EXT
#include "vulkan_metal.h"
#endif
#ifdef VK_USE_PLATFORM_VI_NN
#include "vulkan_vi.h"
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
#include <wayland-client.h>
#include "vulkan_wayland.h"
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
#include <windows.h>
#include "vulkan_win32.h"
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
#include <xcb/xcb.h>
#include "vulkan_xcb.h"
#endif
#ifdef VK_USE_PLATFORM_XLIB_KHR
#include <X11/Xlib.h>
#include "vulkan_xlib.h"
#endif
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
#include <X11/Xlib.h>
#include <X11/extensions/Xrandr.h>
#include "vulkan_xlib_xrandr.h"
#endif
#ifdef VK_USE_PLATFORM_GGP
#include <ggp_c/vulkan_types.h>
#include "vulkan_ggp.h"
#endif
#endif // VULKAN_H_

View file

@ -0,0 +1,122 @@
#ifndef VULKAN_ANDROID_H_
#define VULKAN_ANDROID_H_ 1
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_KHR_android_surface 1
struct ANativeWindow;
#define VK_KHR_ANDROID_SURFACE_SPEC_VERSION 6
#define VK_KHR_ANDROID_SURFACE_EXTENSION_NAME "VK_KHR_android_surface"
typedef VkFlags VkAndroidSurfaceCreateFlagsKHR;
// Parameters for vkCreateAndroidSurfaceKHR: wraps an ANativeWindow in a VkSurfaceKHR.
typedef struct VkAndroidSurfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkAndroidSurfaceCreateFlagsKHR flags; // no flag bits are defined in this header
struct ANativeWindow* window; // Android window to create the surface for
} VkAndroidSurfaceCreateInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateAndroidSurfaceKHR)(VkInstance instance, const VkAndroidSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateAndroidSurfaceKHR(
VkInstance instance,
const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#define VK_ANDROID_external_memory_android_hardware_buffer 1
struct AHardwareBuffer;
#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_SPEC_VERSION 3
#define VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME "VK_ANDROID_external_memory_android_hardware_buffer"
// Output structure (mutable pNext) reporting AHardwareBuffer usage flags
// corresponding to a Vulkan image usage.
typedef struct VkAndroidHardwareBufferUsageANDROID {
VkStructureType sType;
void* pNext;
uint64_t androidHardwareBufferUsage; // AHardwareBuffer usage bits — presumably AHARDWAREBUFFER_USAGE_* values; confirm against NDK docs
} VkAndroidHardwareBufferUsageANDROID;
// Output of vkGetAndroidHardwareBufferPropertiesANDROID (prototype below).
typedef struct VkAndroidHardwareBufferPropertiesANDROID {
VkStructureType sType;
void* pNext;
VkDeviceSize allocationSize; // size of the buffer's memory
uint32_t memoryTypeBits; // memory types the buffer can be imported into
} VkAndroidHardwareBufferPropertiesANDROID;
// Format details for an AHardwareBuffer, chainable into the properties
// struct above; carries the suggested Y'CbCr sampler conversion settings.
typedef struct VkAndroidHardwareBufferFormatPropertiesANDROID {
VkStructureType sType;
void* pNext;
VkFormat format; // corresponding Vulkan format (may be VK_FORMAT_UNDEFINED for external-only formats — confirm against spec)
uint64_t externalFormat; // opaque implementation-defined external format id
VkFormatFeatureFlags formatFeatures;
VkComponentMapping samplerYcbcrConversionComponents;
VkSamplerYcbcrModelConversion suggestedYcbcrModel;
VkSamplerYcbcrRange suggestedYcbcrRange;
VkChromaLocation suggestedXChromaOffset;
VkChromaLocation suggestedYChromaOffset;
} VkAndroidHardwareBufferFormatPropertiesANDROID;
// Import structure: chain into a memory-allocation pNext to import the given
// AHardwareBuffer as device memory.
typedef struct VkImportAndroidHardwareBufferInfoANDROID {
VkStructureType sType;
const void* pNext;
struct AHardwareBuffer* buffer; // Android hardware buffer to import
} VkImportAndroidHardwareBufferInfoANDROID;
// Input to vkGetMemoryAndroidHardwareBufferANDROID (prototype below):
// identifies the device memory to export as an AHardwareBuffer.
typedef struct VkMemoryGetAndroidHardwareBufferInfoANDROID {
VkStructureType sType;
const void* pNext;
VkDeviceMemory memory; // memory to export
} VkMemoryGetAndroidHardwareBufferInfoANDROID;
// Chains an implementation-defined external format id where a VkFormat
// would otherwise be required (value 0 means "no external format" — confirm against spec).
typedef struct VkExternalFormatANDROID {
VkStructureType sType;
void* pNext;
uint64_t externalFormat;
} VkExternalFormatANDROID;
typedef VkResult (VKAPI_PTR *PFN_vkGetAndroidHardwareBufferPropertiesANDROID)(VkDevice device, const struct AHardwareBuffer* buffer, VkAndroidHardwareBufferPropertiesANDROID* pProperties);
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryAndroidHardwareBufferANDROID)(VkDevice device, const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo, struct AHardwareBuffer** pBuffer);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetAndroidHardwareBufferPropertiesANDROID(
VkDevice device,
const struct AHardwareBuffer* buffer,
VkAndroidHardwareBufferPropertiesANDROID* pProperties);
VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryAndroidHardwareBufferANDROID(
VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID* pInfo,
struct AHardwareBuffer** pBuffer);
#endif
#ifdef __cplusplus
}
#endif
#endif

10722
vma-rs/vulkan/vulkan_core.h Normal file

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,57 @@
#ifndef VULKAN_FUCHSIA_H_
#define VULKAN_FUCHSIA_H_ 1
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_FUCHSIA_imagepipe_surface 1
#define VK_FUCHSIA_IMAGEPIPE_SURFACE_SPEC_VERSION 1
#define VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME "VK_FUCHSIA_imagepipe_surface"
typedef VkFlags VkImagePipeSurfaceCreateFlagsFUCHSIA;
// Parameters for vkCreateImagePipeSurfaceFUCHSIA: wraps a Fuchsia image-pipe
// handle in a VkSurfaceKHR.
typedef struct VkImagePipeSurfaceCreateInfoFUCHSIA {
VkStructureType sType;
const void* pNext;
VkImagePipeSurfaceCreateFlagsFUCHSIA flags; // no flag bits are defined in this header
zx_handle_t imagePipeHandle; // Zircon handle to the image pipe
} VkImagePipeSurfaceCreateInfoFUCHSIA;
typedef VkResult (VKAPI_PTR *PFN_vkCreateImagePipeSurfaceFUCHSIA)(VkInstance instance, const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateImagePipeSurfaceFUCHSIA(
VkInstance instance,
const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,68 @@
#ifndef VULKAN_GGP_H_
#define VULKAN_GGP_H_ 1
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_GGP_stream_descriptor_surface 1
#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_SPEC_VERSION 1
#define VK_GGP_STREAM_DESCRIPTOR_SURFACE_EXTENSION_NAME "VK_GGP_stream_descriptor_surface"
typedef VkFlags VkStreamDescriptorSurfaceCreateFlagsGGP;
// Parameters for vkCreateStreamDescriptorSurfaceGGP: wraps a Google games
// platform stream descriptor in a VkSurfaceKHR.
typedef struct VkStreamDescriptorSurfaceCreateInfoGGP {
VkStructureType sType;
const void* pNext;
VkStreamDescriptorSurfaceCreateFlagsGGP flags; // no flag bits are defined in this header
GgpStreamDescriptor streamDescriptor;
} VkStreamDescriptorSurfaceCreateInfoGGP;
typedef VkResult (VKAPI_PTR *PFN_vkCreateStreamDescriptorSurfaceGGP)(VkInstance instance, const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateStreamDescriptorSurfaceGGP(
VkInstance instance,
const VkStreamDescriptorSurfaceCreateInfoGGP* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#define VK_GGP_frame_token 1
#define VK_GGP_FRAME_TOKEN_SPEC_VERSION 1
#define VK_GGP_FRAME_TOKEN_EXTENSION_NAME "VK_GGP_frame_token"
// Chainable present-time structure carrying a GGP frame token.
typedef struct VkPresentFrameTokenGGP {
VkStructureType sType;
const void* pNext;
GgpFrameToken frameToken;
} VkPresentFrameTokenGGP;
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,57 @@
#ifndef VULKAN_IOS_H_
#define VULKAN_IOS_H_ 1
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_MVK_ios_surface 1
#define VK_MVK_IOS_SURFACE_SPEC_VERSION 2
#define VK_MVK_IOS_SURFACE_EXTENSION_NAME "VK_MVK_ios_surface"
typedef VkFlags VkIOSSurfaceCreateFlagsMVK;
// Parameters for vkCreateIOSSurfaceMVK (MoltenVK iOS surface).
typedef struct VkIOSSurfaceCreateInfoMVK {
VkStructureType sType;
const void* pNext;
VkIOSSurfaceCreateFlagsMVK flags; // no flag bits are defined in this header
const void* pView; // opaque pointer to the view backing the surface — presumably a UIView/CAMetalLayer; confirm against MoltenVK docs
} VkIOSSurfaceCreateInfoMVK;
typedef VkResult (VKAPI_PTR *PFN_vkCreateIOSSurfaceMVK)(VkInstance instance, const VkIOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateIOSSurfaceMVK(
VkInstance instance,
const VkIOSSurfaceCreateInfoMVK* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,57 @@
#ifndef VULKAN_MACOS_H_
#define VULKAN_MACOS_H_ 1
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_MVK_macos_surface 1
#define VK_MVK_MACOS_SURFACE_SPEC_VERSION 2
#define VK_MVK_MACOS_SURFACE_EXTENSION_NAME "VK_MVK_macos_surface"
typedef VkFlags VkMacOSSurfaceCreateFlagsMVK;
// Parameters for vkCreateMacOSSurfaceMVK (MoltenVK macOS surface).
typedef struct VkMacOSSurfaceCreateInfoMVK {
VkStructureType sType;
const void* pNext;
VkMacOSSurfaceCreateFlagsMVK flags; // no flag bits are defined in this header
const void* pView; // opaque pointer to the view backing the surface — presumably an NSView/CAMetalLayer; confirm against MoltenVK docs
} VkMacOSSurfaceCreateInfoMVK;
typedef VkResult (VKAPI_PTR *PFN_vkCreateMacOSSurfaceMVK)(VkInstance instance, const VkMacOSSurfaceCreateInfoMVK* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateMacOSSurfaceMVK(
VkInstance instance,
const VkMacOSSurfaceCreateInfoMVK* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,64 @@
#ifndef VULKAN_METAL_H_
#define VULKAN_METAL_H_ 1
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_EXT_metal_surface 1
#ifdef __OBJC__
@class CAMetalLayer;
#else
typedef void CAMetalLayer;
#endif
#define VK_EXT_METAL_SURFACE_SPEC_VERSION 1
#define VK_EXT_METAL_SURFACE_EXTENSION_NAME "VK_EXT_metal_surface"
typedef VkFlags VkMetalSurfaceCreateFlagsEXT;
// Parameters for vkCreateMetalSurfaceEXT: wraps a CAMetalLayer in a VkSurfaceKHR.
typedef struct VkMetalSurfaceCreateInfoEXT {
VkStructureType sType;
const void* pNext;
VkMetalSurfaceCreateFlagsEXT flags; // no flag bits are defined in this header
const CAMetalLayer* pLayer; // Metal layer to render to (forward-declared above; opaque void outside Objective-C)
} VkMetalSurfaceCreateInfoEXT;
typedef VkResult (VKAPI_PTR *PFN_vkCreateMetalSurfaceEXT)(VkInstance instance, const VkMetalSurfaceCreateInfoEXT* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateMetalSurfaceEXT(
VkInstance instance,
const VkMetalSurfaceCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#ifdef __cplusplus
}
#endif
#endif

57
vma-rs/vulkan/vulkan_vi.h Normal file
View file

@ -0,0 +1,57 @@
#ifndef VULKAN_VI_H_
#define VULKAN_VI_H_ 1
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_NN_vi_surface 1
#define VK_NN_VI_SURFACE_SPEC_VERSION 1
#define VK_NN_VI_SURFACE_EXTENSION_NAME "VK_NN_vi_surface"
typedef VkFlags VkViSurfaceCreateFlagsNN;
// Parameters for vkCreateViSurfaceNN (Nintendo Vi surface).
typedef struct VkViSurfaceCreateInfoNN {
VkStructureType sType;
const void* pNext;
VkViSurfaceCreateFlagsNN flags; // no flag bits are defined in this header
void* window; // opaque platform window handle
} VkViSurfaceCreateInfoNN;
typedef VkResult (VKAPI_PTR *PFN_vkCreateViSurfaceNN)(VkInstance instance, const VkViSurfaceCreateInfoNN* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateViSurfaceNN(
VkInstance instance,
const VkViSurfaceCreateInfoNN* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
#endif
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,64 @@
#ifndef VULKAN_WAYLAND_H_
#define VULKAN_WAYLAND_H_ 1
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_KHR_wayland_surface 1
#define VK_KHR_WAYLAND_SURFACE_SPEC_VERSION 6
#define VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME "VK_KHR_wayland_surface"
typedef VkFlags VkWaylandSurfaceCreateFlagsKHR;
// Parameters for vkCreateWaylandSurfaceKHR: wraps a Wayland display/surface
// pair in a VkSurfaceKHR.
typedef struct VkWaylandSurfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkWaylandSurfaceCreateFlagsKHR flags; // no flag bits are defined in this header
struct wl_display* display; // Wayland display connection
struct wl_surface* surface; // Wayland surface to present to
} VkWaylandSurfaceCreateInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateWaylandSurfaceKHR)(VkInstance instance, const VkWaylandSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, struct wl_display* display);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateWaylandSurfaceKHR(
VkInstance instance,
const VkWaylandSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWaylandPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
struct wl_display* display);
#endif
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,328 @@
#ifndef VULKAN_WIN32_H_
#define VULKAN_WIN32_H_ 1
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
#define VK_KHR_win32_surface 1
#define VK_KHR_WIN32_SURFACE_SPEC_VERSION 6
#define VK_KHR_WIN32_SURFACE_EXTENSION_NAME "VK_KHR_win32_surface"
typedef VkFlags VkWin32SurfaceCreateFlagsKHR;
// Parameters for vkCreateWin32SurfaceKHR: wraps an HWND in a VkSurfaceKHR.
typedef struct VkWin32SurfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkWin32SurfaceCreateFlagsKHR flags; // no flag bits are defined in this header
HINSTANCE hinstance; // application instance that owns the window
HWND hwnd; // window to present to
} VkWin32SurfaceCreateInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateWin32SurfaceKHR)(VkInstance instance, const VkWin32SurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateWin32SurfaceKHR(
VkInstance instance,
const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceWin32PresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex);
#endif
#define VK_KHR_external_memory_win32 1
#define VK_KHR_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
#define VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_KHR_external_memory_win32"
// Import structure: chain into a memory-allocation pNext to import memory
// from a Win32 handle or a named object.
typedef struct VkImportMemoryWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkExternalMemoryHandleTypeFlagBits handleType; // kind of handle being imported
HANDLE handle; // handle to import (presumably mutually exclusive with `name` — confirm against spec)
LPCWSTR name; // name of the underlying object, if importing by name
} VkImportMemoryWin32HandleInfoKHR;
// Export structure: Win32 security/access/name attributes for memory that
// will be exported as a Win32 handle.
typedef struct VkExportMemoryWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
const SECURITY_ATTRIBUTES* pAttributes; // security descriptor for the exported handle
DWORD dwAccess; // access rights granted on the handle
LPCWSTR name; // optional name for the underlying object
} VkExportMemoryWin32HandleInfoKHR;
// Output of vkGetMemoryWin32HandlePropertiesKHR (prototype below).
typedef struct VkMemoryWin32HandlePropertiesKHR {
VkStructureType sType;
void* pNext;
uint32_t memoryTypeBits; // memory types the handle can be imported into
} VkMemoryWin32HandlePropertiesKHR;
// Input to vkGetMemoryWin32HandleKHR (prototype below): identifies the
// memory and handle type to export.
typedef struct VkMemoryGetWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkDeviceMemory memory; // memory to export
VkExternalMemoryHandleTypeFlagBits handleType; // kind of handle to produce
} VkMemoryGetWin32HandleInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleKHR)(VkDevice device, const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandlePropertiesKHR)(VkDevice device, VkExternalMemoryHandleTypeFlagBits handleType, HANDLE handle, VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleKHR(
VkDevice device,
const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
HANDLE* pHandle);
VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandlePropertiesKHR(
VkDevice device,
VkExternalMemoryHandleTypeFlagBits handleType,
HANDLE handle,
VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties);
#endif
#define VK_KHR_win32_keyed_mutex 1
#define VK_KHR_WIN32_KEYED_MUTEX_SPEC_VERSION 1
#define VK_KHR_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_KHR_win32_keyed_mutex"
// Keyed-mutex synchronization for shared memory: parallel arrays of
// memory objects with their acquire keys/timeouts and release keys.
typedef struct VkWin32KeyedMutexAcquireReleaseInfoKHR {
VkStructureType sType;
const void* pNext;
uint32_t acquireCount; // length of the three pAcquire* arrays
const VkDeviceMemory* pAcquireSyncs;
const uint64_t* pAcquireKeys;
const uint32_t* pAcquireTimeouts; // per-acquire timeouts — presumably milliseconds; confirm against spec
uint32_t releaseCount; // length of the two pRelease* arrays
const VkDeviceMemory* pReleaseSyncs;
const uint64_t* pReleaseKeys;
} VkWin32KeyedMutexAcquireReleaseInfoKHR;
#define VK_KHR_external_semaphore_win32 1
#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_SPEC_VERSION 1
#define VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME "VK_KHR_external_semaphore_win32"
typedef struct VkImportSemaphoreWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkSemaphore semaphore;
VkSemaphoreImportFlags flags;
VkExternalSemaphoreHandleTypeFlagBits handleType;
HANDLE handle;
LPCWSTR name;
} VkImportSemaphoreWin32HandleInfoKHR;
typedef struct VkExportSemaphoreWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
const SECURITY_ATTRIBUTES* pAttributes;
DWORD dwAccess;
LPCWSTR name;
} VkExportSemaphoreWin32HandleInfoKHR;
typedef struct VkD3D12FenceSubmitInfoKHR {
VkStructureType sType;
const void* pNext;
uint32_t waitSemaphoreValuesCount;
const uint64_t* pWaitSemaphoreValues;
uint32_t signalSemaphoreValuesCount;
const uint64_t* pSignalSemaphoreValues;
} VkD3D12FenceSubmitInfoKHR;
typedef struct VkSemaphoreGetWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkSemaphore semaphore;
VkExternalSemaphoreHandleTypeFlagBits handleType;
} VkSemaphoreGetWin32HandleInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkImportSemaphoreWin32HandleKHR)(VkDevice device, const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
typedef VkResult (VKAPI_PTR *PFN_vkGetSemaphoreWin32HandleKHR)(VkDevice device, const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkImportSemaphoreWin32HandleKHR(
VkDevice device,
const VkImportSemaphoreWin32HandleInfoKHR* pImportSemaphoreWin32HandleInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkGetSemaphoreWin32HandleKHR(
VkDevice device,
const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
HANDLE* pHandle);
#endif
#define VK_KHR_external_fence_win32 1
#define VK_KHR_EXTERNAL_FENCE_WIN32_SPEC_VERSION 1
#define VK_KHR_EXTERNAL_FENCE_WIN32_EXTENSION_NAME "VK_KHR_external_fence_win32"
typedef struct VkImportFenceWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkFence fence;
VkFenceImportFlags flags;
VkExternalFenceHandleTypeFlagBits handleType;
HANDLE handle;
LPCWSTR name;
} VkImportFenceWin32HandleInfoKHR;
typedef struct VkExportFenceWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
const SECURITY_ATTRIBUTES* pAttributes;
DWORD dwAccess;
LPCWSTR name;
} VkExportFenceWin32HandleInfoKHR;
typedef struct VkFenceGetWin32HandleInfoKHR {
VkStructureType sType;
const void* pNext;
VkFence fence;
VkExternalFenceHandleTypeFlagBits handleType;
} VkFenceGetWin32HandleInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkImportFenceWin32HandleKHR)(VkDevice device, const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
typedef VkResult (VKAPI_PTR *PFN_vkGetFenceWin32HandleKHR)(VkDevice device, const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo, HANDLE* pHandle);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkImportFenceWin32HandleKHR(
VkDevice device,
const VkImportFenceWin32HandleInfoKHR* pImportFenceWin32HandleInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkGetFenceWin32HandleKHR(
VkDevice device,
const VkFenceGetWin32HandleInfoKHR* pGetWin32HandleInfo,
HANDLE* pHandle);
#endif
#define VK_NV_external_memory_win32 1
#define VK_NV_EXTERNAL_MEMORY_WIN32_SPEC_VERSION 1
#define VK_NV_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME "VK_NV_external_memory_win32"
typedef struct VkImportMemoryWin32HandleInfoNV {
VkStructureType sType;
const void* pNext;
VkExternalMemoryHandleTypeFlagsNV handleType;
HANDLE handle;
} VkImportMemoryWin32HandleInfoNV;
typedef struct VkExportMemoryWin32HandleInfoNV {
VkStructureType sType;
const void* pNext;
const SECURITY_ATTRIBUTES* pAttributes;
DWORD dwAccess;
} VkExportMemoryWin32HandleInfoNV;
typedef VkResult (VKAPI_PTR *PFN_vkGetMemoryWin32HandleNV)(VkDevice device, VkDeviceMemory memory, VkExternalMemoryHandleTypeFlagsNV handleType, HANDLE* pHandle);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetMemoryWin32HandleNV(
VkDevice device,
VkDeviceMemory memory,
VkExternalMemoryHandleTypeFlagsNV handleType,
HANDLE* pHandle);
#endif
#define VK_NV_win32_keyed_mutex 1
#define VK_NV_WIN32_KEYED_MUTEX_SPEC_VERSION 2
#define VK_NV_WIN32_KEYED_MUTEX_EXTENSION_NAME "VK_NV_win32_keyed_mutex"
typedef struct VkWin32KeyedMutexAcquireReleaseInfoNV {
VkStructureType sType;
const void* pNext;
uint32_t acquireCount;
const VkDeviceMemory* pAcquireSyncs;
const uint64_t* pAcquireKeys;
const uint32_t* pAcquireTimeoutMilliseconds;
uint32_t releaseCount;
const VkDeviceMemory* pReleaseSyncs;
const uint64_t* pReleaseKeys;
} VkWin32KeyedMutexAcquireReleaseInfoNV;
#define VK_EXT_full_screen_exclusive 1
#define VK_EXT_FULL_SCREEN_EXCLUSIVE_SPEC_VERSION 4
#define VK_EXT_FULL_SCREEN_EXCLUSIVE_EXTENSION_NAME "VK_EXT_full_screen_exclusive"
typedef enum VkFullScreenExclusiveEXT {
VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT = 0,
VK_FULL_SCREEN_EXCLUSIVE_ALLOWED_EXT = 1,
VK_FULL_SCREEN_EXCLUSIVE_DISALLOWED_EXT = 2,
VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT = 3,
VK_FULL_SCREEN_EXCLUSIVE_BEGIN_RANGE_EXT = VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT,
VK_FULL_SCREEN_EXCLUSIVE_END_RANGE_EXT = VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT,
VK_FULL_SCREEN_EXCLUSIVE_RANGE_SIZE_EXT = (VK_FULL_SCREEN_EXCLUSIVE_APPLICATION_CONTROLLED_EXT - VK_FULL_SCREEN_EXCLUSIVE_DEFAULT_EXT + 1),
VK_FULL_SCREEN_EXCLUSIVE_MAX_ENUM_EXT = 0x7FFFFFFF
} VkFullScreenExclusiveEXT;
typedef struct VkSurfaceFullScreenExclusiveInfoEXT {
VkStructureType sType;
void* pNext;
VkFullScreenExclusiveEXT fullScreenExclusive;
} VkSurfaceFullScreenExclusiveInfoEXT;
typedef struct VkSurfaceCapabilitiesFullScreenExclusiveEXT {
VkStructureType sType;
void* pNext;
VkBool32 fullScreenExclusiveSupported;
} VkSurfaceCapabilitiesFullScreenExclusiveEXT;
typedef struct VkSurfaceFullScreenExclusiveWin32InfoEXT {
VkStructureType sType;
const void* pNext;
HMONITOR hmonitor;
} VkSurfaceFullScreenExclusiveWin32InfoEXT;
typedef VkResult (VKAPI_PTR *PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT)(VkPhysicalDevice physicalDevice, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, uint32_t* pPresentModeCount, VkPresentModeKHR* pPresentModes);
typedef VkResult (VKAPI_PTR *PFN_vkAcquireFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain);
typedef VkResult (VKAPI_PTR *PFN_vkReleaseFullScreenExclusiveModeEXT)(VkDevice device, VkSwapchainKHR swapchain);
typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceGroupSurfacePresentModes2EXT)(VkDevice device, const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo, VkDeviceGroupPresentModeFlagsKHR* pModes);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkGetPhysicalDeviceSurfacePresentModes2EXT(
VkPhysicalDevice physicalDevice,
const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
uint32_t* pPresentModeCount,
VkPresentModeKHR* pPresentModes);
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireFullScreenExclusiveModeEXT(
VkDevice device,
VkSwapchainKHR swapchain);
VKAPI_ATTR VkResult VKAPI_CALL vkReleaseFullScreenExclusiveModeEXT(
VkDevice device,
VkSwapchainKHR swapchain);
VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceGroupSurfacePresentModes2EXT(
VkDevice device,
const VkPhysicalDeviceSurfaceInfo2KHR* pSurfaceInfo,
VkDeviceGroupPresentModeFlagsKHR* pModes);
#endif
#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,65 @@
#ifndef VULKAN_XCB_H_
#define VULKAN_XCB_H_ 1

/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

/*
** This header is generated from the Khronos Vulkan XML API Registry.
** Do not edit by hand; regenerate from the registry instead.
*/

#ifdef __cplusplus
extern "C" {
#endif

#define VK_KHR_xcb_surface 1
#define VK_KHR_XCB_SURFACE_SPEC_VERSION 6
#define VK_KHR_XCB_SURFACE_EXTENSION_NAME "VK_KHR_xcb_surface"

/* Reserved for future use; no flag bits are defined in this header. */
typedef VkFlags VkXcbSurfaceCreateFlagsKHR;

/* Creation parameters for an XCB-backed VkSurfaceKHR: the XCB connection
 * and the window the surface will present to. */
typedef struct VkXcbSurfaceCreateInfoKHR {
    VkStructureType               sType;
    const void*                   pNext;
    VkXcbSurfaceCreateFlagsKHR    flags;
    xcb_connection_t*             connection;
    xcb_window_t                  window;
} VkXcbSurfaceCreateInfoKHR;

/* Function-pointer typedefs for loaders that resolve entry points at runtime. */
typedef VkResult (VKAPI_PTR *PFN_vkCreateXcbSurfaceKHR)(VkInstance instance, const VkXcbSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, xcb_connection_t* connection, xcb_visualid_t visual_id);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateXcbSurfaceKHR(
    VkInstance                                  instance,
    const VkXcbSurfaceCreateInfoKHR*            pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSurfaceKHR*                               pSurface);

VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXcbPresentationSupportKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    xcb_connection_t*                           connection,
    xcb_visualid_t                              visual_id);
#endif

#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,65 @@
#ifndef VULKAN_XLIB_H_
#define VULKAN_XLIB_H_ 1

/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

/*
** This header is generated from the Khronos Vulkan XML API Registry.
** Do not edit by hand; regenerate from the registry instead.
*/

#ifdef __cplusplus
extern "C" {
#endif

#define VK_KHR_xlib_surface 1
#define VK_KHR_XLIB_SURFACE_SPEC_VERSION 6
#define VK_KHR_XLIB_SURFACE_EXTENSION_NAME "VK_KHR_xlib_surface"

/* Reserved for future use; no flag bits are defined in this header. */
typedef VkFlags VkXlibSurfaceCreateFlagsKHR;

/* Creation parameters for an Xlib-backed VkSurfaceKHR: the X display
 * connection and the window the surface will present to. */
typedef struct VkXlibSurfaceCreateInfoKHR {
    VkStructureType                sType;
    const void*                    pNext;
    VkXlibSurfaceCreateFlagsKHR    flags;
    Display*                       dpy;
    Window                         window;
} VkXlibSurfaceCreateInfoKHR;

/* Function-pointer typedefs for loaders that resolve entry points at runtime. */
typedef VkResult (VKAPI_PTR *PFN_vkCreateXlibSurfaceKHR)(VkInstance instance, const VkXlibSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, Display* dpy, VisualID visualID);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateXlibSurfaceKHR(
    VkInstance                                  instance,
    const VkXlibSurfaceCreateInfoKHR*           pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSurfaceKHR*                               pSurface);

VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceXlibPresentationSupportKHR(
    VkPhysicalDevice                            physicalDevice,
    uint32_t                                    queueFamilyIndex,
    Display*                                    dpy,
    VisualID                                    visualID);
#endif

#ifdef __cplusplus
}
#endif
#endif

View file

@ -0,0 +1,55 @@
#ifndef VULKAN_XLIB_XRANDR_H_
#define VULKAN_XLIB_XRANDR_H_ 1

/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

/*
** This header is generated from the Khronos Vulkan XML API Registry.
** Do not edit by hand; regenerate from the registry instead.
*/

#ifdef __cplusplus
extern "C" {
#endif

#define VK_EXT_acquire_xlib_display 1
#define VK_EXT_ACQUIRE_XLIB_DISPLAY_SPEC_VERSION 1
#define VK_EXT_ACQUIRE_XLIB_DISPLAY_EXTENSION_NAME "VK_EXT_acquire_xlib_display"

/* Function-pointer typedefs for loaders that resolve entry points at runtime. */
typedef VkResult (VKAPI_PTR *PFN_vkAcquireXlibDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, VkDisplayKHR display);
typedef VkResult (VKAPI_PTR *PFN_vkGetRandROutputDisplayEXT)(VkPhysicalDevice physicalDevice, Display* dpy, RROutput rrOutput, VkDisplayKHR* pDisplay);

#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkAcquireXlibDisplayEXT(
    VkPhysicalDevice                            physicalDevice,
    Display*                                    dpy,
    VkDisplayKHR                                display);

VKAPI_ATTR VkResult VKAPI_CALL vkGetRandROutputDisplayEXT(
    VkPhysicalDevice                            physicalDevice,
    Display*                                    dpy,
    RROutput                                    rrOutput,
    VkDisplayKHR*                               pDisplay);
#endif

#ifdef __cplusplus
}
#endif
#endif

7
vulkan-rs/.vscode/settings.json vendored Normal file
View file

@ -0,0 +1,7 @@
{
"workbench.colorCustomizations": {
"activityBar.background": "#3F214A",
"titleBar.activeBackground": "#592F68",
"titleBar.activeForeground": "#FDFCFE"
}
}

13
vulkan-rs/Cargo.toml Normal file
View file

@ -0,0 +1,13 @@
[package]
name = "vulkan-rs"
version = "0.1.0"
authors = ["hodasemi <superschneider@t-online.de>"]
edition = "2021"
[dependencies]
image = "0.24.5"
vulkan-sys = { path = "../vulkan-sys" }
vma-rs = { path = "../vma-rs" }
anyhow = { version = "1.0.68", features = ["backtrace"] }
cgmath = "0.18.0"
assetpath = { path = "../assetpath" }

View file

@ -0,0 +1,451 @@
use crate::prelude::*;
use anyhow::Result;
use cgmath::{Matrix, Matrix4, One};
use core::slice;
use std::{
mem,
sync::{Arc, Mutex},
};
/// Build input collected by [`AccelerationStructureBuilder`] before `build()`.
///
/// A top-level structure is assembled from instances (each pointing at a
/// bottom-level structure); a bottom-level structure from triangle geometries
/// paired with a primitive count per geometry.
enum AccelerationStructureBuilderData {
    TopLevel(Vec<VkAccelerationStructureInstanceKHR>),
    BottomLevel(Vec<VkAccelerationStructureGeometryKHR>, Vec<u32>),
}
/// Builder for [`AccelerationStructure`], obtained via
/// `AccelerationStructure::top_level()` or `AccelerationStructure::bottom_level()`.
pub struct AccelerationStructureBuilder {
    // Forwarded into VkAccelerationStructureCreateInfoKHR at creation time.
    flags: VkAccelerationStructureCreateFlagBitsKHR,
    // Optional capture/replay device address; 0 is passed when unset.
    capture_replay_address: Option<VkDeviceAddress>,
    // Per-level build input (instances or geometries + primitive counts).
    data: AccelerationStructureBuilderData,
}
impl AccelerationStructureBuilder {
    /// Adds one instance of a bottom-level structure to a top-level builder.
    ///
    /// The instance custom index is set to the instance's position in the
    /// list, the visibility mask to `0xFF`, and the SBT record offset to `0`.
    ///
    /// # Panics
    ///
    /// Panics if this builder was created via `bottom_level()`.
    pub fn add_instance(
        mut self,
        blas: &Arc<AccelerationStructure>,
        transform: Option<Matrix4<f32>>,
        instance_flags: impl Into<VkGeometryInstanceFlagBitsKHR>,
    ) -> Self {
        match &mut self.data {
            AccelerationStructureBuilderData::TopLevel(instances) => {
                // cgmath matrices are column-major; the transpose is presumably
                // needed because VkTransformMatrixKHR expects row-major data —
                // TODO confirm against VkTransformMatrixKHR's layout.
                let transposed: [[f32; 4]; 4] = match transform {
                    Some(transform) => transform.transpose(),
                    None => Matrix4::one(),
                }
                .into();

                let instance = VkAccelerationStructureInstanceKHR::new(
                    VkTransformMatrixKHR::from(transposed),
                    // instanceCustomIndex: position in the instance list
                    instances.len() as u32,
                    // mask: visible to all ray masks
                    0xFF,
                    // instanceShaderBindingTableRecordOffset
                    0,
                    instance_flags,
                    blas.address(),
                );

                instances.push(instance);
            }
            AccelerationStructureBuilderData::BottomLevel(_, _) => {
                panic!("can not add acceleration structures to bottom level as")
            }
        }

        self
    }

    /// Adds a triangle geometry (non-indexed, `R32G32B32_SFLOAT` positions)
    /// to a bottom-level builder. `T` is the vertex type; its size is used as
    /// the vertex stride, and the primitive count is `vertex_count / 3`.
    ///
    /// # Panics
    ///
    /// Panics if this builder was created via `top_level()`.
    pub fn add_vertices<T: Send + Sync + 'static>(
        mut self,
        vertex_buffer: &Arc<Buffer<T>>,
        transform: Option<Arc<Buffer<Matrix4<f32>>>>,
        flags: impl Into<VkGeometryFlagBitsKHR>,
    ) -> Self {
        match &mut self.data {
            AccelerationStructureBuilderData::TopLevel(_) => {
                panic!("can not add buffers to top level as")
            }
            AccelerationStructureBuilderData::BottomLevel(geometries, primitive_counts) => {
                let geometry = VkAccelerationStructureGeometryKHR::new(
                    VK_GEOMETRY_TYPE_TRIANGLES_KHR,
                    VkAccelerationStructureGeometryDataKHR::from(
                        VkAccelerationStructureGeometryTrianglesDataKHR::new(
                            VK_FORMAT_R32G32B32_SFLOAT,
                            vertex_buffer.device_address().into(),
                            // vertex stride = size of one vertex element
                            mem::size_of::<T>() as VkDeviceSize,
                            vertex_buffer.size() as u32,
                            // non-indexed geometry: no index buffer
                            VK_INDEX_TYPE_NONE_KHR,
                            VkDeviceOrHostAddressConstKHR::null(),
                            match &transform {
                                Some(transform_buffer) => transform_buffer.device_address().into(),
                                None => VkDeviceOrHostAddressConstKHR::null(),
                            },
                        ),
                    ),
                    flags,
                );

                // Each consecutive triple of vertices forms one triangle.
                let primitive_count = (vertex_buffer.size() / 3) as u32;

                geometries.push(geometry);
                primitive_counts.push(primitive_count);
            }
        }

        self
    }

    /// Sets the VkAccelerationStructureCreateInfoKHR create flags.
    pub fn set_flags(mut self, flags: impl Into<VkAccelerationStructureCreateFlagBitsKHR>) -> Self {
        self.flags = flags.into();
        self
    }

    /// Sets the capture/replay device address passed at creation.
    pub fn set_replay_address(mut self, capture_replay_address: VkDeviceAddress) -> Self {
        self.capture_replay_address = Some(capture_replay_address);
        self
    }

    /// Creates the acceleration structure, allocates its backing and scratch
    /// buffers, and records the initial build into `recorder`.
    ///
    /// # Errors
    ///
    /// Fails if any buffer allocation or the Vulkan object creation fails.
    pub fn build(
        self,
        device: Arc<Device>,
        recorder: &mut CommandBufferRecorder<'_>,
    ) -> Result<Arc<AccelerationStructure>> {
        // Fast-trace + allow-update so the structure can later be refitted
        // via AccelerationStructure::update().
        let build_flags = VK_BUILD_ACCELERATION_STRUCTURE_PREFER_FAST_TRACE_BIT_KHR
            | VK_BUILD_ACCELERATION_STRUCTURE_ALLOW_UPDATE_BIT_KHR;

        let (acceleration_structure, generation_data, result_buffer, build_sizes_info) =
            match self.data {
                AccelerationStructureBuilderData::TopLevel(instances) => {
                    // Upload the instance records; the build reads them through
                    // their device address, so the buffer must stay alive and is
                    // therefore retained in the generation data below.
                    let instances_buffer = Buffer::builder()
                        .set_data(&instances)
                        .set_usage(
                            VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR
                                | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
                        )
                        .set_memory_usage(MemoryUsage::CpuToGpu)
                        .build(device.clone())?;

                    let device_address: VkDeviceOrHostAddressConstKHR =
                        instances_buffer.device_address().into();

                    let geometry = VkAccelerationStructureGeometryKHR::new(
                        VK_GEOMETRY_TYPE_INSTANCES_KHR,
                        VkAccelerationStructureGeometryDataKHR::from(
                            VkAccelerationStructureGeometryInstancesDataKHR::from(device_address),
                        ),
                        VK_GEOMETRY_OPAQUE_BIT_KHR,
                    );

                    let mut geometry_info = VkAccelerationStructureBuildGeometryInfoKHR::minimal(
                        VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR,
                        build_flags,
                        VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR,
                    );
                    geometry_info.set_geometry(slice::from_ref(&geometry));

                    // NOTE(review): for a TLAS the spec expects the number of
                    // instances in the instances geometry here; `1` looks like
                    // it should be `instances.len() as u32` — confirm, as this
                    // may under-report the required buffer sizes.
                    let max_primitive_counts = 1;

                    let build_sizes_info = device.get_acceleration_structure_build_sizes(
                        VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR,
                        &geometry_info,
                        &max_primitive_counts,
                    );

                    // GPU-only storage the acceleration structure lives in.
                    let result_buffer = Buffer::builder()
                        .set_usage(VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR)
                        .set_memory_usage(MemoryUsage::GpuOnly)
                        .set_size(build_sizes_info.accelerationStructureSize)
                        .build(device.clone())?;

                    let as_ci = VkAccelerationStructureCreateInfoKHR::new(
                        self.flags,
                        result_buffer.vk_handle(),
                        0,
                        build_sizes_info.accelerationStructureSize,
                        VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR,
                        self.capture_replay_address.unwrap_or(0),
                    );

                    (
                        device.create_acceleration_structure(&as_ci, None)?,
                        AccelerationStructureGenerationData::from((
                            instances,
                            geometry,
                            instances_buffer,
                        )),
                        result_buffer,
                        build_sizes_info,
                    )
                }
                AccelerationStructureBuilderData::BottomLevel(geometries, primitive_counts) => {
                    let mut geometry_info = VkAccelerationStructureBuildGeometryInfoKHR::minimal(
                        VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR,
                        build_flags,
                        VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR,
                    );
                    geometry_info.set_geometry(geometries.as_slice());

                    // Size the query by the largest geometry's primitive count.
                    let max_primitive_counts = *primitive_counts
                        .iter()
                        .max()
                        .expect("empty primitive counts");

                    let build_sizes_info = device.get_acceleration_structure_build_sizes(
                        VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR,
                        &geometry_info,
                        &max_primitive_counts,
                    );

                    // GPU-only storage the acceleration structure lives in.
                    let result_buffer = Buffer::builder()
                        .set_usage(VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR)
                        .set_memory_usage(MemoryUsage::GpuOnly)
                        .set_size(build_sizes_info.accelerationStructureSize)
                        .build(device.clone())?;

                    let as_ci = VkAccelerationStructureCreateInfoKHR::new(
                        self.flags,
                        result_buffer.vk_handle(),
                        0,
                        build_sizes_info.accelerationStructureSize,
                        VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR,
                        self.capture_replay_address.unwrap_or(0),
                    );

                    (
                        device.create_acceleration_structure(&as_ci, None)?,
                        AccelerationStructureGenerationData::from((geometries, primitive_counts)),
                        result_buffer,
                        build_sizes_info,
                    )
                }
            };

        let acceleration_structure = Arc::new(AccelerationStructure {
            device: device.clone(),
            acceleration_structure,
            result_buffer,
            // Scratch buffer for the initial build, aligned as the physical
            // device requires for acceleration-structure scratch memory.
            scratch_buffer: Mutex::new(AccelerationStructure::create_scratch_buffer(
                &device,
                build_sizes_info.buildScratchSize,
                device
                    .physical_device()
                    .acceleration_structure_properties()
                    .minAccelerationStructureScratchOffsetAlignment as VkDeviceSize,
            )?),
            update_scratch_buffer_size: build_sizes_info.updateScratchSize,
            generation_data,
            build_flags,
        });

        // Record the initial build (src = NULL_HANDLE, mode = BUILD).
        acceleration_structure.generate(
            recorder,
            VkAccelerationStructureKHR::NULL_HANDLE,
            VK_BUILD_ACCELERATION_STRUCTURE_MODE_BUILD_KHR,
        )?;

        Ok(acceleration_structure)
    }
}
/// A Vulkan ray-tracing acceleration structure together with the buffers and
/// build inputs needed to rebuild or refit it.
pub struct AccelerationStructure {
    device: Arc<Device>,
    acceleration_structure: VkAccelerationStructureKHR,
    // Buffer backing the acceleration-structure storage.
    result_buffer: Arc<Buffer<u8>>,
    // Scratch memory used during build/update; replaced on each update().
    scratch_buffer: Mutex<Arc<Buffer<u8>>>,
    // Scratch size required for update builds (from the build-sizes query).
    update_scratch_buffer_size: VkDeviceSize,
    // Inputs retained so generate() can re-record the build later.
    generation_data: AccelerationStructureGenerationData,
    build_flags: VkBuildAccelerationStructureFlagBitsKHR,
}
impl AccelerationStructure {
    /// Starts a builder for a bottom-level structure (triangle geometries).
    pub fn bottom_level() -> AccelerationStructureBuilder {
        AccelerationStructureBuilder {
            flags: 0.into(),
            capture_replay_address: None,
            data: AccelerationStructureBuilderData::BottomLevel(Vec::new(), Vec::new()),
        }
    }

    /// Starts a builder for a top-level structure (instances of BLASes).
    pub fn top_level() -> AccelerationStructureBuilder {
        AccelerationStructureBuilder {
            flags: 0.into(),
            capture_replay_address: None,
            data: AccelerationStructureBuilderData::TopLevel(Vec::new()),
        }
    }

    /// The buffer that backs this acceleration structure's storage.
    pub fn result_buffer(&self) -> &Arc<Buffer<u8>> {
        &self.result_buffer
    }

    /// Queries the device address of this structure (used when referencing a
    /// BLAS from a TLAS instance).
    fn address(&self) -> VkDeviceAddress {
        self.device.get_acceleration_structure_device_address(
            &VkAccelerationStructureDeviceAddressInfoKHR::new(self.acceleration_structure),
        )
    }

    /// Records a refit (UPDATE-mode build) of this structure into
    /// `buffer_recorder`, allocating a fresh update-sized scratch buffer first.
    ///
    /// # Errors
    ///
    /// Fails if the scratch buffer cannot be allocated.
    pub fn update(&self, buffer_recorder: &mut CommandBufferRecorder<'_>) -> Result<()> {
        // Replace the scratch buffer with one sized for updates.
        *self.scratch_buffer.lock().unwrap() = Self::create_scratch_buffer(
            &self.device,
            self.update_scratch_buffer_size,
            self.device
                .physical_device()
                .acceleration_structure_properties()
                .minAccelerationStructureScratchOffsetAlignment as VkDeviceSize,
        )?;

        // An update build uses the structure itself as its source.
        self.generate(
            buffer_recorder,
            self.acceleration_structure,
            VK_BUILD_ACCELERATION_STRUCTURE_MODE_UPDATE_KHR,
        )
    }

    /// Records the actual build/update commands plus a memory barrier that
    /// orders this build against subsequent acceleration-structure builds.
    fn generate(
        &self,
        buffer_recorder: &mut CommandBufferRecorder<'_>,
        src: VkAccelerationStructureKHR,
        mode: VkBuildAccelerationStructureModeKHR,
    ) -> Result<()> {
        match &self.generation_data {
            AccelerationStructureGenerationData::TopLevel(instances, geometry, _buffer) => {
                let mut build_info = VkAccelerationStructureBuildGeometryInfoKHR::new(
                    VK_ACCELERATION_STRUCTURE_TYPE_TOP_LEVEL_KHR,
                    self.build_flags,
                    mode,
                    src,
                    self.acceleration_structure,
                    self.scratch_buffer.lock().unwrap().device_address().into(),
                );

                let geometry_slice: &[&VkAccelerationStructureGeometryKHR] = &[geometry];
                build_info.set_geometry(geometry_slice);

                // One range covering all instances, no offsets.
                buffer_recorder.build_acceleration_structures(
                    &[build_info],
                    &[&[VkAccelerationStructureBuildRangeInfoKHR::new(
                        instances.len() as u32,
                        0,
                        0,
                        0,
                    )]],
                );
            }
            AccelerationStructureGenerationData::BottomLevel(geometries, range_infos) => {
                let mut build_info = VkAccelerationStructureBuildGeometryInfoKHR::new(
                    VK_ACCELERATION_STRUCTURE_TYPE_BOTTOM_LEVEL_KHR,
                    self.build_flags,
                    mode,
                    src,
                    self.acceleration_structure,
                    self.scratch_buffer.lock().unwrap().device_address().into(),
                );

                build_info.set_geometry(geometries.as_slice());

                buffer_recorder
                    .build_acceleration_structures(&[build_info], &[range_infos.as_slice()]);
            }
        };

        // Make the build's writes visible to later builds (e.g. a TLAS build
        // consuming this BLAS).
        buffer_recorder.memory_barrier(
            VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR,
            VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
            VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR,
            VK_PIPELINE_STAGE_ACCELERATION_STRUCTURE_BUILD_BIT_KHR,
        );

        Ok(())
    }

    /// Allocates a GPU-only, device-addressable scratch buffer of `size`
    /// bytes with the given forced `alignment`.
    #[inline]
    fn create_scratch_buffer(
        device: &Arc<Device>,
        size: VkDeviceSize,
        alignment: VkDeviceSize,
    ) -> Result<Arc<Buffer<u8>>> {
        Buffer::builder()
            .set_usage(
                VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
            )
            .set_memory_usage(MemoryUsage::GpuOnly)
            .set_size(size)
            .force_alignment(alignment)
            .build(device.clone())
    }
}
impl Drop for AccelerationStructure {
    /// Destroys the Vulkan handle; the backing buffers clean themselves up
    /// via their own `Drop` implementations.
    fn drop(&mut self) {
        self.device
            .destroy_acceleration_structure(self.acceleration_structure, None);
    }
}
// Exposes the raw VkAccelerationStructureKHR handle through the crate's
// VkHandle trait machinery.
impl_vk_handle!(
    AccelerationStructure,
    VkAccelerationStructureKHR,
    acceleration_structure
);
/// Build inputs retained for the lifetime of an [`AccelerationStructure`] so
/// it can be rebuilt or refitted by `generate()`.
enum AccelerationStructureGenerationData {
    TopLevel(
        // Instance records (their count drives the build range).
        Vec<VkAccelerationStructureInstanceKHR>,
        // The single instances-geometry referencing the buffer below.
        VkAccelerationStructureGeometryKHR,
        // Keeps the instance buffer alive; the build reads it by address.
        Arc<Buffer<VkAccelerationStructureInstanceKHR>>,
    ),
    BottomLevel(
        Vec<VkAccelerationStructureGeometryKHR>,
        // One build range per geometry.
        Vec<VkAccelerationStructureBuildRangeInfoKHR>,
    ),
}
/// Packs top-level build inputs (instances, their geometry, and the buffer
/// the geometry references) into the retained generation data.
impl
    From<(
        Vec<VkAccelerationStructureInstanceKHR>,
        VkAccelerationStructureGeometryKHR,
        Arc<Buffer<VkAccelerationStructureInstanceKHR>>,
    )> for AccelerationStructureGenerationData
{
    fn from(
        data: (
            Vec<VkAccelerationStructureInstanceKHR>,
            VkAccelerationStructureGeometryKHR,
            Arc<Buffer<VkAccelerationStructureInstanceKHR>>,
        ),
    ) -> Self {
        let (instance_records, instance_geometry, instance_buffer) = data;
        Self::TopLevel(instance_records, instance_geometry, instance_buffer)
    }
}
/// Packs bottom-level build inputs, expanding each primitive count into a
/// build-range record (offsets all zero).
impl From<(Vec<VkAccelerationStructureGeometryKHR>, Vec<u32>)>
    for AccelerationStructureGenerationData
{
    fn from(data: (Vec<VkAccelerationStructureGeometryKHR>, Vec<u32>)) -> Self {
        let (geometries, primitive_counts) = data;

        // One build range per geometry, covering `count` primitives from offset 0.
        let mut range_infos = Vec::with_capacity(primitive_counts.len());
        for &count in &primitive_counts {
            range_infos.push(VkAccelerationStructureBuildRangeInfoKHR::new(count, 0, 0, 0));
        }

        Self::BottomLevel(geometries, range_infos)
    }
}

157
vulkan-rs/src/address.rs Normal file
View file

@ -0,0 +1,157 @@
use crate::prelude::*;
use anyhow::Result;
use std::fmt;
/// A Vulkan address in one of its three representations: a raw device
/// address, or a mutable/constant host-or-device address union.
#[derive(Clone)]
pub enum Address {
    DeviceAddress(VkDeviceAddress),
    HostAddressMut(VkDeviceOrHostAddressKHR),
    HostAddressConst(VkDeviceOrHostAddressConstKHR),
}
impl Address {
    /// Returns the value as a mutable device-or-host union.
    ///
    /// # Errors
    ///
    /// Fails unless the variant is `DeviceAddress`.
    #[inline]
    pub fn device_address_mut(&self) -> Result<VkDeviceOrHostAddressKHR> {
        match self {
            Self::DeviceAddress(address) => Ok(VkDeviceOrHostAddressKHR::from(*address)),
            Self::HostAddressMut(_) => Err(anyhow::Error::msg(
                "Wrong device address format. Expected: DeviceAddress found: HostAddressMut",
            )),
            Self::HostAddressConst(_) => Err(anyhow::Error::msg(
                "Wrong device address format. Expected: DeviceAddress found: HostAddressConst",
            )),
        }
    }

    /// Returns the value as a constant device-or-host union.
    ///
    /// # Errors
    ///
    /// Fails unless the variant is `DeviceAddress`.
    #[inline]
    pub fn device_address_const(&self) -> Result<VkDeviceOrHostAddressConstKHR> {
        match self {
            Self::DeviceAddress(address) => Ok(VkDeviceOrHostAddressConstKHR::from(*address)),
            Self::HostAddressMut(_) => Err(anyhow::Error::msg(
                "Wrong device address format. Expected: DeviceAddress found: HostAddressMut",
            )),
            Self::HostAddressConst(_) => Err(anyhow::Error::msg(
                "Wrong device address format. Expected: DeviceAddress found: HostAddressConst",
            )),
        }
    }

    /// Returns the stored mutable host address.
    ///
    /// # Errors
    ///
    /// Fails unless the variant is `HostAddressMut`.
    #[inline]
    pub fn host_address_mut(&self) -> Result<VkDeviceOrHostAddressKHR> {
        match self {
            Self::DeviceAddress(_) => Err(anyhow::Error::msg(
                "Wrong device address format. Expected: HostAddressMut found: DeviceAddress",
            )),
            Self::HostAddressMut(address) => Ok(*address),
            Self::HostAddressConst(_) => Err(anyhow::Error::msg(
                "Wrong device address format. Expected: HostAddressMut found: HostAddressConst",
            )),
        }
    }

    /// Returns the stored constant host address.
    ///
    /// # Errors
    ///
    /// Fails unless the variant is `HostAddressConst`.
    #[inline]
    pub fn host_address_const(&self) -> Result<VkDeviceOrHostAddressConstKHR> {
        match self {
            Self::DeviceAddress(_) => Err(anyhow::Error::msg(
                "Wrong device address format. Expected: HostAddressConst found: DeviceAddress",
            )),
            Self::HostAddressMut(_) => Err(anyhow::Error::msg(
                "Wrong device address format. Expected: HostAddressConst found: HostAddressMut",
            )),
            Self::HostAddressConst(address) => Ok(*address),
        }
    }

    /// `true` if this is a `DeviceAddress`.
    // Idiom fix: the manual `match … => true/false` blocks below were
    // replaced with `matches!` (clippy::match_like_matches_macro).
    #[inline]
    pub fn is_device_address(&self) -> bool {
        matches!(self, Self::DeviceAddress(_))
    }

    /// `true` if this is either kind of host address.
    #[inline]
    pub fn is_host_address(&self) -> bool {
        matches!(self, Self::HostAddressConst(_) | Self::HostAddressMut(_))
    }

    /// `true` if this is a mutable host address.
    #[inline]
    pub fn is_host_address_mut(&self) -> bool {
        matches!(self, Self::HostAddressMut(_))
    }

    /// `true` if this is a constant host address.
    #[inline]
    pub fn is_host_address_const(&self) -> bool {
        matches!(self, Self::HostAddressConst(_))
    }
}
/// Builds a mutable host address from an exclusive reference.
impl<'a, T> From<&'a mut T> for Address {
    fn from(reference: &'a mut T) -> Self {
        Self::HostAddressMut(VkDeviceOrHostAddressKHR::from(reference))
    }
}

/// Builds a constant host address from a shared reference.
impl<'a, T> From<&'a T> for Address {
    fn from(reference: &'a T) -> Self {
        Self::HostAddressConst(VkDeviceOrHostAddressConstKHR::from(reference))
    }
}

/// Wraps a raw device address.
impl From<VkDeviceAddress> for Address {
    fn from(device_address: VkDeviceAddress) -> Self {
        Self::DeviceAddress(device_address)
    }
}
impl Into<VkDeviceOrHostAddressKHR> for Address {
fn into(self) -> VkDeviceOrHostAddressKHR {
match self {
Self::DeviceAddress(address) => VkDeviceOrHostAddressKHR::from(address),
Self::HostAddressMut(address) => address,
Self::HostAddressConst(_) => {
panic!("Called Into<VkDeviceOrHostAddressKHR> on HostAddressConst")
}
}
}
}
impl Into<VkDeviceOrHostAddressConstKHR> for Address {
fn into(self) -> VkDeviceOrHostAddressConstKHR {
match self {
Self::DeviceAddress(address) => VkDeviceOrHostAddressConstKHR::from(address),
Self::HostAddressMut(address) => address.into(),
Self::HostAddressConst(address) => address,
}
}
}
impl Into<VkDeviceAddress> for Address {
fn into(self) -> VkDeviceAddress {
match self {
Self::DeviceAddress(address) => address,
Self::HostAddressMut(address) => address.device_address(),
Self::HostAddressConst(address) => address.device_address(),
}
}
}
/// Opaque `Debug` output: only the type name is printed, since the payload
/// is a raw address/union with no meaningful debug form.
impl fmt::Debug for Address {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Address").finish()
    }
}

288
vulkan-rs/src/buffer.rs Normal file
View file

@ -0,0 +1,288 @@
use crate::prelude::*;
use anyhow::Result;
use std;
use std::mem;
use std::sync::Arc;
/// Builder for [`Buffer`]; obtained via `Buffer::builder()`.
pub struct BufferBuilder<'a, T> {
    flags: VkBufferCreateFlagBits,
    usage: VkBufferUsageFlagBits,
    // vma memory-usage hint; `None` falls through MemoryUsage::into_vma.
    memory_usage: Option<MemoryUsage>,
    sharing_mode: VkSharingMode,
    // Initial contents; when set, its length overrides `size`.
    data: Option<&'a [T]>,
    // Element count (not bytes); ignored when `data` is provided.
    size: VkDeviceSize,
    // Optional forced memory alignment (e.g. for scratch buffers).
    alignment: Option<VkDeviceSize>,
}
impl<'a, T> BufferBuilder<'a, T> {
    /// Sets the vma memory-usage hint (GpuOnly, CpuToGpu, …).
    pub fn set_memory_usage(mut self, usage: MemoryUsage) -> Self {
        self.memory_usage = Some(usage);
        self
    }

    /// Sets the VkBufferUsageFlagBits the buffer will be created with.
    pub fn set_usage(mut self, usage: impl Into<VkBufferUsageFlagBits>) -> Self {
        self.usage = usage.into();
        self
    }

    /// Sets the initial contents; the buffer's size is then taken from
    /// `data.len()` instead of `set_size`.
    pub fn set_data(mut self, data: &'a [T]) -> Self {
        self.data = Some(data);
        self
    }

    /// Sets the element count (ignored when `set_data` was used).
    pub fn set_size(mut self, size: VkDeviceSize) -> Self {
        self.size = size;
        self
    }

    /// Sets the sharing mode (default: VK_SHARING_MODE_EXCLUSIVE).
    pub fn set_sharing_mode(mut self, sharing_mode: VkSharingMode) -> Self {
        self.sharing_mode = sharing_mode;
        self
    }

    /// Sets the VkBufferCreateFlagBits.
    pub fn set_flags(mut self, flags: impl Into<VkBufferCreateFlagBits>) -> Self {
        self.flags = flags.into();
        self
    }

    /// Overrides the memory alignment reported by the driver.
    pub fn force_alignment(mut self, alignment: VkDeviceSize) -> Self {
        self.alignment = Some(alignment);
        self
    }
}
impl<'a, T: Clone + Send + Sync + 'static> BufferBuilder<'a, T> {
    /// Creates the buffer, binds freshly allocated memory to it, and uploads
    /// the initial `data` (if any) by mapping the memory.
    ///
    /// # Errors
    ///
    /// Fails if buffer creation, memory allocation, or the initial fill fails.
    ///
    /// # Panics
    ///
    /// Panics when the resulting size is zero.
    // NOTE(review): this panic sits inside a Result-returning function;
    // consider returning an Err instead of panicking.
    pub fn build(self, device: Arc<Device>) -> Result<Arc<Buffer<T>>> {
        // Element count: taken from the initial data when present.
        let size = match self.data {
            Some(data) => data.len() as VkDeviceSize,
            None => self.size,
        };

        if size == 0 {
            panic!("Vulkan buffer size must not be zero");
        }

        // create buffer (byte size = element count * element size)
        let buffer_ci = VkBufferCreateInfo::new(
            self.flags,
            size * mem::size_of::<T>() as VkDeviceSize,
            self.usage,
            self.sharing_mode,
            &[],
        );

        let buffer = device.create_buffer(&buffer_ci)?;

        // create memory; a forced alignment overrides the driver-reported
        // requirement before allocation
        let memory = match self.alignment {
            Some(alignment) => {
                let mut memory_requirements = device.buffer_memory_requirements(buffer);
                memory_requirements.alignment = alignment;

                Memory::forced_requirements(
                    &device,
                    memory_requirements,
                    buffer,
                    MemoryUsage::into_vma(self.memory_usage),
                )?
            }
            None => {
                Memory::buffer_memory(&device, buffer, MemoryUsage::into_vma(self.memory_usage))?
            }
        };

        let buffer = Arc::new(Buffer {
            device,
            buffer,
            memory,
            _usage: self.usage,
            _sharing_mode: self.sharing_mode,
            size,
        });

        // Upload initial contents via a mapped write.
        if let Some(data) = self.data {
            buffer.fill(data)?;
        }

        Ok(buffer)
    }
}
/// A typed Vulkan buffer together with its bound memory allocation.
#[derive(Debug)]
pub struct Buffer<T> {
    device: Arc<Device>,
    buffer: VkBuffer,
    memory: Arc<Memory<T>>,
    _usage: VkBufferUsageFlagBits,
    _sharing_mode: VkSharingMode,
    // Element count (not bytes); see byte_size() for the byte length.
    size: VkDeviceSize,
}
impl<T: Clone + Send + Sync + 'static> Buffer<T> {
    /// Copies `data` into the buffer through a mapped range of `data.len()`
    /// elements (requires host-visible memory).
    pub fn fill(&self, data: &[T]) -> Result<()> {
        let mut buffer_map = self.map(data.len() as VkDeviceSize)?;

        buffer_map.copy(data);

        Ok(())
    }

    /// Maps the first `length` elements of the buffer's memory.
    pub fn map(&self, length: VkDeviceSize) -> Result<VkMappedMemory<'_, T>> {
        self.memory.map(length)
    }

    /// Maps the whole buffer.
    pub fn map_complete(&self) -> Result<VkMappedMemory<'_, T>> {
        self.memory.map(self.size)
    }

    /// Creates a GPU-only copy of this buffer, records the copy into
    /// `buffer_recorder`, and inserts a barrier so the copy completes before
    /// the buffer is used at `stage` with `access_mask`.
    ///
    /// The returned buffer's usage is `usage | TRANSFER_DST`.
    pub fn into_device_local(
        self: &Arc<Buffer<T>>,
        buffer_recorder: &mut CommandBufferRecorder<'_>,
        access_mask: impl Into<VkAccessFlagBits>,
        stage: impl Into<VkPipelineStageFlagBits>,
        usage: impl Into<VkBufferUsageFlagBits>,
    ) -> Result<Arc<Buffer<T>>> {
        let new_usage = usage.into() | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

        let device_local_buffer = Buffer::builder()
            .set_memory_usage(MemoryUsage::GpuOnly)
            .set_usage(new_usage)
            .set_size(self.size)
            .build(self.device.clone())?;

        // copy complete buffer
        buffer_recorder.copy_buffer(
            self,
            &device_local_buffer,
            &[VkBufferCopy {
                srcOffset: 0,
                dstOffset: 0,
                size: self.byte_size(),
            }],
        );

        // make sure buffer is copied before using it
        buffer_recorder.buffer_barrier(
            &device_local_buffer,
            VK_ACCESS_TRANSFER_WRITE_BIT,
            VK_PIPELINE_STAGE_TRANSFER_BIT,
            access_mask,
            stage,
        );

        Ok(device_local_buffer)
    }
}
impl<T> Buffer<T> {
    /// Returns a builder with empty flags and usage, exclusive sharing,
    /// and no size, data or alignment configured yet.
    pub fn builder<'a>() -> BufferBuilder<'a, T> {
        BufferBuilder {
            data: None,
            size: 0,
            alignment: None,
            memory_usage: None,
            flags: 0u32.into(),
            usage: 0u32.into(),
            sharing_mode: VK_SHARING_MODE_EXCLUSIVE,
        }
    }

    /// Total size of the buffer in bytes.
    pub fn byte_size(&self) -> VkDeviceSize {
        mem::size_of::<T>() as VkDeviceSize * self.size
    }

    /// Number of elements of `T` the buffer holds.
    pub fn size(&self) -> VkDeviceSize {
        self.size
    }

    /// Queries the buffer's device address from the device.
    pub fn device_address(&self) -> Address {
        self.device.get_buffer_device_address(self.buffer)
    }
}
impl<T> VulkanDevice for Buffer<T> {
    // The device this buffer was created on.
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// `VkHandle<VkBuffer>` for `Buffer<T>`, `&Buffer<T>`, `Arc<Buffer<T>>`
// and `&Arc<Buffer<T>>`.
impl_vk_handle_t!(Buffer, VkBuffer, buffer);

// Expose the backing `VkDeviceMemory` for the same four receiver forms.
impl<T> VkHandle<VkDeviceMemory> for Buffer<T> {
    fn vk_handle(&self) -> VkDeviceMemory {
        self.memory.vk_handle()
    }
}

impl<'a, T> VkHandle<VkDeviceMemory> for &'a Buffer<T> {
    fn vk_handle(&self) -> VkDeviceMemory {
        self.memory.vk_handle()
    }
}

impl<T> VkHandle<VkDeviceMemory> for Arc<Buffer<T>> {
    fn vk_handle(&self) -> VkDeviceMemory {
        self.memory.vk_handle()
    }
}

impl<'a, T> VkHandle<VkDeviceMemory> for &'a Arc<Buffer<T>> {
    fn vk_handle(&self) -> VkDeviceMemory {
        self.memory.vk_handle()
    }
}

impl<T> Drop for Buffer<T> {
    // Destroys the Vulkan buffer object; the allocation is released when
    // the `Arc<Memory<T>>` drops.
    fn drop(&mut self) {
        self.device.destroy_buffer(self.buffer);
    }
}
// use crate::{ffi::*, handle_ffi_result};
// FFI surface for `Buffer<T>`: the element type is erased behind a trait
// object so C callers can query sizes without knowing `T`.
impl<T> FFIBufferTrait for Buffer<T> {
    fn byte_size(&self) -> VkDeviceSize {
        self.byte_size()
    }
}

/// Type-erased view of a `Buffer<T>` used by the C API.
pub trait FFIBufferTrait {
    /// Size of the buffer in bytes.
    fn byte_size(&self) -> VkDeviceSize;
}

/// Owned, type-erased buffer handle handed across the FFI boundary.
pub struct FFIBuffer {
    trait_obj: Box<dyn FFIBufferTrait>,
}

impl FFIBuffer {
    // Forwards to the erased buffer implementation.
    fn byte_size(&self) -> VkDeviceSize {
        self.trait_obj.byte_size()
    }
}

/// C entry point for buffer creation; not implemented yet — calling it
/// aborts via `todo!`.
#[no_mangle]
pub extern "C" fn create_buffer(_device: *const Device) -> *const FFIBuffer {
    todo!()
}
/// C entry point: returns the byte size of `buffer`.
///
/// Returns 0 when the pointer is null — dereferencing it unchecked, as the
/// previous version did, would be undefined behavior.
#[no_mangle]
pub extern "C" fn byte_size(buffer: *const FFIBuffer) -> VkDeviceSize {
    if buffer.is_null() {
        return 0;
    }

    unsafe { &*buffer }.byte_size()
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,88 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Builder for `CommandPool`; see `CommandPool::builder` for the defaults.
pub(crate) struct CommandPoolBuilder {
    flags: VkCommandPoolCreateFlagBits,
    queue_family_index: u32,
}
impl CommandPoolBuilder {
    /// Replaces the pool creation flags.
    pub(crate) fn set_flags(mut self, flags: impl Into<VkCommandPoolCreateFlagBits>) -> Self {
        self.flags = flags.into();
        self
    }

    /// Selects the queue family this pool's command buffers target.
    pub(crate) fn set_queue_family_index(mut self, queue_family_index: u32) -> Self {
        self.queue_family_index = queue_family_index;
        self
    }

    /// Creates the `VkCommandPool` on `device`.
    pub(crate) fn build(self, device: Arc<Device>) -> Result<Arc<CommandPool>> {
        let create_info = VkCommandPoolCreateInfo::new(self.flags, self.queue_family_index);

        let command_pool = device.create_command_pool(&create_info)?;

        Ok(Arc::new(CommandPool {
            command_pool,
            device,
        }))
    }
}
/// Owned wrapper around a `VkCommandPool`.
#[derive(Debug)]
pub(crate) struct CommandPool {
    device: Arc<Device>,
    command_pool: VkCommandPool,
}

impl CommandPool {
    /// Builder with reset-command-buffer and transient flags preset and
    /// queue family 0.
    pub(crate) fn builder() -> CommandPoolBuilder {
        CommandPoolBuilder {
            flags: VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT
                | VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,
            queue_family_index: 0,
        }
    }
}
impl VulkanDevice for CommandPool {
    // The device this pool was created on.
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// `VkHandle<VkCommandPool>` for the four standard receiver forms.
impl_vk_handle!(CommandPool, VkCommandPool, command_pool);

impl Drop for CommandPool {
    // Releases the Vulkan pool object.
    fn drop(&mut self) {
        self.device.destroy_command_pool(self.command_pool);
    }
}
// use crate::{ffi::*, handle_ffi_result};
// #[no_mangle]
// pub extern "C" fn create_command_pool(
// flags: VkCommandPoolCreateFlagBits,
// queue_family_index: u32,
// device: *const Device,
// ) -> *const CommandPool {
// let device = unsafe { Arc::from_raw(device) };
// let pool_res = CommandPool::builder()
// .set_flags(flags)
// .set_queue_family_index(queue_family_index)
// .build(device);
// handle_ffi_result!(pool_res)
// }
// #[no_mangle]
// pub extern "C" fn destroy_command_pool(command_pool: *const CommandPool) {
// let _pool = unsafe { Arc::from_raw(command_pool) };
// }

View file

@ -0,0 +1,62 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Outcome of polling or joining a deferred host operation.
pub enum DeferredOperationResult {
    /// Driver reported `VK_SUCCESS`.
    Success,
    /// Operation has not finished yet (e.g. `VK_NOT_READY`).
    Pending,
    /// Any other `VkResult`, passed through verbatim.
    OperationResult(VkResult),
}

/// Owned wrapper around a `VkDeferredOperationKHR` host operation.
pub struct DeferredOperation {
    device: Arc<Device>,
    deferred_operation: VkDeferredOperationKHR,
}
impl DeferredOperation {
    /// Creates a new deferred operation handle on `device`.
    pub fn new(device: Arc<Device>) -> Result<Arc<Self>> {
        let deferred_operation = device.create_deferred_operation(None)?;

        Ok(Arc::new(DeferredOperation {
            deferred_operation,
            device,
        }))
    }

    /// Asks the driver how many threads can usefully join this operation.
    pub fn max_concurrency(&self) -> u32 {
        self.device
            .get_deferred_operation_max_concurrency(self.deferred_operation)
    }

    /// Polls the operation's completion status.
    pub fn result(&self) -> DeferredOperationResult {
        match self
            .device
            .get_deferred_operation_result(self.deferred_operation)
        {
            VK_SUCCESS => DeferredOperationResult::Success,
            VK_NOT_READY => DeferredOperationResult::Pending,
            other => DeferredOperationResult::OperationResult(other),
        }
    }

    /// Contributes the calling thread to executing the operation.
    pub fn join(&self) -> DeferredOperationResult {
        match self.device.deferred_operation_join(self.deferred_operation) {
            VK_SUCCESS => DeferredOperationResult::Success,
            VK_THREAD_IDLE_KHR | VK_THREAD_DONE_KHR => DeferredOperationResult::Pending,
            other => DeferredOperationResult::OperationResult(other),
        }
    }
}
impl Drop for DeferredOperation {
    // Releases the driver-side operation handle.
    fn drop(&mut self) {
        self.device
            .destroy_deferred_operation(self.deferred_operation, None);
    }
}

View file

@ -0,0 +1,160 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Builder for `DescriptorPool`; see `DescriptorPool::builder` for defaults.
pub struct DescriptorPoolBuilder {
    // the layout also provides the pool sizes used at creation
    layout: Option<Arc<DescriptorSetLayout>>,
    descriptor_count: u32,
    flags: VkDescriptorPoolCreateFlagBits,
}
impl DescriptorPoolBuilder {
    /// ORs `flags` into the creation flags (the default
    /// free-descriptor-set bit is deliberately kept).
    pub fn set_flags(mut self, flags: impl Into<VkDescriptorPoolCreateFlagBits>) -> Self {
        self.flags |= flags.into();
        self
    }

    /// Sets how many descriptor sets this pool can hold.
    pub fn set_descriptor_set_count(mut self, count: u32) -> Self {
        self.descriptor_count = count;
        self
    }

    /// Sets the layout whose pool sizes drive the allocation.
    pub fn set_layout(mut self, layout: Arc<DescriptorSetLayout>) -> Self {
        self.layout = Some(layout);
        self
    }

    /// Creates the `VkDescriptorPool` on `device`.
    ///
    /// Debug builds assert that a layout was set and that the descriptor
    /// count is non-zero; release builds still panic via `expect` when the
    /// layout is missing.
    pub fn build(self, device: Arc<Device>) -> Result<Arc<DescriptorPool>> {
        debug_assert!(self.layout.is_some(), "no layout set!");
        debug_assert!(
            self.descriptor_count != 0,
            "descriptor count must be greater than 0"
        );

        let layout = self.layout.expect("descriptor set layout was not set!");

        let descriptor_pool_ci =
            VkDescriptorPoolCreateInfo::new(self.flags, self.descriptor_count, layout.pool_sizes());

        let descriptor_pool = device.create_descriptor_pool(&descriptor_pool_ci)?;

        Ok(Arc::new(DescriptorPool {
            device,
            descriptor_pool,
            descriptor_set_layout: layout,
        }))
    }
}
/// Owned wrapper around a `VkDescriptorPool`, keeping the layout it was
/// created from alive.
#[derive(Debug)]
pub struct DescriptorPool {
    device: Arc<Device>,
    descriptor_pool: VkDescriptorPool,
    descriptor_set_layout: Arc<DescriptorSetLayout>,
}
impl DescriptorPool {
    /// Builder defaults: one descriptor set and the free-descriptor-set
    /// flag (individual sets are later freed in `DescriptorSet::drop`).
    pub fn builder() -> DescriptorPoolBuilder {
        DescriptorPoolBuilder {
            layout: None,
            descriptor_count: 1,
            flags: VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT.into(),
        }
    }

    /// Returns all descriptor sets allocated from this pool back to it.
    pub fn reset(&self) -> Result<()> {
        self.device
            .reset_descriptor_pool(self.descriptor_pool, VK_DESCRIPTOR_POOL_RESET_NULL_BIT)
    }

    /// Starts allocation of a descriptor set from this pool.
    pub fn prepare_set(self: &Arc<Self>) -> DescriptorSetBuilder {
        DescriptorSet::builder(self.device.clone(), self.clone())
    }
}
impl VulkanDevice for DescriptorPool {
    // The device this pool was created on.
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// `VkHandle<VkDescriptorPool>` for the four standard receiver forms.
impl_vk_handle!(DescriptorPool, VkDescriptorPool, descriptor_pool);

// The pool also exposes the layout handle it was built from, so callers
// (e.g. `DescriptorSetBuilder::allocate`) only need the pool.
impl VkHandle<VkDescriptorSetLayout> for DescriptorPool {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.descriptor_set_layout.vk_handle()
    }
}

impl<'a> VkHandle<VkDescriptorSetLayout> for &'a DescriptorPool {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.descriptor_set_layout.vk_handle()
    }
}

impl VkHandle<VkDescriptorSetLayout> for Arc<DescriptorPool> {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.descriptor_set_layout.vk_handle()
    }
}

impl<'a> VkHandle<VkDescriptorSetLayout> for &'a Arc<DescriptorPool> {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.descriptor_set_layout.vk_handle()
    }
}

impl Drop for DescriptorPool {
    // Releases the Vulkan pool object.
    fn drop(&mut self) {
        self.device.destroy_descriptor_pool(self.descriptor_pool);
    }
}
use crate::{ffi::*, handle_ffi_result};
/// C entry point for `DescriptorPool` creation; on failure records the
/// error (see `last_error_message`) and returns null.
///
/// NOTE(review): `Arc::from_raw` takes over one strong reference to both
/// `device` and `descriptor_set_layout` — verify against the C-side
/// contract that callers intend to transfer ownership here.
#[no_mangle]
pub extern "C" fn create_descriptor_pool(
    flags: VkDescriptorPoolCreateFlagBits,
    descriptor_count: u32,
    descriptor_set_layout: *const DescriptorSetLayout,
    device: *const Device,
) -> *const DescriptorPool {
    let device = unsafe { Arc::from_raw(device) };
    let layout = unsafe { Arc::from_raw(descriptor_set_layout) };

    let pool_res = DescriptorPool::builder()
        .set_flags(flags)
        .set_descriptor_set_count(descriptor_count)
        .set_layout(layout)
        .build(device);

    handle_ffi_result!(pool_res)
}
/// C entry point: resets the pool; returns `false` and records the error
/// on failure.
///
/// Borrows the pool instead of reconstructing the `Arc`: the previous
/// `Arc::from_raw` dropped one strong reference on every call, eventually
/// freeing the pool while the C caller still held its pointer. (`byte_size`
/// in this crate already uses the borrowing pattern.)
#[no_mangle]
pub extern "C" fn reset_descriptor_pool(descriptor_pool: *const DescriptorPool) -> bool {
    let pool = unsafe { &*descriptor_pool };

    match pool.reset() {
        Ok(_) => true,
        Err(err) => {
            update_last_error(err);

            false
        }
    }
}
/// C entry point: reclaims the `Arc` handed out by
/// `create_descriptor_pool`, dropping the pool when this was the last
/// strong reference.
#[no_mangle]
pub extern "C" fn destroy_descriptor_pool(descriptor_pool: *const DescriptorPool) {
    let _pool = unsafe { Arc::from_raw(descriptor_pool) };
}

View file

@ -0,0 +1,319 @@
use crate::prelude::*;
use anyhow::Result;
use std::any::Any;
use std::collections::HashMap;
use std::slice;
use std::sync::{Arc, Mutex};
/// One pending write to a descriptor set binding, together with the `Arc`
/// handles that keep the referenced resources alive.
#[derive(Debug)]
pub struct DescriptorWrite {
    binding: u32,
    descriptor_type: VkDescriptorType,
    inner: InnerWrite,
    // type-erased owners of the written resources; stored into the
    // `DescriptorSet` on update so the resources outlive the binding
    handles: Vec<Arc<dyn Any + Send + Sync>>,
}

/// Payload of a `DescriptorWrite`, mirroring the union-like pointer fields
/// of `VkWriteDescriptorSet`.
#[derive(Debug)]
enum InnerWrite {
    Buffers(Vec<VkDescriptorBufferInfo>),
    Images(Vec<VkDescriptorImageInfo>),
    // the write struct is linked to the accompanying handle Vec via
    // `set_acceleration_structures`, so both are kept side by side
    AS(
        (
            VkWriteDescriptorSetAccelerationStructureKHR,
            Vec<VkAccelerationStructureKHR>,
        ),
    ),
}
impl DescriptorWrite {
pub fn uniform_buffers<T: Send + Sync + 'static>(
binding: u32,
buffers: &[&Arc<Buffer<T>>],
) -> Self {
DescriptorWrite {
binding,
descriptor_type: VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
inner: InnerWrite::Buffers(
buffers
.iter()
.map(|buffer| VkDescriptorBufferInfo {
buffer: buffer.vk_handle(),
offset: 0,
range: buffer.byte_size(),
})
.collect(),
),
handles: buffers
.iter()
.map(|b| (*b).clone() as Arc<dyn Any + Send + Sync>)
.collect(),
}
}
pub fn storage_buffers<T: Send + Sync + 'static>(
binding: u32,
buffers: &[&Arc<Buffer<T>>],
) -> Self {
DescriptorWrite {
binding,
descriptor_type: VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
inner: InnerWrite::Buffers(
buffers
.iter()
.map(|buffer| VkDescriptorBufferInfo {
buffer: buffer.vk_handle(),
offset: 0,
range: buffer.byte_size(),
})
.collect(),
),
handles: buffers
.iter()
.map(|b| (*b).clone() as Arc<dyn Any + Send + Sync>)
.collect(),
}
}
pub fn combined_samplers(binding: u32, images: &[&Arc<Image>]) -> Self {
DescriptorWrite {
binding,
descriptor_type: VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
inner: InnerWrite::Images(
images
.iter()
.map(|image| VkDescriptorImageInfo {
sampler: image
.sampler()
.as_ref()
.expect("image has no sampler attached")
.vk_handle(),
imageView: image.vk_handle(),
imageLayout: image.image_layout(),
})
.collect(),
),
handles: images
.iter()
.map(|i| (*i).clone() as Arc<dyn Any + Send + Sync>)
.collect(),
}
}
pub fn storage_images(binding: u32, images: &[&Arc<Image>]) -> Self {
DescriptorWrite {
binding,
descriptor_type: VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
inner: InnerWrite::Images(
images
.iter()
.map(|image| VkDescriptorImageInfo {
sampler: VkSampler::NULL_HANDLE,
imageView: image.vk_handle(),
imageLayout: image.image_layout(),
})
.collect(),
),
handles: images
.iter()
.map(|i| (*i).clone() as Arc<dyn Any + Send + Sync>)
.collect(),
}
}
pub fn acceleration_structures(
binding: u32,
acceleration_structures: &[&Arc<AccelerationStructure>],
) -> Self {
let vk_as: Vec<VkAccelerationStructureKHR> = acceleration_structures
.iter()
.map(|a| a.vk_handle())
.collect();
let mut write = DescriptorWrite {
binding,
descriptor_type: VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR,
inner: InnerWrite::AS((
VkWriteDescriptorSetAccelerationStructureKHR::default(),
vk_as,
)),
handles: acceleration_structures
.iter()
.map(|a| (*a).clone() as Arc<dyn Any + Send + Sync>)
.collect(),
};
if let InnerWrite::AS((vk_write_as, vk_as)) = &mut write.inner {
vk_write_as.set_acceleration_structures(vk_as);
}
write
}
pub fn change_image_layout(mut self, image_layout: VkImageLayout) -> Self {
if let InnerWrite::Images(ref mut infos) = self.inner {
for info in infos {
info.imageLayout = image_layout;
}
}
self
}
fn vk_write(&self, write: &mut VkWriteDescriptorSet) {
match &self.inner {
InnerWrite::Buffers(buffer_infos) => {
write.set_buffer_infos(buffer_infos);
}
InnerWrite::Images(image_infos) => {
write.set_image_infos(image_infos);
}
InnerWrite::AS((as_write, _)) => {
write.descriptorCount = as_write.accelerationStructureCount;
write.chain(as_write);
}
}
}
}
/// Builder returned by `DescriptorPool::prepare_set`.
pub struct DescriptorSetBuilder {
    device: Arc<Device>,
    descriptor_pool: Arc<DescriptorPool>,
    variable_desc_counts: Vec<u32>,
    // chained into the allocate info when variable counts are used; owns
    // the struct the chain pointer refers to
    variable_descriptor_count: VkDescriptorSetVariableDescriptorCountAllocateInfoEXT,
}
impl DescriptorSetBuilder {
    /// Sets the variable descriptor counts (descriptor indexing).
    pub fn set_variable_descriptor_counts(mut self, descriptor_counts: &[u32]) -> Self {
        self.variable_desc_counts = descriptor_counts.to_vec();
        self
    }

    /// Allocates one descriptor set from the pool.
    pub fn allocate(mut self) -> Result<Arc<DescriptorSet>> {
        // the pool forwards the layout handle it was created from via its
        // `VkHandle<VkDescriptorSetLayout>` impl
        let layout = self.descriptor_pool.vk_handle();

        let mut descriptor_set_ci = VkDescriptorSetAllocateInfo::new(
            self.descriptor_pool.vk_handle(),
            slice::from_ref(&layout),
        );

        if !self.variable_desc_counts.is_empty() {
            // the chained struct points into `variable_desc_counts`, which
            // lives inside `self` until after the allocation call below
            self.variable_descriptor_count
                .set_descriptor_counts(&self.variable_desc_counts);
            descriptor_set_ci.chain(&self.variable_descriptor_count);
        }

        let descriptor_set = self.device.allocate_descriptor_sets(&descriptor_set_ci)?[0];

        Ok(Arc::new(DescriptorSet {
            device: self.device,
            pool: self.descriptor_pool,
            descriptor_set,
            handles: Mutex::new(HashMap::new()),
        }))
    }
}
/// One allocated descriptor set, keeping its pool and every resource
/// written into it alive.
#[derive(Debug)]
pub struct DescriptorSet {
    device: Arc<Device>,
    pool: Arc<DescriptorPool>,
    descriptor_set: VkDescriptorSet,
    // per-binding owners of the currently bound resources
    handles: Mutex<HashMap<u32, Vec<Arc<dyn Any + Send + Sync>>>>,
}
impl DescriptorSet {
    /// Starts allocation of a set from `descriptor_pool`; used by
    /// `DescriptorPool::prepare_set`.
    pub(crate) fn builder(
        device: Arc<Device>,
        descriptor_pool: Arc<DescriptorPool>,
    ) -> DescriptorSetBuilder {
        DescriptorSetBuilder {
            device,
            descriptor_pool,
            variable_desc_counts: Vec::new(),
            variable_descriptor_count: VkDescriptorSetVariableDescriptorCountAllocateInfoEXT::new(
                &[],
            ),
        }
    }

    // TODO: add update function for VkCopyDescriptorSet
    /// Applies `writes` to this descriptor set and retains the referenced
    /// resource handles (keyed by binding) so they outlive the binding.
    pub fn update(&self, writes: &[DescriptorWrite]) -> Result<()> {
        debug_assert!(!writes.is_empty());

        let mut vk_writes = Vec::with_capacity(writes.len());

        let mut handles_lock = self.handles.lock().unwrap();

        for write in writes {
            let mut write_desc = VkWriteDescriptorSet::new(
                self.descriptor_set,
                write.binding,
                0,
                write.descriptor_type,
            );

            write.vk_write(&mut write_desc);
            vk_writes.push(write_desc);

            // `insert` both creates and overwrites entries; the previous
            // Some/None match reimplemented exactly that by hand.
            handles_lock.insert(write.binding, write.handles.clone());
        }

        self.device
            .update_descriptor_sets(vk_writes.as_slice(), &[]);

        Ok(())
    }
}
impl VulkanDevice for DescriptorSet {
    // The device this set was allocated on.
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// `VkHandle<VkDescriptorSet>` for the four standard receiver forms.
impl_vk_handle!(DescriptorSet, VkDescriptorSet, descriptor_set);

// The set forwards the layout handle of the pool it came from.
impl VkHandle<VkDescriptorSetLayout> for DescriptorSet {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.pool.vk_handle()
    }
}

impl<'a> VkHandle<VkDescriptorSetLayout> for &'a DescriptorSet {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.pool.vk_handle()
    }
}

impl VkHandle<VkDescriptorSetLayout> for Arc<DescriptorSet> {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.pool.vk_handle()
    }
}

impl<'a> VkHandle<VkDescriptorSetLayout> for &'a Arc<DescriptorSet> {
    fn vk_handle(&self) -> VkDescriptorSetLayout {
        self.pool.vk_handle()
    }
}
impl Drop for DescriptorSet {
    // Returns the set to its pool. Failures are reported to stderr
    // (`eprintln!` instead of the previous `println!` — this is an error
    // message), since panicking inside `drop` would abort the process.
    fn drop(&mut self) {
        if let Err(error) = self
            .device
            .free_descriptor_sets(self.pool.vk_handle(), &[self.descriptor_set])
        {
            eprintln!("{}", error);
        }
    }
}

View file

@ -0,0 +1,130 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Builder for `DescriptorSetLayout`; bindings and their descriptor
/// indexing flags are kept in two parallel vectors.
pub struct DescriptorSetLayoutBuilder {
    layout_bindings: Vec<VkDescriptorSetLayoutBinding>,
    indexing_flags: Vec<VkDescriptorBindingFlagBitsEXT>,
    flags: VkDescriptorSetLayoutCreateFlagBits,
}
impl DescriptorSetLayoutBuilder {
    /// Appends a binding description (descriptor count 1; adjust with
    /// `change_descriptor_count`).
    pub fn add_layout_binding(
        mut self,
        binding: u32,
        descriptor_type: VkDescriptorType,
        stage_flags: impl Into<VkShaderStageFlagBits>,
        indexing_flags: impl Into<VkDescriptorBindingFlagBitsEXT>,
    ) -> Self {
        self.layout_bindings.push(VkDescriptorSetLayoutBinding::new(
            binding,
            descriptor_type,
            stage_flags,
        ));

        let flags = indexing_flags.into();
        self.indexing_flags.push(flags);

        // update-after-bind bindings require the matching layout create flag
        if (flags & VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT) != 0 {
            self.flags |= VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT;
        }

        self
    }

    /// Overrides the descriptor count of the most recently added binding;
    /// does nothing when no binding was added yet.
    pub fn change_descriptor_count(mut self, count: u32) -> Self {
        if let Some(binding) = self.layout_bindings.last_mut() {
            binding.descriptorCount = count;
        }

        self
    }

    /// Replaces the layout creation flags.
    pub fn set_flags(mut self, flags: impl Into<VkDescriptorSetLayoutCreateFlagBits>) -> Self {
        self.flags = flags.into();
        self
    }

    /// Creates the `VkDescriptorSetLayout` and precomputes the pool sizes
    /// later consumed by `DescriptorPoolBuilder`.
    pub fn build(self, device: Arc<Device>) -> Result<Arc<DescriptorSetLayout>> {
        let mut descriptor_set_ci =
            VkDescriptorSetLayoutCreateInfo::new(self.flags, &self.layout_bindings);

        // declared outside the `if` so the pointer chained below stays
        // valid until `create_descriptor_set_layout` has run
        let binding_flags_ci =
            VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::new(&self.indexing_flags);

        if device.enabled_extensions().descriptor_indexing {
            descriptor_set_ci.chain(&binding_flags_ci);

            /*
            if device.enabled_extensions().maintenance3 {
                let mut layout_support = VkDescriptorSetLayoutSupport::default();

                let variable_support =
                    VkDescriptorSetVariableDescriptorCountLayoutSupportEXT::default();

                layout_support.chain(&variable_support);

                device.descriptor_set_layout_support(&descriptor_set_ci, &mut layout_support);
            }
            */
        }

        let descriptor_set_layout = device.create_descriptor_set_layout(&descriptor_set_ci)?;

        // one pool size entry per binding, mirroring its type and count
        // (idiomatic map/collect instead of the former manual push loop)
        let pool_sizes = self
            .layout_bindings
            .iter()
            .map(|layout_binding| VkDescriptorPoolSize {
                ty: layout_binding.descriptorType,
                descriptorCount: layout_binding.descriptorCount,
            })
            .collect();

        Ok(Arc::new(DescriptorSetLayout {
            device,
            descriptor_set_layout,
            pool_sizes,
        }))
    }
}
/// Owned wrapper around a `VkDescriptorSetLayout` plus the pool sizes
/// derived from its bindings.
#[derive(Debug)]
pub struct DescriptorSetLayout {
    device: Arc<Device>,
    descriptor_set_layout: VkDescriptorSetLayout,
    pool_sizes: Vec<VkDescriptorPoolSize>,
}

impl DescriptorSetLayout {
    /// Empty builder: no bindings, no flags.
    pub fn builder() -> DescriptorSetLayoutBuilder {
        DescriptorSetLayoutBuilder {
            layout_bindings: Vec::new(),
            indexing_flags: Vec::new(),
            flags: 0u32.into(),
        }
    }

    /// Pool sizes matching this layout's bindings; consumed by
    /// `DescriptorPoolBuilder::build`.
    pub fn pool_sizes(&self) -> &[VkDescriptorPoolSize] {
        self.pool_sizes.as_slice()
    }
}
impl VulkanDevice for DescriptorSetLayout {
    // The device this layout was created on.
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// `VkHandle<VkDescriptorSetLayout>` for the four standard receiver forms.
impl_vk_handle!(
    DescriptorSetLayout,
    VkDescriptorSetLayout,
    descriptor_set_layout
);

impl Drop for DescriptorSetLayout {
    // Releases the Vulkan layout object.
    fn drop(&mut self) {
        self.device
            .destroy_descriptor_set_layout(self.descriptor_set_layout);
    }
}

2943
vulkan-rs/src/device.rs Normal file

File diff suppressed because it is too large Load diff

88
vulkan-rs/src/fence.rs Normal file
View file

@ -0,0 +1,88 @@
use crate::prelude::*;
use anyhow::Result;
use std::{sync::Arc, time::Duration};
/// Builder for `Fence`; by default the fence starts unsignaled.
pub struct FenceBuilder {
    signaled: bool,
}
impl FenceBuilder {
    /// Chooses whether the fence is created in the signaled state.
    pub fn set_signaled(mut self, signaled: bool) -> Self {
        self.signaled = signaled;
        self
    }

    /// Creates the `VkFence` on `device`.
    pub fn build(self, device: Arc<Device>) -> Result<Arc<Fence>> {
        let flag: VkFenceCreateFlagBits = match self.signaled {
            true => VK_FENCE_CREATE_SIGNALED_BIT.into(),
            false => 0u32.into(),
        };

        let fence = device.create_fence(&VkFenceCreateInfo::new(flag))?;

        Ok(Arc::new(Fence { device, fence }))
    }
}
/// Owned wrapper around a `VkFence`.
#[derive(Debug)]
pub struct Fence {
    device: Arc<Device>,
    fence: VkFence,
}

impl Fence {
    /// Builder; the fence starts unsignaled unless `set_signaled` is used.
    pub fn builder() -> FenceBuilder {
        FenceBuilder { signaled: false }
    }

    /// Blocks until the fence is signaled or `timeout` elapses.
    pub fn wait(self: &Arc<Self>, timeout: Duration) -> Result<()> {
        self.device.wait_for_fences(&[self], true, timeout)
    }

    /// Returns the fence to the unsignaled state; `true` on success.
    pub fn reset(&self) -> bool {
        self.device.reset_fences(&[self.fence]).is_ok()
    }
}
impl VulkanDevice for Fence {
    // The device this fence was created on.
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// `VkHandle<VkFence>` for the four standard receiver forms.
impl_vk_handle!(Fence, VkFence, fence);

impl Drop for Fence {
    // Releases the Vulkan fence object.
    fn drop(&mut self) {
        self.device.destroy_fence(self.fence);
    }
}
use crate::{ffi::*, handle_ffi_result};
/// C entry point for `Fence` creation; on failure records the error and
/// returns null.
///
/// NOTE(review): `Arc::from_raw` takes over one strong reference to
/// `device` — verify the C-side contract intends that ownership transfer.
#[no_mangle]
pub extern "C" fn create_fence(signaled: bool, device: *const Device) -> *const Fence {
    let device = unsafe { Arc::from_raw(device) };

    let fence_res = Fence::builder().set_signaled(signaled).build(device);

    handle_ffi_result!(fence_res)
}
#[no_mangle]
pub extern "C" fn reset_fence(fence: *const Fence) -> bool {
let fence = unsafe { Arc::from_raw(fence) };
fence.reset()
}
/// C entry point: reclaims the `Arc` handed out by `create_fence`,
/// dropping the fence when this was the last strong reference.
#[no_mangle]
pub extern "C" fn destroy_fence(fence: *const Fence) {
    let _fence = unsafe { Arc::from_raw(fence) };
}

70
vulkan-rs/src/ffi.rs Normal file
View file

@ -0,0 +1,70 @@
use std::cell::RefCell;
use std::os::raw::{c_char, c_int};
/// Converts a `Result<Arc<T>>` into a raw pointer for C callers: `Ok`
/// leaks the `Arc` via `into_raw`, `Err` records the error (retrievable
/// through `last_error_message`) and yields null.
#[macro_export]
macro_rules! handle_ffi_result {
    ($result: expr) => {
        match $result {
            Ok(value) => Arc::into_raw(value),
            Err(error) => {
                update_last_error(error);
                std::ptr::null()
            }
        }
    };
}
thread_local! {
    // Most recent error, stored per thread for retrieval over FFI.
    static LAST_ERROR:RefCell<Option<Box<String>>> = RefCell::new(None);
}

// Records `err`'s debug representation as the calling thread's last error.
pub(crate) fn update_last_error(err: anyhow::Error) {
    LAST_ERROR.with(|prev| {
        *prev.borrow_mut() = Some(Box::new(format!("{:?}", err)));
    });
}

// Removes and returns the calling thread's last error, if any.
pub(crate) fn take_last_error() -> Option<Box<String>> {
    LAST_ERROR.with(|prev| prev.borrow_mut().take())
}
/// C entry point: byte length of the pending error message, including the
/// trailing NUL that `last_error_message` appends; 0 when no error is set.
/// Does not consume the stored error.
#[no_mangle]
pub extern "C" fn last_error_length() -> c_int {
    LAST_ERROR.with(|prev| match *prev.borrow() {
        // `len()` reads the stored String directly; the previous
        // `to_string()` cloned the whole message just to measure it.
        Some(ref err) => err.len() as c_int + 1,
        None => 0,
    })
}
/// C entry point: copies the pending error message (NUL-terminated) into
/// `buffer` of capacity `length` bytes and returns the message length
/// (excluding the NUL).
///
/// Returns -1 when `buffer` is null, `length` is not positive, or the
/// buffer is too small; returns 0 when no error is pending. The stored
/// error is consumed once taken, even if the buffer turns out too small.
///
/// # Safety
/// `buffer` must point to at least `length` writable bytes.
#[no_mangle]
pub unsafe extern "C" fn last_error_message(buffer: *mut c_char, length: c_int) -> c_int {
    // A non-positive length would wrap to a huge usize below and let
    // `from_raw_parts_mut` describe memory we do not own.
    if buffer.is_null() || length < 1 {
        return -1;
    }

    let last_error = match take_last_error() {
        Some(err) => err,
        None => return 0,
    };

    let buffer = std::slice::from_raw_parts_mut(buffer as *mut u8, length as usize);

    // `>=` keeps one byte free for the trailing NUL.
    if last_error.len() >= buffer.len() {
        return -1;
    }

    std::ptr::copy_nonoverlapping(
        last_error.as_ptr(),
        buffer.as_mut_ptr(),
        last_error.len(),
    );

    // Add a trailing null so people using the string as a `char *` don't
    // accidentally read into garbage.
    buffer[last_error.len()] = 0;

    last_error.len() as c_int
}

View file

@ -0,0 +1,145 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Builder for `Framebuffer`; see `Framebuffer::builder` for defaults.
pub struct FramebufferBuilder<'a> {
    render_pass: Option<&'a Arc<RenderPass>>,
    attachments: Vec<&'a Arc<Image>>,
    // 0 means "derive from the first attachment" at build time
    width: u32,
    height: u32,
    layers: u32,
}
impl<'a> FramebufferBuilder<'a> {
    /// Sets the render pass this framebuffer is created for (mandatory).
    pub fn set_render_pass(mut self, render_pass: &'a Arc<RenderPass>) -> Self {
        self.render_pass = Some(render_pass);
        self
    }

    /// Appends an image as the next attachment.
    pub fn add_attachment(mut self, image: &'a Arc<Image>) -> Self {
        self.attachments.push(image);
        self
    }

    /// Overrides the framebuffer width.
    pub fn set_width(mut self, width: u32) -> Self {
        self.width = width;
        self
    }

    /// Overrides the framebuffer height.
    pub fn set_height(mut self, height: u32) -> Self {
        self.height = height;
        self
    }

    /// Sets the layer count (defaults to 1).
    pub fn set_layer_count(mut self, layers: u32) -> Self {
        self.layers = layers;
        self
    }

    /// Creates the `VkFramebuffer`.
    ///
    /// Panics when no attachment or no render pass was provided. When
    /// width or height were not set, the first attachment's extent is
    /// used — which may be wrong if attachments differ in size.
    pub fn build(mut self, device: Arc<Device>) -> Result<Arc<Framebuffer>> {
        assert!(!self.attachments.is_empty(), "no attachments added!");

        // fall back to the first attachment's dimensions
        if self.width == 0 || self.height == 0 {
            self.width = self.attachments[0].width();
            self.height = self.attachments[0].height();
        }

        let image_views: Vec<_> = self
            .attachments
            .iter()
            .map(|image| image.vk_handle())
            .collect();
        let images: Vec<Arc<Image>> = self
            .attachments
            .iter()
            .map(|&image| image.clone())
            .collect();

        let framebuffer_ci = VkFramebufferCreateInfo::new(
            VK_FRAMEBUFFER_CREATE_NULL_BIT,
            self.render_pass.expect("no render pass set!").vk_handle(),
            &image_views,
            self.width,
            self.height,
            self.layers,
        );

        let framebuffer = device.create_framebuffer(&framebuffer_ci)?;

        Ok(Arc::new(Framebuffer {
            device,
            framebuffer,
            images,
            width: self.width,
            height: self.height,
        }))
    }
}
/// Owned wrapper around a `VkFramebuffer`, keeping its attachment images
/// alive.
#[derive(Debug)]
pub struct Framebuffer {
    device: Arc<Device>,
    framebuffer: VkFramebuffer,
    images: Vec<Arc<Image>>,
    width: u32,
    height: u32,
}
impl Framebuffer {
    /// Builder with no attachments, extent derived from the first
    /// attachment, and a single layer.
    pub fn builder<'a>() -> FramebufferBuilder<'a> {
        FramebufferBuilder {
            render_pass: None,
            attachments: Vec::new(),
            width: 0,
            height: 0,
            layers: 1,
        }
    }

    /// Framebuffer width in pixels.
    pub fn width(&self) -> u32 {
        self.width
    }

    /// Framebuffer height in pixels.
    pub fn height(&self) -> u32 {
        self.height
    }

    /// All attachment images in binding order.
    pub fn attachments(&self) -> &[Arc<Image>] {
        &self.images
    }

    /// Attachment at `index`; panics when out of bounds.
    pub fn image(&self, index: usize) -> &Arc<Image> {
        &self.images[index]
    }

    /// Number of attachments.
    pub fn image_count(&self) -> usize {
        self.images.len()
    }
}
impl VulkanDevice for Framebuffer {
    // The device this framebuffer was created on.
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// `VkHandle<VkFramebuffer>` for the four standard receiver forms.
impl_vk_handle!(Framebuffer, VkFramebuffer, framebuffer);

impl Drop for Framebuffer {
    // Releases the Vulkan framebuffer object.
    fn drop(&mut self) {
        self.device.destroy_framebuffer(self.framebuffer);
    }
}

1163
vulkan-rs/src/image.rs Normal file

File diff suppressed because it is too large Load diff

1128
vulkan-rs/src/instance.rs Normal file

File diff suppressed because it is too large Load diff

59
vulkan-rs/src/lib.rs Normal file
View file

@ -0,0 +1,59 @@
//! `vulkan` module is a collection of abstractions for vulkan functions
#![deny(rust_2018_idioms)]
pub mod prelude;
#[macro_use]
mod macros;
// mod error;
// pub use error::Result;
pub mod acceleration_structure;
pub mod address;
pub mod buffer;
pub mod commandbuffer;
pub mod commandpool;
pub mod deferred_operation;
pub mod descriptorpool;
pub mod descriptorset;
pub mod descriptorsetlayout;
pub mod device;
pub mod fence;
pub mod framebuffer;
pub mod image;
pub mod instance;
pub mod memory;
pub mod physicaldevice;
pub mod pipeline;
pub mod pipelinecache;
pub mod pipelinelayout;
pub mod pipelines;
pub mod querypool;
pub mod queue;
pub mod render_target;
pub mod renderpass;
pub mod semaphore;
pub mod shadermodule;
pub mod surface;
pub mod swapchain;
pub mod ffi;
mod sampler_manager;
/// Result-like type distinguishing a successful value from an
/// out-of-date state or a timeout.
#[derive(Clone, Debug)]
pub enum OutOfDate<T> {
    Ok(T),
    OutOfDate,
    TimeOut,
}

/// Access to the underlying raw Vulkan handle of type `T`.
pub trait VkHandle<T> {
    fn vk_handle(&self) -> T;
}

/// Access to the `Device` an object was created on.
pub trait VulkanDevice {
    fn device(&self) -> &std::sync::Arc<device::Device>;
}

165
vulkan-rs/src/macros.rs Normal file
View file

@ -0,0 +1,165 @@
// Implements `VkHandle<$target_name>` — returning the `$value` field — for
// `$struct_name` itself plus `&`, `Arc` and `&Arc` receivers, so the raw
// handle can be taken from any common ownership form.
macro_rules! impl_vk_handle {
    ($struct_name:ident, $target_name:ident, $value:ident) => {
        impl VkHandle<$target_name> for $struct_name {
            fn vk_handle(&self) -> $target_name {
                self.$value
            }
        }

        impl<'a> VkHandle<$target_name> for &'a $struct_name {
            fn vk_handle(&self) -> $target_name {
                self.$value
            }
        }

        impl VkHandle<$target_name> for Arc<$struct_name> {
            fn vk_handle(&self) -> $target_name {
                self.$value
            }
        }

        impl<'a> VkHandle<$target_name> for &'a Arc<$struct_name> {
            fn vk_handle(&self) -> $target_name {
                self.$value
            }
        }
    };
}
// Variant of `impl_vk_handle` for generic wrappers with one type
// parameter (e.g. `Buffer<T>`).
macro_rules! impl_vk_handle_t {
    ($struct_name:ident, $target_name:ident, $value:ident) => {
        impl<T> VkHandle<$target_name> for $struct_name<T> {
            fn vk_handle(&self) -> $target_name {
                self.$value
            }
        }

        impl<'a, T> VkHandle<$target_name> for &'a $struct_name<T> {
            fn vk_handle(&self) -> $target_name {
                self.$value
            }
        }

        impl<T> VkHandle<$target_name> for Arc<$struct_name<T>> {
            fn vk_handle(&self) -> $target_name {
                self.$value
            }
        }

        impl<'a, T> VkHandle<$target_name> for &'a Arc<$struct_name<T>> {
            fn vk_handle(&self) -> $target_name {
                self.$value
            }
        }
    };
}
// Generates an extension-set struct: one public `bool` field per known
// extension, plus `raw_names` for names activated by string at runtime.
macro_rules! Extensions {
    ($struct_name:ident, { $(($var:ident, $name:expr),)+ }) => {
        pub struct $struct_name {
            $(
                pub $var: bool,
            )+
            raw_names: Vec<String>,
        }

        impl $struct_name {
            // Consumes the set, returning the names of enabled known
            // extensions (raw names are NOT included here).
            pub fn into_list(self) -> Vec<VkString> {
                let mut list = Vec::new();

                $(
                    if self.$var {
                        list.push(VkString::new($name));
                    }
                )+

                list
            }

            // Names of all enabled extensions, including raw names.
            pub fn as_list(&self) -> Vec<VkString> {
                let mut list = Vec::new();

                $(
                    if self.$var {
                        list.push(VkString::new($name));
                    }
                )+

                let mut raw_vk_names = self.raw_names.iter().map(|raw_name| VkString::new(raw_name)).collect();
                list.append(&mut raw_vk_names);

                list
            }

            // Builds a set enabling every known extension present in `list`.
            pub fn from_list(list: &[VkString]) -> Self {
                let mut extensions = Self::default();

                $(
                    if list.contains(&VkString::new($name)) {
                        extensions.$var = true;
                    }
                )+

                extensions
            }

            // Verifies every extension enabled here is also enabled in
            // `other`; returns human-readable descriptions of the misses.
            pub fn check_availability(&self, other: &$struct_name) -> std::result::Result<(), Vec<String>> {
                let mut missings = Vec::new();

                // requested extensions is not available in other
                $(
                    if self.$var && !other.$var {
                        missings.push(format!("{} is not available", $name));
                    }
                )+

                if missings.is_empty() {
                    Ok(())
                } else {
                    Err(missings)
                }
            }

            // Enables the named extension; errors for unknown names.
            pub fn activate(&mut self, extension_name: &str) -> std::result::Result<(), String> {
                if self.check(extension_name) {
                    return Ok(());
                }

                Err(format!("Extension ({}) currently not supported!", extension_name))
            }

            // Unvalidated escape hatch: records an arbitrary extension
            // name (marked `unsafe` because nothing checks it exists).
            pub unsafe fn add_raw_name(&mut self, extension_name: &str) {
                if self.check(extension_name) {
                    return;
                }

                println!("Add raw extension name: {}", extension_name);

                self.raw_names.push(extension_name.to_string());
            }

            // Sets the matching field when `extension_name` is known;
            // returns whether it was recognized.
            fn check(&mut self, extension_name: &str) -> bool {
                $(
                    if extension_name == $name {
                        self.$var = true;
                        return true;
                    }
                )+

                false
            }
        }

        impl Default for $struct_name {
            fn default() -> Self {
                $struct_name {
                    $(
                        $var: false,
                    )+
                    raw_names: Vec::new(),
                }
            }
        }
    };
}

179
vulkan-rs/src/memory.rs Normal file
View file

@ -0,0 +1,179 @@
use crate::prelude::*;
use anyhow::Result;
use vma_rs::prelude::*;
use std::marker::PhantomData;
use std::sync::Arc;
/// Portable memory placement hint, mapped onto the VMA usage enum.
#[derive(Debug, Clone, PartialEq, Hash, Eq)]
pub enum MemoryUsage {
    GpuOnly,
    CpuOnly,
    CpuToGpu,
    GpuToCpu,
}
impl MemoryUsage {
    /// Converts an optional usage hint into the VMA enum, defaulting to
    /// `VMA_MEMORY_USAGE_UNKNOWN` when no preference was given.
    pub fn into_vma(usage: Option<Self>) -> VmaMemoryUsage {
        usage.map_or(VMA_MEMORY_USAGE_UNKNOWN, Into::into)
    }
}
impl Into<VmaMemoryUsage> for MemoryUsage {
fn into(self) -> VmaMemoryUsage {
match self {
Self::GpuOnly => VMA_MEMORY_USAGE_GPU_ONLY,
Self::CpuOnly => VMA_MEMORY_USAGE_CPU_ONLY,
Self::CpuToGpu => VMA_MEMORY_USAGE_CPU_TO_GPU,
Self::GpuToCpu => VMA_MEMORY_USAGE_GPU_TO_CPU,
}
}
}
/// A VMA allocation typed by the element type `T` of the resource it
/// backs; `T` is only a marker and is never stored.
#[derive(Debug)]
pub struct Memory<T> {
    device: Arc<Device>,
    allocation: Allocation,
    data_type: PhantomData<T>,
}
impl<T> Memory<T> {
    /// Allocates memory from the caller-adjusted `memory_requirements`
    /// (e.g. an overridden alignment) and binds `buffer` to it.
    ///
    /// NOTE(review): the bind only happens when `Arc::get_mut` succeeds —
    /// always true here since the `Arc` was just created with one strong
    /// reference — but a hypothetical failure would be silently skipped.
    pub(crate) fn forced_requirements(
        device: &Arc<Device>,
        memory_requirements: VkMemoryRequirements,
        buffer: VkBuffer,
        memory_usage: VmaMemoryUsage,
    ) -> Result<Arc<Memory<T>>> {
        let mut memory = Self::create_and_bind(device, memory_requirements, memory_usage, ())?;

        if let Some(mut_memory) = Arc::get_mut(&mut memory) {
            mut_memory.allocation.bind_buffer_memory(buffer)?;
        }

        Ok(memory)
    }

    /// Allocates and binds memory for `buffer` from its own requirements.
    pub(crate) fn buffer_memory(
        device: &Arc<Device>,
        buffer: VkBuffer,
        memory_usage: VmaMemoryUsage,
    ) -> Result<Arc<Memory<T>>> {
        let memory_requirements = device.buffer_memory_requirements(buffer);

        Self::create_and_bind(device, memory_requirements, memory_usage, buffer)
    }

    /// Allocates and binds memory for `image` from its own requirements.
    pub(crate) fn image_memory(
        device: &Arc<Device>,
        image: VkImage,
        memory_usage: VmaMemoryUsage,
    ) -> Result<Arc<Memory<T>>> {
        let memory_requirements = device.image_memory_requirements(image);

        Self::create_and_bind(device, memory_requirements, memory_usage, image)
    }

    /// Raw `VkDeviceMemory` backing this allocation.
    pub(crate) fn vk_handle(&self) -> VkDeviceMemory {
        self.allocation.device_memory()
    }
}
/// Allocation strategy selected by the `argument` type: `()` allocates
/// without binding, `VkBuffer`/`VkImage` allocate and bind in one step.
trait MemoryBinder<T, K> {
    fn create_and_bind(
        device: &Arc<Device>,
        memory_requirements: VkMemoryRequirements,
        memory_usage: VmaMemoryUsage,
        argument: T,
    ) -> Result<Arc<Memory<K>>>;
}

// Plain allocation; binding (if any) is performed by the caller, see
// `Memory::forced_requirements`.
impl<K> MemoryBinder<(), K> for Memory<K> {
    fn create_and_bind(
        device: &Arc<Device>,
        memory_requirements: VkMemoryRequirements,
        memory_usage: VmaMemoryUsage,
        _: (),
    ) -> Result<Arc<Memory<K>>> {
        let allocation = device
            .allocator()
            .allocate()
            .set_usage(memory_usage)
            .set_memory_type_bits(memory_requirements.memoryTypeBits.into())
            .build(&memory_requirements)?;

        Ok(Arc::new(Memory {
            device: device.clone(),
            allocation,
            data_type: PhantomData,
        }))
    }
}

// Allocation bound to an image via the VMA builder.
impl<K> MemoryBinder<VkImage, K> for Memory<K> {
    fn create_and_bind(
        device: &Arc<Device>,
        memory_requirements: VkMemoryRequirements,
        memory_usage: VmaMemoryUsage,
        image: VkImage,
    ) -> Result<Arc<Memory<K>>> {
        let allocation = device
            .allocator()
            .allocate()
            .set_usage(memory_usage)
            .set_memory_type_bits(memory_requirements.memoryTypeBits.into())
            .build(image)?;

        Ok(Arc::new(Memory {
            device: device.clone(),
            allocation,
            data_type: PhantomData,
        }))
    }
}

// Allocation bound to a buffer via the VMA builder.
impl<K> MemoryBinder<VkBuffer, K> for Memory<K> {
    fn create_and_bind(
        device: &Arc<Device>,
        memory_requirements: VkMemoryRequirements,
        memory_usage: VmaMemoryUsage,
        buffer: VkBuffer,
    ) -> Result<Arc<Memory<K>>> {
        let allocation = device
            .allocator()
            .allocate()
            .set_usage(memory_usage)
            .set_memory_type_bits(memory_requirements.memoryTypeBits.into())
            .build(buffer)?;

        Ok(Arc::new(Memory {
            device: device.clone(),
            allocation,
            data_type: PhantomData,
        }))
    }
}
/// Grants access to the owning logical device.
impl<T> VulkanDevice for Memory<T> {
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
impl<T: Clone> Memory<T> {
    /// Maps `length` elements of this allocation into host address space;
    /// the returned guard presumably unmaps on drop — defined in the
    /// allocation type, not visible here.
    pub fn map(&self, length: VkDeviceSize) -> Result<VkMappedMemory<'_, T>> {
        self.allocation.map(length)
    }
}

View file

@ -0,0 +1,337 @@
use crate::prelude::*;
use anyhow::Result;
use std::{ptr, sync::Arc};
/// A selected Vulkan physical device plus everything that was queried about it
/// at construction time (core properties/features and several extension
/// property/feature structs).
#[derive(Debug)]
pub struct PhysicalDevice {
    // Keeps the owning instance alive as long as this handle exists.
    instance: Arc<Instance>,
    physical_device: VkPhysicalDevice,
    // Core data, queried once in `internal_new`.
    properties: VkPhysicalDeviceProperties,
    features: VkPhysicalDeviceFeatures,
    memory_properties: VkPhysicalDeviceMemoryProperties,
    supported_extensions: Vec<VkString>,
    // extension info
    ray_tracing_properties: VkPhysicalDeviceRayTracingPropertiesKHR,
    ray_tracing_features: VkPhysicalDeviceRayTracingFeaturesKHR,
    acceleration_structure_properties: VkPhysicalDeviceAccelerationStructurePropertiesKHR,
    acceleration_structure_features: VkPhysicalDeviceAccelerationStructureFeaturesKHR,
    descriptor_indexing_features: VkPhysicalDeviceDescriptorIndexingFeaturesEXT,
    descriptor_indexing_properties: VkPhysicalDeviceDescriptorIndexingPropertiesEXT,
    buffer_device_address_features: VkPhysicalDeviceBufferDeviceAddressFeaturesEXT,
}
impl PhysicalDevice {
    /// Selects a physical device, preferring a discrete GPU and falling back
    /// to an integrated GPU; errors when neither kind is present.
    pub fn new(instance: Arc<Instance>) -> Result<Arc<PhysicalDevice>> {
        let physical_devices = instance.enumerate_physical_devices()?;
        // First pass: look for a discrete GPU.
        let (mut physical_device, mut device_properties) = PhysicalDevice::find_phys_dev(
            &instance,
            &physical_devices,
            VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU,
        );
        if physical_device.is_none() {
            // Second pass: settle for an integrated GPU.
            let (_physical_device, _device_properties) = PhysicalDevice::find_phys_dev(
                &instance,
                &physical_devices,
                VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
            );
            if _physical_device.is_none() {
                return Err(anyhow::Error::msg("Could not find an apropriate device"));
            }
            physical_device = _physical_device;
            device_properties = _device_properties;
        }
        // Both are Some here: either the first pass succeeded or the
        // second pass returned early on failure above.
        let exported_device = physical_device.unwrap();
        let device_props = device_properties.unwrap();
        Self::internal_new(instance, exported_device, device_props)
    }
    /// Wraps an externally obtained `VkPhysicalDevice` handle, querying its
    /// properties itself.
    pub fn from_raw(
        instance: Arc<Instance>,
        physical_device: VkPhysicalDevice,
    ) -> Result<Arc<PhysicalDevice>> {
        let properties = instance.physical_device_properties(physical_device);
        Self::internal_new(instance, physical_device, properties)
    }
    /// Queries core + extension features/properties for `physical_device` and
    /// assembles the final struct. Prints a short device banner to stdout.
    fn internal_new(
        instance: Arc<Instance>,
        physical_device: VkPhysicalDevice,
        properties: VkPhysicalDeviceProperties,
    ) -> Result<Arc<PhysicalDevice>> {
        let device_features = instance.physical_device_features(physical_device);
        let device_memory_properties = instance.physical_device_memory_properties(physical_device);
        let extensions = Self::query_extensions(&instance, physical_device)?;
        // get extension properties
        // The extension structs are linked into the pNext chain of the
        // `*2KHR` query struct, then filled by a single query call.
        let mut device_properties2 = VkPhysicalDeviceProperties2KHR::default();
        // get ray tracing properties
        let mut ray_tracing_properties = VkPhysicalDeviceRayTracingPropertiesKHR::default();
        let mut acceleration_structure_properties =
            VkPhysicalDeviceAccelerationStructurePropertiesKHR::default();
        device_properties2.chain(&mut ray_tracing_properties);
        device_properties2.chain(&mut acceleration_structure_properties);
        // get descriptor indexing properties
        let mut descriptor_indexing_properties =
            VkPhysicalDeviceDescriptorIndexingPropertiesEXT::default();
        device_properties2.chain(&mut descriptor_indexing_properties);
        instance.physical_device_properties2(physical_device, &mut device_properties2);
        // get extension features
        let mut device_features2 = VkPhysicalDeviceFeatures2KHR::default();
        // get ray tracing features
        let mut ray_tracing_features = VkPhysicalDeviceRayTracingFeaturesKHR::default();
        let mut acceleration_structure_features =
            VkPhysicalDeviceAccelerationStructureFeaturesKHR::default();
        device_features2.chain(&mut ray_tracing_features);
        device_features2.chain(&mut acceleration_structure_features);
        // get buffer device address features
        let mut buffer_device_address_features =
            VkPhysicalDeviceBufferDeviceAddressFeaturesEXT::default();
        device_features2.chain(&mut buffer_device_address_features);
        // get descriptor indexing features
        let mut descriptor_indexing_features =
            VkPhysicalDeviceDescriptorIndexingFeaturesEXT::default();
        device_features2.chain(&mut descriptor_indexing_features);
        instance.physical_device_features2(physical_device, &mut device_features2);
        // clear pNext indices for later chaining
        // NOTE(review): this detaches the feature structs from the query chain
        // so they can presumably be re-chained at logical-device creation —
        // confirm against the `chain` implementation.
        buffer_device_address_features.pNext = ptr::null_mut();
        descriptor_indexing_features.pNext = ptr::null_mut();
        ray_tracing_features.pNext = ptr::null_mut();
        acceleration_structure_features.pNext = ptr::null_mut();
        let (major, minor, patch) = VK_GET_VERSION(properties.apiVersion);
        println!(
            "\nVulkan Device ({}, Driver: {}, {}.{}.{})",
            properties.device_name(),
            properties.driverVersion,
            major,
            minor,
            patch
        );
        Ok(Arc::new(PhysicalDevice {
            instance,
            physical_device,
            properties,
            features: device_features,
            memory_properties: device_memory_properties,
            supported_extensions: extensions,
            ray_tracing_properties,
            ray_tracing_features,
            acceleration_structure_features,
            acceleration_structure_properties,
            descriptor_indexing_properties,
            descriptor_indexing_features,
            buffer_device_address_features,
        }))
    }
}
// getter
impl PhysicalDevice {
pub fn instance(&self) -> &Arc<Instance> {
&self.instance
}
pub fn features(&self) -> VkPhysicalDeviceFeatures {
self.features
}
pub fn memory_properties(&self) -> &VkPhysicalDeviceMemoryProperties {
&self.memory_properties
}
pub fn extensions(&self) -> &Vec<VkString> {
&self.supported_extensions
}
pub fn properties(&self) -> &VkPhysicalDeviceProperties {
&self.properties
}
pub fn ray_tracing_properties(&self) -> &VkPhysicalDeviceRayTracingPropertiesKHR {
&self.ray_tracing_properties
}
pub fn ray_tracing_features(&self) -> &VkPhysicalDeviceRayTracingFeaturesKHR {
&self.ray_tracing_features
}
pub fn acceleration_structure_features(
&self,
) -> &VkPhysicalDeviceAccelerationStructureFeaturesKHR {
&self.acceleration_structure_features
}
pub fn acceleration_structure_properties(
&self,
) -> &VkPhysicalDeviceAccelerationStructurePropertiesKHR {
&self.acceleration_structure_properties
}
pub fn descriptor_indexing_properties(
&self,
) -> &VkPhysicalDeviceDescriptorIndexingPropertiesEXT {
&self.descriptor_indexing_properties
}
pub fn descriptor_indexing_features(&self) -> &VkPhysicalDeviceDescriptorIndexingFeaturesEXT {
&self.descriptor_indexing_features
}
pub fn buffer_device_address_features(
&self,
) -> &VkPhysicalDeviceBufferDeviceAddressFeaturesEXT {
&self.buffer_device_address_features
}
pub fn check_optimal_format_features(
&self,
format: VkFormat,
usage: impl Into<VkImageUsageFlagBits>,
) -> bool {
let format_properties = self
.instance
.physical_device_format_properties(self.physical_device, format);
let features = Self::image_usage_into_features(usage.into());
if (format_properties.optimalTilingFeatures & features) == features {
return true;
}
false
}
pub fn check_linear_format_features(
&self,
format: VkFormat,
usage: impl Into<VkImageUsageFlagBits>,
) -> bool {
let format_properties = self
.instance
.physical_device_format_properties(self.physical_device, format);
let features = Self::image_usage_into_features(usage.into());
if (format_properties.linearTilingFeatures & features) == features {
return true;
}
false
}
pub fn check_buffer_format_features(
&self,
format: VkFormat,
features: impl Into<VkFormatFeatureFlagBits>,
) -> bool {
let format_properties = self
.instance
.physical_device_format_properties(self.physical_device, format);
let features = features.into();
if (format_properties.bufferFeatures & features) == features {
return true;
}
false
}
fn image_usage_into_features(usage: VkImageUsageFlagBits) -> VkFormatFeatureFlagBits {
let mut features = 0u32.into();
if (usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == VK_IMAGE_USAGE_TRANSFER_SRC_BIT {
features |= VK_FORMAT_FEATURE_TRANSFER_SRC_BIT_KHR;
}
if (usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == VK_IMAGE_USAGE_TRANSFER_DST_BIT {
features |= VK_FORMAT_FEATURE_TRANSFER_DST_BIT_KHR;
}
if (usage & VK_IMAGE_USAGE_SAMPLED_BIT) == VK_IMAGE_USAGE_SAMPLED_BIT {
features |= VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT;
}
if (usage & VK_IMAGE_USAGE_STORAGE_BIT) == VK_IMAGE_USAGE_STORAGE_BIT {
features |= VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT;
}
if (usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT {
features |= VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
}
if (usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)
== VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT
{
features |= VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT;
}
features
}
}
// Presumably implements the `VkHandle<VkPhysicalDevice>` trait by exposing the
// `physical_device` field (macro defined elsewhere in the crate).
impl_vk_handle!(PhysicalDevice, VkPhysicalDevice, physical_device);
// private
impl PhysicalDevice {
    /// Returns the first enumerated device whose `deviceType` equals
    /// `device_type`, paired with its properties, or `(None, None)` when no
    /// device of that kind exists.
    fn find_phys_dev(
        instance: &Arc<Instance>,
        physical_devices: &[VkPhysicalDevice],
        device_type: VkPhysicalDeviceType,
    ) -> (Option<VkPhysicalDevice>, Option<VkPhysicalDeviceProperties>) {
        // Lazily pair each handle with its properties; `find` stops querying
        // at the first match, just like the original loop.
        physical_devices
            .iter()
            .map(|physical_device| {
                (
                    *physical_device,
                    instance.physical_device_properties(*physical_device),
                )
            })
            .find(|(_, properties)| properties.deviceType == device_type)
            .map_or((None, None), |(device, properties)| {
                (Some(device), Some(properties))
            })
    }
    /// Collects the names of every device extension supported by
    /// `physical_device`, failing on the first unreadable name.
    fn query_extensions(
        instance: &Arc<Instance>,
        physical_device: VkPhysicalDevice,
    ) -> Result<Vec<VkString>> {
        instance
            .enumerate_device_extensions(physical_device)?
            .into_iter()
            .map(|extension_property| extension_property.extension_name())
            .collect()
    }
}

85
vulkan-rs/src/pipeline.rs Normal file
View file

@ -0,0 +1,85 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Discriminates which kind of Vulkan pipeline a `Pipeline` wraps.
///
/// `Eq` and `Hash` are derived alongside `PartialEq` (clippy:
/// `derive_partial_eq_without_eq`) so the type can be used as a map/set key.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum PipelineType {
    /// Rasterization pipeline.
    Graphics,
    /// Compute dispatch pipeline.
    Compute,
    /// Ray tracing pipeline.
    RayTracing,
}
/// Owned wrapper around a `VkPipeline`, keeping its layout and device alive
/// and remembering which bind-point category it was created for.
#[derive(Debug)]
pub struct Pipeline {
    device: Arc<Device>,
    pipeline_layout: Arc<PipelineLayout>,
    pipeline_type: PipelineType,
    pipeline: VkPipeline,
}
impl Pipeline {
    /// Bundles a raw `VkPipeline` with its layout, type, and owning device.
    pub(crate) fn new(
        device: Arc<Device>,
        pipeline_layout: Arc<PipelineLayout>,
        pipeline_type: PipelineType,
        pipeline: VkPipeline,
    ) -> Self {
        Self {
            device,
            pipeline_layout,
            pipeline_type,
            pipeline,
        }
    }
    /// Starts building a graphics (rasterization) pipeline.
    pub fn new_graphics() -> GraphicsPipelineBuilder {
        Default::default()
    }
    /// Starts building a compute pipeline.
    pub fn new_compute<'a>() -> ComputePipelineBuilder<'a> {
        Default::default()
    }
    /// Starts building a ray tracing pipeline.
    pub fn new_ray_tracing<'a>() -> RayTracingPipelineBuilder<'a> {
        Default::default()
    }
    /// Queries the opaque shader group handles used to fill a shader binding
    /// table. Panics when invoked on a non-ray-tracing pipeline.
    pub(crate) fn ray_tracing_shader_group_handles(
        &self,
        group_count: u32,
        handle_size: u32,
    ) -> Result<Vec<u8>> {
        match self.pipeline_type {
            PipelineType::RayTracing => self.device.get_ray_tracing_shader_group_handles(
                self.pipeline,
                0,
                group_count,
                handle_size,
            ),
            _ => panic!("wrong pipeline type"),
        }
    }
    /// Layout this pipeline was created with.
    pub fn pipeline_layout(&self) -> &Arc<PipelineLayout> {
        &self.pipeline_layout
    }
    /// Bind-point category of this pipeline.
    pub fn pipeline_type(&self) -> PipelineType {
        self.pipeline_type
    }
}
/// Grants access to the owning logical device.
impl VulkanDevice for Pipeline {
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
// Presumably implements `VkHandle<VkPipeline>` by exposing the `pipeline`
// field (macro defined elsewhere in the crate).
impl_vk_handle!(Pipeline, VkPipeline, pipeline);
/// Destroys the underlying `VkPipeline` when the last owner drops it.
impl Drop for Pipeline {
    fn drop(&mut self) {
        self.device.destroy_pipeline(self.pipeline);
    }
}

View file

@ -0,0 +1,52 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Owned wrapper around a `VkPipelineCache`.
#[derive(Debug)]
pub struct PipelineCache {
    device: Arc<Device>,
    pipeline_cache: VkPipelineCache,
}
impl PipelineCache {
    /// Creates a pipeline cache pre-seeded with `data` (e.g. previously
    /// persisted cache contents).
    pub fn new<T>(device: Arc<Device>, data: &T) -> Result<Arc<PipelineCache>> {
        let mut create_info = VkPipelineCacheCreateInfo::new(VK_PIPELINE_CACHE_CREATE_NULL_BIT);
        create_info.set_data(data);
        let handle = device.create_pipeline_cache(&create_info)?;
        Ok(Arc::new(PipelineCache {
            device,
            pipeline_cache: handle,
        }))
    }
    /// Reads the cache contents back, e.g. for persisting them.
    pub fn get_data<T>(&self) -> Result<T> {
        self.device.pipeline_cache_data(self.pipeline_cache)
    }
    /// Merges the contents of `src_caches` into this cache.
    pub fn merge(&self, src_caches: &[&Arc<PipelineCache>]) -> Result<()> {
        let handles: Vec<VkPipelineCache> =
            src_caches.iter().map(|cache| cache.vk_handle()).collect();
        self.device
            .merge_pipeline_cache(handles.as_slice(), self.pipeline_cache)
    }
}
/// Grants access to the owning logical device.
impl VulkanDevice for PipelineCache {
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
// Presumably implements `VkHandle<VkPipelineCache>` by exposing the
// `pipeline_cache` field (macro defined elsewhere in the crate).
impl_vk_handle!(PipelineCache, VkPipelineCache, pipeline_cache);
/// Destroys the underlying `VkPipelineCache` when the last owner drops it.
impl Drop for PipelineCache {
    fn drop(&mut self) {
        self.device.destroy_pipeline_cache(self.pipeline_cache);
    }
}

View file

@ -0,0 +1,73 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Collects descriptor set layouts and push constant ranges for a
/// `VkPipelineLayout`; set indices follow insertion order.
#[derive(Debug)]
pub struct PipelineLayoutBuilder {
    descriptor_set_layouts: Vec<VkDescriptorSetLayout>,
    push_constant_ranges: Vec<VkPushConstantRange>,
}
impl PipelineLayoutBuilder {
    /// Appends another descriptor set layout; its set index is its insertion
    /// position.
    pub fn add_descriptor_set_layout(
        mut self,
        descriptor_set_layout: &dyn VkHandle<VkDescriptorSetLayout>,
    ) -> Self {
        let handle = descriptor_set_layout.vk_handle();
        self.descriptor_set_layouts.push(handle);
        self
    }
    /// Appends a push constant range.
    pub fn add_push_constant(mut self, push_constant: VkPushConstantRange) -> Self {
        self.push_constant_ranges.push(push_constant);
        self
    }
    /// Creates the `VkPipelineLayout` from everything collected so far.
    pub fn build(self, device: Arc<Device>) -> Result<Arc<PipelineLayout>> {
        let create_info = VkPipelineLayoutCreateInfo::new(
            VK_PIPELINE_LAYOUT_CREATE_NULL_BIT,
            &self.descriptor_set_layouts,
            &self.push_constant_ranges,
        );
        let handle = device.create_pipeline_layout(&create_info)?;
        Ok(Arc::new(PipelineLayout {
            device,
            pipeline_layout: handle,
        }))
    }
}
/// Owned wrapper around a `VkPipelineLayout`.
#[derive(Debug)]
pub struct PipelineLayout {
    device: Arc<Device>,
    pipeline_layout: VkPipelineLayout,
}
impl PipelineLayout {
    /// Starts an empty builder: no descriptor set layouts, no push constants.
    pub fn builder() -> PipelineLayoutBuilder {
        let descriptor_set_layouts = Vec::new();
        let push_constant_ranges = Vec::new();
        PipelineLayoutBuilder {
            descriptor_set_layouts,
            push_constant_ranges,
        }
    }
}
/// Grants access to the owning logical device.
impl VulkanDevice for PipelineLayout {
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
// Presumably implements `VkHandle<VkPipelineLayout>` by exposing the
// `pipeline_layout` field (macro defined elsewhere in the crate).
impl_vk_handle!(PipelineLayout, VkPipelineLayout, pipeline_layout);
/// Destroys the underlying `VkPipelineLayout` when the last owner drops it.
impl Drop for PipelineLayout {
    fn drop(&mut self) {
        self.device.destroy_pipeline_layout(self.pipeline_layout);
    }
}

View file

@ -0,0 +1,84 @@
use anyhow::Result;
use crate::pipeline::PipelineType;
use crate::prelude::*;
use std::sync::Arc;
/// Builder for a compute pipeline; borrows the shader module and optional
/// pipeline cache for lifetime `'a`.
pub struct ComputePipelineBuilder<'a> {
    shader_module: Option<&'a Arc<ShaderModule>>,
    pipeline_cache: Option<&'a Arc<PipelineCache>>,
    flags: VkPipelineCreateFlagBits,
}
impl<'a> ComputePipelineBuilder<'a> {
// TODO: add support for specialization constants
pub fn set_shader_module(mut self, shader_module: &'a Arc<ShaderModule>) -> Self {
if cfg!(debug_assertions) {
if self.shader_module.is_some() {
panic!("shader already set!");
}
if shader_module.shader_type() != ShaderType::Compute {
panic!("shader has wrong type!");
}
}
self.shader_module = Some(shader_module);
self
}
pub fn set_pipeline_cache(mut self, pipeline_cache: &'a Arc<PipelineCache>) -> Self {
self.pipeline_cache = Some(pipeline_cache);
self
}
pub fn set_flags(mut self, flags: impl Into<VkPipelineCreateFlagBits>) -> Self {
self.flags = flags.into();
self
}
pub fn build(
self,
device: &Arc<Device>,
pipeline_layout: &Arc<PipelineLayout>,
) -> Result<Arc<Pipeline>> {
let pipeline_ci = match self.shader_module {
Some(module) => VkComputePipelineCreateInfo::new(
self.flags,
module.pipeline_stage_info(),
pipeline_layout.vk_handle(),
),
None => {
return Err(anyhow::Error::msg(
"Required shader module could not be found",
))
}
};
let pipeline = device.create_compute_pipelines(
self.pipeline_cache.map(|cache| cache.vk_handle()),
&[pipeline_ci],
)?[0];
Ok(Arc::new(Pipeline::new(
device.clone(),
pipeline_layout.clone(),
PipelineType::Compute,
pipeline,
)))
}
}
impl<'a> Default for ComputePipelineBuilder<'a> {
fn default() -> Self {
ComputePipelineBuilder {
shader_module: None,
pipeline_cache: None,
flags: 0.into(),
}
}
}

View file

@ -0,0 +1,463 @@
use crate::pipeline::PipelineType;
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Accumulates every piece of state needed for `vkCreateGraphicsPipelines`;
/// unset optional states fall back to defaults (or `None`) in `build`.
pub struct GraphicsPipelineBuilder {
    flags: VkPipelineCreateFlagBits,
    pipeline_cache: Option<Arc<PipelineCache>>,
    // AMD rasterization-order extension struct, chained into the
    // rasterization state during `build` when the extension is enabled.
    amd_rasterization_order: Option<VkPipelineRasterizationStateRasterizationOrderAMD>,
    vertex_shader: Option<Arc<ShaderModule>>,
    vertex_binding_description: Vec<VkVertexInputBindingDescription>,
    vertex_attribute_description: Vec<VkVertexInputAttributeDescription>,
    input_assembly: Option<VkPipelineInputAssemblyStateCreateInfo>,
    // (control, evaluation) stage pair.
    tesselation_shader: Option<(Arc<ShaderModule>, Arc<ShaderModule>)>,
    // 0 means "no tessellation state".
    patch_control_points: u32,
    geometry_shader: Option<Arc<ShaderModule>>,
    fragment_shader: Option<Arc<ShaderModule>>,
    viewports: Vec<VkViewport>,
    scissors: Vec<VkRect2D>,
    rasterization: Option<VkPipelineRasterizationStateCreateInfo>,
    multisample: Option<VkPipelineMultisampleStateCreateInfo>,
    depth_stencil: Option<VkPipelineDepthStencilStateCreateInfo>,
    // Kept in the builder because `color_blend` references this storage.
    blend_attachments: Vec<VkPipelineColorBlendAttachmentState>,
    color_blend: Option<VkPipelineColorBlendStateCreateInfo>,
    dynamic_states: Vec<VkDynamicState>,
}
impl GraphicsPipelineBuilder {
    // TODO: add support for specialization constants
    /// Sets the vertex stage plus its input binding/attribute descriptions.
    /// Debug builds assert that the module is a vertex shader.
    pub fn set_vertex_shader(
        mut self,
        shader: Arc<ShaderModule>,
        vertex_binding_description: Vec<VkVertexInputBindingDescription>,
        vertex_attribute_description: Vec<VkVertexInputAttributeDescription>,
    ) -> Self {
        if cfg!(debug_assertions) {
            assert_eq!(shader.shader_type(), ShaderType::Vertex);
        }
        self.vertex_shader = Some(shader);
        self.vertex_binding_description = vertex_binding_description;
        self.vertex_attribute_description = vertex_attribute_description;
        self
    }
    // TODO: add support for specialization constants
    /// Sets the tessellation control/evaluation pair and the patch size.
    /// A non-zero `patch_control_points` also enables the tessellation state.
    pub fn set_tesselation_shader(
        mut self,
        tesselation_control: Arc<ShaderModule>,
        tesselation_evaluation: Arc<ShaderModule>,
        patch_control_points: u32,
    ) -> Self {
        if cfg!(debug_assertions) {
            assert_eq!(
                tesselation_control.shader_type(),
                ShaderType::TesselationControl
            );
            assert_eq!(
                tesselation_evaluation.shader_type(),
                ShaderType::TesselationEvaluation
            );
        }
        self.tesselation_shader = Some((tesselation_control, tesselation_evaluation));
        self.patch_control_points = patch_control_points;
        self
    }
    // TODO: add support for specialization constants
    /// Sets the geometry stage; debug builds assert the module type.
    pub fn set_geometry_shader(mut self, shader: Arc<ShaderModule>) -> Self {
        if cfg!(debug_assertions) {
            assert_eq!(shader.shader_type(), ShaderType::Geometry);
        }
        self.geometry_shader = Some(shader);
        self
    }
    // TODO: add support for specialization constants
    /// Sets the fragment stage; debug builds assert the module type.
    pub fn set_fragment_shader(mut self, shader: Arc<ShaderModule>) -> Self {
        if cfg!(debug_assertions) {
            assert_eq!(shader.shader_type(), ShaderType::Fragment);
        }
        self.fragment_shader = Some(shader);
        self
    }
    /// Sets the `VkPipelineCreateFlagBits` for creation.
    pub fn set_flags(mut self, flags: impl Into<VkPipelineCreateFlagBits>) -> Self {
        self.flags = flags.into();
        self
    }
    /// Requests AMD rasterization ordering; only honored in `build` when the
    /// extension is actually enabled on the device.
    pub fn enable_rasterization_order(mut self, order: VkRasterizationOrderAMD) -> Self {
        self.amd_rasterization_order = Some(
            VkPipelineRasterizationStateRasterizationOrderAMD::new(order),
        );
        self
    }
    /// Sets the input assembly state (primitive topology + restart).
    pub fn input_assembly(
        mut self,
        topology: VkPrimitiveTopology,
        primitive_restart_enable: bool,
    ) -> Self {
        self.input_assembly = Some(VkPipelineInputAssemblyStateCreateInfo::new(
            0,
            topology,
            primitive_restart_enable,
        ));
        self
    }
    /// Rasterization state with common defaults: filled polygons, no depth
    /// clamp/bias, line width 1.0.
    pub fn default_rasterization(
        mut self,
        cull_mode: VkCullModeFlags,
        front_face: VkFrontFace,
    ) -> Self {
        self.rasterization = Some(VkPipelineRasterizationStateCreateInfo::new(
            0,
            false,
            false,
            VK_POLYGON_MODE_FILL,
            cull_mode,
            front_face,
            false,
            0.0,
            0.0,
            0.0,
            1.0,
        ));
        self
    }
    /// Fully parameterized rasterization state.
    pub fn custom_rasterization(
        mut self,
        depth_clamp_enable: bool,
        rasterization_discard_enable: bool,
        polygon_mode: VkPolygonMode,
        cull_mode: VkCullModeFlags,
        front_face: VkFrontFace,
        depth_bias_enable: bool,
        depth_bias_constant_factor: f32,
        depth_bias_clamp: f32,
        depth_bias_slope_factor: f32,
        line_width: f32,
    ) -> Self {
        self.rasterization = Some(VkPipelineRasterizationStateCreateInfo::new(
            0,
            depth_clamp_enable,
            rasterization_discard_enable,
            polygon_mode,
            cull_mode,
            front_face,
            depth_bias_enable,
            depth_bias_constant_factor,
            depth_bias_clamp,
            depth_bias_slope_factor,
            line_width,
        ));
        self
    }
    /// Multisample state with sample shading and alpha options disabled.
    pub fn default_multisample(mut self, sample_count: VkSampleCountFlags) -> Self {
        self.multisample = Some(VkPipelineMultisampleStateCreateInfo::new(
            0,
            sample_count,
            false,
            0.0,
            &[],
            false,
            false,
        ));
        self
    }
    /// Fully parameterized multisample state.
    pub fn custom_multisample(
        mut self,
        sample_count: VkSampleCountFlags,
        sample_shading_enable: bool,
        min_sample_shading: f32,
        sample_masks: &[VkSampleMask],
        alpha_to_coverage_enable: bool,
        alpha_to_one_enable: bool,
    ) -> Self {
        self.multisample = Some(VkPipelineMultisampleStateCreateInfo::new(
            0,
            sample_count,
            sample_shading_enable,
            min_sample_shading,
            sample_masks,
            alpha_to_coverage_enable,
            alpha_to_one_enable,
        ));
        self
    }
    /// Marks a piece of pipeline state as dynamic (set at record time).
    pub fn add_dynamic_state(mut self, dynamic_state: VkDynamicState) -> Self {
        self.dynamic_states.push(dynamic_state);
        self
    }
    /// Depth/stencil state with `VK_COMPARE_OP_LESS`, keep-everything stencil
    /// ops, and depth writes tied to `depth_test`.
    pub fn default_depth_stencil(mut self, depth_test: bool, stencil_test: bool) -> Self {
        let stencil_op_state = VkStencilOpState {
            failOp: VK_STENCIL_OP_KEEP,
            passOp: VK_STENCIL_OP_KEEP,
            depthFailOp: VK_STENCIL_OP_KEEP,
            compareOp: VK_COMPARE_OP_ALWAYS,
            compareMask: 0,
            writeMask: 0,
            reference: 0,
        };
        self.depth_stencil = Some(VkPipelineDepthStencilStateCreateInfo::new(
            VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_NULL_BIT,
            depth_test,
            depth_test,
            VK_COMPARE_OP_LESS,
            false,
            stencil_test,
            stencil_op_state.clone(),
            stencil_op_state,
            0.0,
            0.0,
        ));
        self
    }
    /// Fully parameterized depth/stencil state.
    pub fn custom_depth_stencil(
        mut self,
        depth_test_enable: bool,
        depth_write_enable: bool,
        depth_compare_op: VkCompareOp,
        depth_bounds_test_enable: bool,
        stencil_test_enable: bool,
        front: VkStencilOpState,
        back: VkStencilOpState,
        min_depth_bounds: f32,
        max_depth_bounds: f32,
    ) -> Self {
        self.depth_stencil = Some(VkPipelineDepthStencilStateCreateInfo::new(
            0,
            depth_test_enable,
            depth_write_enable,
            depth_compare_op,
            depth_bounds_test_enable,
            stencil_test_enable,
            front,
            back,
            min_depth_bounds,
            max_depth_bounds,
        ));
        self
    }
    /// Color blend state without logic ops.
    /// NOTE(review): the create info is built from `&self.blend_attachments`;
    /// it presumably stores a pointer into that Vec, which moves with the
    /// builder on every subsequent by-value call — confirm the create info
    /// type re-reads or the Vec's heap storage is what is referenced.
    pub fn default_color_blend(
        mut self,
        attachments: Vec<VkPipelineColorBlendAttachmentState>,
    ) -> Self {
        self.blend_attachments = attachments;
        self.color_blend = Some(VkPipelineColorBlendStateCreateInfo::new(
            0,
            false,
            VK_LOGIC_OP_NO_OP,
            &self.blend_attachments,
            [1.0, 1.0, 1.0, 1.0],
        ));
        self
    }
    /// Fully parameterized color blend state (same storage caveat as
    /// `default_color_blend`).
    pub fn custom_color_blend(
        mut self,
        logic_op_enable: bool,
        logic_op: VkLogicOp,
        attachments: Vec<VkPipelineColorBlendAttachmentState>,
        blend_constants: [f32; 4],
    ) -> Self {
        self.blend_attachments = attachments;
        self.color_blend = Some(VkPipelineColorBlendStateCreateInfo::new(
            0,
            logic_op_enable,
            logic_op,
            &self.blend_attachments,
            blend_constants,
        ));
        self
    }
    /// Appends a static viewport; when none are added, `build` switches the
    /// viewport to dynamic state instead.
    pub fn add_viewport(mut self, viewport: VkViewport) -> Self {
        self.viewports.push(viewport);
        self
    }
    /// Appends a static scissor; when none are added, `build` switches the
    /// scissor to dynamic state instead.
    pub fn add_scissor(mut self, scissor: VkRect2D) -> Self {
        self.scissors.push(scissor);
        self
    }
    /// Assembles all collected state into a `VkGraphicsPipelineCreateInfo`
    /// and creates the pipeline for `render_pass`/`subpass`.
    ///
    /// Panics when no rasterization state was configured.
    pub fn build(
        mut self,
        device: Arc<Device>,
        pipeline_layout: &Arc<PipelineLayout>,
        render_pass: &Arc<RenderPass>,
        subpass: u32,
    ) -> Result<Arc<Pipeline>> {
        let mut rasterization = self.rasterization.expect("rasterization state is required");
        // Chain the AMD ordering struct only when the extension is enabled.
        if let Some(amd_rasterization_order) = &self.amd_rasterization_order {
            if device.enabled_extensions().amd_rasterization_order {
                rasterization.chain(amd_rasterization_order);
            }
        }
        let vertex_input = VkPipelineVertexInputStateCreateInfo::new(
            0,
            &self.vertex_binding_description,
            &self.vertex_attribute_description,
        );
        // Collect stage infos for every shader that was set.
        let mut stages = Vec::new();
        if let Some(shader) = &self.vertex_shader {
            stages.push(shader.pipeline_stage_info());
        }
        if let Some(shader) = &self.geometry_shader {
            stages.push(shader.pipeline_stage_info());
        }
        if let Some((tesselation_control, tesselation_evaluation)) = &self.tesselation_shader {
            stages.push(tesselation_control.pipeline_stage_info());
            stages.push(tesselation_evaluation.pipeline_stage_info());
        }
        if let Some(shader) = &self.fragment_shader {
            stages.push(shader.pipeline_stage_info());
        }
        // No static viewport/scissor => declare them dynamic and provide a
        // single placeholder entry (the count must still be non-zero).
        if self.viewports.is_empty() {
            self.dynamic_states.push(VK_DYNAMIC_STATE_VIEWPORT);
            self.viewports.push(VkViewport::default());
        }
        if self.scissors.is_empty() {
            self.dynamic_states.push(VK_DYNAMIC_STATE_SCISSOR);
            self.scissors.push(VkRect2D::default());
        }
        let viewport_state =
            VkPipelineViewportStateCreateInfo::new(0, &self.viewports, &self.scissors);
        // Tessellation state only when a patch size was configured.
        let tesselation = if self.patch_control_points != 0 {
            Some(VkPipelineTessellationStateCreateInfo::new(
                0,
                self.patch_control_points,
            ))
        } else {
            None
        };
        let dynamic_state = VkPipelineDynamicStateCreateInfo::new(0, &self.dynamic_states);
        let pipeline_ci = VkGraphicsPipelineCreateInfo::new(
            self.flags,
            &stages,
            Some(&vertex_input),
            self.input_assembly.as_ref(),
            tesselation.as_ref(),
            Some(&viewport_state),
            &rasterization,
            self.multisample.as_ref(),
            self.depth_stencil.as_ref(),
            self.color_blend.as_ref(),
            Some(&dynamic_state),
            pipeline_layout.vk_handle(),
            render_pass.vk_handle(),
            subpass,
        );
        let pipeline = device.create_graphics_pipelines(
            self.pipeline_cache.map(|cache| cache.vk_handle()),
            &[pipeline_ci],
        )?[0];
        Ok(Arc::new(Pipeline::new(
            device,
            pipeline_layout.clone(),
            PipelineType::Graphics,
            pipeline,
        )))
    }
}
impl Default for GraphicsPipelineBuilder {
fn default() -> Self {
GraphicsPipelineBuilder {
flags: 0.into(),
pipeline_cache: None,
amd_rasterization_order: None,
vertex_shader: None,
vertex_binding_description: Vec::new(),
vertex_attribute_description: Vec::new(),
input_assembly: None,
tesselation_shader: None,
patch_control_points: 0,
geometry_shader: None,
fragment_shader: None,
viewports: Vec::new(),
scissors: Vec::new(),
rasterization: None,
multisample: None,
depth_stencil: None,
blend_attachments: Vec::new(),
color_blend: None,
dynamic_states: Vec::new(),
}
}
}

View file

@ -0,0 +1,4 @@
pub mod compute_pipeline;
pub mod graphics_pipeline;
pub mod ray_tracing_pipeline;
pub mod shader_binding_table;

View file

@ -0,0 +1,270 @@
use crate::pipeline::PipelineType;
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
use super::shader_binding_table::ShaderBindingTableBuilder;
/// A ray tracing pipeline library plus the maximum ray payload / hit
/// attribute sizes its shaders require; used to size the library interface.
pub struct Library<'a> {
    pipeline: &'a Arc<Pipeline>,
    max_payload_size: u32,
    max_attribute_size: u32,
}
impl<'a> Library<'a> {
pub fn new(
pipeline: &'a Arc<Pipeline>,
max_payload_size: u32,
max_attribute_size: u32,
) -> Self {
Library {
pipeline,
max_payload_size,
max_attribute_size,
}
}
}
/// Builder for a ray tracing pipeline; also accumulates the matching shader
/// binding table layout so `build` can return both together.
pub struct RayTracingPipelineBuilder<'a> {
    // Stage modules in creation order, paired with optional specialization
    // constants; shader group indices reference positions in this Vec.
    shader_modules: Vec<(Arc<ShaderModule>, Option<SpecializationConstants>)>,
    shader_groups: Vec<VkRayTracingShaderGroupCreateInfoKHR>,
    libraries: Vec<Library<'a>>,
    dynamic_states: Vec<VkDynamicState>,
    flags: VkPipelineCreateFlagBits,
    max_recursion: u32,
    shader_binding_table_builder: ShaderBindingTableBuilder,
    pipeline_cache: Option<&'a Arc<PipelineCache>>,
}
impl<'a> RayTracingPipelineBuilder<'a> {
    /// Clamps `max_recursion` to the device's reported maximum ray recursion
    /// depth.
    pub fn check_max_recursion(device: &Arc<Device>, max_recursion: u32) -> u32 {
        max_recursion.min(
            device
                .physical_device()
                .ray_tracing_properties()
                .maxRayRecursionDepth,
        )
    }
    /// Marks a piece of pipeline state as dynamic (set at record time).
    pub fn add_dynamic_state(mut self, dynamic_state: VkDynamicState) -> Self {
        self.dynamic_states.push(dynamic_state);
        self
    }
    /// Uses `pipeline_cache` during creation.
    pub fn set_pipeline_cache(mut self, pipeline_cache: &'a Arc<PipelineCache>) -> Self {
        self.pipeline_cache = Some(pipeline_cache);
        self
    }
    /// Sets the `VkPipelineCreateFlagBits` for creation.
    pub fn set_flags(mut self, flags: impl Into<VkPipelineCreateFlagBits>) -> Self {
        self.flags = flags.into();
        self
    }
    /// Adds a pipeline library whose payload/attribute sizes widen the
    /// library interface computed in `build`.
    pub fn add_library(mut self, library: Library<'a>) -> Self {
        self.libraries.push(library);
        self
    }
    /// Adds a ray generation or miss shader as its own general shader group,
    /// recording it (plus optional inline `data`) in the SBT layout.
    ///
    /// Panics for any other shader type.
    pub fn add_shader(
        mut self,
        shader_module: Arc<ShaderModule>,
        data: Option<Vec<u8>>,
        specialization_constants: Option<SpecializationConstants>,
    ) -> Self {
        // The SBT entry is keyed by the group index this shader will occupy
        // (current length of `shader_groups` before the push below).
        self.shader_binding_table_builder = match shader_module.shader_type() {
            ShaderType::RayGeneration => self
                .shader_binding_table_builder
                .add_ray_gen_program(self.shader_groups.len() as u32, data),
            ShaderType::Miss => self
                .shader_binding_table_builder
                .add_miss_program(self.shader_groups.len() as u32, data),
            _ => panic!(
                "unsupported shader type: {:?}, expected RayGen or Miss Shader",
                shader_module.shader_type()
            ),
        };
        // Stage index within `shader_modules` referenced by the new group.
        let shader_index = self.shader_modules.len();
        self.shader_modules
            .push((shader_module, specialization_constants));
        self.shader_groups
            .push(VkRayTracingShaderGroupCreateInfoKHR::new(
                VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR,
                shader_index as u32,
                VK_SHADER_UNUSED_KHR,
                VK_SHADER_UNUSED_KHR,
                VK_SHADER_UNUSED_KHR,
            ));
        self
    }
    /// Adds one hit group from any combination of any-hit, closest-hit, and
    /// intersection shaders; an intersection shader switches the group to the
    /// procedural type. Debug builds panic when a slot is filled twice.
    pub fn add_hit_shaders(
        mut self,
        shader_modules: impl IntoIterator<Item = (Arc<ShaderModule>, Option<SpecializationConstants>)>,
        data: Option<Vec<u8>>,
    ) -> Self {
        // Start with an empty triangle hit group and fill slots as we go.
        let mut group = VkRayTracingShaderGroupCreateInfoKHR::new(
            VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR,
            VK_SHADER_UNUSED_KHR,
            VK_SHADER_UNUSED_KHR,
            VK_SHADER_UNUSED_KHR,
            VK_SHADER_UNUSED_KHR,
        );
        for (shader_module, specialization_constant) in shader_modules.into_iter() {
            let shader_index = self.shader_modules.len() as u32;
            match shader_module.shader_type() {
                ShaderType::AnyHit => {
                    // sanity check
                    if cfg!(debug_assertions) && group.anyHitShader != VK_SHADER_UNUSED_KHR {
                        panic!("any hit shader already used in current hit group");
                    }
                    group.anyHitShader = shader_index;
                }
                ShaderType::ClosestHit => {
                    // sanity check
                    if cfg!(debug_assertions) && group.closestHitShader != VK_SHADER_UNUSED_KHR {
                        panic!("closest hit shader already used in current hit group");
                    }
                    group.closestHitShader = shader_index;
                }
                ShaderType::Intersection => {
                    // sanity check
                    if cfg!(debug_assertions) && group.intersectionShader != VK_SHADER_UNUSED_KHR {
                        panic!("intersection shader already used in current hit group");
                    }
                    group.intersectionShader = shader_index;
                    group.r#type = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR;
                }
                _ => panic!("unsupported shader type: {:?}, expected AnyHit, ClosestHit or Intersection Shader", shader_module.shader_type()),
            }
            self.shader_modules
                .push((shader_module, specialization_constant));
        }
        // Register the whole group (and its inline data) in the SBT layout.
        self.shader_binding_table_builder = self
            .shader_binding_table_builder
            .add_hit_group_program(self.shader_groups.len() as u32, data);
        self.shader_groups.push(group);
        self
    }
    /// Requested ray recursion depth; clamped to the device limit in `build`.
    pub fn max_recursion_depth(mut self, max_recursion_depth: u32) -> Self {
        self.max_recursion = max_recursion_depth;
        self
    }
    /// Creates the ray tracing pipeline and its shader binding table.
    pub fn build(
        mut self,
        device: Arc<Device>,
        pipeline_layout: &Arc<PipelineLayout>,
    ) -> Result<(Arc<Pipeline>, ShaderBindingTable)> {
        let shader_stages: Vec<VkPipelineShaderStageCreateInfo> = self
            .shader_modules
            .iter()
            .map(|(shader, specialization_constant)| {
                let mut stage_info = shader.pipeline_stage_info();
                if let Some(specialization_constant) = specialization_constant {
                    stage_info.set_specialization_info(specialization_constant.vk_handle());
                }
                stage_info
            })
            .collect();
        // check that we dont exceed the gpu's capabilities
        let max_recursion = Self::check_max_recursion(&device, self.max_recursion);
        let pipeline = {
            // The library interface must cover the largest payload and
            // attribute sizes across all attached libraries.
            let mut libraries = Vec::with_capacity(self.libraries.len());
            let mut library_interface = VkRayTracingPipelineInterfaceCreateInfoKHR::new(0, 0);
            for library in self.libraries.iter() {
                libraries.push(library.pipeline.vk_handle());
                library_interface.maxPipelineRayPayloadSize = library_interface
                    .maxPipelineRayPayloadSize
                    .max(library.max_payload_size);
                library_interface.maxPipelineRayHitAttributeSize = library_interface
                    .maxPipelineRayHitAttributeSize
                    .max(library.max_attribute_size);
            }
            let lib_create_info = VkPipelineLibraryCreateInfoKHR::new(&libraries);
            let dynamic_states = VkPipelineDynamicStateCreateInfo::new(0, &self.dynamic_states);
            device.create_ray_tracing_pipelines(
                None,
                self.pipeline_cache.map(|cache| cache.vk_handle()),
                &[VkRayTracingPipelineCreateInfoKHR::new(
                    self.flags,
                    &shader_stages, // stages
                    &self.shader_groups, // groups
                    max_recursion,
                    &lib_create_info, // libraries
                    &library_interface, // library interfaces
                    &dynamic_states,
                    pipeline_layout.vk_handle(),
                )],
                None,
            )?[0]
        };
        let pipeline = Arc::new(Pipeline::new(
            device.clone(),
            pipeline_layout.clone(),
            PipelineType::RayTracing,
            pipeline,
        ));
        // The SBT needs the finished pipeline to query its group handles.
        let sbt = self
            .shader_binding_table_builder
            .build(&device, &pipeline)?;
        Ok((pipeline, sbt))
    }
}
impl<'a> Default for RayTracingPipelineBuilder<'a> {
    /// Empty builder with a default recursion depth of 2 and a fresh shader
    /// binding table layout.
    fn default() -> Self {
        Self {
            shader_modules: Vec::new(),
            shader_groups: Vec::new(),
            flags: 0.into(),
            max_recursion: 2,
            libraries: Vec::new(),
            dynamic_states: Vec::new(),
            shader_binding_table_builder: ShaderBindingTableBuilder::new(),
            pipeline_cache: None,
        }
    }
}

View file

@ -0,0 +1,289 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// One record of the shader binding table: the shader group it references and
/// optional inline data that is written directly after the group handle.
struct ShaderBindingTableEntry {
    group_index: u32,
    inline_data: Vec<u8>,
}
/// Collects ray generation, miss and hit group entries and packs them into a
/// `ShaderBindingTable` buffer in `build`.
pub(crate) struct ShaderBindingTableBuilder {
    ray_gen_entries: Vec<ShaderBindingTableEntry>,
    miss_entries: Vec<ShaderBindingTableEntry>,
    hit_group_entries: Vec<ShaderBindingTableEntry>,
}
/// The packed shader binding table buffer plus the strided device address
/// regions that describe the ray generation, miss, hit and callable groups.
pub struct ShaderBindingTable {
    // kept only to keep the buffer alive while the regions point into it
    _sbt_buffer: Arc<Buffer<u8>>,
    raygen_shader_binding_table: VkStridedDeviceAddressRegionKHR,
    miss_shader_binding_table: VkStridedDeviceAddressRegionKHR,
    hit_shader_binding_table: VkStridedDeviceAddressRegionKHR,
    callable_shader_binding_table: VkStridedDeviceAddressRegionKHR,
}
impl ShaderBindingTable {
    /// Region describing the ray generation group.
    pub fn raygen_shader_binding_table(&self) -> &VkStridedDeviceAddressRegionKHR {
        &self.raygen_shader_binding_table
    }

    /// Region describing the miss shader group.
    pub fn miss_shader_binding_table(&self) -> &VkStridedDeviceAddressRegionKHR {
        &self.miss_shader_binding_table
    }

    /// Region describing the hit group.
    pub fn hit_shader_binding_table(&self) -> &VkStridedDeviceAddressRegionKHR {
        &self.hit_shader_binding_table
    }

    /// Region describing the callable shader group (always empty for now).
    pub fn callable_shader_binding_table(&self) -> &VkStridedDeviceAddressRegionKHR {
        &self.callable_shader_binding_table
    }

    /// Derives the strided address regions from the packed buffer layout
    /// produced by `ShaderBindingTableBuilder`: offsets are relative to the
    /// buffer's device address, sizes are `stride * count`.
    fn create(
        sbt_buffer: Arc<Buffer<u8>>,
        ray_gen_entry_size: VkDeviceSize,
        ray_gen_entry_count: VkDeviceSize,
        miss_offset: VkDeviceSize,
        miss_entry_size: VkDeviceSize,
        miss_entry_count: VkDeviceSize,
        hit_group_offset: VkDeviceSize,
        hit_group_entry_size: VkDeviceSize,
        hit_group_entry_count: VkDeviceSize,
    ) -> Self {
        let base_address: VkDeviceAddress = sbt_buffer.device_address().into();

        // region starting `offset` bytes into the buffer, holding `count`
        // entries of `stride` bytes each
        let region = |offset: VkDeviceSize, stride: VkDeviceSize, count: VkDeviceSize| {
            VkStridedDeviceAddressRegionKHR {
                deviceAddress: base_address + offset,
                stride,
                size: stride * count,
            }
        };

        Self {
            raygen_shader_binding_table: region(0, ray_gen_entry_size, ray_gen_entry_count),
            miss_shader_binding_table: region(miss_offset, miss_entry_size, miss_entry_count),
            hit_shader_binding_table: region(
                hit_group_offset,
                hit_group_entry_size,
                hit_group_entry_count,
            ),
            // callable shaders are not supported by the builder yet,
            // so this region stays null
            callable_shader_binding_table: VkStridedDeviceAddressRegionKHR {
                deviceAddress: 0,
                stride: 0,
                size: 0,
            },
            _sbt_buffer: sbt_buffer,
        }
    }
}
impl ShaderBindingTableBuilder {
    /// Creates an empty builder with no shader entries.
    pub(crate) fn new() -> ShaderBindingTableBuilder {
        ShaderBindingTableBuilder {
            ray_gen_entries: Vec::new(),
            miss_entries: Vec::new(),
            hit_group_entries: Vec::new(),
        }
    }

    /// Adds a ray generation entry referencing shader group `group_index`,
    /// with optional inline data embedded after the shader handle.
    pub(crate) fn add_ray_gen_program(mut self, group_index: u32, data: Option<Vec<u8>>) -> Self {
        self.ray_gen_entries.push(ShaderBindingTableEntry {
            group_index,
            inline_data: data.unwrap_or_default(),
        });

        self
    }

    /// Adds a miss shader entry referencing shader group `group_index`,
    /// with optional inline data embedded after the shader handle.
    pub(crate) fn add_miss_program(mut self, group_index: u32, data: Option<Vec<u8>>) -> Self {
        self.miss_entries.push(ShaderBindingTableEntry {
            group_index,
            inline_data: data.unwrap_or_default(),
        });

        self
    }

    /// Adds a hit group entry referencing shader group `group_index`,
    /// with optional inline data embedded after the shader handle.
    pub(crate) fn add_hit_group_program(mut self, group_index: u32, data: Option<Vec<u8>>) -> Self {
        self.hit_group_entries.push(ShaderBindingTableEntry {
            group_index,
            inline_data: data.unwrap_or_default(),
        });

        self
    }

    /// Packs all registered entries into a host-visible buffer and returns the
    /// strided regions describing it.
    ///
    /// Buffer layout: `[ray gen group | miss group | hit group]`, each group
    /// starting at a multiple of `shaderGroupBaseAlignment`, each entry padded
    /// to its group's stride.
    pub(crate) fn build(
        &mut self,
        device: &Arc<Device>,
        pipeline: &Arc<Pipeline>,
    ) -> Result<ShaderBindingTable> {
        let ray_tracing_properties = device.physical_device().ray_tracing_properties();

        let prog_id_size = ray_tracing_properties.shaderGroupHandleSize;
        let base_alignment = ray_tracing_properties.shaderGroupBaseAlignment;

        // per-group entry stride: handle + largest inline data, padded to a
        // multiple of the handle size
        let ray_gen_entry_size =
            Self::entry_size(prog_id_size, &self.ray_gen_entries, prog_id_size as u64);
        let miss_entry_size =
            Self::entry_size(prog_id_size, &self.miss_entries, prog_id_size as u64);
        let hit_group_entry_size =
            Self::entry_size(prog_id_size, &self.hit_group_entries, prog_id_size as u64);

        // Reserve each group's segment rounded up to the base alignment,
        // because `copy_shader_data` aligns the running write offset the same
        // way before the next group starts. (Previously `.max(base_alignment)`
        // was used, which under-reserves when a segment is larger than the
        // base alignment but not a multiple of it.)
        // `shaderGroupBaseAlignment` is a power of two, so `round_up` is valid.
        let sbt_size = Self::round_up(
            ray_gen_entry_size * self.ray_gen_entries.len() as VkDeviceSize,
            base_alignment as VkDeviceSize,
        ) + Self::round_up(
            miss_entry_size * self.miss_entries.len() as VkDeviceSize,
            base_alignment as VkDeviceSize,
        ) + hit_group_entry_size * self.hit_group_entries.len() as VkDeviceSize;

        let group_count =
            self.ray_gen_entries.len() + self.miss_entries.len() + self.hit_group_entries.len();

        // query the opaque shader group handles from the pipeline
        let shader_handle_storage =
            pipeline.ray_tracing_shader_group_handles(group_count as u32, prog_id_size)?;

        let mut sbt_data = vec![0; sbt_size as usize];
        let mut offset = 0;

        Self::copy_shader_data(
            &mut sbt_data,
            prog_id_size,
            &mut offset,
            &self.ray_gen_entries,
            ray_gen_entry_size,
            base_alignment,
            &shader_handle_storage,
        );

        let miss_offset = offset;

        Self::copy_shader_data(
            &mut sbt_data,
            prog_id_size,
            &mut offset,
            &self.miss_entries,
            miss_entry_size,
            base_alignment,
            &shader_handle_storage,
        );

        let hit_group_offset = offset;

        Self::copy_shader_data(
            &mut sbt_data,
            prog_id_size,
            &mut offset,
            &self.hit_group_entries,
            hit_group_entry_size,
            base_alignment,
            &shader_handle_storage,
        );

        let sbt_buffer = Buffer::builder()
            .set_usage(
                VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR
                    | VK_BUFFER_USAGE_TRANSFER_SRC_BIT
                    | VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
            )
            .set_memory_usage(MemoryUsage::CpuToGpu)
            .set_data(&sbt_data)
            .build(device.clone())?;

        Ok(ShaderBindingTable::create(
            sbt_buffer,
            ray_gen_entry_size,
            self.ray_gen_entries.len() as VkDeviceSize,
            miss_offset,
            miss_entry_size,
            self.miss_entries.len() as VkDeviceSize,
            hit_group_offset,
            hit_group_entry_size,
            self.hit_group_entries.len() as VkDeviceSize,
        ))
    }
}

impl ShaderBindingTableBuilder {
    /// Byte stride of one entry in a group: shader handle plus the largest
    /// inline data of any entry, rounded up to a multiple of `padding`.
    #[inline]
    fn entry_size(
        prog_id_size: u32,
        entries: &[ShaderBindingTableEntry],
        padding: u64,
    ) -> VkDeviceSize {
        let max_args = entries
            .iter()
            .map(|entry| entry.inline_data.len())
            .max()
            .unwrap_or(0);

        Self::round_up(
            prog_id_size as VkDeviceSize + max_args as VkDeviceSize,
            padding,
        )
    }

    /// Rounds `source` up to the next multiple of `value`.
    /// `value` must be a power of two.
    #[inline]
    fn round_up(source: u64, value: u64) -> u64 {
        ((source) + (value) - 1) & !((value) - 1)
    }

    /// Writes each entry's shader group handle (and optional inline data) into
    /// `sbt_data`, advancing `offset` by the full entry stride so the packed
    /// layout matches the `stride` later reported to Vulkan, then aligns
    /// `offset` to `base_alignment` for the next group.
    #[inline]
    fn copy_shader_data(
        sbt_data: &mut Vec<u8>,
        prog_id_size: u32,
        offset: &mut VkDeviceSize,
        entries: &[ShaderBindingTableEntry],
        entry_size: VkDeviceSize,
        base_alignment: u32,
        shader_handle_storage: &[u8],
    ) {
        for entry in entries {
            // copy the shader identifier
            {
                let sbt_start = *offset as usize;
                let sbt_end = sbt_start + prog_id_size as usize;

                let shs_start = (entry.group_index * prog_id_size) as usize;
                let shs_end = shs_start + prog_id_size as usize;

                sbt_data[sbt_start..sbt_end]
                    .copy_from_slice(&shader_handle_storage[shs_start..shs_end]);
            }

            // copy inline data if present; it sits directly after the handle
            // and the entry stride reserves room for it
            if !entry.inline_data.is_empty() {
                let sbt_start = (*offset + prog_id_size as VkDeviceSize) as usize;
                let sbt_end = sbt_start + entry.inline_data.len();

                sbt_data[sbt_start..sbt_end].copy_from_slice(&entry.inline_data);
            }

            // advance by the full entry stride (previously advanced only by
            // `prog_id_size`, which made entries with inline data overlap the
            // following entry's handle)
            *offset += entry_size;
        }

        // start the next group at a multiple of the base alignment
        let modulo = *offset % base_alignment as VkDeviceSize;
        if modulo != 0 {
            *offset += base_alignment as VkDeviceSize - modulo;
        }
    }
}

49
vulkan-rs/src/prelude.rs Normal file
View file

@ -0,0 +1,49 @@
// vulkan structures
pub use super::address::Address;
pub use super::buffer::Buffer;
pub use super::commandbuffer::{
CommandBuffer, CommandBufferBuilder, CommandBufferRecorder, QueryEnable,
};
pub use super::deferred_operation::*;
pub use super::descriptorpool::DescriptorPool;
pub use super::descriptorset::*;
pub use super::descriptorsetlayout::DescriptorSetLayout;
pub use super::device::{Device, DeviceExtensions, DeviceFeatures};
pub use super::fence::Fence;
pub use super::framebuffer::{Framebuffer, FramebufferBuilder};
pub use super::image::*;
pub use super::instance::*;
pub use super::memory::{Memory, MemoryUsage};
pub use super::physicaldevice::PhysicalDevice;
pub use super::pipeline::Pipeline;
pub use super::pipelinecache::PipelineCache;
pub use super::pipelinelayout::{PipelineLayout, PipelineLayoutBuilder};
pub use super::querypool::QueryPool;
pub use super::queue::*;
pub use super::renderpass::RenderPass;
pub use super::sampler_manager::{Sampler, SamplerBuilder};
pub use super::semaphore::Semaphore;
pub use super::shadermodule::{
AddSpecializationConstant, ShaderModule, ShaderType, SpecializationConstants,
};
pub use super::surface::Surface;
pub use super::swapchain::Swapchain;
pub use super::pipelines::{
compute_pipeline::ComputePipelineBuilder, graphics_pipeline::GraphicsPipelineBuilder,
shader_binding_table::ShaderBindingTable,
};
pub use super::pipelines::ray_tracing_pipeline::RayTracingPipelineBuilder;
pub use super::acceleration_structure::{AccelerationStructure, AccelerationStructureBuilder};
pub use super::{OutOfDate, VkHandle, VulkanDevice};
pub use image;
pub use vulkan_sys::prelude::*;
pub use super::render_target::{
sub_pass::{ClearValue, CustomTarget, SubPass, SubPassBuilder},
RenderTarget,
};

View file

@ -0,0 +1,66 @@
use crate::prelude::*;
use anyhow::Result;
use std::mem;
use std::sync::Arc;
/// Owning wrapper around a `VkQueryPool`; the raw handle is destroyed in
/// `Drop`.
#[derive(Debug)]
pub struct QueryPool {
    device: Arc<Device>,
    query_pool: VkQueryPool,
    query_count: u32,
}
impl QueryPool {
    /// Creates a query pool with `query_count` slots of the given query type.
    pub fn new(
        device: Arc<Device>,
        query_type: VkQueryType,
        query_count: u32,
        pipeline_statistics: impl Into<VkQueryPipelineStatisticFlagBits>,
    ) -> Result<Arc<QueryPool>> {
        let query_pool = device.create_query_pool(&VkQueryPoolCreateInfo::new(
            VK_QUERY_POOL_CREATE_NULL_BIT,
            query_type,
            query_count,
            pipeline_statistics,
        ))?;

        Ok(Arc::new(QueryPool {
            device,
            query_pool,
            query_count,
        }))
    }

    /// Reads back one 64-bit result per query slot.
    pub fn get_results(&self) -> Result<Vec<u64>> {
        let mut results = vec![0u64; self.query_count as usize];

        self.device.query_pool_results(
            self.query_pool,
            0,
            self.query_count,
            &mut results,
            mem::size_of::<u64>() as u64,
            VK_QUERY_RESULT_64_BIT,
        )?;

        Ok(results)
    }
}
impl VulkanDevice for QueryPool {
    /// Device this query pool was created on.
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
// generates the `VkHandle<VkQueryPool>` impl returning `self.query_pool`
impl_vk_handle!(QueryPool, VkQueryPool, query_pool);
impl Drop for QueryPool {
    fn drop(&mut self) {
        // NOTE(review): assumes no in-flight command buffer still references
        // the pool when it is dropped - callers must synchronize beforehand
        self.device.destroy_query_pool(self.query_pool);
    }
}

242
vulkan-rs/src/queue.rs Normal file
View file

@ -0,0 +1,242 @@
use crate::prelude::*;
use anyhow::Result;
use std::{
slice,
sync::{Arc, Mutex},
time::Duration,
};
/// Everything needed to request a queue at device creation time and to
/// retrieve it afterwards (family index + index within the family).
pub struct QueueRequestInfo {
    pub queue_create_info: VkDeviceQueueCreateInfo,
    pub queue_family_index: u32,
    pub queue_index: u32,
}
/// Wrapper around a `VkQueue` together with the family/queue indices it was
/// retrieved with. Queues are shared behind `Arc<Mutex<_>>` (see `Queue::new`).
#[derive(Debug)]
pub struct Queue {
    device: Arc<Device>,
    queue: VkQueue,
    family_index: u32,
    queue_index: u32,
}
impl Queue {
    /// Builds the request info for a queue that supports `queue_type` and can
    /// present to the given surface.
    pub fn create_presentable_request_info(
        physical_device: &Arc<PhysicalDevice>,
        surface: &Arc<Surface>,
        queue_type: impl Into<VkQueueFlagBits>,
    ) -> Result<QueueRequestInfo> {
        let queue_family_index =
            Self::find_presentable_queue_index(physical_device, surface, queue_type.into())?;
        let priorities = &[0.0f32];

        Ok(QueueRequestInfo {
            queue_create_info: VkDeviceQueueCreateInfo::new(0, queue_family_index, priorities),
            queue_family_index,
            queue_index: 0,
        })
    }

    /// Builds the request info for a queue that supports `queue_type`,
    /// without any presentation requirement.
    pub fn create_non_presentable_request_info(
        physical_device: &Arc<PhysicalDevice>,
        queue_type: impl Into<VkQueueFlagBits>,
    ) -> Result<QueueRequestInfo> {
        let queue_family_index =
            Self::find_non_presentable_queue_index(physical_device, queue_type.into())?;
        let priorities = &[0.0f32];

        Ok(QueueRequestInfo {
            queue_create_info: VkDeviceQueueCreateInfo::new(0, queue_family_index, priorities),
            queue_family_index,
            queue_index: 0,
        })
    }

    /// Wraps a raw `VkQueue` handle; the mutex serializes access to the queue.
    pub fn new(
        device: Arc<Device>,
        queue: VkQueue,
        family_index: u32,
        queue_index: u32,
    ) -> Arc<Mutex<Queue>> {
        Arc::new(Mutex::new(Queue {
            device,
            queue,
            family_index,
            queue_index,
        }))
    }

    /// Index of the queue family this queue belongs to.
    pub fn family_index(&self) -> u32 {
        self.family_index
    }

    /// Index of this queue within its family.
    pub fn queue_index(&self) -> u32 {
        self.queue_index
    }

    /// Submits work to this queue, optionally signalling `fence` when the
    /// submission completes.
    pub fn submit(&self, fence: Option<&Arc<Fence>>, submits: &[SubmitInfo]) -> Result<()> {
        let submit_infos: Vec<VkSubmitInfo> =
            submits.iter().map(SubmitInfo::as_vk_submit).collect();

        let vk_fence = fence.map_or(VkFence::NULL_HANDLE, |fence| fence.vk_handle());

        self.device
            .queue_submit(self.queue, submit_infos.as_slice(), vk_fence)
    }

    /// Convenience helper: submits `command_buffers` in a single batch and
    /// blocks until a fence signals completion or `time_out` elapses.
    /// Expensive - intended for one-shot work only.
    pub fn minimal_submit(
        &self,
        time_out: Duration,
        command_buffers: &[Arc<CommandBuffer>],
    ) -> Result<()> {
        let mut submit = SubmitInfo::default();

        for command_buffer in command_buffers {
            submit = submit.add_command_buffer(command_buffer);
        }

        let fence = Fence::builder().build(self.device.clone())?;

        self.submit(Some(&fence), slice::from_ref(&submit))?;

        // block until the submission actually finished
        self.device.wait_for_fences(&[&fence], true, time_out)?;

        Ok(())
    }

    /// Queues the given swapchain images for presentation, waiting on the
    /// provided semaphores first.
    pub fn present(
        &self,
        swapchains: &[&Arc<Swapchain>],
        image_indices: &[u32],
        wait_semaphores: &[&Arc<Semaphore>],
    ) -> Result<OutOfDate<()>> {
        let semaphore_handles: Vec<VkSemaphore> =
            wait_semaphores.iter().map(|sem| sem.vk_handle()).collect();
        let swapchain_handles: Vec<VkSwapchainKHR> = swapchains
            .iter()
            .map(|swapchain| swapchain.vk_handle())
            .collect();

        let present_info = VkPresentInfoKHR::new(
            semaphore_handles.as_slice(),
            swapchain_handles.as_slice(),
            image_indices,
            &mut [],
        );

        self.device.queue_present(self.queue, &present_info)
    }

    /// Blocks until all work submitted to this queue has finished.
    pub fn wait_idle(&self) -> Result<()> {
        self.device.queue_wait_idle(self.queue)
    }
}
impl VulkanDevice for Queue {
    /// Device this queue was retrieved from.
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
// generates the `VkHandle<VkQueue>` impl returning `self.queue`
impl_vk_handle!(Queue, VkQueue, queue);
impl Queue {
fn find_presentable_queue_index(
physical_device: &Arc<PhysicalDevice>,
surface: &Arc<Surface>,
flags: VkQueueFlagBits,
) -> Result<u32> {
let surface = surface.vk_handle();
let vk_physical_device = physical_device.vk_handle();
let queue_family_properties = physical_device
.instance()
.physical_device_queue_family_properties(vk_physical_device);
for (i, queue) in queue_family_properties.iter().enumerate() {
if (queue.queueFlagBits & flags) == flags {
let presentable = physical_device.instance().physical_device_surface_support(
vk_physical_device,
i as u32,
surface,
)?;
if presentable {
return Ok(i as u32);
}
}
}
Err(anyhow::Error::msg("Requested queue could not be found"))
}
fn find_non_presentable_queue_index(
physical_device: &Arc<PhysicalDevice>,
flags: VkQueueFlagBits,
) -> Result<u32> {
let vk_physical_device = physical_device.vk_handle();
let queue_family_properties = physical_device
.instance()
.physical_device_queue_family_properties(vk_physical_device);
for (i, queue) in queue_family_properties.iter().enumerate() {
if (queue.queueFlagBits & flags) == flags {
return Ok(i as u32);
}
}
Err(anyhow::Error::msg("Requested queue could not be found"))
}
}
/// Collects the raw handles for one queue submission; turned into a transient
/// `VkSubmitInfo` via `as_vk_submit`.
#[derive(Default)]
pub struct SubmitInfo {
    wait_semaphores: Vec<VkSemaphore>,
    wait_stages: Vec<VkPipelineStageFlagBits>,
    command_buffers: Vec<VkCommandBuffer>,
    signal_semaphores: Vec<VkSemaphore>,
}
impl SubmitInfo {
    /// Adds a semaphore the submission waits on before executing.
    pub fn add_wait_semaphore(mut self, wait_semaphore: impl VkHandle<VkSemaphore>) -> Self {
        self.wait_semaphores.push(wait_semaphore.vk_handle());

        self
    }

    /// Adds the pipeline stage at which the corresponding wait occurs.
    pub fn add_wait_stage(mut self, wait_stage: impl Into<VkPipelineStageFlagBits>) -> Self {
        self.wait_stages.push(wait_stage.into());

        self
    }

    /// Adds a command buffer to execute in this submission.
    pub fn add_command_buffer(mut self, command_buffer: impl VkHandle<VkCommandBuffer>) -> Self {
        self.command_buffers.push(command_buffer.vk_handle());

        self
    }

    /// Adds a semaphore signalled once the submission completes.
    pub fn add_signal_semaphore(mut self, signal_semaphore: impl VkHandle<VkSemaphore>) -> Self {
        self.signal_semaphores.push(signal_semaphore.vk_handle());

        self
    }

    /// Creates the transient `VkSubmitInfo` pointing into this struct's
    /// vectors; `self` must outlive the returned value.
    pub fn as_vk_submit(&self) -> VkSubmitInfo {
        VkSubmitInfo::new(
            &self.wait_semaphores,
            &self.wait_stages,
            &self.command_buffers,
            &self.signal_semaphores,
        )
    }
}

View file

@ -0,0 +1,390 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
pub mod sub_pass;
use sub_pass::{AttachmentInfo, AttachmentInfoUsage, SubPass};
/// Builder collecting sub passes that together form a `RenderTarget`.
pub struct RenderTargetBuilder {
    sub_passes: Vec<SubPass>,
}
impl RenderTargetBuilder {
    /// Appends a sub pass; execution order matches insertion order.
    pub fn add_sub_pass(mut self, sub_pass: SubPass) -> Self {
        self.sub_passes.push(sub_pass);

        self
    }

    /// Creates the render pass, one framebuffer per swapchain image and the
    /// clear values from all added sub passes.
    pub fn build(self, device: &Arc<Device>) -> Result<RenderTarget> {
        #[cfg(debug_assertions)]
        {
            // sub passes must not be empty
            assert!(!self.sub_passes.is_empty());

            // sub passes must all have the same extent
            let first_extent = self.sub_passes[0].extent();

            for sub_pass in self.sub_passes.iter() {
                assert!(sub_pass.extent() == first_extent);
            }
        }

        // create render pass

        // gather attachment descriptions from every sub pass, in order
        let mut attachments = Vec::new();

        self.map_attachment(|attachment| {
            attachments.push(attachment.description.clone());
        });

        // create attachment references; `attachment_index` is the global
        // index into the flattened attachment list built above
        let mut attachment_references: Vec<SubPassAttachmentReferences> = Vec::new();
        let mut attachment_index = 0;

        // gather all color, depth and resolve attachments and add input
        // attachments from previous sub passes
        for sub_pass in self.sub_passes.iter() {
            let mut references = SubPassAttachmentReferences::default();
            references.offset = attachment_index as usize;

            for attachment in sub_pass.attachments().iter() {
                let attachment_reference = VkAttachmentReference {
                    attachment: attachment_index,
                    layout: attachment.layout,
                };

                match attachment.usage {
                    AttachmentInfoUsage::Output => {
                        references.color_attachments.push(attachment_reference);
                    }
                    AttachmentInfoUsage::Resolve => {
                        references.resolve_attachments.push(attachment_reference);
                    }
                    AttachmentInfoUsage::Depth => {
                        // make sure only 1 depth attachment is used per subpass
                        debug_assert!(
                            references.depth_stencil_attachment.is_none(),
                            "only 1 depth attachment per sub pass allowed"
                        );

                        references.depth_stencil_attachment = Some(attachment_reference);
                    }
                }

                attachment_index += 1;
            }

            // check if input infos are set; input indices are local to the
            // referenced sub pass, so add that sub pass' global offset
            if let Some(input_info) = sub_pass.inputs() {
                debug_assert!(input_info.sub_pass_index < attachment_references.len());

                let input_pass_references = &attachment_references[input_info.sub_pass_index];

                for input_index in input_info.input_indices.iter() {
                    references.input_attachments.push(VkAttachmentReference {
                        attachment: (input_index + input_pass_references.offset) as u32,
                        layout: VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                    });
                }
            }

            attachment_references.push(references);
        }

        // gather sub pass descriptions
        let mut descriptions = Vec::new();

        for attachment_reference in attachment_references.iter() {
            descriptions.push(VkSubpassDescription::new(
                0,
                &attachment_reference.input_attachments,
                &attachment_reference.color_attachments,
                &attachment_reference.resolve_attachments,
                attachment_reference.depth_stencil_attachment.as_ref(),
                &[],
            ));
        }

        // gather sub pass dependencies: external -> 0, then i -> i + 1
        let mut dependencies = Vec::new();

        dependencies.push(VkSubpassDependency::new(
            VK_SUBPASS_EXTERNAL,
            0,
            VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
            VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
            VK_ACCESS_MEMORY_READ_BIT,
            VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
            VK_DEPENDENCY_BY_REGION_BIT,
        ));

        for (index, sub_pass) in self.sub_passes.iter().enumerate() {
            dependencies.push(VkSubpassDependency::new(
                index as u32,
                index as u32 + 1,
                VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
                CommandBuffer::access_to_stage(sub_pass.output_usage()),
                VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
                sub_pass.output_usage(),
                VK_DEPENDENCY_BY_REGION_BIT,
            ));
        }

        // rewrite the last dependency so it targets the external scope
        // instead of a non-existent sub pass index
        if let Some(last_dependency) = dependencies.last_mut() {
            last_dependency.dstSubpass = VK_SUBPASS_EXTERNAL;
            last_dependency.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT.into();
            last_dependency.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT.into();
        }

        let render_pass =
            RenderPass::new(device.clone(), &descriptions, &attachments, &dependencies)?;

        // create frame buffers, one per swapchain image
        let max_images = self.max_images();
        let extent = self.sub_passes[0].extent();

        let framebuffers: Result<Vec<Arc<Framebuffer>>> = (0..max_images)
            .map(|i| {
                let mut framebuffer_builder = Framebuffer::builder()
                    .set_render_pass(&render_pass)
                    .set_width(extent.width)
                    .set_height(extent.height);

                for sub_pass in self.sub_passes.iter() {
                    for attachment in sub_pass.attachments().iter() {
                        framebuffer_builder =
                            framebuffer_builder.add_attachment(attachment.image(i));
                    }
                }

                framebuffer_builder.build(render_pass.device().clone())
            })
            .collect();

        // clear values in the same flattened order as the attachments
        let mut clear_values = Vec::new();

        self.map_attachment(|attachment| {
            clear_values.push(attachment.clear_value.clone());
        });

        Ok(RenderTarget {
            render_pass,
            framebuffers: framebuffers?,
            clear_values,
            extent,
            sub_passes: self.sub_passes,
        })
    }

    /// Largest image count over all attachments; determines how many
    /// framebuffers are created.
    #[inline]
    fn max_images(&self) -> usize {
        let mut max_images = 0;

        for sub_pass in self.sub_passes.iter() {
            max_images = max_images.max(sub_pass.max_images_per_attachment());
        }

        max_images
    }

    /// Calls `f` on every attachment of every sub pass, in order.
    #[inline]
    fn map_attachment<'a, F>(&'a self, mut f: F)
    where
        F: FnMut(&'a AttachmentInfo) -> (),
    {
        for sub_pass in self.sub_passes.iter() {
            for attachment in sub_pass.attachments().iter() {
                f(attachment);
            }
        }
    }
}
/// Per-sub-pass attachment reference lists; `offset` is the global attachment
/// index at which this sub pass' attachments start in the flattened list.
#[derive(Default)]
struct SubPassAttachmentReferences {
    offset: usize,
    input_attachments: Vec<VkAttachmentReference>,
    color_attachments: Vec<VkAttachmentReference>,
    resolve_attachments: Vec<VkAttachmentReference>,
    depth_stencil_attachment: Option<VkAttachmentReference>,
}
/// A render pass plus its framebuffers and clear values, ready to be recorded
/// via `begin` / `next_subpass` / `end`.
pub struct RenderTarget {
    render_pass: Arc<RenderPass>,
    framebuffers: Vec<Arc<Framebuffer>>,
    clear_values: Vec<VkClearValue>,
    extent: VkExtent2D,
    sub_passes: Vec<SubPass>,
}
impl RenderTarget {
    /// Starts building a render target from sub passes.
    pub fn builder() -> RenderTargetBuilder {
        RenderTargetBuilder {
            sub_passes: Vec::new(),
        }
    }

    /// Render pass created from all sub passes.
    pub fn render_pass(&self) -> &Arc<RenderPass> {
        &self.render_pass
    }

    /// Framebuffer for the swapchain image at `index`.
    pub fn framebuffer(&self, index: usize) -> &Arc<Framebuffer> {
        &self.framebuffers[index]
    }

    /// Sub pass at `index`, in insertion order.
    pub fn sub_pass(&self, index: usize) -> &SubPass {
        &self.sub_passes[index]
    }

    /// Width in pixels, shared by all sub passes.
    pub fn width(&self) -> u32 {
        self.extent.width
    }

    /// Height in pixels, shared by all sub passes.
    pub fn height(&self) -> u32 {
        self.extent.height
    }

    /// Records the render pass begin over the full extent, applying the
    /// configured clear values.
    pub fn begin(
        &self,
        buffer_recorder: &CommandBufferRecorder<'_>,
        subpass_content: VkSubpassContents,
        framebuffer_index: usize,
    ) {
        let full_area = VkRect2D {
            offset: VkOffset2D { x: 0, y: 0 },
            extent: self.extent,
        };

        let renderpass_begin = VkRenderPassBeginInfo::new(
            self.render_pass.vk_handle(),
            self.framebuffers[framebuffer_index].vk_handle(),
            full_area,
            self.clear_values.as_slice(),
        );

        buffer_recorder.begin_render_pass(renderpass_begin, subpass_content);
    }

    /// Advances recording to the next sub pass.
    pub fn next_subpass(
        &self,
        buffer_recorder: &CommandBufferRecorder<'_>,
        subpass_content: VkSubpassContents,
    ) {
        buffer_recorder.next_subpass(subpass_content);
    }

    /// Records the render pass end.
    pub fn end(&self, buffer_recorder: &CommandBufferRecorder<'_>) {
        buffer_recorder.end_render_pass();
    }
}
// impl<'a> RenderTargetBuilder<'a> {
// fn create_images_and_renderpass(&self, device: &Arc<Device>) -> Result<Arc<RenderPass>> {
// let subpass_descriptions = [match resolve_reference {
// Some(resvole_ref) => VkSubpassDescription::new(
// 0,
// &[],
// color_references.as_slice(),
// &[resvole_ref],
// match depth_reference {
// Some(ref depth_ref) => Some(depth_ref),
// None => None,
// },
// &[],
// ),
// None => VkSubpassDescription::new(
// 0,
// &[],
// color_references.as_slice(),
// &[],
// match depth_reference {
// Some(ref depth_ref) => Some(depth_ref),
// None => None,
// },
// &[],
// ),
// }];
// let dependencies = if color_references.is_empty() {
// // assume, that when no color references are given,
// // we want to store the depth information for later
// if depth_reference.is_some() {
// for attachment in &mut attachments {
// if attachment.format == VK_FORMAT_D16_UNORM {
// attachment.storeOp = VK_ATTACHMENT_STORE_OP_STORE;
// attachment.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
// break;
// }
// }
// }
// [
// VkSubpassDependency::new(
// VK_SUBPASS_EXTERNAL,
// 0,
// VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
// VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
// 0,
// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
// | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
// VK_DEPENDENCY_BY_REGION_BIT,
// ),
// VkSubpassDependency::new(
// 0,
// VK_SUBPASS_EXTERNAL,
// VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT,
// VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
// VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT
// | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
// VK_ACCESS_SHADER_READ_BIT,
// VK_DEPENDENCY_BY_REGION_BIT,
// ),
// ]
// } else {
// [
// VkSubpassDependency::new(
// VK_SUBPASS_EXTERNAL,
// 0,
// VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
// VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_ACCESS_MEMORY_READ_BIT,
// VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
// VK_DEPENDENCY_BY_REGION_BIT,
// ),
// VkSubpassDependency::new(
// 0,
// VK_SUBPASS_EXTERNAL,
// VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
// VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
// VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
// VK_ACCESS_MEMORY_READ_BIT,
// VK_DEPENDENCY_BY_REGION_BIT,
// ),
// ]
// };
// let renderpass = RenderPass::new(
// device.clone(),
// &subpass_descriptions,
// attachments.as_slice(),
// &dependencies,
// )?;
// Ok(renderpass)
// }
// }

View file

@ -0,0 +1,439 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::{Arc, Mutex};
/// Clear value for an attachment: an RGBA color or a depth/stencil pair.
pub enum ClearValue {
    Color([f32; 4]),
    Depth(f32, u32),
}
/// Description of a render target image created by the sub pass builder
/// itself (as opposed to externally prepared images).
pub struct CustomTarget {
    pub usage: VkImageUsageFlagBits,
    pub format: VkFormat,
    pub clear_on_load: bool,
    pub store_on_save: bool,
    // attach a nearest-filter sampler to the created image
    pub attach_sampler: bool,
    // image is consumed as an input attachment by a later sub pass
    // (affects the attachment's final layout)
    pub use_as_input: bool,
    pub clear_value: ClearValue,
}
impl CustomTarget {
    /// Convenience constructor: transient `D16` depth attachment, cleared on
    /// load, discarded after the pass.
    pub fn depth() -> CustomTarget {
        CustomTarget {
            usage: VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.into(),
            format: VK_FORMAT_D16_UNORM,
            clear_on_load: true,
            store_on_save: false,
            attach_sampler: false,
            use_as_input: false,
            clear_value: ClearValue::Depth(1.0, 0),
        }
    }

    /// Creates the image and `VkAttachmentDescription` for this target.
    /// Only color and depth/stencil usages are implemented; anything else
    /// hits `unimplemented!`.
    fn to_attachment_info(
        &self,
        device: &Arc<Device>,
        queue: &Arc<Mutex<Queue>>,
        width: u32,
        height: u32,
        sample_count: VkSampleCountFlags,
    ) -> Result<AttachmentInfo> {
        let clear_operation = SubPassBuilder::clear_op(self.clear_on_load);
        let store_operation = SubPassBuilder::store_op(self.store_on_save);

        // set clear values
        let clear_value = match self.clear_value {
            ClearValue::Color(color) => VkClearValue::color(VkClearColorValue::float32(color)),
            ClearValue::Depth(depth, stencil) => {
                VkClearValue::depth_stencil(VkClearDepthStencilValue { depth, stencil })
            }
        };

        // check for color attachment flag
        let (format, aspect, description, usage, layout) =
            if (self.usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) != 0 {
                // set color attachment
                let description = VkAttachmentDescription::new(
                    0,
                    self.format,
                    sample_count,
                    clear_operation,
                    store_operation,
                    VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                    VK_ATTACHMENT_STORE_OP_DONT_CARE,
                    VK_IMAGE_LAYOUT_UNDEFINED,
                    // images that get sampled or used as input attachments
                    // end the pass in a shader-readable layout
                    if self.attach_sampler || self.use_as_input {
                        VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL
                    } else {
                        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
                    },
                );

                (
                    self.format,
                    VK_IMAGE_ASPECT_COLOR_BIT,
                    description,
                    AttachmentInfoUsage::Output,
                    VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                )
            // check for depth attachment flag
            } else if (self.usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) != 0 {
                // set depth attachment
                let description = VkAttachmentDescription::new(
                    0,
                    self.format,
                    sample_count,
                    clear_operation,
                    store_operation,
                    VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                    VK_ATTACHMENT_STORE_OP_DONT_CARE,
                    VK_IMAGE_LAYOUT_UNDEFINED,
                    VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                );

                // take format and aspect mask
                (
                    self.format,
                    VK_IMAGE_ASPECT_DEPTH_BIT,
                    description,
                    AttachmentInfoUsage::Depth,
                    VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
                )
            } else {
                // TODO: add more as required
                unimplemented!();
            };

        let mut image_builder = Image::empty(width, height, self.usage, sample_count)
            .format(format)
            .aspect_mask(aspect);

        if self.attach_sampler {
            image_builder = image_builder.attach_sampler(Sampler::nearest_sampler().build(device)?);
        }

        let image = image_builder.build(device, queue)?;

        // bring the freshly created image into its initial layout
        match aspect {
            VK_IMAGE_ASPECT_DEPTH_BIT => {
                Image::convert_layout(&image, VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL)?
            }
            VK_IMAGE_ASPECT_COLOR_BIT => {
                Image::convert_layout(&image, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)?
            }
            _ => unimplemented!(),
        }

        Ok(AttachmentInfo {
            images: vec![image],
            clear_value,
            layout,
            description,
            usage,
        })
    }
}
/// Source of a resolve attachment: either created by the builder or provided
/// by the caller (e.g. swapchain images).
pub enum ResolveTarget {
    CustomTarget(CustomTarget),
    PreparedTargets(Vec<Arc<Image>>),
}
impl From<CustomTarget> for ResolveTarget {
    fn from(custom_target: CustomTarget) -> Self {
        Self::CustomTarget(custom_target)
    }
}

impl From<Vec<Arc<Image>>> for ResolveTarget {
    fn from(prepared_targets: Vec<Arc<Image>>) -> Self {
        Self::PreparedTargets(prepared_targets)
    }
}

impl<'a> From<&'a Vec<Arc<Image>>> for ResolveTarget {
    // clones the vector (cheap: only the `Arc`s are cloned)
    fn from(prepared_targets: &'a Vec<Arc<Image>>) -> Self {
        Self::PreparedTargets(prepared_targets.clone())
    }
}
/// Selects attachments of an earlier sub pass (`sub_pass_index`) to be used
/// as input attachments, addressed by their indices local to that sub pass.
pub struct InputAttachmentInfo {
    pub sub_pass_index: usize,
    pub input_indices: Vec<usize>,
}
/// Builder for a single sub pass: its extent, sample count, custom targets,
/// optionally spliced-in prepared targets, resolve targets and input wiring.
pub struct SubPassBuilder<'a> {
    width: u32,
    height: u32,
    sample_count: VkSampleCountFlags,
    target_infos: Vec<CustomTarget>,
    input_info: Option<InputAttachmentInfo>,
    // (images, index, clear_color, clear_on_load)
    prepared_targets: Option<(&'a [Arc<Image>], usize, [f32; 4], bool)>,
    resolve_targets: Vec<ResolveTarget>,
    output_usage: VkAccessFlagBits,
}
impl<'a> SubPassBuilder<'a> {
    /// Sets the requested multisample count (clamped to device support in
    /// `create_images`).
    pub fn set_sample_count(mut self, sample_count: VkSampleCountFlags) -> Self {
        self.sample_count = sample_count;
        self
    }

    /// Adds a custom render target that this builder will create.
    pub fn add_target_info(mut self, target: CustomTarget) -> Self {
        self.target_infos.push(target);
        self
    }

    /// Declares which attachments of a previous sub pass are consumed as
    /// input attachments.
    pub fn set_input_attachment_info(mut self, input_info: InputAttachmentInfo) -> Self {
        self.input_info = Some(input_info);
        self
    }

    /// Sets the access mask the sub pass output is later consumed with
    /// (used for the render pass' sub pass dependencies).
    pub fn set_output_usage(mut self, output_usage: impl Into<VkAccessFlagBits>) -> Self {
        self.output_usage = output_usage.into();
        self
    }

    /// Splices externally created images (e.g. swapchain images) into this
    /// sub pass' attachment list at `target_index`.
    pub fn set_prepared_targets(
        mut self,
        prepared_targets: &'a [Arc<Image>],
        target_index: usize,
        clear_color: impl Into<[f32; 4]>,
        clear_on_load: bool,
    ) -> Self {
        self.prepared_targets = Some((
            prepared_targets,
            target_index,
            clear_color.into(),
            clear_on_load,
        ));

        self
    }

    /// Adds a resolve target for multisampled rendering.
    pub fn add_resolve_targets(mut self, resolve_target: impl Into<ResolveTarget>) -> Self {
        self.resolve_targets.push(resolve_target.into());
        self
    }

    /// Creates all images and attachment descriptions and finalizes the
    /// sub pass.
    pub fn build(self, device: &Arc<Device>, queue: &Arc<Mutex<Queue>>) -> Result<SubPass> {
        let attachments = self.create_images(device, queue)?;

        Ok(SubPass {
            extent: VkExtent2D {
                width: self.width,
                height: self.height,
            },
            input_info: self.input_info,
            attachments,
            output_usage: self.output_usage,
        })
    }

    /// Builds the attachment infos: custom targets first, prepared targets
    /// spliced in at their configured index, resolve targets appended last.
    #[inline]
    fn create_images(
        &self,
        device: &Arc<Device>,
        queue: &Arc<Mutex<Queue>>,
    ) -> Result<Vec<AttachmentInfo>> {
        // clamp the requested sample count to what the device supports
        let checked_sample_count = device.max_supported_sample_count(self.sample_count);

        // a resolve target without multisampling is a configuration error
        if checked_sample_count == VK_SAMPLE_COUNT_1_BIT && !self.resolve_targets.is_empty() {
            panic!("Sample count 1 and using resolve target is not supported");
        }

        let mut attachment_infos = Vec::new();

        for target_info in self.target_infos.iter() {
            attachment_infos.push(target_info.to_attachment_info(
                device,
                queue,
                self.width,
                self.height,
                self.sample_count,
            )?);
        }

        // insert prepared images at their configured position
        if let Some((prepared_images, index, clear_color, clear_on_load)) = self.prepared_targets {
            let clear_operation = Self::clear_op(clear_on_load);

            attachment_infos.insert(
                index,
                AttachmentInfo {
                    images: prepared_images.to_vec(),
                    clear_value: VkClearValue::color(VkClearColorValue::float32(clear_color)),
                    layout: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                    description: VkAttachmentDescription::new(
                        0,
                        prepared_images[0].vk_format(),
                        VK_SAMPLE_COUNT_1_BIT,
                        clear_operation,
                        VK_ATTACHMENT_STORE_OP_STORE,
                        VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                        VK_ATTACHMENT_STORE_OP_DONT_CARE,
                        prepared_images[0].image_layout(),
                        prepared_images[0].image_layout(),
                    ),
                    usage: AttachmentInfoUsage::Output,
                },
            );
        }

        // append resolve targets, if any
        for resolve_target in self.resolve_targets.iter() {
            match resolve_target {
                ResolveTarget::CustomTarget(custom_target) => {
                    let mut attachment_info = custom_target.to_attachment_info(
                        device,
                        queue,
                        self.width,
                        self.height,
                        VK_SAMPLE_COUNT_1_BIT,
                    )?;

                    attachment_info.usage = AttachmentInfoUsage::Resolve;

                    attachment_infos.push(attachment_info);
                }
                ResolveTarget::PreparedTargets(prepared_targets) => {
                    attachment_infos.push(AttachmentInfo {
                        images: prepared_targets.clone(),
                        clear_value: VkClearValue::color(VkClearColorValue::float32([
                            0.0, 0.0, 0.0, 1.0,
                        ])),
                        layout: VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
                        description: VkAttachmentDescription::new(
                            0,
                            prepared_targets[0].vk_format(),
                            VK_SAMPLE_COUNT_1_BIT,
                            VK_ATTACHMENT_LOAD_OP_CLEAR,
                            VK_ATTACHMENT_STORE_OP_STORE,
                            VK_ATTACHMENT_LOAD_OP_DONT_CARE,
                            VK_ATTACHMENT_STORE_OP_DONT_CARE,
                            VK_IMAGE_LAYOUT_UNDEFINED,
                            prepared_targets[0].image_layout(),
                        ),
                        usage: AttachmentInfoUsage::Resolve,
                    });
                }
            }
        }

        Ok(attachment_infos)
    }

    /// Maps `clear_on_load` onto the matching attachment load op.
    #[inline]
    fn clear_op(clear_on_load: bool) -> VkAttachmentLoadOp {
        if clear_on_load {
            VK_ATTACHMENT_LOAD_OP_CLEAR
        } else {
            VK_ATTACHMENT_LOAD_OP_LOAD
        }
    }

    /// Maps `store_on_save` onto the matching attachment store op.
    #[inline]
    fn store_op(store_on_save: bool) -> VkAttachmentStoreOp {
        if store_on_save {
            VK_ATTACHMENT_STORE_OP_STORE
        } else {
            VK_ATTACHMENT_STORE_OP_DONT_CARE
        }
    }
}
/// Role of an attachment inside a sub pass, used to sort attachments into
/// the correct slot of the render pass description.
#[derive(Eq, PartialEq, Hash, Clone, Copy)]
pub enum AttachmentInfoUsage {
    Depth,
    Resolve,
    Output,
}
/// One attachment of a sub pass: the backing image(s) plus the Vulkan
/// description needed to reference it in a render pass.
pub struct AttachmentInfo {
    // one image per frame in flight; may hold fewer images than frames
    // (see `AttachmentInfo::image`, which clamps the index)
    images: Vec<Arc<Image>>,
    pub(crate) clear_value: VkClearValue,
    pub(crate) layout: VkImageLayout,
    pub(crate) description: VkAttachmentDescription,
    pub(crate) usage: AttachmentInfoUsage,
}
impl AttachmentInfo {
    /// Returns the image for `index`, clamping to the last image when the
    /// attachment holds fewer images than requested (e.g. a single image
    /// shared across all frames in flight).
    pub fn image(&self, index: usize) -> &Arc<Image> {
        debug_assert!(!self.images.is_empty());

        let clamped = index.min(self.images.len() - 1);
        &self.images[clamped]
    }
}
/// A fully built sub pass: its render area, optional input attachments,
/// the attachment list and the access mask its outputs are used with.
pub struct SubPass {
    extent: VkExtent2D,
    input_info: Option<InputAttachmentInfo>,
    attachments: Vec<AttachmentInfo>,
    output_usage: VkAccessFlagBits,
}
impl SubPass {
    /// Starts building a sub pass with the given render area; defaults to a
    /// single sample and color-attachment read/write output usage.
    pub fn builder<'a>(width: u32, height: u32) -> SubPassBuilder<'a> {
        SubPassBuilder {
            width,
            height,
            sample_count: VK_SAMPLE_COUNT_1_BIT,
            output_usage: VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT
                | VK_ACCESS_COLOR_ATTACHMENT_READ_BIT,
            input_info: None,
            target_infos: Vec::new(),
            prepared_targets: None,
            resolve_targets: Vec::new(),
        }
    }

    /// Input attachment description, if any.
    pub(crate) fn inputs(&self) -> Option<&InputAttachmentInfo> {
        self.input_info.as_ref()
    }

    /// Access mask the sub pass outputs are used with.
    pub(crate) fn output_usage(&self) -> VkAccessFlagBits {
        self.output_usage
    }

    /// Render area of this sub pass.
    pub fn extent(&self) -> VkExtent2D {
        self.extent
    }

    /// All attachments of this sub pass.
    pub fn attachments(&self) -> &[AttachmentInfo] {
        self.attachments.as_slice()
    }

    /// Largest per-attachment image count (0 when there are no attachments).
    pub fn max_images_per_attachment(&self) -> usize {
        self.attachments
            .iter()
            .map(|attachment| attachment.images.len())
            .max()
            .unwrap_or(0)
    }
}

View file

@ -0,0 +1,48 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Owned wrapper around a `VkRenderPass`; the handle is destroyed on drop.
#[derive(Debug)]
pub struct RenderPass {
    device: Arc<Device>,
    render_pass: VkRenderPass,
}
impl RenderPass {
    /// Creates a render pass from raw sub pass, attachment and dependency
    /// descriptions.
    ///
    /// # Errors
    /// Propagates the device's `vkCreateRenderPass` failure.
    pub fn new(
        device: Arc<Device>,
        sub_passes: &[VkSubpassDescription],
        attachments: &[VkAttachmentDescription],
        dependencies: &[VkSubpassDependency],
    ) -> Result<Arc<RenderPass>> {
        let create_info = VkRenderPassCreateInfo::new(
            VK_RENDERPASS_CREATE_NULL_BIT,
            attachments,
            sub_passes,
            dependencies,
        );

        let render_pass = device.create_render_pass(&create_info)?;

        Ok(Arc::new(RenderPass {
            render_pass,
            device,
        }))
    }
}
impl VulkanDevice for RenderPass {
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// generates the VkHandle<VkRenderPass> accessor impls for RenderPass
impl_vk_handle!(RenderPass, VkRenderPass, render_pass);

impl Drop for RenderPass {
    fn drop(&mut self) {
        // RenderPass keeps its Device alive via Arc, so the handle is
        // destroyed before the device can go away
        self.device.destroy_render_pass(self.render_pass);
    }
}

View file

@ -0,0 +1,182 @@
use crate::prelude::*;
use anyhow::Result;
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
/// Consuming builder over a `VkSamplerCreateInfo`; finished via
/// [`SamplerBuilder::build`], which deduplicates through the device's
/// sampler manager.
pub struct SamplerBuilder {
    create_info: VkSamplerCreateInfo,
}
impl SamplerBuilder {
pub fn min_mag_filter(mut self, min_filter: VkFilter, mag_filter: VkFilter) -> Self {
self.create_info.minFilter = min_filter;
self.create_info.magFilter = mag_filter;
self
}
pub fn map_map_mode(mut self, mode: VkSamplerMipmapMode) -> Self {
self.create_info.mipmapMode = mode;
self
}
pub fn address_mode(
mut self,
u: VkSamplerAddressMode,
v: VkSamplerAddressMode,
w: VkSamplerAddressMode,
) -> Self {
self.create_info.addressModeU = u;
self.create_info.addressModeV = v;
self.create_info.addressModeW = w;
self
}
pub fn min_load_bias(mut self, bias: f32) -> Self {
self.create_info.mipLodBias = bias;
self
}
pub fn anisotropy(mut self, anisotropy: f32) -> Self {
self.create_info.anisotropyEnable = VK_TRUE;
self.create_info.maxAnisotropy = anisotropy;
self
}
pub fn compare(mut self, compare_op: VkCompareOp) -> Self {
self.create_info.compareEnable = VK_TRUE;
self.create_info.compareOp = compare_op;
self
}
pub fn min_max_lod(mut self, min_lod: f32, max_lod: f32) -> Self {
self.create_info.minLod = min_lod;
self.create_info.maxLod = max_lod;
self
}
pub fn border_color(mut self, border_color: VkBorderColor) -> Self {
self.create_info.borderColor = border_color;
self
}
pub fn coordinates<T>(mut self, unnormalized_coordinates: T) -> Self
where
T: Into<VkBool32>,
{
self.create_info.unnormalizedCoordinates = unnormalized_coordinates.into();
self
}
pub fn build(self, device: &Device) -> Result<Arc<Sampler>> {
device.create_sampler_from_manager(self.create_info)
}
}
/// Owned `VkSampler` handle. Instances are cached/deduplicated by
/// `SamplerManager`, which is also responsible for destroying the handles.
#[derive(Debug)]
pub struct Sampler {
    sampler: VkSampler,
}
impl Sampler {
pub fn nearest_sampler() -> SamplerBuilder {
SamplerBuilder {
create_info: VkSamplerCreateInfo::new(
0,
VK_FILTER_NEAREST,
VK_FILTER_NEAREST,
VK_SAMPLER_MIPMAP_MODE_NEAREST,
VK_SAMPLER_ADDRESS_MODE_REPEAT,
VK_SAMPLER_ADDRESS_MODE_REPEAT,
VK_SAMPLER_ADDRESS_MODE_REPEAT,
0.0,
false,
1.0,
false,
VK_COMPARE_OP_NEVER,
0.0,
0.0,
VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
false,
),
}
}
pub fn pretty_sampler() -> SamplerBuilder {
SamplerBuilder {
create_info: VkSamplerCreateInfo::new(
0,
VK_FILTER_LINEAR,
VK_FILTER_LINEAR,
VK_SAMPLER_MIPMAP_MODE_LINEAR,
VK_SAMPLER_ADDRESS_MODE_REPEAT,
VK_SAMPLER_ADDRESS_MODE_REPEAT,
VK_SAMPLER_ADDRESS_MODE_REPEAT,
0.0,
true,
8.0,
false,
VK_COMPARE_OP_NEVER,
0.0,
0.0,
VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE,
false,
),
}
}
}
// generates the VkHandle<VkSampler> accessor impls for Sampler
impl_vk_handle!(Sampler, VkSampler, sampler);

/// Cache of samplers keyed by their full create-info, so identical sampler
/// configurations share a single `VkSampler` handle.
pub struct SamplerManager {
    samplers: HashMap<VkSamplerCreateInfo, Arc<Sampler>>,
}

// SAFETY: NOTE(review) — these assert that VkSamplerCreateInfo (used as a
// map key and possibly containing raw pointers such as pNext) is safe to
// move/share across threads. Cannot be verified from this file; confirm
// that no raw pointer in the key is ever dereferenced.
unsafe impl Sync for SamplerManager {}
unsafe impl Send for SamplerManager {}
impl SamplerManager {
    /// Creates an empty, externally synchronized manager.
    pub fn new() -> Mutex<Self> {
        Mutex::new(SamplerManager {
            samplers: HashMap::new(),
        })
    }

    /// Returns a cached sampler for `create_info`, creating and caching a
    /// new one on first use.
    ///
    /// # Errors
    /// Propagates `vkCreateSampler` failure for uncached configurations.
    pub fn create_sampler(
        &mut self,
        create_info: VkSamplerCreateInfo,
        device: &Device,
    ) -> Result<Arc<Sampler>> {
        // fast path: identical configuration already exists
        if let Some(existing) = self.samplers.get(&create_info) {
            return Ok(existing.clone());
        }

        let new_sampler = Arc::new(Sampler {
            sampler: device.create_sampler(&create_info)?,
        });

        self.samplers.insert(create_info, new_sampler.clone());

        Ok(new_sampler)
    }

    /// This will destroy all VkSampler handles, no matter if they are in use or not
    ///
    /// # Safety
    /// The caller must guarantee no destroyed handle is used afterwards.
    pub unsafe fn clear(&mut self, device: &Device) {
        for (_, sampler) in self.samplers.drain() {
            device.destroy_sampler(sampler.vk_handle());
        }
    }
}

View file

@ -0,0 +1,49 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
/// Owned wrapper around a `VkSemaphore`; the handle is destroyed on drop.
#[derive(Debug)]
pub struct Semaphore {
    device: Arc<Device>,
    semaphore: VkSemaphore,
}
impl Semaphore {
    /// Creates a new binary semaphore on `device`.
    ///
    /// # Errors
    /// Propagates `vkCreateSemaphore` failure.
    pub fn new(device: Arc<Device>) -> Result<Arc<Semaphore>> {
        let create_info = VkSemaphoreCreateInfo::new(VK_SEMAPHORE_CREATE_NULL_BIT);
        let semaphore = device.create_semaphore(&create_info)?;

        Ok(Arc::new(Semaphore { device, semaphore }))
    }
}
impl VulkanDevice for Semaphore {
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// generates the VkHandle<VkSemaphore> accessor impls for Semaphore
impl_vk_handle!(Semaphore, VkSemaphore, semaphore);

impl Drop for Semaphore {
    fn drop(&mut self) {
        self.device.destroy_semaphore(self.semaphore);
    }
}
use crate::{ffi::*, handle_ffi_result};
/// FFI entry point: creates a semaphore for the given device pointer.
///
/// NOTE(review): `Arc::from_raw` takes ownership of (and will eventually
/// release) the caller's strong reference; confirm the C side obtained this
/// pointer via a matching `Arc::into_raw` and does not reuse it afterwards —
/// otherwise `Arc::increment_strong_count` would be needed here.
#[no_mangle]
pub extern "C" fn create_semaphore(device: *const Device) -> *const Semaphore {
    let device = unsafe { Arc::from_raw(device) };

    handle_ffi_result!(Semaphore::new(device))
}
/// FFI entry point: releases the strong reference handed out for this
/// semaphore, destroying it once no other references remain.
#[no_mangle]
pub extern "C" fn destroy_semaphore(semaphore: *const Semaphore) {
    // reclaim ownership of the Arc; dropping it releases one strong count
    let reclaimed = unsafe { Arc::from_raw(semaphore) };
    drop(reclaimed);
}

View file

@ -0,0 +1,178 @@
use crate::prelude::*;
use anyhow::{Context, Result};
use std::fs::File;
use std::io::Read;
use std::sync::Arc;
// NOTE(review): this allow looks misplaced on an enum — cast_ptr_alignment
// concerns pointer casts; confirm which item it was meant for.
#[allow(clippy::cast_ptr_alignment)]
/// Pipeline stage a shader module is compiled for; selects the
/// `VkPipelineShaderStageCreateInfo` constructor in `pipeline_stage_info`.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ShaderType {
    // placeholder/default; `pipeline_stage_info` is unimplemented for it
    None,
    Vertex,
    Fragment,
    Geometry,
    TesselationControl,
    TesselationEvaluation,
    Compute,
    RayGeneration,
    ClosestHit,
    Miss,
    AnyHit,
    Intersection,
}
impl Default for ShaderType {
fn default() -> Self {
ShaderType::None
}
}
/// Owned `VkShaderModule` plus the stage it belongs to; destroyed on drop.
#[derive(Debug)]
pub struct ShaderModule {
    device: Arc<Device>,
    shader_module: VkShaderModule,
    shader_type: ShaderType,
}
impl ShaderModule {
pub fn new(
device: Arc<Device>,
path: &str,
shader_type: ShaderType,
) -> Result<Arc<ShaderModule>> {
let code = Self::shader_code(path)?;
Self::from_slice(device, code.as_slice(), shader_type)
}
pub fn from_slice(
device: Arc<Device>,
code: &[u8],
shader_type: ShaderType,
) -> Result<Arc<ShaderModule>> {
let shader_module_ci =
VkShaderModuleCreateInfo::new(VK_SHADER_MODULE_CREATE_NULL_BIT, code);
let shader_module = device.create_shader_module(&shader_module_ci)?;
Ok(Arc::new(ShaderModule {
device,
shader_module,
shader_type,
}))
}
fn shader_code(path: &str) -> Result<Vec<u8>> {
let mut file = File::open(path).with_context({
let path = path.to_string();
|| path
})?;
let mut code: Vec<u8> = Vec::new();
file.read_to_end(&mut code)?;
Ok(code)
}
pub fn shader_type(&self) -> ShaderType {
self.shader_type
}
pub fn pipeline_stage_info(&self) -> VkPipelineShaderStageCreateInfo {
match self.shader_type {
ShaderType::None => unimplemented!(),
ShaderType::Vertex => VkPipelineShaderStageCreateInfo::vertex(self.shader_module),
ShaderType::Geometry => VkPipelineShaderStageCreateInfo::geometry(self.shader_module),
ShaderType::TesselationControl => {
VkPipelineShaderStageCreateInfo::tesselation_control(self.shader_module)
}
ShaderType::TesselationEvaluation => {
VkPipelineShaderStageCreateInfo::tesselation_evaluation(self.shader_module)
}
ShaderType::Fragment => VkPipelineShaderStageCreateInfo::fragment(self.shader_module),
ShaderType::Compute => VkPipelineShaderStageCreateInfo::compute(self.shader_module),
ShaderType::AnyHit => VkPipelineShaderStageCreateInfo::any_hit(self.shader_module),
ShaderType::Intersection => {
VkPipelineShaderStageCreateInfo::intersection(self.shader_module)
}
ShaderType::ClosestHit => {
VkPipelineShaderStageCreateInfo::closest_hit(self.shader_module)
}
ShaderType::RayGeneration => {
VkPipelineShaderStageCreateInfo::ray_generation(self.shader_module)
}
ShaderType::Miss => VkPipelineShaderStageCreateInfo::miss(self.shader_module),
}
}
}
impl VulkanDevice for ShaderModule {
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// generates the VkHandle<VkShaderModule> accessor impls for ShaderModule
impl_vk_handle!(ShaderModule, VkShaderModule, shader_module);

impl Drop for ShaderModule {
    fn drop(&mut self) {
        self.device.destroy_shader_module(self.shader_module);
    }
}
/// Implemented per numeric type via the macro below; appends a constant with
/// the given specialization `id`.
pub trait AddSpecializationConstant<T> {
    fn add(&mut self, value: T, id: u32);
}

/// Accumulates shader specialization constants as raw bytes plus map
/// entries, exposed as a `VkSpecializationInfo`.
pub struct SpecializationConstants {
    // store data as raw bytes
    data: Vec<u8>,
    entries: Vec<VkSpecializationMapEntry>,
    // NOTE(review): presumably holds raw pointers into `data`/`entries`
    // (set in `new`); growing those Vecs in `add` may reallocate and leave
    // the pointers dangling — verify how VkSpecializationInfo stores them.
    info: VkSpecializationInfo,
}
impl SpecializationConstants {
    /// Creates an empty constant set and points the embedded
    /// `VkSpecializationInfo` at the (still empty) data/entry buffers.
    pub fn new() -> Self {
        let mut me = SpecializationConstants {
            data: Vec::new(),
            entries: Vec::new(),
            info: VkSpecializationInfo::empty(),
        };

        // NOTE(review): if set_data/set_map_entries capture raw pointers,
        // they are invalidated when `me` is moved out of this function or
        // when the Vecs reallocate in `add` — confirm and re-set before use.
        me.info.set_data(&me.data);
        me.info.set_map_entries(&me.entries);

        me
    }

    /// Borrow of the Vulkan-side specialization info.
    pub fn vk_handle(&self) -> &VkSpecializationInfo {
        &self.info
    }
}
// Implements AddSpecializationConstant for every numeric type that provides
// `to_ne_bytes` (native endianness, matching what the GPU driver reads):
// the value's bytes are appended to `data` and a map entry records its
// constant id, byte offset and size.
macro_rules! impl_add_specialization_constant {
    ($($type: ty),+) => {
        $(
            impl AddSpecializationConstant<$type> for SpecializationConstants {
                fn add(&mut self, value: $type, id: u32) {
                    let bytes = value.to_ne_bytes();

                    self.entries.push(VkSpecializationMapEntry {
                        constantID: id,
                        offset: self.data.len() as u32,
                        size: bytes.len(),
                    });

                    self.data.extend(&bytes);
                }
            }
        )+
    };
}

impl_add_specialization_constant!(f32, f64, u64, i64, u32, i32, u16, i16, u8, i8, usize, isize);

80
vulkan-rs/src/surface.rs Normal file
View file

@ -0,0 +1,80 @@
use crate::prelude::*;
use anyhow::Result;
use std::sync::Arc;
const UNORM_FORMATS: [VkFormat; 2] = [VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_B8G8R8A8_UNORM];
/// Wrapper around a `VkSurfaceKHR`.
///
/// `external_source` marks surfaces created outside this wrapper; those are
/// not destroyed on drop (see the Drop impl).
#[derive(Debug)]
pub struct Surface {
    external_source: bool,
    instance: Arc<Instance>,
    surface: VkSurfaceKHR,
}
impl Surface {
pub fn from_vk_surface(surface: VkSurfaceKHR, instance: &Arc<Instance>) -> Arc<Surface> {
Arc::new(Surface {
external_source: true,
instance: instance.clone(),
surface,
})
}
pub fn capabilities(&self, device: &Arc<Device>) -> Result<VkSurfaceCapabilitiesKHR> {
self.instance.physical_device_surface_capabilities(
device.physical_device().vk_handle(),
self.surface,
)
}
pub fn format_colorspace(
&self,
device: &Arc<Device>,
prefered_format: VkFormat,
) -> Result<(VkFormat, VkColorSpaceKHR)> {
let surface_formats = self
.instance
.physical_device_surface_formats(device.physical_device().vk_handle(), self.surface)?;
// if there is a single undefined format, assume the preferred mode
if (surface_formats.len() == 1) && (surface_formats[0].format == VK_FORMAT_UNDEFINED) {
return Ok((prefered_format, VK_COLOR_SPACE_SRGB_NONLINEAR_KHR));
}
// look for prefered_format
for surface_format in &surface_formats {
if surface_format.format == prefered_format {
return Ok((surface_format.format, surface_format.colorSpace));
}
}
// prefer UNORM formats
for surface_format in &surface_formats {
for unorm_format in &UNORM_FORMATS {
if *unorm_format == surface_format.format {
return Ok((surface_format.format, surface_format.colorSpace));
}
}
}
// if nothing was found, take the first one
Ok((surface_formats[0].format, surface_formats[0].colorSpace))
}
pub fn present_modes(&self, device: &Arc<Device>) -> Result<Vec<VkPresentModeKHR>> {
self.instance
.physical_device_present_modes(device.physical_device().vk_handle(), self.surface)
}
}
// generates the VkHandle<VkSurfaceKHR> accessor impls for Surface
impl_vk_handle!(Surface, VkSurfaceKHR, surface);

impl Drop for Surface {
    fn drop(&mut self) {
        // externally supplied surfaces are owned by their creator
        if !self.external_source {
            self.instance.destroy_surface(self.surface)
        }
    }
}

319
vulkan-rs/src/swapchain.rs Normal file
View file

@ -0,0 +1,319 @@
use crate::prelude::*;
use anyhow::Result;
use std::cmp;
use std::sync::{
atomic::{AtomicU32, Ordering::SeqCst},
Arc, Mutex,
};
/// Owned `VkSwapchainKHR` plus the state needed to recreate it on resize.
///
/// Extent and current image index use atomics so read accessors take no
/// lock; the create-info and handle live behind Mutexes because `recreate`
/// replaces them.
#[derive(Debug)]
pub struct Swapchain {
    width: AtomicU32,
    height: AtomicU32,
    index: AtomicU32,
    device: Arc<Device>,
    surface: Arc<Surface>,
    create_info: Mutex<VkSwapchainCreateInfoKHR>,
    swapchain: Mutex<VkSwapchainKHR>,
    usage: VkImageUsageFlagBits,
    // true when wrapping a swapchain created elsewhere; skip destroy on drop
    raw: bool,
}
impl Swapchain {
    /// Creates a swapchain for `surface`.
    ///
    /// The image count is clamped to the surface's supported range; when
    /// `vsync` is false, MAILBOX is preferred over IMMEDIATE over FIFO.
    ///
    /// # Errors
    /// Fails when the surface reports no fixed extent or a Vulkan call fails.
    pub fn new(
        device: Arc<Device>,
        surface: &Arc<Surface>,
        vsync: bool,
        image_count: u32,
        image_usage: impl Into<VkImageUsageFlagBits>,
        prefered_format: VkFormat,
        array_layers: u32,
    ) -> Result<Arc<Swapchain>> {
        let surface_caps = surface.capabilities(&device)?;

        // NOTE(review): only the width is checked here while `recreate`
        // checks width AND height — confirm whether height should be
        // validated here too.
        let extent = if surface_caps.currentExtent.width == u32::max_value() {
            return Err(anyhow::Error::msg("Surface has no extent"));
        } else {
            VkExtent2D {
                width: surface_caps.currentExtent.width,
                height: surface_caps.currentExtent.height,
            }
        };

        // FIFO is always available; upgrade when vsync is not requested
        let mut present_mode = VK_PRESENT_MODE_FIFO_KHR;

        if !vsync {
            for present_mode_iter in surface.present_modes(&device)? {
                if present_mode_iter == VK_PRESENT_MODE_MAILBOX_KHR {
                    present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
                    break;
                } else if present_mode_iter == VK_PRESENT_MODE_IMMEDIATE_KHR {
                    present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
                }
            }
        }

        // maxImageCount < minImageCount signals "no upper limit"
        let swapchain_image_count = if surface_caps.maxImageCount < surface_caps.minImageCount {
            cmp::max(image_count, surface_caps.minImageCount)
        } else {
            cmp::max(
                cmp::min(image_count, surface_caps.maxImageCount),
                surface_caps.minImageCount,
            )
        };

        // prefer the identity transform when the surface supports it
        let pretransform =
            if (surface_caps.supportedTransforms & VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR) != 0 {
                VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR.into()
            } else {
                surface_caps.currentTransform
            };

        let (format, colorspace) = surface.format_colorspace(&device, prefered_format)?;

        // NOTE(review): the final argument (presumably `clipped`) is fed
        // with the shaderClipDistance feature flag — looks unrelated;
        // verify against VkSwapchainCreateInfoKHR::new's parameter list.
        let swapchain_ci = VkSwapchainCreateInfoKHR::new(
            0,
            surface.vk_handle(),
            swapchain_image_count,
            format,
            colorspace,
            extent,
            array_layers,
            image_usage,
            VK_SHARING_MODE_EXCLUSIVE,
            &[],
            pretransform,
            VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
            present_mode,
            device.physical_device().features().shaderClipDistance,
        );

        let swapchain = device.create_swapchain(&swapchain_ci)?;

        Ok(Arc::new(Swapchain {
            width: AtomicU32::new(extent.width),
            height: AtomicU32::new(extent.height),
            usage: swapchain_ci.imageUsage,
            index: AtomicU32::new(0),
            device,
            surface: surface.clone(),
            create_info: Mutex::new(swapchain_ci),
            swapchain: Mutex::new(swapchain),
            raw: false,
        }))
    }

    /// Creates the swapchain handle from a finished create-info and wraps it.
    pub fn from_ci(
        device: Arc<Device>,
        swapchain_ci: &VkSwapchainCreateInfoKHR,
    ) -> Result<Arc<Self>> {
        Self::from_raw(
            device.clone(),
            swapchain_ci,
            device.create_swapchain(swapchain_ci)?,
        )
    }

    /// Wraps an externally created swapchain handle; `raw` is set so drop
    /// will NOT destroy the handle.
    pub fn from_raw(
        device: Arc<Device>,
        swapchain_ci: &VkSwapchainCreateInfoKHR,
        swapchain: VkSwapchainKHR,
    ) -> Result<Arc<Self>> {
        Ok(Arc::new(Swapchain {
            width: AtomicU32::new(swapchain_ci.imageExtent.width),
            height: AtomicU32::new(swapchain_ci.imageExtent.height),
            usage: swapchain_ci.imageUsage,
            index: AtomicU32::new(0),
            surface: Surface::from_vk_surface(
                swapchain_ci.surface,
                device.physical_device().instance(),
            ),
            device,
            create_info: Mutex::new(swapchain_ci.clone()),
            swapchain: Mutex::new(swapchain),
            raw: true,
        }))
    }

    /// Recreates the swapchain after e.g. a window resize, reusing the
    /// stored create-info with the new surface extent.
    ///
    /// A 0-sized surface (minimized window) is a silent no-op.
    ///
    /// # Errors
    /// Fails when the surface has no extent or a Vulkan call fails.
    pub fn recreate(&self) -> Result<()> {
        // wait for the device to get idle
        self.device.wait_idle()?;

        let surface_caps = self.surface.capabilities(&self.device)?;

        let extent = if surface_caps.currentExtent.width == u32::max_value()
            || surface_caps.currentExtent.height == u32::max_value()
        {
            return Err(anyhow::Error::msg("Surface has no extent"));
        } else if surface_caps.currentExtent.width == 0 || surface_caps.currentExtent.height == 0 {
            // don't recreate swapchain
            return Ok(());
        } else {
            VkExtent2D {
                width: surface_caps.currentExtent.width,
                height: surface_caps.currentExtent.height,
            }
        };

        // hold the create-info lock across the whole swap to keep
        // create_info, swapchain handle and extent consistent
        let mut swapchain_ci = self.create_info.lock().unwrap();

        swapchain_ci.imageExtent = extent;
        swapchain_ci.set_old_swapchain(*self.swapchain.lock().unwrap());

        let swapchain = self.device.create_swapchain(&swapchain_ci)?;

        // destroy the old swapchain
        self.destroy();

        // replace swapchain
        *self.swapchain.lock().unwrap() = swapchain;

        // set new surface size
        self.width.store(extent.width, SeqCst);
        self.height.store(extent.height, SeqCst);

        Ok(())
    }

    /// Acquires the next image; on success the internal image index is
    /// updated so `current_index` reflects it.
    pub fn acquire_next_image(
        &self,
        time_out: u64,
        present_complete_semaphore: Option<&Arc<Semaphore>>,
        fence: Option<&Arc<Fence>>,
    ) -> Result<OutOfDate<u32>> {
        let res = self.device.acquire_next_image(
            *self.swapchain.lock().unwrap(),
            time_out,
            present_complete_semaphore.map(|sem| sem.vk_handle()),
            fence.map(|fence| fence.vk_handle()),
        );

        if let Ok(r) = &res {
            if let OutOfDate::Ok(i) = r {
                self.index.store(*i, SeqCst);
            }
        }

        res
    }

    /// set current
    /// only use when externally acquired next index !!!
    pub unsafe fn set_image_index(&self, index: u32) {
        self.index.store(index, SeqCst);
    }

    /// Index stored by the most recent acquire (or `set_image_index`).
    pub fn current_index(&self) -> u32 {
        self.index.load(SeqCst)
    }

    /// Raw swapchain images as reported by the device.
    pub fn vk_images(&self) -> Result<Vec<VkImage>> {
        self.device
            .swapchain_images(*self.swapchain.lock().unwrap())
    }

    /// Wraps the raw swapchain images into `Image` objects with a nearest
    /// sampler attached, after validating the format/usage combination.
    pub fn wrap_images(
        &self,
        images: &[VkImage],
        queue: &Arc<Mutex<Queue>>,
        assume_layout: bool,
    ) -> Result<Vec<Arc<Image>>> {
        let format = self.format();
        let tiling = VK_IMAGE_TILING_OPTIMAL;

        if !Image::check_configuration(&self.device, tiling, format, self.usage) {
            return Err(anyhow::Error::msg(format!(
                "Image configuration not allowed (tiling: {:?}, format: {:?}, usage: {:?})",
                tiling, format, self.usage,
            )));
        }

        let mut swapchain_images = Vec::new();

        for image in images {
            swapchain_images.push(
                Image::from_preinitialized(
                    *image,
                    format,
                    self.width(),
                    self.height(),
                    VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
                    self.usage,
                    assume_layout,
                )
                .attach_sampler(Sampler::nearest_sampler().build(&self.device)?)
                .build(&self.device, queue)?,
            );
        }

        Ok(swapchain_images)
    }

    /// Current swapchain width in pixels.
    pub fn width(&self) -> u32 {
        self.width.load(SeqCst)
    }

    /// Current swapchain height in pixels.
    pub fn height(&self) -> u32 {
        self.height.load(SeqCst)
    }

    /// Image format of the swapchain.
    pub fn format(&self) -> VkFormat {
        self.create_info.lock().unwrap().imageFormat
    }

    // destroys the currently stored handle (used by recreate and Drop)
    #[inline]
    fn destroy(&self) {
        self.device
            .destroy_swapchain(*self.swapchain.lock().unwrap())
    }
}
impl VulkanDevice for Swapchain {
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}

// VkHandle is implemented by hand (instead of impl_vk_handle!) because the
// handle lives behind a Mutex; the four impls cover owned/borrowed and
// Arc-wrapped receivers.
impl VkHandle<VkSwapchainKHR> for Swapchain {
    fn vk_handle(&self) -> VkSwapchainKHR {
        *self.swapchain.lock().unwrap()
    }
}

impl<'a> VkHandle<VkSwapchainKHR> for &'a Swapchain {
    fn vk_handle(&self) -> VkSwapchainKHR {
        *self.swapchain.lock().unwrap()
    }
}

impl VkHandle<VkSwapchainKHR> for Arc<Swapchain> {
    fn vk_handle(&self) -> VkSwapchainKHR {
        *self.swapchain.lock().unwrap()
    }
}

impl<'a> VkHandle<VkSwapchainKHR> for &'a Arc<Swapchain> {
    fn vk_handle(&self) -> VkSwapchainKHR {
        *self.swapchain.lock().unwrap()
    }
}

impl Drop for Swapchain {
    fn drop(&mut self) {
        // externally owned (raw) swapchains are destroyed by their creator
        if !self.raw {
            self.destroy();
        }
    }
}

11
vulkan-sys/Cargo.toml Normal file
View file

@ -0,0 +1,11 @@
[package]
name = "vulkan-sys"
version = "0.1.0"
authors = ["hodasemi <michaelh.95@t-online.de>"]
edition = "2021"
[dependencies]
library_loader = { path = "../library_loader" }
paste = "1.0.11"
shared_library = "0.1.9"
anyhow = { version = "1.0.68", features = ["backtrace"] }

View file

@ -0,0 +1,67 @@
use core::slice::{Iter, IterMut};
use std::ops::{Index, IndexMut};
use std::{fmt, fmt::Debug};
/// View over a mapped memory range, borrowed as a typed slice.
///
/// An optional unmap callback runs on drop (see the Drop impl), so the
/// mapping's lifetime is tied to this value.
pub struct VkMappedMemory<'a, T>
where
    T: Clone,
{
    data: &'a mut [T],
    unmap: Option<Box<dyn Fn()>>,
}
impl<'a, T: Clone> VkMappedMemory<'a, T> {
pub fn new(data: &'a mut [T]) -> VkMappedMemory<'a, T> {
VkMappedMemory { data, unmap: None }
}
pub fn set_unmap<F>(&mut self, f: F)
where
F: Fn() + 'static,
{
self.unmap = Some(Box::new(f));
}
pub fn copy(&mut self, data: &[T]) {
self.data.clone_from_slice(data);
}
pub fn iter(&self) -> Iter<'_, T> {
self.data.iter()
}
pub fn iter_mut(&mut self) -> IterMut<'_, T> {
self.data.iter_mut()
}
}
impl<'a, T: Clone> Index<usize> for VkMappedMemory<'a, T> {
    type Output = T;

    fn index(&self, index: usize) -> &T {
        &self.data[index]
    }
}

impl<'a, T: Clone> IndexMut<usize> for VkMappedMemory<'a, T> {
    fn index_mut(&mut self, index: usize) -> &mut T {
        &mut self.data[index]
    }
}

impl<'a, T: Clone> Drop for VkMappedMemory<'a, T> {
    fn drop(&mut self) {
        // run the registered unmap callback, if any, when the view dies
        if let Some(unmap) = &self.unmap {
            unmap();
        }
    }
}

impl<'a, T: Clone + Debug> fmt::Debug for VkMappedMemory<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // the unmap closure is not Debug; only the data is shown
        f.debug_struct("VkMappedMemory")
            .field("data", &self.data)
            .finish()
    }
}

View file

@ -0,0 +1,5 @@
pub mod mappedmemory;
pub mod names;
pub mod string;
pub mod prelude;

View file

@ -0,0 +1,32 @@
use crate::prelude::*;
use std::os::raw::c_char;
use std::slice::Iter;
/// Owns a list of `VkString`s together with a parallel array of raw
/// C-string pointers, ready to hand to Vulkan create-info structs.
pub struct VkNames {
    // keeps the CStrings alive; `c_names` points into their heap buffers
    r_names: Vec<VkString>,
    c_names: Vec<*const c_char>,
}
impl VkNames {
    /// Clones `names` and captures one raw pointer per entry.
    ///
    /// The pointers reference the heap buffers of the cloned `VkString`s,
    /// which stay valid while this struct (and its Vec) is alive and
    /// unmodified — moving the struct itself is fine.
    pub fn new(names: &[VkString]) -> Self {
        let local: Vec<VkString> = names.to_vec();

        VkNames {
            c_names: local.iter().map(|s| s.as_ptr()).collect(),
            r_names: local,
        }
    }

    /// Number of names.
    pub fn len(&self) -> usize {
        self.r_names.len()
    }

    /// True when no names are stored (companion to `len`, per
    /// clippy::len_without_is_empty).
    pub fn is_empty(&self) -> bool {
        self.r_names.is_empty()
    }

    /// Iterates the owned `VkString`s.
    pub fn iter(&self) -> Iter<'_, VkString> {
        self.r_names.iter()
    }

    /// Raw pointer array for FFI use; valid while `self` is alive.
    pub fn c_names(&self) -> &Vec<*const c_char> {
        &self.c_names
    }
}

View file

@ -0,0 +1,3 @@
pub use super::mappedmemory::*;
pub use super::names::VkNames;
pub use super::string::VkString;

View file

@ -0,0 +1,70 @@
use std::ffi::{CString, CStr};
use std::fmt;
use std::ops::Deref;
use std::os::raw::c_char;
use std::str::Utf8Error;
/// A string stored in both Rust (`String`) and C (`CString`) form, so a
/// stable `*const c_char` can be handed to Vulkan while the Rust side stays
/// cheap to read.
#[derive(Clone, Eq, Hash)]
pub struct VkString {
    rust_text: String,
    cstring_text: CString,
}

impl VkString {
    /// Creates a `VkString` from `text`.
    ///
    /// # Panics
    /// Panics when `text` contains an interior NUL byte (cannot be
    /// represented as a `CString`).
    pub fn new(text: &str) -> VkString {
        let cstring_text = CString::new(text)
            .unwrap_or_else(|_| panic!("could not create CString ({})", text));

        VkString {
            rust_text: text.to_string(),
            cstring_text,
        }
    }

    /// NUL-terminated pointer for FFI; valid while `self` is alive.
    pub fn as_ptr(&self) -> *const c_char {
        self.cstring_text.as_ptr()
    }

    /// Borrowed view of the Rust-side text.
    pub fn as_str(&self) -> &str {
        self.rust_text.as_str()
    }

    /// Owned copy of the text.
    pub fn as_string(&self) -> String {
        self.rust_text.to_string()
    }

    /// Consumes `self`, returning the Rust-side text without cloning.
    pub fn into_string(self) -> String {
        self.rust_text
    }
}

impl TryFrom<*const c_char> for VkString {
    type Error = Utf8Error;

    /// Reads a NUL-terminated C string; fails on invalid UTF-8.
    fn try_from(value: *const c_char) -> Result<Self, Self::Error> {
        let text = unsafe { CStr::from_ptr(value) }.to_str()?;
        Ok(VkString::new(text))
    }
}

impl Deref for VkString {
    type Target = String;

    fn deref(&self) -> &String {
        &self.rust_text
    }
}

// equality is defined on the Rust text only; the CString is always derived
// from it, so this stays consistent with the derived Hash
impl PartialEq for VkString {
    fn eq(&self, other: &VkString) -> bool {
        self.rust_text.eq(&other.rust_text)
    }
}

impl fmt::Debug for VkString {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "VkString {{ {} }}", self.rust_text)
    }
}

View file

@ -0,0 +1,39 @@
pub use VkAccessFlags::*;

/// Mirror of Vulkan's `VkAccessFlagBits` values (memory access types used
/// in barriers and subpass dependencies); bit values follow the Vulkan
/// specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkAccessFlags {
    VK_ACCESS_INDIRECT_COMMAND_READ_BIT = 0x0000_0001,
    VK_ACCESS_INDEX_READ_BIT = 0x0000_0002,
    VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT = 0x0000_0004,
    VK_ACCESS_UNIFORM_READ_BIT = 0x0000_0008,
    VK_ACCESS_INPUT_ATTACHMENT_READ_BIT = 0x0000_0010,
    VK_ACCESS_SHADER_READ_BIT = 0x0000_0020,
    VK_ACCESS_SHADER_WRITE_BIT = 0x0000_0040,
    VK_ACCESS_COLOR_ATTACHMENT_READ_BIT = 0x0000_0080,
    VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT = 0x0000_0100,
    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT = 0x0000_0200,
    VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT = 0x0000_0400,
    VK_ACCESS_TRANSFER_READ_BIT = 0x0000_0800,
    VK_ACCESS_TRANSFER_WRITE_BIT = 0x0000_1000,
    VK_ACCESS_HOST_READ_BIT = 0x0000_2000,
    VK_ACCESS_HOST_WRITE_BIT = 0x0000_4000,
    VK_ACCESS_MEMORY_READ_BIT = 0x0000_8000,
    VK_ACCESS_MEMORY_WRITE_BIT = 0x0001_0000,
    VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT = 0x0200_0000,
    VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT = 0x0400_0000,
    VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT = 0x0800_0000,
    VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT = 0x0010_0000,
    VK_ACCESS_COMMAND_PROCESS_READ_BIT_NVX = 0x0002_0000,
    VK_ACCESS_COMMAND_PROCESS_WRITE_BIT_NVX = 0x0004_0000,
    VK_ACCESS_COLOR_ATTACHMENT_READ_NONCOHERENT_BIT_EXT = 0x0008_0000,
    VK_ACCESS_SHADING_RATE_IMAGE_READ_BIT_NV = 0x0080_0000,
    VK_ACCESS_ACCELERATION_STRUCTURE_READ_BIT_KHR = 0x0020_0000,
    VK_ACCESS_ACCELERATION_STRUCTURE_WRITE_BIT_KHR = 0x0040_0000,
    VK_ACCESS_FRAGMENT_DENSITY_MAP_READ_BIT_EXT = 0x0100_0000,
}

/// Combined bitmask of `VkAccessFlags` values.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
pub struct VkAccessFlagBits(u32);

// project macro: presumably generates bitwise ops / conversions between the
// enum and the bitmask newtype
SetupVkFlags!(VkAccessFlags, VkAccessFlagBits);

View file

@ -0,0 +1,3 @@
pub mod rasterizationorderamd;
pub mod prelude;

View file

@ -0,0 +1 @@
pub use super::rasterizationorderamd::*;

View file

@ -0,0 +1,9 @@
pub use VkRasterizationOrderAMD::*;

/// Mirror of the `VK_AMD_rasterization_order` extension enum; values per
/// the Vulkan specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkRasterizationOrderAMD {
    VK_RASTERIZATION_ORDER_STRICT_AMD = 0,
    VK_RASTERIZATION_ORDER_RELAXED_AMD = 1,
    VK_RASTERIZATION_ORDER_MAX_ENUM_AMD = 0x7FFF_FFFF,
}

View file

@ -0,0 +1,15 @@
pub use VkAndroidSurfaceCreateFlagsKHR::*;

/// Reserved flags for Android surface creation (currently no bits defined
/// by the Vulkan specification; only the zero value exists).
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkAndroidSurfaceCreateFlagsKHR {
    VK_ANDROID_SURFACE_CREATE_NULL_BIT = 0,
}

/// Combined bitmask of `VkAndroidSurfaceCreateFlagsKHR` values.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
pub struct VkAndroidSurfaceCreateFlagBitsKHR(u32);

SetupVkFlags!(
    VkAndroidSurfaceCreateFlagsKHR,
    VkAndroidSurfaceCreateFlagBitsKHR
);

View file

@ -0,0 +1,15 @@
pub use VkAttachmentDescriptionFlags::*;

/// Mirror of Vulkan's `VkAttachmentDescriptionFlagBits`; values per the
/// Vulkan specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkAttachmentDescriptionFlags {
    VK_ATTACHMENT_DESCRIPTION_MAY_ALIAS_BIT = 0x0000_0001,
}

/// Combined bitmask of `VkAttachmentDescriptionFlags` values.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
pub struct VkAttachmentDescriptionFlagBits(u32);

SetupVkFlags!(
    VkAttachmentDescriptionFlags,
    VkAttachmentDescriptionFlagBits
);

View file

@ -0,0 +1,9 @@
pub use VkAttachmentLoadOp::*;

/// Mirror of Vulkan's `VkAttachmentLoadOp` (what happens to attachment
/// contents at the start of a render pass); values per the specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkAttachmentLoadOp {
    VK_ATTACHMENT_LOAD_OP_LOAD = 0,
    VK_ATTACHMENT_LOAD_OP_CLEAR = 1,
    VK_ATTACHMENT_LOAD_OP_DONT_CARE = 2,
}

View file

@ -0,0 +1,8 @@
pub use VkAttachmentStoreOp::*;

/// Mirror of Vulkan's `VkAttachmentStoreOp` (what happens to attachment
/// contents at the end of a render pass); values per the specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkAttachmentStoreOp {
    VK_ATTACHMENT_STORE_OP_STORE = 0,
    VK_ATTACHMENT_STORE_OP_DONT_CARE = 1,
}

View file

@ -0,0 +1,25 @@
pub use VkBlendFactor::*;

/// Mirror of Vulkan's `VkBlendFactor`; values per the Vulkan specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkBlendFactor {
    VK_BLEND_FACTOR_ZERO = 0,
    VK_BLEND_FACTOR_ONE = 1,
    VK_BLEND_FACTOR_SRC_COLOR = 2,
    VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR = 3,
    VK_BLEND_FACTOR_DST_COLOR = 4,
    VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR = 5,
    VK_BLEND_FACTOR_SRC_ALPHA = 6,
    VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA = 7,
    VK_BLEND_FACTOR_DST_ALPHA = 8,
    VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA = 9,
    VK_BLEND_FACTOR_CONSTANT_COLOR = 10,
    VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR = 11,
    VK_BLEND_FACTOR_CONSTANT_ALPHA = 12,
    VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA = 13,
    VK_BLEND_FACTOR_SRC_ALPHA_SATURATE = 14,
    VK_BLEND_FACTOR_SRC1_COLOR = 15,
    VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR = 16,
    VK_BLEND_FACTOR_SRC1_ALPHA = 17,
    VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA = 18,
}

View file

@ -0,0 +1,11 @@
pub use VkBlendOp::*;

/// Mirror of Vulkan's `VkBlendOp`; values per the Vulkan specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkBlendOp {
    VK_BLEND_OP_ADD = 0,
    VK_BLEND_OP_SUBTRACT = 1,
    VK_BLEND_OP_REVERSE_SUBTRACT = 2,
    VK_BLEND_OP_MIN = 3,
    VK_BLEND_OP_MAX = 4,
}

View file

@ -0,0 +1,33 @@
pub use VkBool32::*;

/// Vulkan's 32-bit boolean: exactly the two values `VK_FALSE` (0) and
/// `VK_TRUE` (1), matching the C API's `VkBool32` typedef.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum VkBool32 {
    VK_FALSE = 0,
    VK_TRUE = 1,
}

impl From<bool> for VkBool32 {
    fn from(b: bool) -> VkBool32 {
        if b {
            VK_TRUE
        } else {
            VK_FALSE
        }
    }
}

// Implementing `From<VkBool32> for bool` (instead of the previous
// hand-written `Into<bool> for VkBool32`) is the idiomatic direction: the
// std blanket impl derives the matching `Into`, so every existing `.into()`
// call site keeps working, and `bool::from(..)` becomes available too.
impl From<VkBool32> for bool {
    fn from(b: VkBool32) -> bool {
        match b {
            VK_FALSE => false,
            VK_TRUE => true,
        }
    }
}

impl Default for VkBool32 {
    fn default() -> Self {
        VK_FALSE
    }
}

View file

@ -0,0 +1,12 @@
pub use VkBorderColor::*;

/// Mirror of Vulkan's `VkBorderColor` (sampler border color for
/// clamp-to-border addressing); values per the specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum VkBorderColor {
    VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK = 0,
    VK_BORDER_COLOR_INT_TRANSPARENT_BLACK = 1,
    VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK = 2,
    VK_BORDER_COLOR_INT_OPAQUE_BLACK = 3,
    VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE = 4,
    VK_BORDER_COLOR_INT_OPAQUE_WHITE = 5,
}

View file

@ -0,0 +1,17 @@
pub use VkBufferCreateFlags::*;

/// Mirror of Vulkan's `VkBufferCreateFlagBits`; values per the Vulkan
/// specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkBufferCreateFlags {
    VK_BUFFER_CREATE_SPARSE_BINDING_BIT = 0x0000_0001,
    VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT = 0x0000_0002,
    VK_BUFFER_CREATE_SPARSE_ALIASED_BIT = 0x0000_0004,
    VK_BUFFER_CREATE_PROTECTED_BIT = 0x0000_0008,
    VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT_EXT = 0x0000_0010,
    VK_BUFFER_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFF_FFFF,
}

/// Combined bitmask of `VkBufferCreateFlags` values.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
pub struct VkBufferCreateFlagBits(u32);

SetupVkFlags!(VkBufferCreateFlags, VkBufferCreateFlagBits);

View file

@ -0,0 +1,30 @@
pub use VkBufferUsageFlags::*;

/// Mirror of Vulkan's `VkBufferUsageFlagBits`; values per the Vulkan
/// specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkBufferUsageFlags {
    VK_BUFFER_USAGE_TRANSFER_SRC_BIT = 0x0000_0001,
    VK_BUFFER_USAGE_TRANSFER_DST_BIT = 0x0000_0002,
    VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT = 0x0000_0004,
    VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT = 0x0000_0008,
    VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT = 0x0000_0010,
    VK_BUFFER_USAGE_STORAGE_BUFFER_BIT = 0x0000_0020,
    VK_BUFFER_USAGE_INDEX_BUFFER_BIT = 0x0000_0040,
    VK_BUFFER_USAGE_VERTEX_BUFFER_BIT = 0x0000_0080,
    VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT = 0x0000_0100,
    VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_BUFFER_BIT_EXT = 0x0000_0800,
    VK_BUFFER_USAGE_TRANSFORM_FEEDBACK_COUNTER_BUFFER_BIT_EXT = 0x0000_1000,
    VK_BUFFER_USAGE_CONDITIONAL_RENDERING_BIT_EXT = 0x0000_0200,
    VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR = 0x0000_0400,
    VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT = 0x0002_0000,
    // Provided by VK_KHR_acceleration_structure
    VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_BUILD_INPUT_READ_ONLY_BIT_KHR = 0x0008_0000,
    // Provided by VK_KHR_acceleration_structure
    VK_BUFFER_USAGE_ACCELERATION_STRUCTURE_STORAGE_BIT_KHR = 0x0010_0000,
    // alias of SHADER_BINDING_TABLE, omitted because Rust enums forbid
    // duplicate discriminants:
    // VK_BUFFER_USAGE_RAY_TRACING_BIT_NV = VK_BUFFER_USAGE_SHADER_BINDING_TABLE_BIT_KHR,
}

/// Combined bitmask of `VkBufferUsageFlags` values.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Hash, Default)]
pub struct VkBufferUsageFlagBits(u32);

SetupVkFlags!(VkBufferUsageFlags, VkBufferUsageFlagBits);

View file

@ -0,0 +1,12 @@
pub use VkBufferViewCreateFlags::*;

/// Reserved buffer-view creation flags (no bits currently defined by the
/// Vulkan specification; only the zero value exists).
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkBufferViewCreateFlags {
    VK_BUFFER_VIEW_CREATE_NULL_BIT = 0,
}

/// Combined bitmask of `VkBufferViewCreateFlags` values.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
pub struct VkBufferViewCreateFlagBits(u32);

SetupVkFlags!(VkBufferViewCreateFlags, VkBufferViewCreateFlagBits);

View file

@ -0,0 +1,15 @@
pub use VkColorComponentFlags::*;

/// Mirror of Vulkan's `VkColorComponentFlagBits` (color write mask bits);
/// values per the Vulkan specification.
#[repr(u32)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VkColorComponentFlags {
    VK_COLOR_COMPONENT_R_BIT = 0x0000_0001,
    VK_COLOR_COMPONENT_G_BIT = 0x0000_0002,
    VK_COLOR_COMPONENT_B_BIT = 0x0000_0004,
    VK_COLOR_COMPONENT_A_BIT = 0x0000_0008,
}

/// Combined bitmask of `VkColorComponentFlags` values.
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Hash)]
pub struct VkColorComponentFlagBits(u32);

SetupVkFlags!(VkColorComponentFlags, VkColorComponentFlagBits);

Some files were not shown because too many files have changed in this diff Show more