Compare commits

...

4 commits

Author SHA1 Message Date
janis 294ad3dbbd
buffers 2026-04-02 14:09:42 +02:00
janis f4a20fde44
image? alloc? bruh.. 2026-04-02 13:32:49 +02:00
janis f9371f8b20
more fence/semaphore refactor 2026-03-31 23:07:47 +02:00
janis 9620f92f0b
fence refactor 2026-03-31 21:40:15 +02:00
13 changed files with 924 additions and 481 deletions

View file

@ -41,6 +41,7 @@ thread_local = "1.1.8"
ash = "0.38.0"
ash-window = "0.13.0"
vk-mem = "0.5.0"
gpu-allocator = { git = "https://github.com/janis-bhm/gpu-allocator", branch = "main" }
vk-sync = "0.1.6"
arrayvec = "0.7.6"

View file

@ -23,6 +23,7 @@ tracing = { workspace = true }
ash = { workspace = true }
ash-window = { workspace = true }
vk-mem = { workspace = true }
gpu-allocator = { workspace = true }
raw-window-handle = { workspace = true }
egui = { workspace = true , features = ["bytemuck"]}

View file

@ -5,13 +5,10 @@ use std::{
};
use ash::{prelude::VkResult, vk};
use itertools::Itertools;
use vk_mem::Alloc;
use crate::{
define_device_owned_handle,
device::{DeviceOwned, QueueFlags},
Device,
device::{Allocation, DeviceObject, QueueFlags},
};
#[derive(Clone)]
@ -21,9 +18,7 @@ pub struct BufferDesc {
pub size: u64,
pub usage: vk::BufferUsageFlags,
pub queue_families: QueueFlags,
pub mem_usage: vk_mem::MemoryUsage,
pub alloc_flags: vk_mem::AllocationCreateFlags,
pub mem_location: gpu_allocator::MemoryLocation,
}
impl std::hash::Hash for BufferDesc {
@ -32,8 +27,7 @@ impl std::hash::Hash for BufferDesc {
self.size.hash(state);
self.usage.hash(state);
self.queue_families.hash(state);
self.mem_usage.hash(state);
self.alloc_flags.bits().hash(state);
self.mem_location.hash(state);
}
}
@ -45,17 +39,7 @@ impl std::fmt::Debug for BufferDesc {
.field("size", &self.size)
.field("usage", &self.usage)
.field("queue_families", &self.queue_families)
.field("mem_usage", &self.mem_usage)
.field_with("alloc_flags", |f| {
write!(
f,
"{}",
self.alloc_flags
.iter_names()
.map(|(name, _)| name)
.format(" | ")
)
})
.field("mem_location", &self.mem_location)
.finish()
}
}
@ -69,8 +53,7 @@ impl PartialEq for BufferDesc {
&& self.size == other.size
&& self.usage == other.usage
&& self.queue_families == other.queue_families
&& self.mem_usage == other.mem_usage
&& self.alloc_flags.bits() == other.alloc_flags.bits()
&& self.mem_location == other.mem_location
}
}
@ -82,109 +65,91 @@ impl Default for BufferDesc {
size: Default::default(),
usage: Default::default(),
queue_families: QueueFlags::empty(),
alloc_flags: vk_mem::AllocationCreateFlags::empty(),
mem_usage: vk_mem::MemoryUsage::Auto,
mem_location: gpu_allocator::MemoryLocation::Unknown,
}
}
}
define_device_owned_handle! {
#[derive(Debug)]
pub Buffer(vk::Buffer) {
alloc: vk_mem::Allocation,
size: u64,
} => |this| unsafe {
this.device().clone().alloc().destroy_buffer(this.handle(), &mut this.alloc);
}
#[derive(Debug)]
pub struct Buffer {
buffer: DeviceObject<vk::Buffer>,
desc: BufferDesc,
alloc: Allocation,
}
impl Eq for Buffer {}
impl PartialEq for Buffer {
fn eq(&self, other: &Self) -> bool {
self.inner == other.inner
*self.buffer == *other.buffer
}
}
impl Buffer {
pub fn new(device: Device, desc: BufferDesc) -> VkResult<Self> {
let queue_families = device.queue_families().family_indices(desc.queue_families);
pub fn new(device: Device, desc: BufferDesc) -> crate::Result<Self> {
let (buffer, requirements) = Self::new_raw(device.clone(), &desc)?;
let alloc =
device
.alloc2
.lock()
.allocate(&gpu_allocator::vulkan::AllocationCreateDesc {
name: desc.name.as_deref().unwrap_or_default(),
requirements,
location: desc.mem_location,
linear: true,
allocation_scheme: gpu_allocator::vulkan::AllocationScheme::GpuAllocatorManaged,
})?;
Ok(Self {
buffer: DeviceObject::new(buffer, device.clone(), desc.name.clone()),
desc,
alloc: Allocation::Owned(DeviceObject::new_without_name(alloc, device)),
})
}
fn new_raw(
device: Device,
desc: &BufferDesc,
) -> crate::Result<(vk::Buffer, vk::MemoryRequirements)> {
let queue_families = device.queues.family_indices(desc.queue_families);
let sharing_mode = if queue_families.len() > 1 {
vk::SharingMode::CONCURRENT
} else {
vk::SharingMode::EXCLUSIVE
};
let create_info = vk::BufferCreateInfo::default()
.size(desc.size)
.usage(desc.usage)
.queue_family_indices(&queue_families)
.sharing_mode(sharing_mode);
let (buffer, allocation) = unsafe {
device.alloc().create_buffer(
&vk::BufferCreateInfo::default()
.size(desc.size)
.usage(desc.usage)
.queue_family_indices(&queue_families)
.sharing_mode(sharing_mode),
&vk_mem::AllocationCreateInfo {
flags: desc.alloc_flags,
usage: desc.mem_usage,
..Default::default()
},
)?
};
let buffer = unsafe { device.dev().create_buffer(&create_info, None)? };
let mem_reqs = unsafe { device.dev().get_buffer_memory_requirements(buffer) };
Ok(Self::construct(
device, buffer, desc.name, allocation, desc.size,
)?)
Ok((buffer, mem_reqs))
}
#[allow(dead_code)]
pub fn map_arc(self: &mut Arc<Self>) -> VkResult<MappedBuffer<'_>> {
Arc::get_mut(self).map(Self::map).unwrap()
}
pub fn map(&mut self) -> VkResult<MappedBuffer<'_>> {
let bytes = unsafe {
let data = self.inner.dev().alloc().map_memory(&mut self.alloc)?;
let slice = core::slice::from_raw_parts_mut(data, self.size as usize);
slice
};
Ok(MappedBuffer { inner: self, bytes })
}
pub fn buffer(&self) -> vk::Buffer {
self.handle()
}
pub fn len(&self) -> u64 {
self.size
}
}
pub struct MappedBuffer<'a> {
bytes: &'a mut [u8],
inner: &'a mut Buffer,
}
impl Drop for MappedBuffer<'_> {
fn drop(&mut self) {
unsafe {
self.inner
.inner
.dev()
.alloc()
.unmap_memory(&mut self.inner.alloc);
pub fn map(&mut self) -> Option<&[u8]> {
if let Some(alloc) = self.alloc.allocation() {
alloc.mapped_slice()
} else {
None
}
}
}
impl Deref for MappedBuffer<'_> {
type Target = [u8];
pub fn map_mut(&mut self) -> Option<&mut [u8]> {
if let Some(alloc) = self.alloc.allocation_mut() {
alloc.mapped_slice_mut()
} else {
None
}
}
fn deref(&self) -> &Self::Target {
self.bytes
}
}
impl DerefMut for MappedBuffer<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.bytes
pub fn buffer(&self) -> vk::Buffer {
*self.buffer
}
pub fn len(&self) -> u64 {
self.desc.size
}
}

View file

@ -277,7 +277,7 @@ impl SingleUseCommand {
signal: Option<vk::Semaphore>,
fence: Arc<sync::Fence>,
) -> VkResult<FenceFuture<'a>> {
self.submit_fence(wait, signal, Some(fence.fence()))?;
self.submit_fence(wait, signal, Some(fence.raw()))?;
Ok(FenceFuture::new(fence))
}
@ -287,8 +287,8 @@ impl SingleUseCommand {
self,
wait: Option<(vk::Semaphore, vk::PipelineStageFlags)>,
signal: Option<vk::Semaphore>,
) -> VkResult<()> {
let fence = Arc::new(sync::Fence::create(self.device().clone())?);
) -> crate::Result<()> {
let fence = Arc::new(sync::Fence::from_pool(&self.device().pools.fences, None)?);
let future = self.submit_async(wait, signal, fence)?;
future.block()?;
Ok(())

View file

@ -2,7 +2,8 @@ use std::{
borrow::Cow,
collections::{BTreeSet, HashMap, HashSet},
ffi::CStr,
ops::Deref,
mem::ManuallyDrop,
ops::{Deref, DerefMut},
sync::Arc,
};
@ -11,15 +12,14 @@ use ash::{
prelude::VkResult,
vk::{self, Handle},
};
use parking_lot::Mutex;
use raw_window_handle::RawDisplayHandle;
use tinyvec::{ArrayVec, array_vec};
use crate::{
Instance, PhysicalDeviceFeatures, PhysicalDeviceInfo, Result,
instance::InstanceInner,
queue::Queue,
queue::{DeviceQueueInfos, DeviceQueues},
sync,
queue::{DeviceQueueInfos, DeviceQueues, Queue},
sync::{self, BinarySemaphore, TimelineSemaphore},
};
#[derive(Debug, Default)]
@ -108,9 +108,44 @@ struct DeviceExtensions {
pub(crate) mesh_shader: Option<ext::mesh_shader::Device>,
}
#[allow(unused)]
type GpuAllocation = gpu_allocator::vulkan::Allocation;

impl DeviceHandle for GpuAllocation {
    /// Frees this allocation back to the device's `gpu-allocator` instance.
    ///
    /// `Allocator::free` takes the allocation by value while `destroy` only
    /// receives `&mut self`, so the allocation is moved out and replaced with
    /// an empty default one.
    unsafe fn destroy(&mut self, device: &Device) {
        // `std::mem::take` is the idiomatic form of default-construct + swap.
        let taken = std::mem::take(self);
        // NOTE(review): the result of `free` is intentionally discarded —
        // confirm failures during teardown are acceptable to ignore.
        _ = device.alloc2.lock().free(taken);
    }
}
/// Ownership wrapper for a piece of GPU memory backing a resource.
#[derive(Debug)]
pub(crate) enum Allocation {
    // Memory owned exclusively by one resource; freed when this value drops.
    Owned(DeviceObject<GpuAllocation>),
    // Memory shared by several resources; freed when the last `Arc` drops.
    Shared(Arc<DeviceObject<GpuAllocation>>),
    // No allocator-managed memory — presumably driver-owned resources such
    // as swapchain images; TODO confirm against all construction sites.
    Unmanaged,
}
impl Allocation {
pub(crate) fn allocation(&self) -> Option<&GpuAllocation> {
match self {
Allocation::Owned(obj) => Some(obj),
Allocation::Shared(arc) => Some(arc.as_ref()),
Allocation::Unmanaged => None,
}
}
pub(crate) fn allocation_mut(&mut self) -> Option<&mut GpuAllocation> {
match self {
Allocation::Owned(obj) => Some(obj),
Allocation::Shared(arc) => Arc::get_mut(arc).map(|alloc| &mut alloc.inner),
Allocation::Unmanaged => None,
}
}
}
pub struct DeviceInner {
pub(crate) alloc: vk_mem::Allocator,
pub(crate) alloc2: Mutex<gpu_allocator::vulkan::Allocator>,
pub(crate) raw: ash::Device,
pub(crate) adapter: PhysicalDeviceInfo,
pub(crate) instance: Instance,
@ -387,6 +422,21 @@ impl PhysicalDeviceInfo {
},
};
let alloc2 =
gpu_allocator::vulkan::Allocator::new(&gpu_allocator::vulkan::AllocatorCreateDesc {
instance: instance.inner.raw.clone(),
device: device.clone(),
physical_device: self.pdev,
debug_settings: Default::default(),
buffer_device_address: false,
allocation_sizes: {
const MB: u64 = 1024 * 1024;
gpu_allocator::AllocationSizes::new(8 * MB, 64 * MB)
.with_max_host_memblock_size(256 * MB)
.with_max_device_memblock_size(256 * MB)
},
})?;
let inner = DeviceInner {
raw: device.clone(),
alloc: unsafe {
@ -396,6 +446,7 @@ impl PhysicalDeviceInfo {
self.pdev,
))?
},
alloc2: Mutex::new(alloc2),
instance: instance.clone(),
adapter: self,
queues: device_queues,
@ -405,7 +456,11 @@ impl PhysicalDeviceInfo {
_drop: DeviceDrop(device),
};
Ok(Device(Arc::new(inner)))
let shared = Arc::new(inner);
Ok(Device {
pools: Arc::new(DevicePools::new(shared.clone())),
shared,
})
}
fn required_extensions(&self, requested_extensions: &[Extension<'static>]) -> Vec<*const i8> {
@ -434,11 +489,31 @@ impl PhysicalDeviceInfo {
}
#[derive(Clone, Debug)]
pub struct Device(Arc<DeviceInner>);
/// Per-device pools of reusable synchronization primitives, so fences and
/// semaphores are recycled rather than created and destroyed repeatedly.
pub(crate) struct DevicePools {
    pub(crate) fences: Pool<vk::Fence>,
    pub(crate) binary_semaphores: Pool<BinarySemaphore>,
    pub(crate) timeline_semaphores: Pool<TimelineSemaphore>,
}

impl DevicePools {
    /// Creates empty pools; each pool keeps its own handle to the device.
    pub fn new(device: Arc<DeviceInner>) -> Self {
        let fences = Pool::new(Arc::clone(&device));
        let binary_semaphores = Pool::new(Arc::clone(&device));
        let timeline_semaphores = Pool::new(device);
        Self {
            fences,
            binary_semaphores,
            timeline_semaphores,
        }
    }
}
/// Cheaply cloneable handle to a logical Vulkan device.
#[derive(Clone, Debug)]
pub struct Device {
    // All actual device state; cloning `Device` only bumps this refcount.
    pub(crate) shared: Arc<DeviceInner>,
    // Recycling pools for fences/semaphores, shared across all clones.
    pub(crate) pools: Arc<DevicePools>,
}
impl PartialEq for Device {
fn eq(&self, other: &Self) -> bool {
Arc::ptr_eq(&self.0, &other.0)
Arc::ptr_eq(&self.shared, &other.shared)
}
}
@ -448,57 +523,57 @@ impl core::ops::Deref for Device {
type Target = DeviceInner;
fn deref(&self) -> &Self::Target {
&self.0
&self.shared
}
}
impl Device {
impl DeviceInner {
pub fn sync_threadpool(&self) -> &sync::SyncThreadpool {
&self.0.sync_threadpool
&self.sync_threadpool
}
pub fn alloc(&self) -> &vk_mem::Allocator {
&self.0.alloc
&self.alloc
}
pub fn dev(&self) -> &ash::Device {
&self.0.raw
&self.raw
}
pub fn instance(&self) -> &Instance {
&self.0.instance
&self.instance
}
pub fn queues(&self) -> &DeviceQueues {
&self.0.queues
&self.queues
}
pub fn phy(&self) -> vk::PhysicalDevice {
self.0.adapter.pdev
self.adapter.pdev
}
pub fn features(&self) -> &crate::PhysicalDeviceFeatures {
&self.0.adapter.features
&self.adapter.features
}
pub fn properties(&self) -> &crate::PhysicalDeviceProperties {
&self.0.adapter.properties
&self.adapter.properties
}
pub fn physical_device(&self) -> &PhysicalDeviceInfo {
&self.0.adapter
&self.adapter
}
pub fn main_queue(&self) -> &Queue {
self.0.queues.graphics()
self.queues.graphics()
}
pub fn compute_queue(&self) -> &Queue {
self.0.queues.compute()
self.queues.compute()
}
pub fn transfer_queue(&self) -> &Queue {
self.0.queues.transfer()
self.queues.transfer()
}
pub unsafe fn lock_queues(&self) {
unsafe {
self.0.queues.lock();
self.queues.lock();
}
}
pub unsafe fn unlock_queues(&self) {
unsafe {
self.0.queues.unlock();
self.queues.unlock();
}
}
@ -515,7 +590,7 @@ impl Device {
tracing::warn!("locking all queues and waiting for device to idle");
unsafe {
self.lock_queues();
self.dev().device_wait_idle()?;
self.raw.device_wait_idle()?;
self.unlock_queues();
}
tracing::warn!("finished waiting: unlocking all queues.");
@ -540,7 +615,9 @@ impl Device {
let mut buffer = [0u8; 64];
let buffer_vec: Vec<u8>;
let name_bytes = if name.len() < buffer.len() {
let name_bytes = if name.is_empty() {
&[]
} else if name.len() < buffer.len() {
buffer[..name.len()].copy_from_slice(name.as_bytes());
&buffer[..]
} else {
@ -580,7 +657,8 @@ pub struct DeviceOwnedDebugObject<T> {
impl<T: Eq> Eq for DeviceOwnedDebugObject<T> {}
impl<T: PartialEq> PartialEq for DeviceOwnedDebugObject<T> {
fn eq(&self, other: &Self) -> bool {
std::sync::Arc::ptr_eq(&self.device.0, &other.device.0) && self.object == other.object
std::sync::Arc::ptr_eq(&self.device.shared, &other.device.shared)
&& self.object == other.object
}
}
@ -652,11 +730,20 @@ impl<T: DeviceHandle> Deref for DeviceObject<T> {
}
}
impl<T: DeviceHandle> DerefMut for DeviceObject<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl<T: DeviceHandle> DeviceObject<T> {
pub fn new(inner: T, device: Device, name: Option<Cow<'static, str>>) -> Self {
pub fn new(inner: T, device: Device, name: Option<Cow<'static, str>>) -> Self
where
T: vk::Handle + Clone,
{
unsafe {
if let Some(name) = name.as_ref() {
device.debug_name_object(inner, &name);
device.debug_name_object(inner.clone(), &name);
}
}
@ -667,6 +754,14 @@ impl<T: DeviceHandle> DeviceObject<T> {
name,
}
}
pub fn new_without_name(inner: T, device: Device) -> Self {
Self {
inner,
device,
#[cfg(debug_assertions)]
name: None,
}
}
pub fn device(&self) -> &Device {
&self.device
}
@ -684,46 +779,48 @@ impl<T: DeviceHandle> DeviceObject<T> {
impl<T: DeviceHandle> Drop for DeviceObject<T> {
fn drop(&mut self) {
self.destroy(&self.device);
unsafe {
self.inner.destroy(&self.device);
}
}
}
pub trait DeviceHandle: vk::Handle + Copy {
fn destroy(self, device: &Device);
pub trait DeviceHandle {
unsafe fn destroy(&mut self, device: &Device);
}
impl DeviceHandle for vk::Semaphore {
fn destroy(self, device: &Device) {
unsafe fn destroy(&mut self, device: &Device) {
unsafe {
device.dev().destroy_semaphore(self, None);
device.dev().destroy_semaphore(*self, None);
}
}
}
impl DeviceHandle for vk::Fence {
fn destroy(self, device: &Device) {
unsafe fn destroy(&mut self, device: &Device) {
unsafe {
device.dev().destroy_fence(self, None);
device.dev().destroy_fence(*self, None);
}
}
}
impl DeviceHandle for vk::Buffer {
fn destroy(self, device: &Device) {
unsafe fn destroy(&mut self, device: &Device) {
unsafe {
device.dev().destroy_buffer(self, None);
device.dev().destroy_buffer(*self, None);
}
}
}
impl DeviceHandle for vk::SwapchainKHR {
fn destroy(self, device: &Device) {
unsafe fn destroy(&mut self, device: &Device) {
unsafe {
device
.device_extensions
.swapchain
.as_ref()
.map(|swapchain| swapchain.destroy_swapchain(self, None));
.map(|swapchain| swapchain.destroy_swapchain(*self, None));
}
}
}
@ -733,7 +830,100 @@ pub trait DeviceOwned<T> {
fn handle(&self) -> T;
}
/// Macro for helping create and destroy Vulkan objects which are owned by a device.
/// A handle type that a [`Pool`] can lazily create when it has no free
/// object left to hand out.
pub trait Pooled: Sized {
    // Create a fresh object using the pool's device.
    fn create_from_pool(pool: &Pool<Self>) -> Result<Self>;
}
/// RAII wrapper around a pooled handle: on drop, the handle is returned to
/// its [`Pool`] instead of being destroyed.
pub struct PoolObject<T: Pooled + vk::Handle + Clone> {
    // ManuallyDrop so `Drop::drop` can move the handle back into the pool.
    pub(crate) inner: ManuallyDrop<T>,
    pub(crate) pool: Pool<T>,
    // Debug-only label applied to the handle via the device's debug naming.
    #[cfg(debug_assertions)]
    pub(crate) name: Option<Cow<'static, str>>,
}
impl<T: Pooled + vk::Handle + Clone> PoolObject<T> {
    /// Attaches a debug name to the pooled handle.
    ///
    /// Only has an effect in debug builds; release builds discard the name
    /// without touching the device.
    pub fn name_object(&mut self, name: impl Into<Cow<'static, str>>) {
        #[cfg(debug_assertions)]
        {
            // Keep the unsafe scope minimal: storing the name is safe; only
            // the debug-naming call needs it.
            self.name = Some(name.into());
            // SAFETY: `inner` is a live handle owned by this object and was
            // created from `pool.device`, so naming it via that device is valid.
            unsafe {
                self.pool
                    .device
                    .debug_name_object(T::clone(&self.inner), self.name.as_ref().unwrap());
            }
        }
        // Silence the unused-parameter warning in release builds.
        #[cfg(not(debug_assertions))]
        let _ = name;
    }

    /// The device that owns the pool (and therefore this handle).
    pub fn device(&self) -> &Arc<DeviceInner> {
        &self.pool.device
    }
}
impl<T: Pooled + vk::Handle + Clone> Drop for PoolObject<T> {
    // Returns the handle to its pool for reuse instead of destroying it.
    fn drop(&mut self) {
        // SAFETY: `inner` is never accessed again after this take — the value
        // is being dropped and `drop` runs at most once.
        let handle = unsafe { ManuallyDrop::take(&mut self.inner) };
        #[cfg(debug_assertions)]
        if self.name.is_some() {
            // Clear the debug label so the recycled handle doesn't carry a
            // stale name from its previous user.
            unsafe { self.pool.device.debug_name_object(handle.clone(), "") };
        }
        self.pool.push(handle);
    }
}
impl<T: Pooled + vk::Handle + Clone> Deref for PoolObject<T> {
    type Target = T;

    /// Transparent read access to the wrapped handle.
    fn deref(&self) -> &Self::Target {
        // `ManuallyDrop<T>` itself derefs to `T`.
        &*self.inner
    }
}
/// A shared free-list of reusable `T` handles tied to one device.
///
/// Cloning a `Pool` is cheap: all clones share the same underlying free-list.
#[derive(Debug, Clone)]
pub struct Pool<T> {
    pub(crate) pool: Arc<Mutex<Vec<T>>>,
    pub(crate) device: Arc<DeviceInner>,
}

impl<T> Pool<T> {
    /// Creates an empty pool for `device`.
    pub fn new(device: Arc<DeviceInner>) -> Self {
        let pool = Arc::new(Mutex::new(Vec::new()));
        Self { pool, device }
    }

    /// Returns a free object to the pool for later reuse.
    pub fn push(&self, item: T) {
        self.pool.lock().push(item)
    }

    /// Takes a free object out of the pool, if one is available.
    pub fn pop(&self) -> Option<T> {
        self.pool.lock().pop()
    }
}
impl<T: Pooled + vk::Handle + Clone> Pool<T> {
    /// Hands out a pooled object, reusing a free one when possible and
    /// creating a new `T` via [`Pooled::create_from_pool`] otherwise.
    ///
    /// # Errors
    /// Propagates the error from `create_from_pool` when the pool is empty
    /// and a fresh object cannot be created.
    pub fn get(&self) -> Result<PoolObject<T>> {
        // Use the existing `pop` helper instead of duplicating the
        // lock-and-pop logic inline, keeping all lock handling in one place.
        let item = match self.pop() {
            Some(item) => item,
            None => T::create_from_pool(self)?,
        };
        Ok(PoolObject {
            inner: ManuallyDrop::new(item),
            pool: self.clone(),
            #[cfg(debug_assertions)]
            name: None,
        })
    }

    /// Like [`Self::get`], additionally applying a debug name when provided.
    pub fn get_named(&self, name: Option<impl Into<Cow<'static, str>>>) -> Result<PoolObject<T>> {
        let mut obj = self.get()?;
        if let Some(name) = name {
            obj.name_object(name);
        }
        Ok(obj)
    }
}
// Macro for helping create and destroy Vulkan objects which are owned by a device.
#[macro_export]
macro_rules! define_device_owned_handle {
($(#[$attr:meta])*

View file

@ -6,12 +6,14 @@ use std::{
use crate::{
define_device_owned_handle,
device::{DeviceOwned, QueueFlags},
device::{Allocation, DeviceHandle, DeviceObject, DeviceOwned, QueueFlags},
swapchain::Swapchain,
util::weak_vec::WeakVec,
};
use super::Device;
use ash::{prelude::*, vk};
use itertools::Itertools;
use gpu_allocator::vulkan::{AllocationCreateDesc, AllocationScheme};
use parking_lot::Mutex;
use vk_mem::Alloc;
@ -30,8 +32,7 @@ pub struct ImageDesc {
pub queue_families: QueueFlags,
pub layout: vk::ImageLayout,
pub mem_usage: vk_mem::MemoryUsage,
pub alloc_flags: vk_mem::AllocationCreateFlags,
pub mem_location: gpu_allocator::MemoryLocation,
}
impl std::hash::Hash for ImageDesc {
@ -47,8 +48,7 @@ impl std::hash::Hash for ImageDesc {
self.usage.hash(state);
self.queue_families.hash(state);
self.layout.hash(state);
self.mem_usage.hash(state);
self.alloc_flags.bits().hash(state);
self.mem_location.hash(state);
}
}
@ -67,8 +67,7 @@ impl PartialEq for ImageDesc {
&& self.usage == other.usage
&& self.queue_families == other.queue_families
&& self.layout == other.layout
&& self.mem_usage == other.mem_usage
&& self.alloc_flags.bits() == other.alloc_flags.bits()
&& self.mem_location == other.mem_location
}
}
@ -87,17 +86,7 @@ impl<'a> std::fmt::Debug for ImageDesc {
.field("usage", &self.usage)
.field("queue_families", &self.queue_families)
.field("layout", &self.layout)
.field("mem_usage", &self.mem_usage)
.field_with("alloc_flags", |f| {
write!(
f,
"{}",
self.alloc_flags
.iter_names()
.map(|(name, _)| name)
.format(" | ")
)
})
.field("mem_location", &self.mem_location)
.finish()
}
}
@ -117,54 +106,154 @@ impl Default for ImageDesc {
usage: Default::default(),
queue_families: QueueFlags::empty(),
layout: vk::ImageLayout::UNDEFINED,
alloc_flags: vk_mem::AllocationCreateFlags::empty(),
mem_usage: vk_mem::MemoryUsage::Auto,
mem_location: gpu_allocator::MemoryLocation::Unknown,
}
}
}
define_device_owned_handle! {
#[derive(Debug)]
pub Image(vk::Image) {
alloc: Option<vk_mem::Allocation>,
size: vk::Extent3D,
format: vk::Format,
views: Mutex<HashMap<ImageViewDesc, vk::ImageView>>,
aliases: Mutex<HashMap<ImageDesc, Arc<Image>>>,
parent: Option<Weak<Image>>,
is_swapchain_image: bool,
} => |this| if !this.is_swapchain_image {
/// Distinguishes images we allocated from images owned by the swapchain.
#[derive(Debug)]
enum ImageInner {
    // Driver/swapchain-owned image: raw handle only, no allocation to free.
    Swapchain(vk::Image, Device),
    // Image created by us, backed by allocator-managed memory.
    Allocated(DeviceObject<vk::Image>, Allocation),
}
impl DeviceHandle for vk::Image {
unsafe fn destroy(&mut self, device: &Device) {
unsafe {
for &view in this.views.lock().values() {
this.inner.dev().dev().destroy_image_view(view, None);
}
let handle = this.handle();
let dev = this.device().clone();
if let Some(alloc) = this.alloc.as_mut() {
// destroy image handle and allocation
dev.alloc().destroy_image(handle, alloc);
} else {
// destroy image handle
dev.dev().destroy_image(handle, None);
}
device.dev().destroy_image(*self, None);
}
}
}
impl Eq for Image {}
impl PartialEq for Image {
fn eq(&self, other: &Self) -> bool {
self.inner == other.inner
impl ImageInner {
    // Raw Vulkan image handle, regardless of ownership.
    fn image(&self) -> vk::Image {
        match self {
            Self::Swapchain(image, _) => *image,
            Self::Allocated(image, _) => **image,
        }
    }
    // The device this image belongs to.
    fn device(&self) -> &Device {
        match self {
            Self::Swapchain(_, device) => device,
            Self::Allocated(image, _) => image.device(),
        }
    }
    // Backing allocation; `None` for swapchain-owned images.
    fn allocation(&self) -> Option<&Allocation> {
        match self {
            Self::Swapchain(_, _) => None,
            Self::Allocated(_, alloc) => Some(alloc),
        }
    }
    // Mutable access to the backing allocation, when one exists.
    fn allocation_mut(&mut self) -> Option<&mut Allocation> {
        match self {
            Self::Swapchain(_, _) => None,
            Self::Allocated(_, alloc) => Some(alloc),
        }
    }
}
/// A Vulkan image together with its creation parameters and (optionally)
/// the memory that backs it.
#[derive(Debug)]
pub struct Image {
    // Handle plus ownership/allocation state.
    image: ImageInner,
    // Creation parameters; also backs the format/extent accessor methods.
    desc: ImageDesc,
    // Views created from this image — NOTE(review): presumably weak refs so
    // views don't keep the image alive; confirm against WeakVec usage.
    views: Mutex<WeakVec<ImageView>>,
}
impl Image {
pub fn new(device: Device, desc: ImageDesc) -> VkResult<Self> {
pub fn new(device: Device, desc: ImageDesc) -> crate::Result<Self> {
let (image, requirements) = Self::new_raw(device.clone(), &desc)?;
let alloc = device.alloc2.lock().allocate(&AllocationCreateDesc {
name: desc.name.as_deref().unwrap_or(""),
requirements,
location: desc.mem_location,
linear: desc.tiling == vk::ImageTiling::LINEAR,
allocation_scheme: AllocationScheme::GpuAllocatorManaged,
})?;
Ok(Self {
image: ImageInner::Allocated(
DeviceObject::new(image, device.clone(), desc.name.clone()),
Allocation::Owned(DeviceObject::new_without_name(alloc, device)),
),
desc,
views: Default::default(),
})
}
pub fn new_with_allocation(
device: Device,
allocation: Allocation,
desc: ImageDesc,
) -> crate::Result<Self> {
let (image, requirements) = Self::new_raw(device.clone(), &desc)?;
// validate allocation
let alloc_size = allocation
.allocation()
.map(|alloc| alloc.size())
.unwrap_or(0);
if alloc_size < requirements.size {
tracing::error!(
"allocation size {} is smaller than image memory requirements {}",
alloc_size,
requirements.size
);
return Err(crate::Error::Unspecified);
}
if allocation
.allocation()
.map(|alloc| 1 << alloc.memory_type_index())
.unwrap_or(0)
& requirements.memory_type_bits
== 0
{
return Err(crate::Error::Unspecified);
}
Ok(Self {
image: ImageInner::Allocated(
DeviceObject::new(image, device.clone(), desc.name.clone()),
allocation,
),
desc,
views: Default::default(),
})
}
    /// Wraps a driver-owned swapchain image so it can be used like any other
    /// [`Image`]. The handle is stored as `ImageInner::Swapchain`, so it is
    /// not destroyed when this wrapper drops.
    pub fn from_swapchain_image(image: vk::Image, swapchain: &Swapchain) -> Self {
        Self {
            image: ImageInner::Swapchain(image, swapchain.swapchain.device().clone()),
            // Synthesize a desc from the swapchain configuration so the usual
            // accessors (format/extent/usage/...) also work for these images.
            desc: ImageDesc {
                format: swapchain.config.format,
                kind: vk::ImageType::TYPE_2D,
                mip_levels: 1,
                array_layers: 1,
                samples: vk::SampleCountFlags::TYPE_1,
                extent: vk::Extent3D {
                    width: swapchain.config.extent.width,
                    height: swapchain.config.extent.height,
                    depth: 1,
                },
                tiling: vk::ImageTiling::OPTIMAL,
                usage: swapchain.config.usage,
                queue_families: QueueFlags::PRESENT,
                layout: vk::ImageLayout::UNDEFINED,
                // Nominal only: the memory is owned by the driver, nothing is
                // allocated here.
                mem_location: gpu_allocator::MemoryLocation::GpuOnly,
                ..Default::default()
            },
            views: Default::default(),
        }
    }
fn new_raw(
device: Device,
desc: &ImageDesc,
) -> crate::Result<(vk::Image, vk::MemoryRequirements)> {
tracing::trace!("allocate new image with desc={desc:?}");
let ImageDesc {
flags,
name,
format,
kind,
mip_levels,
@ -175,11 +264,10 @@ impl Image {
usage,
queue_families,
layout,
mem_usage,
alloc_flags,
..
} = desc;
let queue_families = device.queue_families().family_indices(queue_families);
let queue_families = device.queues.family_indices(*queue_families);
let sharing_mode = if queue_families.len() > 1 {
vk::SharingMode::CONCURRENT
@ -188,180 +276,87 @@ impl Image {
};
let info = &vk::ImageCreateInfo::default()
.flags(flags)
.image_type(kind)
.format(format)
.extent(extent)
.samples(samples)
.initial_layout(layout)
.tiling(tiling)
.usage(usage)
.flags(*flags)
.image_type(*kind)
.format(*format)
.extent(*extent)
.samples(*samples)
.initial_layout(*layout)
.tiling(*tiling)
.usage(*usage)
.sharing_mode(sharing_mode)
.queue_family_indices(&queue_families)
.array_layers(array_layers)
.mip_levels(mip_levels);
.array_layers(*array_layers)
.mip_levels(*mip_levels);
let alloc_info = &vk_mem::AllocationCreateInfo {
usage: mem_usage,
flags: alloc_flags,
..Default::default()
// validate
let limits = &device.adapter.properties.core.limits;
let max_dim = match *kind {
vk::ImageType::TYPE_1D => limits.max_image_dimension1_d,
vk::ImageType::TYPE_2D => limits.max_image_dimension2_d,
vk::ImageType::TYPE_3D => limits.max_image_dimension3_d,
_ => unreachable!(),
};
let (handle, alloc) = unsafe { device.alloc().create_image(info, alloc_info)? };
if extent.width > max_dim || extent.height > max_dim || extent.depth > max_dim {
tracing::error!(
"image extent {extent:?} exceeds device limits (max dimension: {max_dim})"
);
Self::construct(
device,
handle,
name,
Some(alloc),
extent,
format,
Mutex::new(HashMap::new()),
Mutex::new(HashMap::new()),
None, // aliased
false,
)
return Err(crate::Error::ImageTooLarge {
width: extent.width,
height: extent.height,
max_size: max_dim,
});
}
let image = unsafe { device.raw.create_image(&info, None)? };
let requirements = unsafe { device.raw.get_image_memory_requirements(image) };
Ok((image, requirements))
}
}
pub unsafe fn from_swapchain_image(
device: Device,
image: vk::Image,
name: Option<Cow<'static, str>>,
extent: vk::Extent3D,
format: vk::Format,
) -> Result<Image, vk::Result> {
Self::construct(
device,
image,
name,
None,
extent,
format,
Mutex::new(HashMap::new()),
Mutex::new(HashMap::new()),
None,
true,
)
impl Eq for Image {}

impl PartialEq for Image {
    /// Two `Image`s are equal when they wrap the same raw Vulkan handle.
    fn eq(&self, other: &Self) -> bool {
        let lhs = self.image.image();
        let rhs = other.image.image();
        lhs == rhs
    }
}
impl Image {
pub fn format(&self) -> vk::Format {
self.format
self.desc.format
}
pub fn image(&self) -> vk::Image {
self.handle()
self.image.image()
}
pub fn size(&self) -> vk::Extent3D {
self.size
self.desc.extent
}
pub fn extent_2d(&self) -> vk::Extent2D {
vk::Extent2D {
width: self.size.width,
height: self.size.height,
width: self.desc.extent.width,
height: self.desc.extent.height,
}
}
pub fn width(&self) -> u32 {
self.size.width
self.desc.extent.width
}
pub fn height(&self) -> u32 {
self.size.height
self.desc.extent.height
}
pub fn depth(&self) -> u32 {
self.size.depth
self.desc.extent.depth
}
fn get_parent_or_self(self: &Arc<Self>) -> Arc<Image> {
self.parent
.as_ref()
.map(|weak| weak.upgrade().unwrap())
.unwrap_or_else(|| self.clone())
pub fn allocation(&self) -> Option<&Allocation> {
self.image.allocation()
}
// TODO: figure out how to make this safer
pub fn get_alias(self: &Arc<Self>, desc: ImageDesc) -> VkResult<Arc<Self>> {
unsafe { self.get_parent_or_self().get_alias_inner(desc) }
}
/// # Safety
/// must only be called on the primogenitor of an image.
/// get the primogenitor with [`Self::get_parent_or_self()`]
unsafe fn get_alias_inner(self: Arc<Self>, desc: ImageDesc) -> VkResult<Arc<Image>> {
use std::collections::hash_map::Entry::*;
match self.aliases.lock().entry(desc.clone()) {
Occupied(occupied) => Ok(occupied.get().clone()),
Vacant(vacant) => {
let ImageDesc {
flags,
name,
format,
kind,
mip_levels,
array_layers,
samples,
extent,
tiling,
usage,
queue_families,
layout,
..
} = desc;
let queue_families = self
.device()
.queue_families()
.family_indices(queue_families);
let sharing_mode = if queue_families.len() > 1 {
vk::SharingMode::CONCURRENT
} else {
vk::SharingMode::EXCLUSIVE
};
let info = &vk::ImageCreateInfo::default()
.flags(flags)
.image_type(kind)
.format(format)
.extent(extent)
.samples(samples)
.initial_layout(layout)
.tiling(tiling)
.usage(usage)
.sharing_mode(sharing_mode)
.queue_family_indices(&queue_families)
.array_layers(array_layers)
.mip_levels(mip_levels);
let alloc = self
.alloc
.as_ref()
.expect("no alloc associated with image. is this the framebuffer?");
let image = unsafe {
let image = self.device().dev().create_image(info, None)?;
let req = self.device().dev().get_image_memory_requirements(image);
if self.device().alloc().get_allocation_info(alloc).size < req.size {
return Err(vk::Result::ERROR_MEMORY_MAP_FAILED);
}
self.device().alloc().bind_image_memory(alloc, image)?;
image
};
let alias = Self::construct(
self.device().clone(),
image,
name,
None,
extent,
format,
Mutex::new(HashMap::new()),
Mutex::new(HashMap::new()),
Some(Arc::downgrade(&self)),
self.is_swapchain_image,
)?;
Ok(vacant.insert(Arc::new(alias)).clone())
}
}
pub fn allocation_mut(&mut self) -> Option<&mut Allocation> {
self.image.allocation_mut()
}
/// technically, this ImageView belongs to the image and is managed by it.
@ -392,11 +387,33 @@ impl Image {
}
}
pub fn create_view(&self, desc: ImageViewDesc) -> VkResult<ImageView> {
pub fn create_view(&self, desc: ImageViewDesc) -> crate::Result<ImageView> {
// validate
if !view_kind_compatible(self.desc.kind, desc.kind) {
tracing::error!(
"image view kind {:?} is not compatible with image kind {:?}",
desc.kind,
self.desc.kind
);
return Err(crate::Error::IncompatibleImageViewKind {
image_kind: self.desc.kind,
view_kind: desc.kind,
});
}
if desc.mip_range.0 > self.desc.mip_levels || desc.mip_range.1 > self.desc.mip_levels {
tracing::error!(
"image view mip range {:?} exceeds image mip levels {}",
desc.mip_range,
self.desc.mip_levels
);
return Err(crate::Error::Unspecified);
}
let create_info = vk::ImageViewCreateInfo::default()
.flags(desc.flags)
.image(self.image())
.view_type(vk::ImageViewType::TYPE_2D)
.view_type(desc.kind)
.format(desc.format)
.components(desc.components)
.subresource_range(
@ -408,16 +425,28 @@ impl Image {
.layer_count(desc.layer_range.count()),
);
let view = unsafe { self.device().dev().create_image_view(&create_info, None)? };
let device = self.image.device();
let view = unsafe { device.raw.create_image_view(&create_info, None)? };
ImageView::construct(self.device().clone(), view, desc.name)
}
}
/// Whether a view of `view_kind` may be created for an image of `image_kind`,
/// following the Vulkan image/image-view type compatibility table.
///
/// NOTE(review): 2D(-array) views of 3D images additionally require the image
/// to be created with the 2D-array-compatible flag — not checked here; confirm
/// callers guarantee it.
fn view_kind_compatible(image_kind: vk::ImageType, view_kind: vk::ImageViewType) -> bool {
    use vk::ImageType as IT;
    use vk::ImageViewType as VT;
    matches!(
        (image_kind, view_kind),
        (IT::TYPE_1D, VT::TYPE_1D | VT::TYPE_1D_ARRAY)
            | (IT::TYPE_2D, VT::TYPE_2D | VT::TYPE_2D_ARRAY | VT::CUBE | VT::CUBE_ARRAY)
            | (IT::TYPE_3D, VT::TYPE_2D | VT::TYPE_2D_ARRAY | VT::TYPE_3D)
    )
}
#[derive(Debug, Default, Clone)]
pub struct ImageViewDesc {
pub flags: vk::ImageViewCreateFlags,
pub name: Option<Cow<'static, str>>,
pub flags: vk::ImageViewCreateFlags,
pub kind: vk::ImageViewType,
pub format: vk::Format,
pub components: vk::ComponentMapping,
@ -525,10 +554,18 @@ impl PartialEq for ImageViewDesc {
}
}
define_device_owned_handle! {
#[derive(Debug)]
pub ImageView(vk::ImageView) {} => |this| unsafe {
this.device().dev().destroy_image_view(this.handle(), None);
#[derive(Debug)]
pub struct ImageView {
view: DeviceObject<vk::ImageView>,
desc: ImageViewDesc,
image: Arc<Image>,
}
impl DeviceHandle for vk::ImageView {
unsafe fn destroy(&mut self, device: &Device) {
unsafe {
device.dev().destroy_image_view(*self, None);
}
}
}

View file

@ -6,17 +6,12 @@
slice_partition_dedup
)]
use std::{
cell::OnceCell, collections::HashMap, ffi::CStr, fmt::Debug, marker::PhantomData, sync::Arc,
};
use std::{collections::HashMap, ffi::CStr, fmt::Debug, marker::PhantomData, sync::Arc};
use bitflags::bitflags;
use raw_window_handle::{RawDisplayHandle, RawWindowHandle};
use parking_lot::{Mutex, MutexGuard};
use ash::{
Entry, ext, khr,
ext,
prelude::VkResult,
vk::{self, SurfaceCapabilitiesKHR},
};
@ -32,7 +27,6 @@ pub mod device;
mod egui_pass;
mod images;
pub mod instance;
mod memory;
mod pipeline;
pub mod render_graph;
pub mod rendering;
@ -101,6 +95,8 @@ pub enum Error {
Utf8Error(#[from] core::str::Utf8Error),
#[error(transparent)]
NulError(#[from] std::ffi::NulError),
#[error(transparent)]
GpuAllocatorError(#[from] gpu_allocator::AllocationError),
#[error("No Physical Device found.")]
NoPhysicalDevice,
#[error(transparent)]
@ -115,6 +111,13 @@ pub enum Error {
},
#[error("Image dimensions cannot be zero.")]
ImageZeroSized,
#[error("Incompatible image view kind {view_kind:?} for image kind {image_kind:?}.")]
IncompatibleImageViewKind {
view_kind: vk::ImageViewType,
image_kind: vk::ImageType,
},
#[error("Unspecified Error")]
Unspecified,
}
pub type Result<T> = core::result::Result<T, Error>;
@ -990,13 +993,32 @@ impl Renderer2 {
&self,
window: RawWindowHandle,
extent: vk::Extent2D,
) -> Result<swapchain::WindowSurface> {
swapchain::WindowSurface::new(self.device.clone(), extent, window, self.display)
) -> Result<swapchain::Surface> {
let surface = unsafe {
swapchain::Surface::new_from_raw_window_handle(
&self.device.instance,
self.display,
window,
)?
};
surface.configure(
&self.device,
swapchain::SwapchainConfiguration {
present_mode: vk::PresentModeKHR::MAILBOX,
format: vk::Format::R8G8B8A8_UNORM,
color_space: vk::ColorSpaceKHR::SRGB_NONLINEAR,
image_count: 3,
extent,
composite_alpha_mode: vk::CompositeAlphaFlagsKHR::OPAQUE,
usage: vk::ImageUsageFlags::COLOR_ATTACHMENT | vk::ImageUsageFlags::TRANSFER_DST,
},
)?;
Ok(surface)
}
pub async fn draw_graph<T, F: FnOnce(&mut Renderer2, &mut render_graph::RenderGraph) -> T>(
&mut self,
surface: &swapchain::WindowSurface,
surface: &swapchain::Surface,
cb: F,
) -> Result<T> {
let frame = surface.acquire_image().await?;
@ -1011,7 +1033,7 @@ impl Renderer2 {
let future = cmds.submit(
Some((frame.acquire, vk::PipelineStageFlags::TRANSFER)),
Some(frame.release),
Arc::new(sync::Fence::create(self.device.clone())?),
Arc::new(sync::Fence::from_pool(&self.device.pools.fences, None)?),
)?;
future.await;

View file

@ -1,27 +1 @@
#![allow(dead_code)]
use crate::device::Device;
//#[derive(Debug)]
// Parameters describing a single VMA allocation request.
// NOTE(review): this type lives in the file being deleted by this change set
// (the vk-mem backend is being replaced by gpu-allocator).
pub struct DeviceMemoryDesc {
    pub flags: vk_mem::AllocationCreateFlags,
    // size in bytes of the requested allocation
    pub size: u64,
    // required alignment in bytes
    pub align: u64,
    // memory-type bits as reported by vkGetBufferMemoryRequirements & co.
    pub type_bits: u32,
    pub usage: vk_mem::MemoryUsage,
}
/// An owned VMA allocation paired with the device that created it, so the
/// allocation can be freed on drop (see the `Drop` impl below).
#[derive(Debug)]
pub struct DeviceMemory {
    device: Device,
    alloc: vk_mem::Allocation,
}
impl DeviceMemory {}
impl Drop for DeviceMemory {
    fn drop(&mut self) {
        // SAFETY: `alloc` was produced by this device's allocator and is freed
        // exactly once, here.
        unsafe {
            self.device.alloc().free_memory(&mut self.alloc);
        }
    }
}

View file

@ -2,6 +2,7 @@ use bitflags::bitflags;
use parking_lot::Mutex;
use raw_window_handle::RawDisplayHandle;
use std::{collections::HashMap, ops::Deref, sync::Arc};
use tinyvec::{ArrayVec, array_vec};
use ash::vk;
@ -133,6 +134,23 @@ impl DeviceQueues {
&self.transfer
}
/// Returns the set of distinct queue-family indices that the requested queue
/// `flags` map onto, in graphics → compute → transfer order.
///
/// The result is deduplicated: Vulkan requires the queue-family list handed to
/// `VK_SHARING_MODE_CONCURRENT` resources to contain unique indices.
pub fn family_indices(&self, flags: crate::device::QueueFlags) -> ArrayVec<[u32; 4]> {
    use crate::device::QueueFlags as QF;

    let mut indices = array_vec!([u32; 4]);
    // Push each family index at most once. The previous implementation relied
    // on `partition_dedup`, which only collapses *adjacent* duplicates, so e.g.
    // graphics == transfer != compute produced [g, c, g] and leaked a duplicate.
    let mut push_unique = |idx: u32| {
        if !indices.contains(&idx) {
            indices.push(idx);
        }
    };
    if flags.intersects(QF::GRAPHICS | QF::PRESENT) {
        push_unique(self.graphics.family.index);
    }
    if flags.contains(QF::ASYNC_COMPUTE) {
        push_unique(self.compute.family.index);
    }
    if flags.contains(QF::TRANSFER) {
        push_unique(self.transfer.family.index);
    }
    indices
}
pub fn swapchain_family_indices(&self) -> &[u32] {
core::slice::from_ref(&self.graphics.family.index)
}

View file

@ -6,17 +6,17 @@ use glam::{f32::Mat4, vec3};
pub use crate::egui_pass::{egui_pass, egui_pre_pass};
use crate::{
Result,
buffers::{Buffer, BufferDesc},
commands::{self, traits::CommandBufferExt},
device::{Device, DeviceOwned},
images::ImageViewDesc,
pipeline,
render_graph::{
buffer_barrier, Access, GraphResourceId, PassDesc, RecordFn, RenderContext, RenderGraph,
Access, GraphResourceId, PassDesc, RecordFn, RenderContext, RenderGraph, buffer_barrier,
},
sync,
util::Rgba8,
Result,
};
pub struct Wireframe {
@ -162,7 +162,11 @@ impl Wireframe {
);
}
let future = cmd.submit_async(None, None, Arc::new(sync::Fence::create(dev.clone())?))?;
let future = cmd.submit_async(
None,
None,
Arc::new(sync::Fence::from_pool(&dev.pools.fences, None)?),
)?;
let (pipeline, layout) = Self::create_pipeline(dev.clone())?;

View file

@ -20,7 +20,7 @@ use crate::{
device::{Device, DeviceObject, DeviceOwned},
images,
instance::InstanceInner,
sync,
sync::{self, Fence},
util::RawMutexGuard,
};
@ -280,12 +280,12 @@ impl Surface {
pub struct Swapchain {
// swapchain images, managed by the swapchain and must not be destroyed manually.
images: Vec<vk::Image>,
swapchain: DeviceObject<vk::SwapchainKHR>,
pub(crate) swapchain: DeviceObject<vk::SwapchainKHR>,
// this carries the device handle, however the `swapchain` field holds a ref count on the device, so it is safe to hold the pointer in the functor as well.
#[debug(skip)]
functor: khr::swapchain::Device,
/// current configuration of the swapchain.
config: SwapchainConfiguration,
pub(crate) config: SwapchainConfiguration,
/// the minimum number of images the surface permits. This is used to calculate how many images we can have in-flight at the same time.
min_image_count: u32,
@ -338,9 +338,9 @@ impl Drop for Swapchain {
fn drop(&mut self) {
unsafe {
self.release_resources();
self.functor.destroy_swapchain(*self.swapchain, None);
}
todo!()
// the swapchain itself will be automatically destroyed by the
// DeviceObject's Drop implementation.
}
}
@ -489,7 +489,7 @@ impl Swapchain {
/// suboptimal and should be recreated.
fn acquire_image(
self: Arc<Self>,
) -> impl std::future::Future<Output = VkResult<(SwapchainFrame, bool)>> {
) -> impl std::future::Future<Output = crate::Result<(SwapchainFrame, bool)>> {
let frame = self
.current_frame
.try_update(Ordering::Release, Ordering::Relaxed, |i| {
@ -500,7 +500,7 @@ impl Swapchain {
tracing::trace!(frame, "acquiring image for frame {frame}");
async move {
let fence = self.fences[frame];
let fence = Fence::from_pool(&self.swapchain.device().pools.fences, None)?;
let acquire = self.acquire_semaphores[frame];
let release = self.release_semaphores[frame];
@ -510,14 +510,14 @@ impl Swapchain {
move || unsafe {
this.with_locked(|swapchain| {
this.functor
.acquire_next_image(swapchain, u64::MAX, acquire, fence)
.acquire_next_image(swapchain, u64::MAX, acquire, fence.raw())
})
}
})
.await?;
// wait for image to become available.
sync::FenceFuture::new(fence.clone()).await;
fence.into_future().await;
let idx = idx as usize;
let image = self.images[idx].clone();

View file

@ -1,10 +1,16 @@
#[cfg(debug_assertions)]
use std::borrow::Cow;
use std::{
future::Future,
marker::PhantomData,
mem::ManuallyDrop,
sync::{Arc, atomic::AtomicU32},
time::Duration,
};
use crate::device::{DeviceObject, DeviceOwned, Pool, PoolObject, Pooled};
use crate::{Result, device::DeviceInner};
use super::Device;
use ash::{prelude::*, vk};
use crossbeam::channel::{Receiver, Sender};
@ -149,111 +155,273 @@ impl SyncThreadpool {
}
}
pub struct Semaphore {
device: Device,
inner: vk::Semaphore,
/// A fence that either owns its Vulkan handle outright (`Dedicated`, destroyed
/// via `DeviceObject` on drop) or leases one from the device's fence pool
/// (`Pooled` — presumably handed back to the pool by `PoolObject`'s drop;
/// confirm against `PoolObject`).
pub enum Fence {
    Dedicated { fence: DeviceObject<vk::Fence> },
    Pooled { fence: PoolObject<vk::Fence> },
}
pub struct Fence {
dev: Device,
fence: vk::Fence,
impl Pooled for vk::Fence {
    /// Creates a fresh, unsignaled fence on the pool's device.
    fn create_from_pool(pool: &Pool<Self>) -> Result<Self> {
        let info = vk::FenceCreateInfo::default();
        // SAFETY: the pool keeps its device alive, so the raw handle is valid here.
        unsafe { Ok(pool.device.raw.create_fence(&info, None)?) }
    }
}
impl std::fmt::Debug for Fence {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Fence").field("fence", &self.fence).finish()
}
}
impl Drop for Fence {
fn drop(&mut self) {
unsafe {
self.dev.dev().destroy_fence(self.fence, None);
}
f.debug_struct("Fence").field("fence", &self.raw()).finish()
}
}
impl Fence {
unsafe fn new(dev: Device, fence: vk::Fence) -> Fence {
Self { dev, fence }
pub fn new_dedicated(device: Device, name: Option<&'static str>) -> Result<Fence> {
let fence = unsafe {
device
.raw
.create_fence(&vk::FenceCreateInfo::default(), None)?
};
Ok(Self::Dedicated {
fence: DeviceObject::new(fence, device, name.map(Into::into)),
})
}
pub fn create(dev: Device) -> VkResult<Fence> {
unsafe {
Ok(Self::new(
dev.clone(),
dev.dev()
.create_fence(&vk::FenceCreateInfo::default(), None)?,
))
pub fn from_pool(pool: &Pool<vk::Fence>, name: Option<&'static str>) -> Result<Fence> {
let mut fence = pool.get()?;
#[cfg(debug_assertions)]
if let Some(name) = name {
fence.name_object(name);
}
Ok(Self::Pooled { fence })
}
pub fn raw(&self) -> vk::Fence {
match self {
Fence::Dedicated { fence } => **fence,
Fence::Pooled { fence } => **fence,
}
}
#[allow(dead_code)]
pub fn create_signaled(dev: Device) -> VkResult<Fence> {
unsafe {
Ok(Self::new(
dev.clone(),
dev.dev().create_fence(
&vk::FenceCreateInfo::default().flags(vk::FenceCreateFlags::SIGNALED),
None,
)?,
))
fn device(&self) -> &Arc<DeviceInner> {
match self {
Fence::Dedicated { fence } => &fence.device().shared,
Fence::Pooled { fence } => fence.device(),
}
}
pub fn wait_on(&self, timeout: Option<u64>) -> Result<(), vk::Result> {
use core::slice::from_ref;
pub fn wait_on(&self, timeout: Option<u64>) -> Result<()> {
unsafe {
self.dev
.dev()
.wait_for_fences(from_ref(&self.fence), true, timeout.unwrap_or(u64::MAX))
self.device().raw.wait_for_fences(
core::slice::from_ref(&self.raw()),
true,
timeout.unwrap_or(u64::MAX),
)?
}
Ok(())
}
pub fn fence(&self) -> vk::Fence {
self.fence
}
pub fn is_signaled(&self) -> bool {
unsafe { self.dev.dev().get_fence_status(self.fence).unwrap_or(false) }
}
pub fn reset(&self) -> Result<(), vk::Result> {
unsafe {
self.dev
.dev()
.reset_fences(core::slice::from_ref(&self.fence))
self.device()
.raw
.get_fence_status(self.raw())
.unwrap_or(false)
}
}
}
impl AsRef<vk::Fence> for Fence {
fn as_ref(&self) -> &vk::Fence {
todo!()
pub fn reset(&self) -> Result<()> {
unsafe {
self.device()
.raw
.reset_fences(core::slice::from_ref(&self.raw()))?
}
Ok(())
}
pub fn into_future<'a>(self) -> FenceFuture<'a> {
FenceFuture::new(Arc::new(self))
}
}
#[allow(dead_code)]
impl Semaphore {
pub fn new(device: Device) -> VkResult<Self> {
/// Distinguishes the two Vulkan semaphore flavours used by this module.
#[derive(Debug, Clone, Copy)]
enum SemaphoreType {
    /// A binary (signaled / unsignaled) semaphore.
    Binary,
    /// A timeline semaphore; the payload is the requested initial counter value.
    Timeline(u64),
}
/// A Vulkan semaphore that either owns its handle (`Dedicated`, destroyed by
/// `DeviceObject` on drop) or was leased from the device's semaphore pools
/// (`Pooled`, recycled on drop).
pub enum Semaphore {
    Dedicated {
        semaphore_type: SemaphoreType,
        semaphore: DeviceObject<vk::Semaphore>,
    },
    Pooled {
        semaphore_type: SemaphoreType,
        semaphore: vk::Semaphore,
        // Kept so the drop path can reach the pools.
        device: Device,
        // Debug-only label; cleared when the semaphore is returned to the pool
        // so a recycled semaphore does not carry a stale name.
        #[cfg(debug_assertions)]
        name: Option<Cow<'static, str>>,
    },
}
/// Newtype over a pooled binary `vk::Semaphore`, so the binary and timeline
/// pools cannot be mixed up at the type level.
#[derive(Debug, Clone)]
pub(crate) struct BinarySemaphore(vk::Semaphore);
/// Newtype over a pooled timeline `vk::Semaphore`.
#[derive(Debug, Clone)]
pub(crate) struct TimelineSemaphore(vk::Semaphore);
// This is just so that ash can name these semaphore newtypes
impl vk::Handle for BinarySemaphore {
    const TYPE: vk::ObjectType = <vk::Semaphore as vk::Handle>::TYPE;
    /// Exposes the wrapped semaphore's raw handle value.
    fn as_raw(self) -> u64 {
        self.0.as_raw()
    }
    /// Intentionally unsupported: instances must come from the pool so the
    /// newtype invariant (binary vs. timeline) holds.
    fn from_raw(_: u64) -> Self {
        unimplemented!("BinarySemaphore cannot be created from raw handle")
    }
}
impl vk::Handle for TimelineSemaphore {
    const TYPE: vk::ObjectType = <vk::Semaphore as vk::Handle>::TYPE;
    /// Exposes the wrapped semaphore's raw handle value.
    fn as_raw(self) -> u64 {
        self.0.as_raw()
    }
    /// Intentionally unsupported: instances must come from the pool so the
    /// newtype invariant (binary vs. timeline) holds.
    fn from_raw(_: u64) -> Self {
        unimplemented!("TimelineSemaphore cannot be created from raw handle")
    }
}
impl Pooled for BinarySemaphore {
fn create_from_pool(pool: &Pool<Self>) -> Result<Self> {
let mut type_info =
vk::SemaphoreTypeCreateInfo::default().semaphore_type(vk::SemaphoreType::BINARY);
let create_info = vk::SemaphoreCreateInfo::default().push_next(&mut type_info);
let inner = unsafe { device.dev().create_semaphore(&create_info, None)? };
Ok(Self { device, inner })
let inner = unsafe { pool.device.raw.create_semaphore(&create_info, None)? };
Ok(Self(inner))
}
pub fn new_timeline(device: Device, value: u64) -> VkResult<Self> {
}
impl Pooled for TimelineSemaphore {
    /// Creates a fresh timeline semaphore (initial counter 0) on the pool's
    /// device. Callers that need a specific starting value create a dedicated
    /// semaphore instead.
    fn create_from_pool(pool: &Pool<Self>) -> Result<Self> {
        let mut type_info = vk::SemaphoreTypeCreateInfo::default()
            .semaphore_type(vk::SemaphoreType::TIMELINE)
            .initial_value(0);
        let create_info = vk::SemaphoreCreateInfo::default().push_next(&mut type_info);
        let inner = unsafe { pool.device.raw.create_semaphore(&create_info, None)? };
        Ok(Self(inner))
    }
}
impl Drop for Semaphore {
fn drop(&mut self) {
unsafe {
self.device.dev().destroy_semaphore(self.inner, None);
if let Semaphore::Pooled {
device,
semaphore_type,
semaphore,
name,
} = self
{
#[cfg(debug_assertions)]
if name.is_some() {
// reset the name to avoid confusion in case this semaphore is re-used
unsafe { device.debug_name_object(*semaphore, "") };
}
match semaphore_type {
SemaphoreType::Binary => device
.pools
.binary_semaphores
.push(BinarySemaphore(*semaphore)),
SemaphoreType::Timeline(_) => {
device
.pools
.timeline_semaphores
.push(TimelineSemaphore(*semaphore));
}
}
}
}
}
impl Semaphore {
    /// Creates a semaphore that owns its handle and destroys it on drop via
    /// `DeviceObject`, bypassing the pools. Timeline semaphores start at the
    /// requested initial counter value.
    pub fn new_dedicated(
        device: Device,
        semaphore_type: SemaphoreType,
        name: Option<&'static str>,
    ) -> Result<Self> {
        let mut type_info = vk::SemaphoreTypeCreateInfo::default();
        match semaphore_type {
            SemaphoreType::Binary => {
                type_info = type_info.semaphore_type(vk::SemaphoreType::BINARY);
            }
            SemaphoreType::Timeline(value) => {
                type_info = type_info
                    .semaphore_type(vk::SemaphoreType::TIMELINE)
                    .initial_value(value);
            }
        }
        let create_info = vk::SemaphoreCreateInfo::default().push_next(&mut type_info);
        let inner = unsafe { device.dev().create_semaphore(&create_info, None)? };
        Ok(Self::Dedicated {
            semaphore_type,
            semaphore: DeviceObject::new(inner, device, name.map(Into::into)),
        })
    }

    /// Fetches a semaphore of the requested type from the device's pools,
    /// creating a fresh one when the matching pool is empty. The handle is
    /// recycled into the pool when the returned wrapper is dropped.
    pub fn from_pool(
        device: Device,
        semaphore_type: SemaphoreType,
        name: Option<&'static str>,
    ) -> Result<Self> {
        let semaphore = match semaphore_type {
            SemaphoreType::Binary => {
                if let Some(semaphore) = device.pools.binary_semaphores.pop() {
                    semaphore.0
                } else {
                    let mut type_info = vk::SemaphoreTypeCreateInfo::default()
                        .semaphore_type(vk::SemaphoreType::BINARY);
                    let create_info = vk::SemaphoreCreateInfo::default().push_next(&mut type_info);
                    unsafe { device.raw.create_semaphore(&create_info, None)? }
                }
            }
            SemaphoreType::Timeline(value) => {
                // BUG FIX: this branch previously popped from `binary_semaphores`,
                // handing out a binary semaphore where a timeline one was requested.
                if let Some(semaphore) = device.pools.timeline_semaphores.pop() {
                    // NOTE(review): a recycled timeline semaphore keeps its previous
                    // counter value rather than starting at `value` — confirm that
                    // callers tolerate this before relying on the pool here.
                    semaphore.0
                } else {
                    let mut type_info = vk::SemaphoreTypeCreateInfo::default()
                        .semaphore_type(vk::SemaphoreType::TIMELINE)
                        .initial_value(value);
                    let create_info = vk::SemaphoreCreateInfo::default().push_next(&mut type_info);
                    unsafe { device.raw.create_semaphore(&create_info, None)? }
                }
            }
        };
        #[cfg(debug_assertions)]
        if let Some(name) = name {
            unsafe {
                device.debug_name_object(semaphore, name);
            }
        }
        Ok(Self::Pooled {
            semaphore_type,
            semaphore,
            device,
            #[cfg(debug_assertions)]
            name: name.map(Into::into),
        })
    }

    /// The raw `vk::Semaphore` handle, regardless of variant.
    pub fn semaphore(&self) -> vk::Semaphore {
        match self {
            Semaphore::Dedicated { semaphore, .. } => **semaphore,
            Semaphore::Pooled { semaphore, .. } => *semaphore,
        }
    }
}
@ -265,24 +433,16 @@ pub struct FenceFuture<'a> {
}
impl FenceFuture<'_> {
/// # Safety
/// `fence` must not be destroyed while this future is live.
#[allow(dead_code)]
pub unsafe fn from_fence(device: Device, fence: vk::Fence) -> Self {
Self {
fence: Arc::new(unsafe { Fence::new(device, fence) }),
_pd: PhantomData,
}
}
pub fn new(fence: Arc<Fence>) -> Self {
Self {
fence,
_pd: PhantomData,
}
}
pub fn block(&self) -> VkResult<()> {
pub fn block(&self) -> crate::Result<()> {
self.fence.wait_on(None)?;
self.fence.reset()
self.fence.reset()?;
Ok(())
}
}
@ -299,7 +459,7 @@ impl Future for FenceFuture<'_> {
std::task::Poll::Ready(())
} else {
self.fence
.dev
.device()
.sync_threadpool()
.spawn_waiter(self.fence.clone(), cx.waker().clone());
std::task::Poll::Pending

View file

@ -3,6 +3,77 @@ use std::ops::{Deref, DerefMut};
use ash::vk;
use bytemuck::{Pod, Zeroable};
pub(crate) mod weak_vec {
    //! Module containing the [`WeakVec`] API.

    use std::{sync::Weak, vec::Vec};

    /// An optimized container for `Weak` references of `T` that minimizes reallocations by
    /// dropping older elements that no longer have strong references to them.
    #[derive(Debug)]
    pub(crate) struct WeakVec<T> {
        inner: Vec<Weak<T>>,
    }

    // Implemented by hand: `derive(Default)` would add a spurious `T: Default`
    // bound even though an empty Vec needs no such constraint.
    impl<T> Default for WeakVec<T> {
        fn default() -> Self {
            Self {
                inner: Default::default(),
            }
        }
    }

    impl<T> WeakVec<T> {
        /// Creates an empty `WeakVec` without allocating.
        pub(crate) fn new() -> Self {
            Self { inner: Vec::new() }
        }

        /// Pushes a new element to this collection.
        ///
        /// If the inner Vec needs to be reallocated, we will first drop older elements that
        /// no longer have strong references to them.
        pub(crate) fn push(&mut self, value: Weak<T>) {
            if self.inner.len() == self.inner.capacity() {
                // Iterating backwards has the advantage that we don't do more work than we have to.
                for i in (0..self.inner.len()).rev() {
                    if self.inner[i].strong_count() == 0 {
                        self.inner.swap_remove(i);
                    }
                }
                // Make sure our capacity is twice the number of live elements.
                // Leaving some spare capacity ensures that we won't re-scan immediately.
                self.inner.reserve_exact(self.inner.len());
            }
            self.inner.push(value);
        }
    }

    /// Owning iterator over the `Weak` references stored in a [`WeakVec`].
    pub(crate) struct WeakVecIter<T> {
        inner: std::vec::IntoIter<Weak<T>>,
    }

    impl<T> Iterator for WeakVecIter<T> {
        type Item = Weak<T>;

        fn next(&mut self) -> Option<Self::Item> {
            self.inner.next()
        }

        // Forward the underlying Vec iterator's exact size so adaptors like
        // `collect` can pre-allocate instead of growing incrementally.
        fn size_hint(&self) -> (usize, Option<usize>) {
            self.inner.size_hint()
        }
    }

    impl<T> ExactSizeIterator for WeakVecIter<T> {}

    impl<T> IntoIterator for WeakVec<T> {
        type Item = Weak<T>;
        type IntoIter = WeakVecIter<T>;

        fn into_iter(self) -> Self::IntoIter {
            WeakVecIter {
                inner: self.inner.into_iter(),
            }
        }
    }
}
pub(crate) mod cow_arc {}
#[macro_export]
macro_rules! def_monotonic_id {
($vis:vis $ty:ident) => {