// NOTE(review): generic parameter lists appear to have been stripped from this
// file by the text-extraction step (e.g. `Option,`, `Mutex,`, `.collect::>()`
// below are missing their `<...>` arguments). Only comments were added here;
// the missing generics must be restored from version control before building.

use std::{
    borrow::Cow,
    collections::{BTreeSet, HashMap, HashSet},
    ffi::CStr,
    ops::{Deref, DerefMut},
    sync::Arc,
};

use ash::{
    ext, khr,
    prelude::VkResult,
    vk::{self, Handle},
};
use parking_lot::Mutex;
use raw_window_handle::RawDisplayHandle;

use crate::{
    Instance, PhysicalDeviceFeatures, PhysicalDeviceInfo, Result,
    device::asdf::traits::ExternallyManagedObject,
    pipeline::pipeline_cache::PipelineCache,
    queue::{DeviceQueueInfos, DeviceQueues, Queue},
    sync::{self, BinarySemaphore, TimelineSemaphore},
};

bitflags::bitflags! {
    /// Capability flags for a queue family: graphics, async compute,
    /// transfer, and presentation support.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct QueueFlags: u32 {
        const GRAPHICS = 1 << 0;
        const ASYNC_COMPUTE = 1 << 1;
        const TRANSFER = 1 << 2;
        const PRESENT = 1 << 3;
        const NONE = 0;
        // NOTE(review): PRESENT is `1 << 3`, so this value is actually
        // GRAPHICS | TRANSFER, not GRAPHICS | PRESENT as the name suggests —
        // confirm whether `1 << 0 | 1 << 3` was intended.
        const PRESENT_GRAPHICS = 1 << 0 | 1 << 2;
    }
}

/// Drop guard owning the raw `ash::Device`: best-effort waits for the device
/// to go idle, then destroys it.
struct DeviceDrop(ash::Device);

impl Drop for DeviceDrop {
    fn drop(&mut self) {
        unsafe {
            // Result deliberately ignored: nothing useful can be done if the
            // wait fails during teardown.
            _ = self.0.device_wait_idle();
            self.0.destroy_device(None);
        }
    }
}

/// Extension-provided device function tables, loaded at device creation and
/// present only when the corresponding extension was enabled.
pub(crate) struct DeviceExtensions {
    pub(crate) debug_utils: ext::debug_utils::Device,
    // Loaded when `VK_KHR_swapchain` is enabled.
    pub(crate) swapchain: Option,
    // Loaded when `VK_EXT_mesh_shader` is enabled.
    #[allow(dead_code)]
    pub(crate) mesh_shader: Option,
}

// Shorthand for the gpu_allocator allocation type used throughout this module.
type GpuAllocation = gpu_allocator::vulkan::Allocation;

/// How memory for a resource should be obtained from the allocator.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AllocationStrategy {
    #[default]
    /// Let gpu_allocator manage the memory for this allocation, sub-allocating
    /// from larger blocks as needed.
    AllocatorManaged,
    /// Allocate a dedicated block of memory for this allocation. This is
    /// recommended for long-lived resources or resources with specific memory
    /// requirements.
    Dedicated,
}

/// Ownership wrapper around a GPU memory allocation.
#[derive(Debug)]
pub enum Allocation {
    // Uniquely owned allocation, destroyed through the DeviceObject wrapper.
    Owned(DeviceObject),
    // Reference-counted allocation shared between resources.
    Shared(Arc>),
    // Memory not tracked by our allocator.
    // NOTE(review): can't tell from here what produces `Unmanaged` values
    // (externally owned memory?) — verify against callers.
    Unmanaged,
}

impl Allocation {
    /// Borrows the underlying allocation, if any.
    pub(crate) fn allocation(&self) -> Option<&GpuAllocation> {
        match self {
            Allocation::Owned(obj) => Some(obj),
            Allocation::Shared(arc) => Some(arc.as_ref()),
            Allocation::Unmanaged => None,
        }
    }

    /// Mutably borrows the underlying allocation.
    ///
    /// For `Shared`, this succeeds only while the `Arc` has a single strong
    /// reference (`Arc::get_mut`); otherwise `None` is returned even though
    /// an allocation exists.
    pub(crate) fn allocation_mut(&mut self) -> Option<&mut GpuAllocation> {
        match self {
            Allocation::Owned(obj) => Some(obj),
            Allocation::Shared(arc) => Arc::get_mut(arc).map(DerefMut::deref_mut),
            Allocation::Unmanaged => None,
        }
    }
}

/// Shared state behind [`Device`]: the raw device, its queues, the allocator,
/// and the loaded extension tables.
pub struct DeviceInner {
    // gpu_allocator state; locked for every alloc/free.
    pub(crate) alloc2: Mutex,
    pub(crate) raw: ash::Device,
    pub(crate) adapter: PhysicalDeviceInfo,
    pub(crate) instance: Instance,
    pub(crate) queues: DeviceQueues,
    pub(crate) sync_threadpool: sync::SyncThreadpool,
    pub(crate) device_extensions: DeviceExtensions,
    #[allow(dead_code)]
    pub(crate) enabled_extensions: Vec<&'static CStr>,
    // Must remain the last field: struct fields drop in declaration order, so
    // the raw device is destroyed only after everything above is gone.
    _drop: DeviceDrop,
}

impl AsRef for DeviceInner {
    fn as_ref(&self) -> &DeviceInner {
        self
    }
}

impl core::fmt::Debug for DeviceInner {
    // Only the raw handle is printed; the full struct is large and opaque.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DeviceInner")
            .field("device", &self.raw.handle())
            .finish()
    }
}

/// Builds an `Extension` value from an ash extension module, using the
/// module's `NAME` and `SPEC_VERSION` constants, or an explicit version with
/// the `as $version` form.
#[macro_export]
macro_rules! make_extension {
    ($module:path) => {{
        use $module::{NAME as EXTENSION_NAME, SPEC_VERSION as EXTENSION_VERSION};
        $crate::device::Extension {
            name: EXTENSION_NAME,
            version: EXTENSION_VERSION,
        }
    }};
    ($module:path as $version:expr) => {{
        use $module::*;
        $crate::device::Extension {
            name: NAME,
            version: $version,
        }
    }};
}

/// A Vulkan extension name plus the minimum spec version being requested.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Extension<'a> {
    pub name: &'a CStr,
    pub version: u32,
}

impl<'a> std::hash::Hash for Extension<'a> {
    // Hashed by name only. This still satisfies the Hash/Eq contract (equal
    // values have equal names), it just ignores `version` for bucketing.
    fn hash(&self, state: &mut H) {
        self.name.hash(state);
    }
}

/// Enumerates instance extensions from the base loader and from each given
/// layer, merged into one list. Enumeration failures for individual sources
/// are silently skipped.
pub(crate) fn get_available_extensions(
    entry: &ash::Entry,
    layers: &[&CStr],
) -> Result> {
    unsafe {
        let extensions = core::iter::once(entry.enumerate_instance_extension_properties(None))
            .chain(
                layers
                    .iter()
                    .map(|&layer| entry.enumerate_instance_extension_properties(Some(layer))),
            )
            .filter_map(|result| result.ok())
            .flatten()
            .collect::>();
        Ok(extensions)
    }
}

/// returns a tuple of supported-or-enabled extensions and unsupported-and-requested extensions
pub(crate) fn get_extensions<'a>(
    entry: &ash::Entry,
    layers: &[&'a CStr],
    mut extensions: Vec>,
    display_handle: Option,
) -> Result<(HashSet>, HashSet>)> {
    let available_extensions = get_available_extensions(entry, layers)?;
    // (name, spec_version) pairs used only for the debug log below; entries
    // with non-UTF-8 names are dropped from the log, not from availability.
    let available_extension_names = available_extensions
        .iter()
        .filter_map(|ext| {
            Some((
                ext.extension_name_as_c_str().ok()?.to_str().ok()?,
                ext.spec_version,
            ))
        })
        .collect::>();
    tracing::debug!(
        "Available extensions: {:?}",
        available_extension_names.iter().collect::>()
    );
    let mut wsi_extensions = Vec::new();
    wsi_extensions.push(make_extension!(khr::surface));
    // taken from wgpu-hal/src/vulkan/instance.rs:
    //
    // we want to enable all the wsi extensions that are applicable to the
    // platform, even if the user didn't explicitly request them, or
    // supplied a different/no display handle, because we might later want
    // to create a surface for a different windowing system, and enabling
    // all the wsi extensions doesn't have any real downsides.
    // We don't notify the user if some of these extensions aren't available
    // (e.g. because wayland isn't supported on some unix system)
    if cfg!(all(
        unix,
        not(target_os = "android"),
        not(target_os = "macos")
    )) {
        wsi_extensions.push(make_extension!(khr::xlib_surface));
        wsi_extensions.push(make_extension!(khr::xcb_surface));
        wsi_extensions.push(make_extension!(khr::wayland_surface));
    }
    if cfg!(target_os = "windows") {
        wsi_extensions.push(make_extension!(khr::win32_surface));
    }
    if cfg!(target_os = "android") {
        wsi_extensions.push(make_extension!(khr::android_surface));
    }
    if cfg!(target_os = "macos") {
        wsi_extensions.push(make_extension!(ext::metal_surface));
        wsi_extensions.push(make_extension!(khr::portability_enumeration));
    }
    if cfg!(all(
        unix,
        not(target_vendor = "apple"),
        not(target_family = "wasm")
    )) {
        wsi_extensions.push(make_extension!(ext::acquire_drm_display));
        wsi_extensions.push(make_extension!(ext::direct_mode_display));
        wsi_extensions.push(make_extension!(khr::display));
    }
    // Predicate for `extract_if`: true (extracted) when the extension is
    // available; logs a warning and leaves it behind otherwise.
    let is_extension_available = |ext: &mut Extension| -> bool {
        if available_extensions
            .iter()
            .any(|inst_ext| inst_ext.extension_name_as_c_str() == Ok(ext.name))
        {
            true
        } else {
            tracing::warn!(
                "Extension {:?} v{} was requested but is not available",
                ext.name,
                ext.version
            );
            false
        }
    };
    // Move every available requested extension out of `extensions` into the
    // enabled set; unavailable ones stay behind in `extensions`.
    let mut enabled_extensions = extensions
        .extract_if(.., is_extension_available)
        .collect::>();
    enabled_extensions.extend(wsi_extensions.extract_if(.., is_extension_available));
    // if a display handle is provided, ensure the required WSI extensions are present
    if let Some(display_handle) = display_handle {
        let mut required_extensions = ash_window::enumerate_required_extensions(display_handle)?
            .iter()
            .map(|&p| Extension {
                // NOTE(review): assumes the pointers returned by ash_window
                // are nul-terminated strings with a sufficient lifetime —
                // confirm against `enumerate_required_extensions`' contract.
                name: unsafe { CStr::from_ptr(p) },
                version: 0,
            })
            // filter out extensions that are already enabled
            .filter(|ext| {
                !enabled_extensions
                    .iter()
                    .any(|enabled| enabled.name == ext.name)
            })
            .collect::>();
        // filter out extensions that aren't available, and log a warning for them
        let display_extensions = required_extensions.extract_if(.., is_extension_available);
        enabled_extensions.extend(display_extensions);
        // Whatever remains is required by the display yet unavailable; it is
        // reported through the unsupported set below.
        extensions.extend(required_extensions);
    }
    // all extensions remaining in `extensions` at this point are unsupported,
    // and were requested by the user or are required by the display handle
    let unsupported_extensions = HashSet::from_iter(extensions);
    let out_extensions = HashSet::from_iter(enabled_extensions);
    Ok((out_extensions, unsupported_extensions))
}

/// returns a list of enabled, or a tuple of enabled and unsupported but requested layers.
pub(crate) fn get_layers<'a>(
    entry: &ash::Entry,
    wants_layers: Vec<&'a CStr>,
) -> core::result::Result, (Vec<&'a CStr>, Vec<&'a CStr>)> {
    unsafe {
        // If enumeration itself fails, report every requested layer as
        // unsupported rather than erroring out.
        let Ok(available_layers) = entry.enumerate_instance_layer_properties() else {
            return Err((vec![], wants_layers));
        };
        let Ok(available_layer_names) = available_layers
            .iter()
            .map(|layer| layer.layer_name_as_c_str())
            .collect::, _>>()
        else {
            return Err((vec![], wants_layers));
        };
        tracing::debug!(
            "Available layers: {:?}",
            available_layer_names
                .iter()
                .map(|s| s.to_str().unwrap_or(""))
                .collect::>()
        );
        // Partition the requested layers into supported / unsupported.
        let mut enabled_layers = Vec::new();
        let mut unsupported_layers = Vec::new();
        for layer in wants_layers {
            if available_layer_names.contains(&layer) {
                enabled_layers.push(layer);
            } else {
                unsupported_layers.push(layer);
            }
        }
        if !unsupported_layers.is_empty() {
            Err((enabled_layers, unsupported_layers))
        } else {
            Ok(enabled_layers)
        }
    }
}

impl PhysicalDeviceInfo {
    /// Creates the logical device plus everything bundled into [`Device`]:
    /// queues, extension function tables, the gpu_allocator instance, and the
    /// shared pools.
    pub fn create_logical_device(
        self,
        instance: &Instance,
        extensions: &[Extension<'static>],
        mut features: PhysicalDeviceFeatures,
        display_handle: Option,
    ) -> Result {
        let queue_infos =
            DeviceQueueInfos::select_queue_families(instance, &self, display_handle)?;
        let queue_create_infos = queue_infos.into_create_infos();
        // Drops requested extensions the physical device doesn't support
        // (a warning is logged for each).
        let extensions = Self::required_extensions(&self, extensions);
        let create_info = vk::DeviceCreateInfo::default()
            .queue_create_infos(&queue_create_infos)
            .enabled_extension_names(&extensions);
        // Chains the requested feature structs onto the create-info pNext.
        let create_info = features.push_to_device_create_info(create_info);
        let device = unsafe {
            instance
                .inner
                .raw
                .create_device(self.pdev, &create_info, None)?
        };
        let device_queues = queue_infos.retrieve_queues(&device);
        // The pointers come from `required_extensions`, which only pushes
        // pointers to `'static` extension-name constants.
        let enabled_extensions = extensions
            .into_iter()
            .map(|ptr| unsafe { CStr::from_ptr(ptr) })
            .collect::>();
        // Load extension function tables only for extensions actually enabled.
        let device_extensions = DeviceExtensions {
            debug_utils: ext::debug_utils::Device::new(&instance.inner.raw, &device),
            swapchain: if enabled_extensions.contains(&khr::swapchain::NAME) {
                Some(khr::swapchain::Device::new(&instance.inner.raw, &device))
            } else {
                None
            },
            mesh_shader: if enabled_extensions.contains(&ext::mesh_shader::NAME) {
                Some(ext::mesh_shader::Device::new(&instance.inner.raw, &device))
            } else {
                None
            },
        };
        let alloc2 =
            gpu_allocator::vulkan::Allocator::new(&gpu_allocator::vulkan::AllocatorCreateDesc {
                instance: instance.inner.raw.clone(),
                device: device.clone(),
                physical_device: self.pdev,
                debug_settings: Default::default(),
                buffer_device_address: false,
                allocation_sizes: {
                    const MB: u64 = 1024 * 1024;
                    // Initial memblock sizes of 8 MiB / 64 MiB (see
                    // `AllocationSizes::new`), both capped at 256 MiB.
                    gpu_allocator::AllocationSizes::new(8 * MB, 64 * MB)
                        .with_max_host_memblock_size(256 * MB)
                        .with_max_device_memblock_size(256 * MB)
                },
            })?;
        let inner = DeviceInner {
            raw: device.clone(),
            alloc2: Mutex::new(alloc2),
            instance: instance.clone(),
            adapter: self,
            queues: device_queues,
            device_extensions,
            enabled_extensions,
            sync_threadpool: sync::SyncThreadpool::new(),
            // Owning copy of the raw device used for final destruction.
            _drop: DeviceDrop(device),
        };
        let shared = Arc::new(inner);
        Ok(Device {
            pools: Arc::new(DevicePools::new(shared.clone())),
            shared,
        })
    }

    /// Filters `requested_extensions` down to those the physical device
    /// supports at the requested version or newer, always including
    /// `VK_KHR_swapchain`, returned as raw name pointers for
    /// `vk::DeviceCreateInfo`.
    fn required_extensions(&self, requested_extensions: &[Extension<'static>]) -> Vec<*const i8> {
        // Swapchain support is unconditionally requested.
        // NOTE(review): this assumes every targeted device supports
        // VK_KHR_swapchain; device creation will fail on one that doesn't.
        let mut extensions = vec![khr::swapchain::NAME.as_ptr()];
        for ext in requested_extensions {
            if self
                .properties
                .supported_extensions
                .iter()
                .any(|supported| {
                    supported.extension_name_as_c_str() == Ok(ext.name)
                        && supported.spec_version >= ext.version
                })
            {
                extensions.push(ext.name.as_ptr());
            } else {
                // Unsupported extensions are skipped with a warning rather
                // than failing device creation.
                tracing::warn!(
                    "Physical device {:?} does not support required extension {:?}",
                    self.pdev,
                    ext.name
                );
            }
        }
        extensions
    }
}

/// Per-device pools of reusable sync objects plus the pipeline cache.
#[derive(Debug)]
pub(crate) struct DevicePools {
    pub(crate) pipeline_cache: asdf::DeviceObject>,
    pub(crate) fences: Arc>,
    pub(crate) binary_semaphores: Pool,
    pub(crate) timeline_semaphores: Pool,
}

impl AsRef for DevicePools {
    fn as_ref(&self) -> &DevicePools {
        self
    }
}

impl DevicePools {
    /// Creates empty pools and eagerly builds the pipeline cache.
    pub fn new(device: Arc) -> Self {
        Self {
            fences: Arc::new(Pool::new(device.clone())),
            binary_semaphores: Pool::new(device.clone()),
            timeline_semaphores: Pool::new(device.clone()),
            pipeline_cache: asdf::DeviceObject::new(
                device.clone(),
                // NOTE(review): `unwrap` makes pool construction panic when
                // the pipeline cache can't be created — consider propagating.
                PipelineCache::new(&device.raw, &device.adapter).unwrap(),
            ),
        }
    }
}

/// Cheaply-cloneable handle to a logical device; equality is identity of the
/// shared inner state.
#[derive(Clone, Debug)]
pub struct Device {
    pub(crate) shared: Arc,
    pub(crate) pools: Arc,
}

impl PartialEq for Device {
    fn eq(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.shared, &other.shared)
    }
}
impl Eq for Device {}

impl core::ops::Deref for Device {
    type Target = DeviceInner;
    fn deref(&self) -> &Self::Target {
        &self.shared
    }
}

// Forward AsRef through Deref so `Device` can stand in wherever a reference
// to (something reachable from) `DeviceInner` is wanted.
impl AsRef for Device
where
    T: ?Sized,
    ::Target: AsRef,
{
    fn as_ref(&self) -> &T {
        self.deref().as_ref()
    }
}

impl DeviceInner {
    pub fn sync_threadpool(&self) -> &sync::SyncThreadpool {
        &self.sync_threadpool
    }
    /// The raw `ash` device.
    pub fn dev(&self) -> &ash::Device {
        &self.raw
    }
    pub fn instance(&self) -> &Instance {
        &self.instance
    }
    pub fn queues(&self) -> &DeviceQueues {
        &self.queues
    }
    /// The underlying physical-device handle.
    pub fn phy(&self) -> vk::PhysicalDevice {
        self.adapter.pdev
    }
    pub fn features(&self) -> &crate::PhysicalDeviceFeatures {
        &self.adapter.features
    }
    pub(crate) fn properties(&self) -> &crate::PhysicalDeviceProperties {
        &self.adapter.properties
    }
    pub fn physical_device(&self) -> &PhysicalDeviceInfo {
        &self.adapter
    }
    pub fn main_queue(&self) -> &Queue {
        self.queues.graphics()
    }
    pub fn compute_queue(&self) -> &Queue {
        self.queues.compute()
    }
    pub fn transfer_queue(&self) -> &Queue {
        self.queues.transfer()
    }
    /// # Safety
    ///
    /// The caller must ensure that the queues aren't already locked when calling this function.
    pub unsafe fn lock_queues(&self) {
        unsafe {
            self.queues.lock();
        }
    }
    /// # Safety
    ///
    /// The caller must have acquired and have logical ownership of the lock on the queues.
    pub unsafe fn unlock_queues(&self) {
        unsafe {
            self.queues.unlock();
        }
    }
    /// Locks a single queue and blocks until it is idle.
    pub fn wait_queue_idle(&self, queue: &Queue) -> VkResult<()> {
        tracing::warn!("locking queue {queue:?} and waiting for idle");
        queue.with_locked(|q| unsafe { self.raw.queue_wait_idle(q.raw) })?;
        tracing::warn!("finished waiting: unlocking queue {queue:?}.");
        Ok(())
    }
    /// Locks all queues and blocks until the whole device is idle.
    pub fn wait_idle(&self) -> VkResult<()> {
        tracing::warn!("locking all queues and waiting for device to idle");
        unsafe {
            self.lock_queues();
            // NOTE(review): if `device_wait_idle` errors, `?` returns early
            // and the queues stay locked — confirm whether that's intended.
            self.raw.device_wait_idle()?;
            self.unlock_queues();
        }
        tracing::warn!("finished waiting: unlocking all queues.");
        Ok(())
    }
    /// # Safety
    ///
    /// This method inherits the safety contract from [`vkSetDebugUtilsObjectName`]. In particular:
    ///
    /// - `object` must be a valid handle for one of the following:
    ///   - An instance-level object from the same instance as this device.
    ///   - A physical-device-level object that descends from the same physical device as this
    ///     device.
    ///   - A device-level object that descends from this device.
    /// - `object` must be externally synchronized—only the calling thread should access it during
    ///   this call.
///
    /// [`vkSetDebugUtilsObjectName`]: https://registry.khronos.org/vulkan/specs/latest/man/html/vkSetDebugUtilsObjectNameEXT.html
    pub unsafe fn debug_name_object(&self, object: T, name: &str) {
        // avoid heap allocation for short names
        let mut buffer = [0u8; 64];
        let buffer_vec: Vec;
        let name_bytes = if name.is_empty() {
            &[0]
        } else if name.len() < buffer.len() {
            // Short path: copy into the zeroed stack buffer; the remaining
            // zero bytes supply the nul terminator.
            buffer[..name.len()].copy_from_slice(name.as_bytes());
            &buffer[..]
        } else {
            // Long path: heap-allocate name + trailing nul.
            buffer_vec = name
                .as_bytes()
                .iter()
                .cloned()
                .chain(std::iter::once(0))
                .collect();
            &buffer_vec
        };
        // NOTE(review): `from_bytes_until_nul` stops at the *first* nul, so a
        // name containing an interior nul is silently truncated rather than
        // rejected; the panic below can only fire when no nul exists at all,
        // which the branches above prevent. Also note `expect` does not
        // interpolate — the `{name_bytes:?}` in its message prints literally.
        let name = CStr::from_bytes_until_nul(name_bytes)
            .inspect_err(|_| {
                panic!(
                    "debug name {:?} contains interior nul byte, which is not allowed",
                    name_bytes
                )
            })
            .expect("{name_bytes:?} there is always a nul terminator because we added one");
        unsafe {
            // Failing to set a debug name is non-fatal; the result is ignored.
            _ = self
                .device_extensions
                .debug_utils
                .set_debug_utils_object_name(
                    &vk::DebugUtilsObjectNameInfoEXT::default()
                        .object_handle(object)
                        .object_name(name),
                );
        }
    }
}

/// Pairs a raw Vulkan handle with the [`Device`] that owns it; debug builds
/// additionally remember the debug name assigned to the handle.
#[derive(Clone)]
pub struct DeviceOwnedDebugObject {
    pub(crate) device: Device,
    pub(crate) object: T,
    #[cfg(debug_assertions)]
    name: Option>,
}

impl Eq for DeviceOwnedDebugObject {}
impl PartialEq for DeviceOwnedDebugObject {
    // Equal when both the owning device (by identity) and the handle match.
    fn eq(&self, other: &Self) -> bool {
        std::sync::Arc::ptr_eq(&self.device.shared, &other.device.shared)
            && self.object == other.object
    }
}

impl std::fmt::Debug for DeviceOwnedDebugObject {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut fmt = f.debug_struct(core::any::type_name::());
        // Device and object handles are printed as raw hex values.
        fmt.field_with("device", |f| {
            write!(f, "0x{:x}", self.device.raw.handle().as_raw())
        })
        .field_with("handle", |f| write!(f, "0x{:x}", &self.object.as_raw()));
        #[cfg(debug_assertions)]
        {
            fmt.field("name", &self.name);
        }
        fmt.finish()
    }
}

impl DeviceOwnedDebugObject {
    /// Wraps `object`, first assigning `name` as its Vulkan debug name when
    /// one is given. The name is stored only in debug builds.
    pub fn new(
        device: crate::Device,
        object: T,
        name: Option>,
    ) -> ash::prelude::VkResult
    where
        T: vk::Handle + Copy,
    {
        if let Some(name) = name.as_ref() {
            // SAFETY-relevant: the handle is expected to belong to `device`;
            // see `debug_name_object`'s safety contract.
            unsafe {
                device.debug_name_object(object, name);
            }
        }
        Ok(Self {
            device,
            object,
            #[cfg(debug_assertions)]
            name,
        })
    }
    pub fn dev(&self) -> &crate::Device {
        &self.device
    }
    pub fn handle(&self) -> T
    where
        T: Copy,
    {
        self.object
    }
}

pub use asdf::{DeviceObject, InnerDeviceObject};

/// Destruction hook for device-owned handles destroyed through `&mut`.
pub trait DeviceHandle {
    /// # Safety
    /// The caller must ensure this function is only called once for a given object.
    unsafe fn destroy(&mut self, device: &Device);
}

// A Mutex-wrapped object is destroyed by destroying its contents.
impl> ExternallyManagedObject for Mutex {
    unsafe fn destroy(self, owner: &O) {
        // Safety guarantee is upheld by the caller.
        unsafe { self.into_inner().destroy(owner) };
    }
}

impl> ExternallyManagedObject for vk::Buffer {
    unsafe fn destroy(self, device: &T) {
        unsafe {
            device.as_ref().raw.destroy_buffer(self, None);
        }
    }
}

impl> ExternallyManagedObject for vk::SwapchainKHR {
    unsafe fn destroy(self, device: &T) {
        unsafe {
            // No-op when the swapchain extension table isn't loaded — a
            // swapchain can't have been created in that case anyway.
            if let Some(swapchain) = device.as_ref().device_extensions.swapchain.as_ref() {
                swapchain.destroy_swapchain(self, None)
            }
        }
    }
}

/// Read-only access to a handle and the device that owns it.
pub trait DeviceOwned {
    fn device(&self) -> &Device;
    fn handle(&self) -> T;
}

/// Objects a [`Pool`] can construct on demand when its free list is empty.
pub trait Pooled: Sized {
    fn create_from_pool(pool: &Pool) -> Result;
}

/// A simple mutex-guarded free list of reusable objects for one device.
#[derive(Debug)]
pub struct Pool {
    pub(crate) pool: Mutex>,
    pub(crate) device: Arc,
}

impl Pool {
    /// Returns an object to the free list.
    pub fn push(&self, item: T) {
        self.pool.lock().push(item);
    }
    pub fn new(device: Arc) -> Self {
        Self {
            pool: Mutex::new(Vec::new()),
            device,
        }
    }
    pub fn pop(&self) -> Option {
        self.pool.lock().pop()
    }
}

impl AsRef> for Pool {
    fn as_ref(&self) -> &Pool {
        self
    }
}

// An object checked out of a pool; return/destruction is routed through the
// ExternallyManagedObject machinery in `asdf`.
pub type PoolObject>> = asdf::ExternallyManagedObject;

impl Pool {
    /// Pops a pooled object, or creates a fresh one when the pool is empty.
    pub fn get(&self) -> Result {
        let item = if let Some(item) = self.pool.lock().pop() {
            item
        } else {
            T::create_from_pool(self)?
        };
        Ok(item)
    }
    /// Like [`Self::get`], additionally assigning a debug name in debug
    /// builds (the name is ignored in release builds).
    pub fn get_debug_named(&self, name: Option>>) -> Result
    where
        T: asdf::traits::DebugNameable,
    {
        let obj = self.get()?;
        #[cfg(debug_assertions)]
        {
            let name = name.map(Into::into).unwrap_or_default();
            ::debug_name(&obj, &self.device, &name);
        }
        Ok(obj)
    }
}

// Macro for helping create and destroy Vulkan objects which are owned by a device.
#[macro_export] macro_rules! define_device_owned_handle { ($(#[$attr:meta])* $ty_vis:vis $ty:ident($handle:ty) { $($(#[$field_attr:meta])* $field_vis:vis $field_name:ident : $field_ty:ty),* $(,)? } $(=> |$this:ident| $dtor:stmt)?) => { $(#[$attr])* $ty_vis struct $ty { inner: $crate::device::DeviceOwnedDebugObject<$handle>, $( $(#[$field_attr])* $field_vis $field_name: $field_ty, )* } impl $crate::device::DeviceOwned<$handle> for $ty { fn device(&self) -> &$crate::device::Device { self.inner.dev() } fn handle(&self) -> $handle { self.inner.handle() } } impl $ty { #[allow(clippy::too_many_arguments, reason = "This function is generated by a macro")] fn construct( device: $crate::device::Device, handle: $handle, name: Option<::std::borrow::Cow<'static, str>>, $($field_name: $field_ty,)* ) -> ::ash::prelude::VkResult { Ok(Self { inner: $crate::device::DeviceOwnedDebugObject::new( device, handle, name, )?, $($field_name,)* }) } } $( impl Drop for $ty { fn drop(&mut self) { #[allow(unused_mut)] let mut $this = self; $dtor } } )? }; } // This module is an experiment in a more generic way to manage device-owned resources. // #[cfg(false)] pub(crate) mod asdf { use std::{ mem::{ManuallyDrop, MaybeUninit}, ops::{Deref, DerefMut}, sync::Arc, }; use ash::vk; use crate::{device::DeviceInner, util::DebugName}; pub mod traits { /// A trait describing an object owned by some manager-type, which is /// responsible for destroying it. pub trait ExternallyManagedObject { /// # Safety /// The caller must ensure this function is only called once for a given object. unsafe fn destroy(self, owner: &T); } /// A trait describing an object which can have a debug name assigned to it. pub trait DebugNameable { fn debug_name(&self, device: &super::DeviceInner, name: &str); } } /// Wrapper for types which are owned by another type `O`, which is responsible for destruction. 
#[derive(Debug)] pub struct ExternallyManagedObject, O> { inner: ManuallyDrop, owner: O, } impl, O> Deref for ExternallyManagedObject { type Target = T; fn deref(&self) -> &Self::Target { &self.inner } } impl, O> DerefMut for ExternallyManagedObject { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.inner } } impl, O> ExternallyManagedObject { pub fn new(inner: T, owner: O) -> Self { Self { inner: ManuallyDrop::new(inner), owner, } } pub fn owner(&self) -> &O { &self.owner } pub fn map_inner(self, f: impl FnOnce(T) -> U) -> ExternallyManagedObject where U: traits::ExternallyManagedObject, { unsafe { let mut this = MaybeUninit::new(self); let inner = ManuallyDrop::take(&mut this.assume_init_mut().inner); let owner = core::ptr::read(&raw const this.assume_init_mut().owner); let new_inner = f(inner); ExternallyManagedObject { inner: ManuallyDrop::new(new_inner), owner, } } } pub fn map_owner(self, f: impl FnOnce(O) -> U) -> ExternallyManagedObject where T: traits::ExternallyManagedObject, { unsafe { let mut this = MaybeUninit::new(self); let inner = ManuallyDrop::take(&mut this.assume_init_mut().inner); // get the old owner without calling `Self::drop` let owner = core::ptr::read(&raw const this.assume_init_mut().owner); ExternallyManagedObject { inner: ManuallyDrop::new(inner), owner: f(owner), } } } } impl Drop for ExternallyManagedObject where T: traits::ExternallyManagedObject, { fn drop(&mut self) { unsafe { let inner = ManuallyDrop::take(&mut self.inner); inner.destroy(&self.owner); } } } pub type InnerDeviceObject = DeviceObject>; /// A wrapper for vulkan types which are owned by the device, taking care of destruction. 
#[derive(Debug)]
pub struct DeviceObject<
    T: traits::ExternallyManagedObject,
    O: AsRef = super::Device,
> {
    inner: ExternallyManagedObject,
    // Debug name kept for introspection via `name()`.
    #[allow(dead_code)]
    name: Option,
}

impl<
    T: traits::ExternallyManagedObject + traits::DebugNameable,
    O: AsRef,
> DeviceObject
{
    /// Wraps `inner`, first assigning a Vulkan debug name when one is given.
    pub fn new_debug_named(owner: O, inner: T, name: Option>) -> Self {
        let name = name.map(Into::into);
        if let Some(ref name) = name {
            traits::DebugNameable::debug_name(&inner, owner.as_ref(), name);
        }
        let obj = ExternallyManagedObject::new(inner, owner);
        Self { inner: obj, name }
    }
}

impl, O: AsRef> DeviceObject {
    /// Wraps `inner` without assigning a debug name.
    pub fn new(owner: O, inner: T) -> Self {
        let inner = ExternallyManagedObject::new(inner, owner);
        Self { inner, name: None }
    }
    /// Like `new_debug_named`, but the debug-nameable value is projected out
    /// of `inner` by `debug_namable` (for wrappers that aren't themselves
    /// `DebugNameable`).
    pub fn new_debug_named_with(
        owner: O,
        inner: T,
        name: Option>,
        debug_namable: impl FnOnce(&T) -> D,
    ) -> Self
    where
        D: traits::DebugNameable,
    {
        let name = name.map(Into::into);
        if let Some(ref name) = name {
            traits::DebugNameable::debug_name(&debug_namable(&inner), owner.as_ref(), name);
        }
        let obj = ExternallyManagedObject::new(inner, owner);
        Self { inner: obj, name }
    }
    pub fn device(&self) -> &O {
        self.inner.owner()
    }
    pub fn name(&self) -> Option<&str> {
        self.name.as_ref().map(|n| &**n)
    }
    /// Maps the wrapped object, preserving owner and name.
    pub fn map_inner(self, f: impl FnOnce(T) -> U) -> DeviceObject
    where
        U: traits::ExternallyManagedObject,
    {
        DeviceObject {
            inner: self.inner.map_inner(f),
            name: self.name,
        }
    }
    /// Maps the owner, preserving the wrapped object and name.
    pub fn map_owner(self, f: impl FnOnce(O) -> U) -> DeviceObject
    where
        T: traits::ExternallyManagedObject,
        U: AsRef,
    {
        DeviceObject {
            inner: self.inner.map_owner(f),
            name: self.name,
        }
    }
}

impl Deref for DeviceObject
where
    T: traits::ExternallyManagedObject,
    O: AsRef,
{
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &*self.inner
    }
}
impl DerefMut for DeviceObject
where
    T: traits::ExternallyManagedObject,
    O: AsRef,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.inner
    }
}

// Concrete destroy/debug-name implementations for the wrappers above.
mod impls {
    use crate::device::{DeviceInner, DevicePools, GpuAllocation};

    use super::*;

    impl> traits::ExternallyManagedObject for vk::Semaphore {
        unsafe fn destroy(self, device: &T) {
            unsafe {
                device.as_ref().raw.destroy_semaphore(self, None);
            }
        }
    }

    impl> traits::ExternallyManagedObject for GpuAllocation {
        unsafe fn destroy(self, device: &T) {
            // Errors from freeing are deliberately ignored.
            _ = device.as_ref().alloc2.lock().free(self);
        }
    }

    // Semaphores owned by the pools are recycled, not destroyed.
    impl traits::ExternallyManagedObject for vk::Semaphore {
        unsafe fn destroy(self, owner: &DevicePools) {
            owner
                .binary_semaphores
                .push(crate::sync::BinarySemaphore(self));
        }
    }

    // Any copyable Vulkan handle can be debug-named via the device.
    impl traits::DebugNameable for T
    where
        T: vk::Handle + Copy,
    {
        fn debug_name(&self, device: &super::DeviceInner, name: &str) {
            unsafe {
                device.debug_name_object(*self, name);
            }
        }
    }
}

// Compile-time usage examples for the wrappers above; never executed
// (`summon` panics immediately).
#[allow(dead_code)]
#[cfg(test)]
fn asdf() {
    use crate::device::{DevicePools, GpuAllocation};
    fn summon() -> T {
        unimplemented!()
    }

    let _inner_ref: DeviceObject = DeviceObject::new_debug_named(
        summon::<&DeviceInner>(),
        summon::(),
        Some("my semaphore"),
    );
    let _device_owned: DeviceObject = DeviceObject::new_debug_named(
        summon::(),
        summon::(),
        Some("my other semaphore"),
    );
    let _allocation: DeviceObject> = DeviceObject::new(
        summon::>(),
        summon::(),
    );
    let _pool_owned: ExternallyManagedObject =
        ExternallyManagedObject::new(summon::(), summon::());
}
}