// vidya/crates/renderer/src/device.rs
// Last modified: 2026-04-03 03:13:51 +02:00
use std::{
borrow::Cow,
collections::{BTreeSet, HashMap, HashSet},
ffi::CStr,
mem::ManuallyDrop,
ops::{Deref, DerefMut},
sync::Arc,
};
use ash::{
ext, khr,
prelude::VkResult,
vk::{self, Handle},
};
use parking_lot::Mutex;
use raw_window_handle::RawDisplayHandle;
use crate::{
Instance, PhysicalDeviceFeatures, PhysicalDeviceInfo, Result,
queue::{DeviceQueueInfos, DeviceQueues, Queue},
sync::{self, BinarySemaphore, TimelineSemaphore},
};
bitflags::bitflags! {
    /// Capabilities requested for / reported by a device queue.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct QueueFlags: u32 {
        const GRAPHICS = 1 << 0;
        const ASYNC_COMPUTE = 1 << 1;
        const TRANSFER = 1 << 2;
        const PRESENT = 1 << 3;
        const NONE = 0;
        // GRAPHICS | PRESENT. Previously this was `1 << 0 | 1 << 2`, which
        // combined GRAPHICS with TRANSFER (bit 2) instead of PRESENT (bit 3),
        // contradicting the constant's name.
        const PRESENT_GRAPHICS = 1 << 0 | 1 << 3;
    }
}
/// Owns the raw `ash::Device` and destroys it on drop, waiting for the device
/// to go idle first so no submitted work is still executing.
struct DeviceDrop(ash::Device);
impl Drop for DeviceDrop {
    fn drop(&mut self) {
        // SAFETY: `DeviceInner` declares this wrapper as its last field, so it
        // drops after the allocator, queues and extension tables — nothing
        // else can still be using the device at this point.
        unsafe {
            // Best-effort idle wait; the result is deliberately discarded
            // because there is nothing useful to do with a failure here.
            _ = self.0.device_wait_idle();
            self.0.destroy_device(None);
        }
    }
}
/// Extension-provided device-level function tables, loaded at device creation.
pub(crate) struct DeviceExtensions {
    pub(crate) debug_utils: ext::debug_utils::Device,
    /// Present only when `VK_KHR_swapchain` was enabled for the device.
    pub(crate) swapchain: Option<khr::swapchain::Device>,
    /// Present only when `VK_EXT_mesh_shader` was enabled for the device.
    #[allow(dead_code)]
    pub(crate) mesh_shader: Option<ext::mesh_shader::Device>,
}
type GpuAllocation = gpu_allocator::vulkan::Allocation;
impl DeviceHandle for GpuAllocation {
    /// Hand the allocation back to the device's allocator. `self` is left as
    /// a default (empty) allocation so it remains valid after the free.
    unsafe fn destroy(&mut self, device: &Device) {
        // `mem::take` moves the allocation out, leaving `Default` behind —
        // equivalent to the manual swap-with-default dance.
        let allocation = std::mem::take(self);
        _ = device.alloc2.lock().free(allocation);
    }
}
/// Strategy handed to the GPU allocator when backing memory is requested.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AllocationStrategy {
    #[default]
    /// Let gpu_allocator manage the memory for this allocation, sub-allocating
    /// from larger blocks as needed.
    AllocatorManaged,
    /// Allocate a dedicated block of memory for this allocation. This is
    /// recommended for long-lived resources or resources with specific memory
    /// requirements.
    Dedicated,
}
/// How a resource's backing GPU memory is tracked.
#[derive(Debug)]
pub enum Allocation {
    /// Exclusively owned allocation; freed when this value is dropped.
    Owned(DeviceObject<GpuAllocation>),
    /// Reference-counted allocation shared between several resources.
    Shared(Arc<DeviceObject<GpuAllocation>>),
    /// Memory not tracked here; nothing is freed on drop.
    Unmanaged,
}
impl Allocation {
    /// Borrow the underlying GPU allocation, if this value manages one.
    pub(crate) fn allocation(&self) -> Option<&GpuAllocation> {
        match self {
            Self::Owned(obj) => Some(obj),
            Self::Shared(arc) => Some(arc.as_ref()),
            Self::Unmanaged => None,
        }
    }
    /// Mutably borrow the underlying allocation. For `Shared`, this succeeds
    /// only while the `Arc` holds a single strong reference.
    pub(crate) fn allocation_mut(&mut self) -> Option<&mut GpuAllocation> {
        match self {
            Self::Owned(obj) => Some(&mut obj.inner),
            Self::Shared(arc) => Arc::get_mut(arc).map(|obj| &mut obj.inner),
            Self::Unmanaged => None,
        }
    }
}
/// Shared state behind a [`Device`]; dropped once the last clone goes away.
pub struct DeviceInner {
    /// GPU memory allocator, mutex-guarded so allocations can be made from
    /// multiple threads.
    pub(crate) alloc2: Mutex<gpu_allocator::vulkan::Allocator>,
    /// Raw `ash` device function table.
    pub(crate) raw: ash::Device,
    /// The physical device this logical device was created from.
    pub(crate) adapter: PhysicalDeviceInfo,
    pub(crate) instance: Instance,
    pub(crate) queues: DeviceQueues,
    pub(crate) sync_threadpool: sync::SyncThreadpool,
    pub(crate) device_extensions: DeviceExtensions,
    #[allow(dead_code)]
    pub(crate) enabled_extensions: Vec<&'static CStr>,
    // NOTE: struct fields drop in declaration order, so `_drop` must stay the
    // last field — it waits for idle and destroys the `VkDevice` only after
    // the allocator, extension tables and queues above have been dropped.
    _drop: DeviceDrop,
}
impl core::fmt::Debug for DeviceInner {
    /// Print only the raw device handle; the other fields are omitted.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("DeviceInner");
        dbg.field("device", &self.raw.handle());
        dbg.finish()
    }
}
/// Build an [`Extension`](crate::device::Extension) descriptor from an ash
/// extension module (e.g. `khr::swapchain`), using the module's `NAME` and
/// either its `SPEC_VERSION` constant or an explicitly supplied version.
#[macro_export]
macro_rules! make_extension {
    // `make_extension!(khr::swapchain)` — take the module's own SPEC_VERSION.
    ($module:path) => {{
        use $module::{NAME as EXTENSION_NAME, SPEC_VERSION as EXTENSION_VERSION};
        $crate::device::Extension {
            name: EXTENSION_NAME,
            version: EXTENSION_VERSION,
        }
    }};
    // `make_extension!(khr::swapchain as 42)` — override the version.
    ($module:path as $version:expr) => {{
        use $module::*;
        $crate::device::Extension {
            name: NAME,
            version: $version,
        }
    }};
}
/// A Vulkan extension name together with the minimum spec version requested.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Extension<'a> {
    pub name: &'a CStr,
    pub version: u32,
}
impl<'a> std::hash::Hash for Extension<'a> {
    // Hash by name only. This stays consistent with the derived `PartialEq`
    // (equal values necessarily have equal names, hence equal hashes); it
    // merely makes extensions differing only in version collide, which is
    // allowed and harmless.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.name.hash(state);
    }
}
/// Collect every instance extension advertised by the Vulkan loader itself
/// and by each of the given layers. Enumeration failures are skipped silently;
/// the function itself never fails.
pub(crate) fn get_available_extensions(
    entry: &ash::Entry,
    layers: &[&CStr],
) -> Result<Vec<ash::vk::ExtensionProperties>> {
    unsafe {
        let mut extensions: Vec<ash::vk::ExtensionProperties> = Vec::new();
        // Loader-provided extensions first (layer = None), …
        if let Ok(props) = entry.enumerate_instance_extension_properties(None) {
            extensions.extend(props);
        }
        // …then everything each requested layer adds on top.
        for &layer in layers {
            if let Ok(props) = entry.enumerate_instance_extension_properties(Some(layer)) {
                extensions.extend(props);
            }
        }
        Ok(extensions)
    }
}
/// returns a tuple of supported-or-enabled extensions and unsupported-and-requested extensions
///
/// Matches the requested `extensions` — plus platform WSI extensions and,
/// when `display_handle` is given, whatever `ash_window` reports that display
/// requires — against the extensions the loader/layers actually advertise.
pub(crate) fn get_extensions<'a>(
    entry: &ash::Entry,
    layers: &[&'a CStr],
    mut extensions: Vec<Extension<'a>>,
    display_handle: Option<RawDisplayHandle>,
) -> Result<(HashSet<Extension<'a>>, HashSet<Extension<'a>>)> {
    let available_extensions = get_available_extensions(entry, layers)?;
    // Name -> advertised spec version; only used for the debug log below.
    let available_extension_names = available_extensions
        .iter()
        .filter_map(|ext| {
            Some((
                ext.extension_name_as_c_str().ok()?.to_str().ok()?,
                ext.spec_version,
            ))
        })
        .collect::<HashMap<_, _>>();
    tracing::debug!(
        "Available extensions: {:?}",
        available_extension_names.iter().collect::<Vec<_>>()
    );
    let mut wsi_extensions = Vec::new();
    wsi_extensions.push(make_extension!(khr::surface));
    // taken from wgpu-hal/src/vulkan/instance.rs:
    //
    // we want to enable all the wsi extensions that are applicable to the
    // platform, even if the user didn't explicitly request them, or
    // supplied a different/no display handle, because we might later want
    // to create a surface for a different windowing system, and enabling
    // all the wsi extensions doesn't have any real downsides.
    // We don't notify the user if some of these extensions aren't available
    // (e.g. because wayland isn't supported on some unix system)
    if cfg!(all(
        unix,
        not(target_os = "android"),
        not(target_os = "macos")
    )) {
        wsi_extensions.push(make_extension!(khr::xlib_surface));
        wsi_extensions.push(make_extension!(khr::xcb_surface));
        wsi_extensions.push(make_extension!(khr::wayland_surface));
    }
    if cfg!(target_os = "windows") {
        wsi_extensions.push(make_extension!(khr::win32_surface));
    }
    if cfg!(target_os = "android") {
        wsi_extensions.push(make_extension!(khr::android_surface));
    }
    if cfg!(target_os = "macos") {
        wsi_extensions.push(make_extension!(ext::metal_surface));
        wsi_extensions.push(make_extension!(khr::portability_enumeration));
    }
    if cfg!(all(
        unix,
        not(target_vendor = "apple"),
        not(target_family = "wasm")
    )) {
        wsi_extensions.push(make_extension!(ext::acquire_drm_display));
        wsi_extensions.push(make_extension!(ext::direct_mode_display));
        wsi_extensions.push(make_extension!(khr::display));
    }
    // Availability is decided by name alone; the requested version only shows
    // up in the warning when the extension is missing. (`&mut` parameter so
    // the closure fits `Vec::extract_if`'s predicate signature.)
    let is_extension_available = |ext: &mut Extension| -> bool {
        if available_extensions
            .iter()
            .any(|inst_ext| inst_ext.extension_name_as_c_str() == Ok(ext.name))
        {
            true
        } else {
            tracing::warn!(
                "Extension {:?} v{} was requested but is not available",
                ext.name,
                ext.version
            );
            false
        }
    };
    // Move every available extension out of the request lists; whatever stays
    // behind in `extensions` is requested-but-unsupported.
    let mut enabled_extensions = extensions
        .extract_if(.., is_extension_available)
        .collect::<Vec<_>>();
    enabled_extensions.extend(wsi_extensions.extract_if(.., is_extension_available));
    // if a display handle is provided, ensure the required WSI extensions are present
    if let Some(display_handle) = display_handle {
        let mut required_extensions = ash_window::enumerate_required_extensions(display_handle)?
            .iter()
            .map(|&p| Extension {
                // NOTE(review): assumes ash_window hands back pointers to
                // static nul-terminated extension names — standard contract,
                // but worth confirming against the ash_window version in use.
                name: unsafe { CStr::from_ptr(p) },
                version: 0,
            })
            // filter out extensions that are already enabled
            .filter(|ext| {
                !enabled_extensions
                    .iter()
                    .any(|enabled| enabled.name == ext.name)
            })
            .collect::<Vec<_>>();
        // filter out extensions that aren't available, and log a warning for them
        let display_extensions = required_extensions.extract_if(.., is_extension_available);
        enabled_extensions.extend(display_extensions);
        extensions.extend(required_extensions);
    }
    // all extensions remaining in `extensions` at this point are unsupported,
    // and were requested by the user or are required by the display handle
    let unsupported_extensions = HashSet::from_iter(extensions);
    let out_extensions = HashSet::from_iter(enabled_extensions);
    Ok((out_extensions, unsupported_extensions))
}
/// returns a list of enabled, or a tuple of enabled and unsupported but requested layers.
pub(crate) fn get_layers<'a>(
    entry: &ash::Entry,
    wants_layers: Vec<&'a CStr>,
) -> core::result::Result<Vec<&'a CStr>, (Vec<&'a CStr>, Vec<&'a CStr>)> {
    unsafe {
        // If we cannot even enumerate layers, treat every request as unsupported.
        let Ok(available_layers) = entry.enumerate_instance_layer_properties() else {
            return Err((vec![], wants_layers));
        };
        let Ok(available_layer_names) = available_layers
            .iter()
            .map(|layer| layer.layer_name_as_c_str())
            .collect::<core::result::Result<BTreeSet<_>, _>>()
        else {
            return Err((vec![], wants_layers));
        };
        tracing::debug!(
            "Available layers: {:?}",
            available_layer_names
                .iter()
                .map(|s| s.to_str().unwrap_or("<invalid utf8>"))
                .collect::<Vec<_>>()
        );
        // Split the requested layers into those the loader advertises and
        // those it does not.
        let (enabled_layers, unsupported_layers): (Vec<_>, Vec<_>) = wants_layers
            .into_iter()
            .partition(|layer| available_layer_names.contains(layer));
        if unsupported_layers.is_empty() {
            Ok(enabled_layers)
        } else {
            Err((enabled_layers, unsupported_layers))
        }
    }
}
impl PhysicalDeviceInfo {
    /// Create a logical [`Device`] — including queues, the GPU allocator and
    /// extension function tables — from this physical device.
    ///
    /// Unsupported entries in `extensions` are skipped with a warning rather
    /// than failing creation (see [`Self::required_extensions`]).
    pub fn create_logical_device(
        self,
        instance: &Instance,
        extensions: &[Extension<'static>],
        mut features: PhysicalDeviceFeatures,
        display_handle: Option<RawDisplayHandle>,
    ) -> Result<Device> {
        // Select queue families first; the create-info below borrows the
        // resulting queue descriptions.
        let queue_infos = DeviceQueueInfos::select_queue_families(instance, &self, display_handle)?;
        let queue_create_infos = queue_infos.into_create_infos();
        let extensions = Self::required_extensions(&self, extensions);
        let create_info = vk::DeviceCreateInfo::default()
            .queue_create_infos(&queue_create_infos)
            .enabled_extension_names(&extensions);
        // `features` is linked into the create-info's pNext chain (hence `mut`).
        let create_info = features.push_to_device_create_info(create_info);
        let device = unsafe {
            instance
                .inner
                .raw
                .create_device(self.pdev, &create_info, None)?
        };
        let device_queues = queue_infos.retrieve_queues(&device);
        // `extensions` holds pointers into 'static extension-name constants,
        // so turning them back into `&'static CStr` is sound.
        let enabled_extensions = extensions
            .into_iter()
            .map(|ptr| unsafe { CStr::from_ptr(ptr) })
            .collect::<Vec<_>>();
        // Load the extension function tables for whatever actually got enabled.
        let device_extensions = DeviceExtensions {
            debug_utils: ext::debug_utils::Device::new(&instance.inner.raw, &device),
            swapchain: if enabled_extensions.contains(&khr::swapchain::NAME) {
                Some(khr::swapchain::Device::new(&instance.inner.raw, &device))
            } else {
                None
            },
            mesh_shader: if enabled_extensions.contains(&ext::mesh_shader::NAME) {
                Some(ext::mesh_shader::Device::new(&instance.inner.raw, &device))
            } else {
                None
            },
        };
        let alloc2 =
            gpu_allocator::vulkan::Allocator::new(&gpu_allocator::vulkan::AllocatorCreateDesc {
                instance: instance.inner.raw.clone(),
                device: device.clone(),
                physical_device: self.pdev,
                debug_settings: Default::default(),
                buffer_device_address: false,
                allocation_sizes: {
                    const MB: u64 = 1024 * 1024;
                    // 8 MiB device / 64 MiB host initial block sizes, both
                    // capped at 256 MiB.
                    gpu_allocator::AllocationSizes::new(8 * MB, 64 * MB)
                        .with_max_host_memblock_size(256 * MB)
                        .with_max_device_memblock_size(256 * MB)
                },
            })?;
        let inner = DeviceInner {
            raw: device.clone(),
            alloc2: Mutex::new(alloc2),
            instance: instance.clone(),
            adapter: self,
            queues: device_queues,
            device_extensions,
            enabled_extensions,
            sync_threadpool: sync::SyncThreadpool::new(),
            // Declared last in `DeviceInner`, so the VkDevice is destroyed
            // after every field above.
            _drop: DeviceDrop(device),
        };
        let shared = Arc::new(inner);
        Ok(Device {
            pools: Arc::new(DevicePools::new(shared.clone())),
            shared,
        })
    }
    /// Translate requested extensions into the pointer list expected by
    /// `VkDeviceCreateInfo`, dropping (with a warning) any extension the
    /// physical device does not support at the requested version.
    /// `VK_KHR_swapchain` is always included, without a support check.
    fn required_extensions(&self, requested_extensions: &[Extension<'static>]) -> Vec<*const i8> {
        let mut extensions = vec![khr::swapchain::NAME.as_ptr()];
        for ext in requested_extensions {
            if self
                .properties
                .supported_extensions
                .iter()
                .any(|supported| {
                    supported.extension_name_as_c_str() == Ok(ext.name)
                        && supported.spec_version >= ext.version
                })
            {
                extensions.push(ext.name.as_ptr());
            } else {
                tracing::warn!(
                    "Physical device {:?} does not support required extension {:?}",
                    self.pdev,
                    ext.name
                );
            }
        }
        extensions
    }
}
/// Reusable-object pools (fences and semaphores) shared by all clones of a
/// [`Device`].
#[derive(Clone, Debug)]
pub(crate) struct DevicePools {
    pub(crate) fences: Pool<vk::Fence>,
    pub(crate) binary_semaphores: Pool<BinarySemaphore>,
    pub(crate) timeline_semaphores: Pool<TimelineSemaphore>,
}
impl DevicePools {
    /// Create a set of empty pools that all share `device`.
    pub fn new(device: Arc<DeviceInner>) -> Self {
        let fences = Pool::new(Arc::clone(&device));
        let binary_semaphores = Pool::new(Arc::clone(&device));
        let timeline_semaphores = Pool::new(device);
        Self {
            fences,
            binary_semaphores,
            timeline_semaphores,
        }
    }
}
/// Cheaply clonable handle to a logical Vulkan device.
///
/// All clones share the same [`DeviceInner`] and the same [`DevicePools`].
#[derive(Clone, Debug)]
pub struct Device {
    pub(crate) shared: Arc<DeviceInner>,
    pub(crate) pools: Arc<DevicePools>,
}
impl PartialEq for Device {
    /// Two `Device`s compare equal when they share the same `DeviceInner`.
    fn eq(&self, other: &Self) -> bool {
        core::ptr::eq(Arc::as_ptr(&self.shared), Arc::as_ptr(&other.shared))
    }
}
impl Eq for Device {}
impl core::ops::Deref for Device {
    type Target = DeviceInner;
    /// Expose the shared inner state directly on `Device`.
    fn deref(&self) -> &Self::Target {
        self.shared.as_ref()
    }
}
impl DeviceInner {
    /// Threadpool used for synchronization-primitive wait helpers.
    pub fn sync_threadpool(&self) -> &sync::SyncThreadpool {
        &self.sync_threadpool
    }
    /// The raw `ash` device function table.
    pub fn dev(&self) -> &ash::Device {
        &self.raw
    }
    pub fn instance(&self) -> &Instance {
        &self.instance
    }
    pub fn queues(&self) -> &DeviceQueues {
        &self.queues
    }
    /// The physical device this logical device was created from.
    pub fn phy(&self) -> vk::PhysicalDevice {
        self.adapter.pdev
    }
    pub fn features(&self) -> &crate::PhysicalDeviceFeatures {
        &self.adapter.features
    }
    pub(crate) fn properties(&self) -> &crate::PhysicalDeviceProperties {
        &self.adapter.properties
    }
    pub fn physical_device(&self) -> &PhysicalDeviceInfo {
        &self.adapter
    }
    pub fn main_queue(&self) -> &Queue {
        self.queues.graphics()
    }
    pub fn compute_queue(&self) -> &Queue {
        self.queues.compute()
    }
    pub fn transfer_queue(&self) -> &Queue {
        self.queues.transfer()
    }
    /// # Safety
    ///
    /// The caller must ensure that the queues aren't already locked when calling this function.
    pub unsafe fn lock_queues(&self) {
        unsafe {
            self.queues.lock();
        }
    }
    /// # Safety
    ///
    /// The caller must have acquired and have logical ownership of the lock on the queues.
    pub unsafe fn unlock_queues(&self) {
        unsafe {
            self.queues.unlock();
        }
    }
    /// Lock `queue` and block until all work submitted to it has completed.
    pub fn wait_queue_idle(&self, queue: &Queue) -> VkResult<()> {
        tracing::warn!("locking queue {queue:?} and waiting for idle");
        queue.with_locked(|q| unsafe { self.raw.queue_wait_idle(q.raw) })?;
        tracing::warn!("finished waiting: unlocking queue {queue:?}.");
        Ok(())
    }
    /// Lock every queue and block until the whole device is idle.
    ///
    /// The queue locks are always released before returning, even when
    /// `vkDeviceWaitIdle` fails. (Previously the `?` sat between
    /// `lock_queues` and `unlock_queues`, so an error left all queues
    /// permanently locked.)
    pub fn wait_idle(&self) -> VkResult<()> {
        tracing::warn!("locking all queues and waiting for device to idle");
        // SAFETY: the queues are locked only for the duration of the wait and
        // released on both the success and the error path.
        let result = unsafe {
            self.lock_queues();
            let result = self.raw.device_wait_idle();
            self.unlock_queues();
            result
        };
        result?;
        tracing::warn!("finished waiting: unlocking all queues.");
        Ok(())
    }
    /// # Safety
    ///
    /// This method inherits the safety contract from [`vkSetDebugUtilsObjectName`]. In particular:
    ///
    /// - `object` must be a valid handle for one of the following:
    ///   - An instance-level object from the same instance as this device.
    ///   - A physical-device-level object that descends from the same physical device as this
    ///     device.
    ///   - A device-level object that descends from this device.
    /// - `object` must be externally synchronized—only the calling thread should access it during
    ///   this call.
    ///
    /// [`vkSetDebugUtilsObjectName`]: https://registry.khronos.org/vulkan/specs/latest/man/html/vkSetDebugUtilsObjectNameEXT.html
    pub unsafe fn debug_name_object<T: vk::Handle>(&self, object: T, name: &str) {
        // avoid heap allocation for short names
        let mut buffer = [0u8; 64];
        let buffer_vec: Vec<u8>;
        let name_bytes = if name.len() < buffer.len() {
            buffer[..name.len()].copy_from_slice(name.as_bytes());
            // The buffer is zero-initialized, so `buffer[name.len()]` is the
            // nul terminator. Slice up to and including exactly that byte:
            // passing the whole buffer (as the previous code did) leaves
            // interior nul bytes, which `CStr::from_bytes_with_nul` rejects,
            // tripping the `expect` below for every short name. This branch
            // also handles the empty string (yielding the valid slice `[0]`),
            // whereas the old special case passed `&[]` — not nul-terminated,
            // also a guaranteed panic.
            &buffer[..=name.len()]
        } else {
            // Names of 64+ bytes fall back to a heap copy with a nul appended.
            buffer_vec = name
                .as_bytes()
                .iter()
                .copied()
                .chain(std::iter::once(0))
                .collect();
            &buffer_vec
        };
        let name = CStr::from_bytes_with_nul(name_bytes)
            .expect("debug names must not contain interior nul bytes");
        // The result is intentionally discarded: failing to attach a debug
        // label is not worth propagating.
        unsafe {
            _ = self
                .device_extensions
                .debug_utils
                .set_debug_utils_object_name(
                    &vk::DebugUtilsObjectNameInfoEXT::default()
                        .object_handle(object)
                        .object_name(name),
                );
        }
    }
}
/// A raw Vulkan handle paired with the [`Device`] that created it and (in
/// debug builds) an optional debug-utils name.
///
/// No `Drop` impl here — destruction is the owner's responsibility (see
/// `define_device_owned_handle!`).
#[derive(Clone)]
pub struct DeviceOwnedDebugObject<T> {
    pub(crate) device: Device,
    pub(crate) object: T,
    // Debug name set at construction; only tracked in debug builds.
    #[cfg(debug_assertions)]
    name: Option<Cow<'static, str>>,
}
impl<T: Eq> Eq for DeviceOwnedDebugObject<T> {}
impl<T: PartialEq> PartialEq for DeviceOwnedDebugObject<T> {
fn eq(&self, other: &Self) -> bool {
std::sync::Arc::ptr_eq(&self.device.shared, &other.device.shared)
&& self.object == other.object
}
}
impl<T: std::fmt::Debug + vk::Handle + Copy> std::fmt::Debug for DeviceOwnedDebugObject<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Use the wrapped handle's type name as the struct label, and print
        // both the device handle and the object handle as hex.
        let mut fmt = f.debug_struct(core::any::type_name::<T>());
        fmt.field_with("device", |f| {
            write!(f, "0x{:x}", self.device.raw.handle().as_raw())
        })
        .field_with("handle", |f| write!(f, "0x{:x}", &self.object.as_raw()));
        // The name field only exists in debug builds.
        #[cfg(debug_assertions)]
        {
            fmt.field("name", &self.name);
        }
        fmt.finish()
    }
}
impl<T> DeviceOwnedDebugObject<T> {
    /// Wrap `object`, attaching `name` as its debug-utils label when provided.
    pub fn new(
        device: crate::Device,
        object: T,
        name: Option<Cow<'static, str>>,
    ) -> ash::prelude::VkResult<Self>
    where
        T: vk::Handle + Copy,
    {
        if let Some(label) = name.as_deref() {
            // SAFETY: the caller hands us a handle belonging to `device`.
            unsafe { device.debug_name_object(object, label) };
        }
        Ok(Self {
            device,
            object,
            #[cfg(debug_assertions)]
            name,
        })
    }
    /// The device that owns this object.
    pub fn dev(&self) -> &crate::Device {
        &self.device
    }
    /// A copy of the wrapped raw handle.
    pub fn handle(&self) -> T
    where
        T: Copy,
    {
        self.object
    }
}
/// A device-created object of type `T` that is destroyed via
/// [`DeviceHandle::destroy`] when dropped.
#[derive(Debug)]
pub struct DeviceObject<T: DeviceHandle> {
    inner: T,
    device: Device,
    // Debug name assigned at construction; only tracked in debug builds.
    #[cfg(debug_assertions)]
    name: Option<Cow<'static, str>>,
}
impl<T: DeviceHandle> Deref for DeviceObject<T> {
    type Target = T;
    /// Borrow the wrapped handle.
    fn deref(&self) -> &T {
        &self.inner
    }
}
impl<T: DeviceHandle> DerefMut for DeviceObject<T> {
    /// Mutably borrow the wrapped handle.
    fn deref_mut(&mut self) -> &mut T {
        &mut self.inner
    }
}
impl<T: DeviceHandle> DeviceObject<T> {
    /// Wrap `inner`, optionally attaching a debug name to the handle.
    pub fn new(inner: T, device: Device, name: Option<Cow<'static, str>>) -> Self
    where
        T: vk::Handle + Clone,
    {
        if let Some(label) = name.as_deref() {
            // SAFETY: the handle belongs to `device`, which we also store.
            unsafe { device.debug_name_object(inner.clone(), label) };
        }
        Self {
            inner,
            device,
            #[cfg(debug_assertions)]
            name,
        }
    }
    /// Wrap `inner` without assigning a debug name.
    pub fn new_without_name(inner: T, device: Device) -> Self {
        Self {
            inner,
            device,
            #[cfg(debug_assertions)]
            name: None,
        }
    }
    /// The device that owns this object.
    pub fn device(&self) -> &Device {
        &self.device
    }
    /// The debug name, if one was set (always `None` in release builds).
    pub fn name(&self) -> Option<&str> {
        #[cfg(debug_assertions)]
        {
            self.name.as_deref()
        }
        #[cfg(not(debug_assertions))]
        {
            None
        }
    }
}
impl<T: DeviceHandle> Drop for DeviceObject<T> {
    fn drop(&mut self) {
        // SAFETY: the object is being dropped, so no further use of the
        // handle is possible; the constructors store the creating device
        // alongside the handle, so `self.device` is the right device to
        // destroy it on.
        unsafe {
            self.inner.destroy(&self.device);
        }
    }
}
/// A Vulkan handle that knows how to destroy itself on a given [`Device`].
pub trait DeviceHandle {
    /// Destroy the underlying Vulkan object.
    ///
    /// # Safety
    ///
    /// The handle must belong to `device` and must no longer be in use by
    /// the GPU or by any other thread.
    unsafe fn destroy(&mut self, device: &Device);
}
impl DeviceHandle for vk::Semaphore {
    unsafe fn destroy(&mut self, device: &Device) {
        // SAFETY: upheld by the caller per the `DeviceHandle::destroy` contract.
        unsafe { device.dev().destroy_semaphore(*self, None) };
    }
}
impl DeviceHandle for vk::Fence {
    unsafe fn destroy(&mut self, device: &Device) {
        // SAFETY: upheld by the caller per the `DeviceHandle::destroy` contract.
        unsafe { device.dev().destroy_fence(*self, None) };
    }
}
impl DeviceHandle for vk::Buffer {
    unsafe fn destroy(&mut self, device: &Device) {
        // SAFETY: upheld by the caller per the `DeviceHandle::destroy` contract.
        unsafe { device.dev().destroy_buffer(*self, None) };
    }
}
impl DeviceHandle for vk::SwapchainKHR {
    unsafe fn destroy(&mut self, device: &Device) {
        // The swapchain function table only exists when VK_KHR_swapchain was
        // enabled; without it there is nothing we can destroy.
        unsafe {
            if let Some(swapchain) = device.device_extensions.swapchain.as_ref() {
                swapchain.destroy_swapchain(*self, None);
            }
        }
    }
}
/// Accessors shared by wrappers that own a device-created handle of type `T`.
pub trait DeviceOwned<T> {
    /// The device the handle was created on.
    fn device(&self) -> &Device;
    /// The raw handle.
    fn handle(&self) -> T;
}
/// Objects that can be created on demand to refill a [`Pool`].
pub trait Pooled: Sized {
    fn create_from_pool(pool: &Pool<Self>) -> Result<Self>;
}
/// RAII guard around a pooled handle: when dropped, the handle's debug name
/// (if any) is cleared and the handle is returned to its [`Pool`].
pub struct PoolObject<T: Pooled + vk::Handle + Clone> {
    // ManuallyDrop so the handle can be moved back into the pool in `drop`.
    pub(crate) inner: ManuallyDrop<T>,
    pub(crate) pool: Pool<T>,
    // Debug name currently attached to the handle (debug builds only).
    #[cfg(debug_assertions)]
    pub(crate) name: Option<Cow<'static, str>>,
}
impl<T: Pooled + vk::Handle + Clone> PoolObject<T> {
    /// Attach `name` to the pooled handle as its debug-utils label.
    /// No-op in release builds.
    pub fn name_object(&mut self, name: impl Into<Cow<'static, str>>) {
        #[cfg(debug_assertions)]
        {
            let name = name.into();
            let handle = T::clone(&self.inner);
            // SAFETY: the handle was created on (or pooled for) this device.
            unsafe { self.pool.device.debug_name_object(handle, &name) };
            self.name = Some(name);
        }
    }
    /// The device the pool (and therefore this object) belongs to.
    pub fn device(&self) -> &Arc<DeviceInner> {
        &self.pool.device
    }
}
impl<T: Pooled + vk::Handle + Clone> Drop for PoolObject<T> {
    fn drop(&mut self) {
        // SAFETY: `inner` is taken exactly once, here; the guard is being
        // dropped so nothing can read it afterwards.
        let handle = unsafe { ManuallyDrop::take(&mut self.inner) };
        // Clear the debug name so the next user of this pooled handle does
        // not inherit a stale label.
        #[cfg(debug_assertions)]
        if self.name.is_some() {
            unsafe { self.pool.device.debug_name_object(handle.clone(), "") };
        }
        self.pool.push(handle);
    }
}
impl<T: Pooled + vk::Handle + Clone> Deref for PoolObject<T> {
    type Target = T;
    /// Borrow the pooled handle.
    fn deref(&self) -> &T {
        &*self.inner
    }
}
/// A shared LIFO free-list of reusable objects of type `T`, bound to a device.
#[derive(Debug, Clone)]
pub struct Pool<T> {
    pub(crate) pool: Arc<Mutex<Vec<T>>>,
    pub(crate) device: Arc<DeviceInner>,
}
impl<T> Pool<T> {
    /// Create an empty pool bound to `device`.
    pub fn new(device: Arc<DeviceInner>) -> Self {
        Self {
            pool: Arc::new(Mutex::new(Vec::new())),
            device,
        }
    }
    /// Return an item to the pool for later reuse.
    pub fn push(&self, item: T) {
        let mut items = self.pool.lock();
        items.push(item);
    }
    /// Take an item from the pool, if one is available.
    pub fn pop(&self) -> Option<T> {
        let mut items = self.pool.lock();
        items.pop()
    }
}
impl<T: Pooled + vk::Handle + Clone> Pool<T> {
    /// Take a pooled object, creating a fresh one via
    /// [`Pooled::create_from_pool`] when the pool is empty. The returned
    /// guard pushes the object back into the pool on drop.
    pub fn get(&self) -> Result<PoolObject<T>> {
        // Pop under the lock, then release it before (possibly) creating a
        // new object. The previous `if let Some(item) = self.pool.lock().pop()`
        // kept the mutex guard alive across `create_from_pool` (pre-2024
        // temporary-lifetime rules), which would deadlock if creation ever
        // touched the pool — parking_lot mutexes are not reentrant.
        let pooled = self.pool.lock().pop();
        let item = match pooled {
            Some(item) => item,
            None => T::create_from_pool(self)?,
        };
        Ok(PoolObject {
            inner: ManuallyDrop::new(item),
            pool: self.clone(),
            #[cfg(debug_assertions)]
            name: None,
        })
    }
    /// Like [`Self::get`], additionally attaching `name` as the object's
    /// debug name when provided.
    pub fn get_named(&self, name: Option<impl Into<Cow<'static, str>>>) -> Result<PoolObject<T>> {
        let mut obj = self.get()?;
        if let Some(name) = name {
            obj.name_object(name);
        }
        Ok(obj)
    }
}
/// Macro for helping create and destroy Vulkan objects which are owned by a
/// device.
///
/// Generates: the struct (embedding a
/// [`DeviceOwnedDebugObject`](crate::device::DeviceOwnedDebugObject) plus any
/// extra fields), a [`DeviceOwned`](crate::device::DeviceOwned) impl, a
/// private `construct` constructor taking the device, handle, optional debug
/// name and every extra field, and — when the optional `=> |this| dtor` arm
/// is supplied — a `Drop` impl that runs the given destructor statement.
#[macro_export]
macro_rules! define_device_owned_handle {
    ($(#[$attr:meta])*
    $ty_vis:vis $ty:ident($handle:ty) {
        $($(#[$field_attr:meta])* $field_vis:vis $field_name:ident : $field_ty:ty),*
        $(,)?
    } $(=> |$this:ident| $dtor:stmt)?) => {
        $(#[$attr])*
        $ty_vis struct $ty {
            inner: $crate::device::DeviceOwnedDebugObject<$handle>,
            $(
                $(#[$field_attr])*
                $field_vis $field_name: $field_ty,
            )*
        }
        impl $crate::device::DeviceOwned<$handle> for $ty {
            fn device(&self) -> &$crate::device::Device {
                self.inner.dev()
            }
            fn handle(&self) -> $handle {
                self.inner.handle()
            }
        }
        impl $ty {
            #[allow(clippy::too_many_arguments, reason = "This function is generated by a macro")]
            fn construct(
                device: $crate::device::Device,
                handle: $handle,
                name: Option<::std::borrow::Cow<'static, str>>,
                $($field_name: $field_ty,)*
            ) -> ::ash::prelude::VkResult<Self> {
                Ok(Self {
                    inner: $crate::device::DeviceOwnedDebugObject::new(
                        device,
                        handle,
                        name,
                    )?,
                    $($field_name,)*
                })
            }
        }
        // Optional destructor arm: generates a Drop impl running `$dtor`
        // with `$this` bound to `&mut self`.
        $(
            impl Drop for $ty {
                fn drop(&mut self) {
                    #[allow(unused_mut)]
                    let mut $this = self;
                    $dtor
                }
            }
        )?
    };
}