1137 lines
34 KiB
Rust
1137 lines
34 KiB
Rust
use std::{
|
|
borrow::Cow,
|
|
collections::{BTreeSet, HashMap, HashSet},
|
|
ffi::CStr,
|
|
ops::{Deref, DerefMut},
|
|
sync::Arc,
|
|
};
|
|
|
|
use ash::{
|
|
ext, khr,
|
|
prelude::VkResult,
|
|
vk::{self, Handle},
|
|
};
|
|
use parking_lot::Mutex;
|
|
use raw_window_handle::RawDisplayHandle;
|
|
|
|
use crate::{
|
|
Instance, PhysicalDeviceFeatures, PhysicalDeviceInfo, Result,
|
|
device::asdf::traits::ExternallyManagedObject,
|
|
pipeline::pipeline_cache::PipelineCache,
|
|
queue::{DeviceQueueInfos, DeviceQueues, Queue},
|
|
sync::{self, BinarySemaphore, TimelineSemaphore},
|
|
};
|
|
|
|
bitflags::bitflags! {
    /// Logical queue capabilities used to select and address device queues.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
    pub struct QueueFlags: u32 {
        const GRAPHICS = 1 << 0;
        const ASYNC_COMPUTE = 1 << 1;
        const TRANSFER = 1 << 2;
        const PRESENT = 1 << 3;

        const NONE = 0;
        /// A queue usable for both presentation and graphics work.
        // Fixed: this was previously `1 << 0 | 1 << 2`, i.e. GRAPHICS |
        // TRANSFER, which contradicts the name — PRESENT is bit 3. Spelling
        // it via the named flags prevents the same drift in the future.
        const PRESENT_GRAPHICS = Self::GRAPHICS.bits() | Self::PRESENT.bits();
    }
}
|
|
|
|
/// Owns the raw `ash::Device` and destroys it on drop.
///
/// Kept as the *last* field of [`DeviceInner`] so that every other field
/// (which may hold device-child resources) is dropped first.
struct DeviceDrop(ash::Device);
impl Drop for DeviceDrop {
    fn drop(&mut self) {
        // SAFETY: runs exactly once, when the owning `DeviceInner` goes away.
        // Waiting for idle before `vkDestroyDevice` is required by the spec;
        // the wait result is deliberately ignored — there is nothing useful
        // to do with an error during teardown.
        unsafe {
            _ = self.0.device_wait_idle();
            self.0.destroy_device(None);
        }
    }
}
|
|
|
|
/// Loaded extension function tables for a logical device.
pub(crate) struct DeviceExtensions {
    /// Always loaded; used for debug object naming.
    pub(crate) debug_utils: ext::debug_utils::Device,
    /// `Some` only when `VK_KHR_swapchain` was enabled at device creation.
    pub(crate) swapchain: Option<khr::swapchain::Device>,
    #[allow(dead_code)]
    /// `Some` only when `VK_EXT_mesh_shader` was enabled at device creation.
    pub(crate) mesh_shader: Option<ext::mesh_shader::Device>,
}
|
|
|
|
/// Backing allocation type from the `gpu_allocator` crate.
type GpuAllocation = gpu_allocator::vulkan::Allocation;

/// How memory for a resource should be obtained from the allocator.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub enum AllocationStrategy {
    #[default]
    /// Let gpu_allocator manage the memory for this allocation, sub-allocating
    /// from larger blocks as needed.
    AllocatorManaged,
    /// Allocate a dedicated block of memory for this allocation. This is
    /// recommended for long-lived resources or resources with specific memory
    /// requirements.
    Dedicated,
}
|
|
|
|
/// Ownership wrapper for the GPU memory allocation backing a resource.
#[derive(Debug)]
pub enum Allocation {
    /// This resource exclusively owns its allocation.
    Owned(DeviceObject<GpuAllocation>),
    /// The allocation is shared between resources via an `Arc`.
    Shared(Arc<DeviceObject<GpuAllocation>>),
    /// No allocation is tracked here; the memory is managed elsewhere.
    Unmanaged,
}
|
|
|
|
impl Allocation {
|
|
pub(crate) fn allocation(&self) -> Option<&GpuAllocation> {
|
|
match self {
|
|
Allocation::Owned(obj) => Some(obj),
|
|
Allocation::Shared(arc) => Some(arc.as_ref()),
|
|
Allocation::Unmanaged => None,
|
|
}
|
|
}
|
|
|
|
pub(crate) fn allocation_mut(&mut self) -> Option<&mut GpuAllocation> {
|
|
match self {
|
|
Allocation::Owned(obj) => Some(obj),
|
|
Allocation::Shared(arc) => Arc::get_mut(arc).map(DerefMut::deref_mut),
|
|
Allocation::Unmanaged => None,
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Shared device state behind [`Device`]'s `Arc`.
pub struct DeviceInner {
    /// GPU memory allocator; locked per allocate/free call.
    pub(crate) alloc2: Mutex<gpu_allocator::vulkan::Allocator>,
    pub(crate) raw: ash::Device,
    /// Info about the physical device this logical device was created from.
    pub(crate) adapter: PhysicalDeviceInfo,
    /// Keeps the instance alive for as long as the device exists.
    pub(crate) instance: Instance,
    pub(crate) queues: DeviceQueues,
    pub(crate) sync_threadpool: sync::SyncThreadpool,
    pub(crate) device_extensions: DeviceExtensions,
    #[allow(dead_code)]
    pub(crate) enabled_extensions: Vec<&'static CStr>,

    // Must stay the last field: struct fields drop in declaration order, so
    // the raw device is destroyed only after everything above it.
    _drop: DeviceDrop,
}
|
|
|
|
impl AsRef<DeviceInner> for DeviceInner {
    // Identity impl so generic code can accept `impl AsRef<DeviceInner>`
    // and work with a bare `DeviceInner` as well as wrappers around it.
    fn as_ref(&self) -> &DeviceInner {
        self
    }
}
|
|
|
|
impl core::fmt::Debug for DeviceInner {
    // Only the raw device handle is printed; the rest of the struct is large
    // and mostly opaque foreign types.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DeviceInner")
            .field("device", &self.raw.handle())
            .finish()
    }
}
|
|
|
|
#[macro_export]
/// Builds a [`crate::device::Extension`] from an `ash` extension module.
///
/// `make_extension!(khr::surface)` uses the module's `NAME` and
/// `SPEC_VERSION`; `make_extension!(module as version)` overrides the
/// version with an explicit expression.
macro_rules! make_extension {
    ($module:path) => {{
        use $module::{NAME as EXTENSION_NAME, SPEC_VERSION as EXTENSION_VERSION};
        $crate::device::Extension {
            name: EXTENSION_NAME,
            version: EXTENSION_VERSION,
        }
    }};
    ($module:path as $version:expr) => {{
        use $module::*;
        $crate::device::Extension {
            name: NAME,
            version: $version,
        }
    }};
}
|
|
|
|
/// A Vulkan extension name together with the spec version requested for it.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Extension<'a> {
    pub name: &'a CStr,
    pub version: u32,
}
|
|
|
|
impl<'a> std::hash::Hash for Extension<'a> {
    // Hash only the name. This remains consistent with the derived
    // `PartialEq`/`Eq` (equal values necessarily have equal names — equal
    // hash is all the contract requires), while making requests for the
    // same extension at different versions collide in hash-based sets.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.name.hash(state);
    }
}
|
|
|
|
pub(crate) fn get_available_extensions(
|
|
entry: &ash::Entry,
|
|
layers: &[&CStr],
|
|
) -> Result<Vec<ash::vk::ExtensionProperties>> {
|
|
unsafe {
|
|
let extensions = core::iter::once(entry.enumerate_instance_extension_properties(None))
|
|
.chain(
|
|
layers
|
|
.iter()
|
|
.map(|&layer| entry.enumerate_instance_extension_properties(Some(layer))),
|
|
)
|
|
.filter_map(|result| result.ok())
|
|
.flatten()
|
|
.collect::<Vec<ash::vk::ExtensionProperties>>();
|
|
|
|
Ok(extensions)
|
|
}
|
|
}
|
|
|
|
/// returns a tuple of supported-or-enabled extensions and unsupported-and-requested extensions
///
/// Combines the caller's requested `extensions` with the platform's
/// window-system-integration (WSI) surface extensions, keeps only those the
/// loader reports as available, and — when `display_handle` is given — also
/// pulls in whatever `ash_window` reports that display requires.
pub(crate) fn get_extensions<'a>(
    entry: &ash::Entry,
    layers: &[&'a CStr],
    mut extensions: Vec<Extension<'a>>,
    display_handle: Option<RawDisplayHandle>,
) -> Result<(HashSet<Extension<'a>>, HashSet<Extension<'a>>)> {
    let available_extensions = get_available_extensions(entry, layers)?;

    // name -> spec version map, used only for the debug log below; entries
    // whose names are not valid UTF-8 are skipped.
    let available_extension_names = available_extensions
        .iter()
        .filter_map(|ext| {
            Some((
                ext.extension_name_as_c_str().ok()?.to_str().ok()?,
                ext.spec_version,
            ))
        })
        .collect::<HashMap<_, _>>();

    tracing::debug!(
        "Available extensions: {:?}",
        available_extension_names.iter().collect::<Vec<_>>()
    );

    let mut wsi_extensions = Vec::new();
    wsi_extensions.push(make_extension!(khr::surface));

    // taken from wgpu-hal/src/vulkan/instance.rs:
    //
    // we want to enable all the wsi extensions that are applicable to the
    // platform, even if the user didn't explicitly request them, or
    // supplied a different/no display handle, because we might later want
    // to create a surface for a different windowing system, and enabling
    // all the wsi extensions doesn't have any real downsides.
    // We don't notify the user if some of these extensions aren't available
    // (e.g. because wayland isn't supported on some unix system)
    if cfg!(all(
        unix,
        not(target_os = "android"),
        not(target_os = "macos")
    )) {
        wsi_extensions.push(make_extension!(khr::xlib_surface));
        wsi_extensions.push(make_extension!(khr::xcb_surface));
        wsi_extensions.push(make_extension!(khr::wayland_surface));
    }
    if cfg!(target_os = "windows") {
        wsi_extensions.push(make_extension!(khr::win32_surface));
    }
    if cfg!(target_os = "android") {
        wsi_extensions.push(make_extension!(khr::android_surface));
    }
    if cfg!(target_os = "macos") {
        wsi_extensions.push(make_extension!(ext::metal_surface));
        wsi_extensions.push(make_extension!(khr::portability_enumeration));
    }
    if cfg!(all(
        unix,
        not(target_vendor = "apple"),
        not(target_family = "wasm")
    )) {
        wsi_extensions.push(make_extension!(ext::acquire_drm_display));
        wsi_extensions.push(make_extension!(ext::direct_mode_display));
        wsi_extensions.push(make_extension!(khr::display));
    }

    // Takes `&mut` because `Vec::extract_if` hands its predicate mutable
    // references. Logs a warning as a side effect for anything unavailable.
    let is_extension_available = |ext: &mut Extension| -> bool {
        if available_extensions
            .iter()
            .any(|inst_ext| inst_ext.extension_name_as_c_str() == Ok(ext.name))
        {
            true
        } else {
            tracing::warn!(
                "Extension {:?} v{} was requested but is not available",
                ext.name,
                ext.version
            );
            false
        }
    };

    // Move the available subset out of `extensions`; whatever remains in
    // `extensions` afterwards is requested-but-unsupported.
    let mut enabled_extensions = extensions
        .extract_if(.., is_extension_available)
        .collect::<Vec<_>>();

    enabled_extensions.extend(wsi_extensions.extract_if(.., is_extension_available));

    // if a display handle is provided, ensure the required WSI extensions are present
    if let Some(display_handle) = display_handle {
        let mut required_extensions = ash_window::enumerate_required_extensions(display_handle)?
            .iter()
            .map(|&p| Extension {
                // SAFETY: `ash_window` returns pointers to static,
                // nul-terminated extension name strings.
                name: unsafe { CStr::from_ptr(p) },
                version: 0,
            })
            // filter out extensions that are already enabled
            .filter(|ext| {
                !enabled_extensions
                    .iter()
                    .any(|enabled| enabled.name == ext.name)
            })
            .collect::<Vec<_>>();

        // filter out extensions that aren't available, and log a warning for them
        let display_extensions = required_extensions.extract_if(.., is_extension_available);

        enabled_extensions.extend(display_extensions);
        // Unavailable display-required extensions join the unsupported set.
        extensions.extend(required_extensions);
    }

    // all extensions remaining in `extensions` at this point are unsupported,
    // and were requested by the user or are required by the display handle
    let unsupported_extensions = HashSet::from_iter(extensions);
    let out_extensions = HashSet::from_iter(enabled_extensions);

    Ok((out_extensions, unsupported_extensions))
}
|
|
|
|
/// returns a list of enabled, or a tuple of enabled and unsupported but requested layers.
|
|
pub(crate) fn get_layers<'a>(
|
|
entry: &ash::Entry,
|
|
wants_layers: Vec<&'a CStr>,
|
|
) -> core::result::Result<Vec<&'a CStr>, (Vec<&'a CStr>, Vec<&'a CStr>)> {
|
|
unsafe {
|
|
let Ok(available_layers) = entry.enumerate_instance_layer_properties() else {
|
|
return Err((vec![], wants_layers));
|
|
};
|
|
|
|
let Ok(available_layer_names) = available_layers
|
|
.iter()
|
|
.map(|layer| layer.layer_name_as_c_str())
|
|
.collect::<core::result::Result<BTreeSet<_>, _>>()
|
|
else {
|
|
return Err((vec![], wants_layers));
|
|
};
|
|
|
|
tracing::debug!(
|
|
"Available layers: {:?}",
|
|
available_layer_names
|
|
.iter()
|
|
.map(|s| s.to_str().unwrap_or("<invalid utf8>"))
|
|
.collect::<Vec<_>>()
|
|
);
|
|
|
|
let mut enabled_layers = Vec::new();
|
|
let mut unsupported_layers = Vec::new();
|
|
|
|
for layer in wants_layers {
|
|
if available_layer_names.contains(&layer) {
|
|
enabled_layers.push(layer);
|
|
} else {
|
|
unsupported_layers.push(layer);
|
|
}
|
|
}
|
|
|
|
if !unsupported_layers.is_empty() {
|
|
Err((enabled_layers, unsupported_layers))
|
|
} else {
|
|
Ok(enabled_layers)
|
|
}
|
|
}
|
|
}
|
|
|
|
impl PhysicalDeviceInfo {
    /// Create a logical [`Device`] from this physical device.
    ///
    /// Selects queue families (presentation-capable when `display_handle`
    /// is given), enables the supported subset of `extensions` plus
    /// `VK_KHR_swapchain`, chains `features` onto the create info, and sets
    /// up the GPU allocator and per-device pools.
    pub fn create_logical_device(
        self,
        instance: &Instance,
        extensions: &[Extension<'static>],
        mut features: PhysicalDeviceFeatures,
        display_handle: Option<RawDisplayHandle>,
    ) -> Result<Device> {
        let queue_infos = DeviceQueueInfos::select_queue_families(instance, &self, display_handle)?;

        let queue_create_infos = queue_infos.into_create_infos();
        // Filters to what the device supports and always prepends swapchain
        // (see `required_extensions` below).
        let extensions = Self::required_extensions(&self, extensions);

        let create_info = vk::DeviceCreateInfo::default()
            .queue_create_infos(&queue_create_infos)
            .enabled_extension_names(&extensions);
        let create_info = features.push_to_device_create_info(create_info);

        let device = unsafe {
            instance
                .inner
                .raw
                .create_device(self.pdev, &create_info, None)?
        };

        let device_queues = queue_infos.retrieve_queues(&device);

        // The raw pointers originate from `'static` extension name constants,
        // so borrowing them back as `&'static CStr` is sound.
        let enabled_extensions = extensions
            .into_iter()
            .map(|ptr| unsafe { CStr::from_ptr(ptr) })
            .collect::<Vec<_>>();

        // Load extension fn tables only for what actually got enabled.
        let device_extensions = DeviceExtensions {
            debug_utils: ext::debug_utils::Device::new(&instance.inner.raw, &device),
            swapchain: if enabled_extensions.contains(&khr::swapchain::NAME) {
                Some(khr::swapchain::Device::new(&instance.inner.raw, &device))
            } else {
                None
            },
            mesh_shader: if enabled_extensions.contains(&ext::mesh_shader::NAME) {
                Some(ext::mesh_shader::Device::new(&instance.inner.raw, &device))
            } else {
                None
            },
        };

        let alloc2 =
            gpu_allocator::vulkan::Allocator::new(&gpu_allocator::vulkan::AllocatorCreateDesc {
                instance: instance.inner.raw.clone(),
                device: device.clone(),
                physical_device: self.pdev,
                debug_settings: Default::default(),
                buffer_device_address: false,
                allocation_sizes: {
                    const MB: u64 = 1024 * 1024;
                    // 8/64 MiB starting block sizes, both capped at 256 MiB
                    // (see `gpu_allocator::AllocationSizes` for which side is
                    // device vs host).
                    gpu_allocator::AllocationSizes::new(8 * MB, 64 * MB)
                        .with_max_host_memblock_size(256 * MB)
                        .with_max_device_memblock_size(256 * MB)
                },
            })?;

        let inner = DeviceInner {
            raw: device.clone(),
            alloc2: Mutex::new(alloc2),
            instance: instance.clone(),
            adapter: self,
            queues: device_queues,
            device_extensions,
            enabled_extensions,
            sync_threadpool: sync::SyncThreadpool::new(),
            // Declared last in `DeviceInner` so the raw device outlives all
            // other fields during drop.
            _drop: DeviceDrop(device),
        };

        let shared = Arc::new(inner);
        Ok(Device {
            pools: Arc::new(DevicePools::new(shared.clone())),
            shared,
        })
    }

    /// Build the raw list of extension-name pointers to enable, always
    /// starting with `VK_KHR_swapchain`.
    ///
    /// Requested extensions the device does not support (or supports only at
    /// a lower spec version) are skipped with a warning rather than failing
    /// device creation.
    fn required_extensions(&self, requested_extensions: &[Extension<'static>]) -> Vec<*const i8> {
        let mut extensions = vec![khr::swapchain::NAME.as_ptr()];
        for ext in requested_extensions {
            if self
                .properties
                .supported_extensions
                .iter()
                .any(|supported| {
                    supported.extension_name_as_c_str() == Ok(ext.name)
                        && supported.spec_version >= ext.version
                })
            {
                extensions.push(ext.name.as_ptr());
            } else {
                tracing::warn!(
                    "Physical device {:?} does not support required extension {:?}",
                    self.pdev,
                    ext.name
                );
            }
        }
        extensions
    }
}
|
|
|
|
/// Per-device pools of reusable sync objects plus the shared pipeline cache.
#[derive(Debug)]
pub(crate) struct DevicePools {
    pub(crate) pipeline_cache: asdf::DeviceObject<PipelineCache, Arc<DeviceInner>>,
    /// Behind an `Arc` so pooled fences can hold a handle to their own pool
    /// (see [`PoolObject`]'s default owner type).
    pub(crate) fences: Arc<Pool<vk::Fence>>,
    pub(crate) binary_semaphores: Pool<BinarySemaphore>,
    pub(crate) timeline_semaphores: Pool<TimelineSemaphore>,
}
|
|
|
|
impl AsRef<DevicePools> for DevicePools {
    // Identity impl so generic code can accept `impl AsRef<DevicePools>`.
    fn as_ref(&self) -> &DevicePools {
        self
    }
}
|
|
|
|
impl DevicePools {
|
|
pub fn new(device: Arc<DeviceInner>) -> Self {
|
|
Self {
|
|
fences: Arc::new(Pool::new(device.clone())),
|
|
binary_semaphores: Pool::new(device.clone()),
|
|
timeline_semaphores: Pool::new(device.clone()),
|
|
pipeline_cache: asdf::DeviceObject::new(
|
|
device.clone(),
|
|
PipelineCache::new(&device.raw, &device.adapter).unwrap(),
|
|
),
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Cheaply-cloneable handle to a logical Vulkan device and its pools.
#[derive(Clone, Debug)]
pub struct Device {
    pub(crate) shared: Arc<DeviceInner>,
    pub(crate) pools: Arc<DevicePools>,
}
|
|
|
|
impl PartialEq for Device {
    // Devices compare equal iff they share the same `DeviceInner` allocation
    // — identity equality, not structural equality.
    fn eq(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.shared, &other.shared)
    }
}

impl Eq for Device {}
|
|
|
|
// `Device` transparently exposes all of `DeviceInner`'s methods.
impl core::ops::Deref for Device {
    type Target = DeviceInner;

    fn deref(&self) -> &Self::Target {
        &self.shared
    }
}
|
|
|
|
// Forward every `AsRef` impl of `DeviceInner` through `Device`'s `Deref`,
// so a `&Device` works wherever `impl AsRef<DeviceInner>` (or any other
// target `DeviceInner` converts to) is expected.
impl<T> AsRef<T> for Device
where
    T: ?Sized,
    <Self as Deref>::Target: AsRef<T>,
{
    fn as_ref(&self) -> &T {
        self.deref().as_ref()
    }
}
|
|
|
|
impl DeviceInner {
|
|
pub fn sync_threadpool(&self) -> &sync::SyncThreadpool {
|
|
&self.sync_threadpool
|
|
}
|
|
pub fn dev(&self) -> &ash::Device {
|
|
&self.raw
|
|
}
|
|
pub fn instance(&self) -> &Instance {
|
|
&self.instance
|
|
}
|
|
pub fn queues(&self) -> &DeviceQueues {
|
|
&self.queues
|
|
}
|
|
pub fn phy(&self) -> vk::PhysicalDevice {
|
|
self.adapter.pdev
|
|
}
|
|
pub fn features(&self) -> &crate::PhysicalDeviceFeatures {
|
|
&self.adapter.features
|
|
}
|
|
pub(crate) fn properties(&self) -> &crate::PhysicalDeviceProperties {
|
|
&self.adapter.properties
|
|
}
|
|
pub fn physical_device(&self) -> &PhysicalDeviceInfo {
|
|
&self.adapter
|
|
}
|
|
pub fn main_queue(&self) -> &Queue {
|
|
self.queues.graphics()
|
|
}
|
|
pub fn compute_queue(&self) -> &Queue {
|
|
self.queues.compute()
|
|
}
|
|
pub fn transfer_queue(&self) -> &Queue {
|
|
self.queues.transfer()
|
|
}
|
|
|
|
/// # Safety
|
|
///
|
|
/// The caller must ensure that the queues aren't already locked when calling this function.
|
|
pub unsafe fn lock_queues(&self) {
|
|
unsafe {
|
|
self.queues.lock();
|
|
}
|
|
}
|
|
|
|
/// # Safety
|
|
///
|
|
/// The caller must have acquired and have logical ownership of the lock on the queues.
|
|
pub unsafe fn unlock_queues(&self) {
|
|
unsafe {
|
|
self.queues.unlock();
|
|
}
|
|
}
|
|
|
|
pub fn wait_queue_idle(&self, queue: &Queue) -> VkResult<()> {
|
|
tracing::warn!("locking queue {queue:?} and waiting for idle");
|
|
|
|
queue.with_locked(|q| unsafe { self.raw.queue_wait_idle(q.raw) })?;
|
|
|
|
tracing::warn!("finished waiting: unlocking queue {queue:?}.");
|
|
Ok(())
|
|
}
|
|
|
|
pub fn wait_idle(&self) -> VkResult<()> {
|
|
tracing::warn!("locking all queues and waiting for device to idle");
|
|
unsafe {
|
|
self.lock_queues();
|
|
self.raw.device_wait_idle()?;
|
|
self.unlock_queues();
|
|
}
|
|
tracing::warn!("finished waiting: unlocking all queues.");
|
|
Ok(())
|
|
}
|
|
|
|
/// # Safety
|
|
///
|
|
/// This method inherits the safety contract from [`vkSetDebugUtilsObjectName`]. In particular:
|
|
///
|
|
/// - `object` must be a valid handle for one of the following:
|
|
/// - An instance-level object from the same instance as this device.
|
|
/// - A physical-device-level object that descends from the same physical device as this
|
|
/// device.
|
|
/// - A device-level object that descends from this device.
|
|
/// - `object` must be externally synchronized—only the calling thread should access it during
|
|
/// this call.
|
|
///
|
|
/// [`vkSetDebugUtilsObjectName`]: https://registry.khronos.org/vulkan/specs/latest/man/html/vkSetDebugUtilsObjectNameEXT.html
|
|
pub unsafe fn debug_name_object<T: vk::Handle>(&self, object: T, name: &str) {
|
|
// avoid heap allocation for short names
|
|
let mut buffer = [0u8; 64];
|
|
let buffer_vec: Vec<u8>;
|
|
|
|
let name_bytes = if name.is_empty() {
|
|
&[0]
|
|
} else if name.len() < buffer.len() {
|
|
buffer[..name.len()].copy_from_slice(name.as_bytes());
|
|
&buffer[..]
|
|
} else {
|
|
buffer_vec = name
|
|
.as_bytes()
|
|
.iter()
|
|
.cloned()
|
|
.chain(std::iter::once(0))
|
|
.collect();
|
|
&buffer_vec
|
|
};
|
|
|
|
let name = CStr::from_bytes_until_nul(name_bytes)
|
|
.inspect_err(|_| {
|
|
panic!(
|
|
"debug name {:?} contains interior nul byte, which is not allowed",
|
|
name_bytes
|
|
)
|
|
})
|
|
.expect("{name_bytes:?} there is always a nul terminator because we added one");
|
|
|
|
unsafe {
|
|
_ = self
|
|
.device_extensions
|
|
.debug_utils
|
|
.set_debug_utils_object_name(
|
|
&vk::DebugUtilsObjectNameInfoEXT::default()
|
|
.object_handle(object)
|
|
.object_name(name),
|
|
);
|
|
}
|
|
}
|
|
}
|
|
|
|
/// A raw Vulkan handle paired with its owning [`Device`] and, in debug
/// builds, an optional debug name.
///
/// This wrapper does *not* destroy the handle on drop — destruction is left
/// to the enclosing type (see `define_device_owned_handle!`).
#[derive(Clone)]
pub struct DeviceOwnedDebugObject<T> {
    pub(crate) device: Device,
    pub(crate) object: T,
    /// Only retained in debug builds, for `Debug` output.
    #[cfg(debug_assertions)]
    name: Option<Cow<'static, str>>,
}
|
|
|
|
impl<T: Eq> Eq for DeviceOwnedDebugObject<T> {}
impl<T: PartialEq> PartialEq for DeviceOwnedDebugObject<T> {
    // Equal when owned by the *same* device (pointer identity) and the
    // wrapped handles match; the debug name is deliberately ignored.
    fn eq(&self, other: &Self) -> bool {
        std::sync::Arc::ptr_eq(&self.device.shared, &other.device.shared)
            && self.object == other.object
    }
}
|
|
|
|
impl<T: std::fmt::Debug + vk::Handle + Copy> std::fmt::Debug for DeviceOwnedDebugObject<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The struct name printed is the handle type (e.g. `vk::Fence`),
        // not this wrapper's.
        let mut fmt = f.debug_struct(core::any::type_name::<T>());

        // Print device and handle as raw hex values without allocating.
        // NOTE(review): `field_with` is the unstable `debug_closure_helpers`
        // API — presumably enabled crate-wide on nightly; confirm.
        fmt.field_with("device", |f| {
            write!(f, "0x{:x}", self.device.raw.handle().as_raw())
        })
        .field_with("handle", |f| write!(f, "0x{:x}", &self.object.as_raw()));

        // The debug name only exists in debug builds.
        #[cfg(debug_assertions)]
        {
            fmt.field("name", &self.name);
        }

        fmt.finish()
    }
}
|
|
|
|
impl<T> DeviceOwnedDebugObject<T> {
    /// Wrap `object`, assigning `name` as its Vulkan debug name when given.
    ///
    /// Infallible in practice: `debug_name_object` swallows naming errors,
    /// so this always returns `Ok`. The `VkResult` return is kept for
    /// interface stability.
    pub fn new(
        device: crate::Device,
        object: T,
        name: Option<Cow<'static, str>>,
    ) -> ash::prelude::VkResult<Self>
    where
        T: vk::Handle + Copy,
    {
        if let Some(name) = name.as_ref() {
            // NOTE(review): assumes `object` belongs to `device` (the safety
            // contract of `debug_name_object`) — callers uphold this.
            unsafe {
                device.debug_name_object(object, name);
            }
        }

        Ok(Self {
            device,
            object,
            // The name is only stored in debug builds.
            #[cfg(debug_assertions)]
            name,
        })
    }

    /// The owning device.
    pub fn dev(&self) -> &crate::Device {
        &self.device
    }
    /// The wrapped raw handle.
    pub fn handle(&self) -> T
    where
        T: Copy,
    {
        self.object
    }
}
|
|
|
|
pub use asdf::{DeviceObject, InnerDeviceObject};
|
|
|
|
/// A handle type that knows how to destroy itself against a [`Device`].
pub trait DeviceHandle {
    /// # Safety
    /// The caller must ensure this function is only called once for a given object.
    unsafe fn destroy(&mut self, device: &Device);
}
|
|
|
|
// A `Mutex<T>` is destroyed by unwrapping it and destroying the protected
// value; this lets mutex-wrapped resources live inside `DeviceObject`.
impl<O, T: ExternallyManagedObject<O>> ExternallyManagedObject<O> for Mutex<T> {
    unsafe fn destroy(self, owner: &O) {
        // Safety guarantee is upheld by the caller.
        unsafe { self.into_inner().destroy(owner) };
    }
}
|
|
|
|
impl<T: AsRef<DeviceInner>> ExternallyManagedObject<T> for vk::Buffer {
    unsafe fn destroy(self, device: &T) {
        // SAFETY: single destruction is guaranteed by the trait contract;
        // the buffer belongs to this device by construction.
        unsafe {
            device.as_ref().raw.destroy_buffer(self, None);
        }
    }
}
|
|
|
|
impl<T: AsRef<DeviceInner>> ExternallyManagedObject<T> for vk::SwapchainKHR {
    unsafe fn destroy(self, device: &T) {
        unsafe {
            // NOTE(review): when the swapchain extension table is absent the
            // handle is silently leaked — but a `SwapchainKHR` can only have
            // been created with the extension enabled, so the `None` branch
            // should be unreachable in practice.
            if let Some(swapchain) = device.as_ref().device_extensions.swapchain.as_ref() {
                swapchain.destroy_swapchain(self, None)
            }
        }
    }
}
|
|
|
|
/// Read access to a resource's owning device and raw handle.
pub trait DeviceOwned<T> {
    fn device(&self) -> &Device;
    fn handle(&self) -> T;
}
|
|
|
|
/// Objects that can be constructed on demand to populate a [`Pool`].
pub trait Pooled: Sized {
    /// Create a fresh instance when the pool has no cached one.
    fn create_from_pool(pool: &Pool<Self>) -> Result<Self>;
}
|
|
|
|
/// A mutex-guarded free-list of reusable objects tied to a device.
#[derive(Debug)]
pub struct Pool<T> {
    pub(crate) pool: Mutex<Vec<T>>,
    /// The device the pooled objects belong to.
    pub(crate) device: Arc<DeviceInner>,
}
|
|
|
|
impl<T> Pool<T> {
|
|
pub fn push(&self, item: T) {
|
|
self.pool.lock().push(item);
|
|
}
|
|
pub fn new(device: Arc<DeviceInner>) -> Self {
|
|
Self {
|
|
pool: Mutex::new(Vec::new()),
|
|
device,
|
|
}
|
|
}
|
|
pub fn pop(&self) -> Option<T> {
|
|
self.pool.lock().pop()
|
|
}
|
|
}
|
|
|
|
impl<T> AsRef<Pool<T>> for Pool<T> {
    // Identity impl so generic code can accept `impl AsRef<Pool<T>>`.
    fn as_ref(&self) -> &Pool<T> {
        self
    }
}
|
|
|
|
/// An object whose cleanup is delegated to its owner — by default an
/// `Arc<Pool<T>>`; the matching `ExternallyManagedObject` impl decides what
/// "destroy" means (e.g. pooled semaphores are recycled, not destroyed).
pub type PoolObject<T, U = Arc<Pool<T>>> = asdf::ExternallyManagedObject<T, U>;
|
|
|
|
impl<T: Pooled> Pool<T> {
|
|
pub fn get(&self) -> Result<T> {
|
|
let item = if let Some(item) = self.pool.lock().pop() {
|
|
item
|
|
} else {
|
|
T::create_from_pool(self)?
|
|
};
|
|
|
|
Ok(item)
|
|
}
|
|
|
|
pub fn get_debug_named(&self, name: Option<impl Into<Cow<'static, str>>>) -> Result<T>
|
|
where
|
|
T: asdf::traits::DebugNameable,
|
|
{
|
|
let obj = self.get()?;
|
|
|
|
#[cfg(debug_assertions)]
|
|
{
|
|
let name = name.map(Into::into).unwrap_or_default();
|
|
<T as asdf::traits::DebugNameable>::debug_name(&obj, &self.device, &name);
|
|
}
|
|
|
|
Ok(obj)
|
|
}
|
|
}
|
|
|
|
// Macro for helping create and destroy Vulkan objects which are owned by a device.
#[macro_export]
macro_rules! define_device_owned_handle {
    // Defines `$ty` wrapping the raw handle `$handle` (plus any extra
    // fields), generating a private `construct` constructor and a
    // `DeviceOwned` impl. An optional `=> |this| <stmt>` tail additionally
    // generates a `Drop` impl that runs the given statement with `this`
    // bound to `&mut self`.
    ($(#[$attr:meta])*
    $ty_vis:vis $ty:ident($handle:ty) {
        $($(#[$field_attr:meta])* $field_vis:vis $field_name:ident : $field_ty:ty),*
        $(,)?
    } $(=> |$this:ident| $dtor:stmt)?) => {
        $(#[$attr])*
        $ty_vis struct $ty {
            // Pairs the handle with the owning device and its debug name.
            inner: $crate::device::DeviceOwnedDebugObject<$handle>,
            $(
                $(#[$field_attr])*
                $field_vis $field_name: $field_ty,
            )*
        }

        impl $crate::device::DeviceOwned<$handle> for $ty {
            fn device(&self) -> &$crate::device::Device {
                self.inner.dev()
            }
            fn handle(&self) -> $handle {
                self.inner.handle()
            }
        }

        impl $ty {
            #[allow(clippy::too_many_arguments, reason = "This function is generated by a macro")]
            fn construct(
                device: $crate::device::Device,
                handle: $handle,
                name: Option<::std::borrow::Cow<'static, str>>,
                $($field_name: $field_ty,)*
            ) -> ::ash::prelude::VkResult<Self> {
                Ok(Self {
                    inner: $crate::device::DeviceOwnedDebugObject::new(
                        device,
                        handle,
                        name,
                    )?,
                    $($field_name,)*
                })
            }
        }

        $(
            impl Drop for $ty {
                fn drop(&mut self) {
                    #[allow(unused_mut)]
                    let mut $this = self;
                    $dtor
                }
            }
        )?
    };
}
|
|
|
|
// This module is an experiment in a more generic way to manage device-owned resources.
|
|
// #[cfg(false)]
|
|
pub(crate) mod asdf {
|
|
use std::{
|
|
mem::{ManuallyDrop, MaybeUninit},
|
|
ops::{Deref, DerefMut},
|
|
sync::Arc,
|
|
};
|
|
|
|
use ash::vk;
|
|
|
|
use crate::{device::DeviceInner, util::DebugName};
|
|
|
|
pub mod traits {
    /// A trait describing an object owned by some manager-type, which is
    /// responsible for destroying it.
    pub trait ExternallyManagedObject<T> {
        /// # Safety
        /// The caller must ensure this function is only called once for a given object.
        unsafe fn destroy(self, owner: &T);
    }

    /// A trait describing an object which can have a debug name assigned to it.
    ///
    /// Blanket-implemented for every raw `vk::Handle` (see `impls` below).
    pub trait DebugNameable {
        fn debug_name(&self, device: &super::DeviceInner, name: &str);
    }
}
|
|
|
|
/// Wrapper for types which are owned by another type `O`, which is responsible for destruction.
#[derive(Debug)]
pub struct ExternallyManagedObject<T: traits::ExternallyManagedObject<O>, O> {
    // `ManuallyDrop` so that `Drop` (and the `map_*` methods) can move the
    // value out and hand it to `destroy` exactly once.
    inner: ManuallyDrop<T>,
    owner: O,
}
|
|
|
|
// Transparent access to the wrapped value.
impl<T: traits::ExternallyManagedObject<O>, O> Deref for ExternallyManagedObject<T, O> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}
|
|
|
|
// Transparent mutable access to the wrapped value.
impl<T: traits::ExternallyManagedObject<O>, O> DerefMut for ExternallyManagedObject<T, O> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
|
|
|
|
impl<T: traits::ExternallyManagedObject<O>, O> ExternallyManagedObject<T, O> {
    /// Wrap `inner`; from now on `owner` is responsible for destroying it.
    pub fn new(inner: T, owner: O) -> Self {
        Self {
            inner: ManuallyDrop::new(inner),
            owner,
        }
    }
    /// The owner responsible for destruction.
    pub fn owner(&self) -> &O {
        &self.owner
    }

    /// Transform the wrapped value, keeping the same owner, *without*
    /// running `destroy` on the old value.
    pub fn map_inner<U>(self, f: impl FnOnce(T) -> U) -> ExternallyManagedObject<U, O>
    where
        U: traits::ExternallyManagedObject<O>,
    {
        unsafe {
            // SAFETY: `self` is moved into `MaybeUninit`, so its `Drop`
            // never runs; `inner` is then moved out exactly once via
            // `ManuallyDrop::take` and `owner` exactly once via `ptr::read`.
            // Nothing is dropped twice, and the new wrapper takes over
            // responsibility for both fields.
            let mut this = MaybeUninit::new(self);
            let inner = ManuallyDrop::take(&mut this.assume_init_mut().inner);
            let owner = core::ptr::read(&raw const this.assume_init_mut().owner);

            let new_inner = f(inner);

            ExternallyManagedObject {
                inner: ManuallyDrop::new(new_inner),
                owner,
            }
        }
    }

    /// Transform the owner, keeping the wrapped value.
    pub fn map_owner<U>(self, f: impl FnOnce(O) -> U) -> ExternallyManagedObject<T, U>
    where
        T: traits::ExternallyManagedObject<U>,
    {
        unsafe {
            // SAFETY: same scheme as `map_inner` — `MaybeUninit` suppresses
            // `Drop`, and each field is moved out exactly once.
            let mut this = MaybeUninit::new(self);
            let inner = ManuallyDrop::take(&mut this.assume_init_mut().inner);

            // get the old owner without calling `Self::drop`
            let owner = core::ptr::read(&raw const this.assume_init_mut().owner);

            ExternallyManagedObject {
                inner: ManuallyDrop::new(inner),
                owner: f(owner),
            }
        }
    }
}
|
|
|
|
impl<T, O> Drop for ExternallyManagedObject<T, O>
where
    T: traits::ExternallyManagedObject<O>,
{
    fn drop(&mut self) {
        // SAFETY: `inner` is taken exactly once here — the only other places
        // that take it (`map_inner`/`map_owner`) forget `self` first via
        // `MaybeUninit`, so `destroy`'s single-call contract holds.
        unsafe {
            let inner = ManuallyDrop::take(&mut self.inner);
            inner.destroy(&self.owner);
        }
    }
}
|
|
|
|
/// A [`DeviceObject`] owned directly by the shared `DeviceInner`.
pub type InnerDeviceObject<T> = DeviceObject<T, Arc<DeviceInner>>;
|
|
|
|
/// A wrapper for vulkan types which are owned by the device, taking care of destruction.
#[derive(Debug)]
pub struct DeviceObject<
    T: traits::ExternallyManagedObject<O>,
    O: AsRef<super::DeviceInner> = super::Device,
> {
    inner: ExternallyManagedObject<T, O>,
    /// Retained only for diagnostics via [`Self::name`].
    #[allow(dead_code)]
    name: Option<DebugName>,
}
|
|
|
|
impl<
    T: traits::ExternallyManagedObject<O> + traits::DebugNameable,
    O: AsRef<super::DeviceInner>,
> DeviceObject<T, O>
{
    /// Wrap `inner`, first assigning `name` as its debug name when given.
    pub fn new_debug_named(owner: O, inner: T, name: Option<impl Into<DebugName>>) -> Self {
        let name = name.map(Into::into);
        if let Some(ref name) = name {
            traits::DebugNameable::debug_name(&inner, owner.as_ref(), name);
        }

        let obj = ExternallyManagedObject::new(inner, owner);

        Self { inner: obj, name }
    }
}
|
|
|
|
impl<T: traits::ExternallyManagedObject<O>, O: AsRef<super::DeviceInner>> DeviceObject<T, O> {
    /// Wrap `inner` without assigning a debug name.
    pub fn new(owner: O, inner: T) -> Self {
        let inner = ExternallyManagedObject::new(inner, owner);

        Self { inner, name: None }
    }

    /// Like [`Self::new`] plus debug naming, for wrapped types that are not
    /// themselves [`traits::DebugNameable`]: `debug_namable` projects out
    /// the part that can be named (e.g. an inner raw handle).
    pub fn new_debug_named_with<D>(
        owner: O,
        inner: T,
        name: Option<impl Into<DebugName>>,
        debug_namable: impl FnOnce(&T) -> D,
    ) -> Self
    where
        D: traits::DebugNameable,
    {
        let name = name.map(Into::into);
        if let Some(ref name) = name {
            traits::DebugNameable::debug_name(&debug_namable(&inner), owner.as_ref(), name);
        }

        let obj = ExternallyManagedObject::new(inner, owner);

        Self { inner: obj, name }
    }

    /// The owner responsible for destruction (commonly the device itself,
    /// hence the name — but it may be any `AsRef<DeviceInner>` owner).
    pub fn device(&self) -> &O {
        self.inner.owner()
    }

    /// The debug name assigned at construction, if any.
    pub fn name(&self) -> Option<&str> {
        self.name.as_ref().map(|n| &**n)
    }

    /// Transform the wrapped value while keeping owner and name.
    pub fn map_inner<U>(self, f: impl FnOnce(T) -> U) -> DeviceObject<U, O>
    where
        U: traits::ExternallyManagedObject<O>,
    {
        DeviceObject {
            inner: self.inner.map_inner(f),
            name: self.name,
        }
    }

    /// Transform the owner while keeping the wrapped value and name.
    pub fn map_owner<U>(self, f: impl FnOnce(O) -> U) -> DeviceObject<T, U>
    where
        T: traits::ExternallyManagedObject<U>,
        U: AsRef<super::DeviceInner>,
    {
        DeviceObject {
            inner: self.inner.map_owner(f),
            name: self.name,
        }
    }
}
|
|
|
|
// Transparent access to the wrapped value.
impl<T, O> Deref for DeviceObject<T, O>
where
    T: traits::ExternallyManagedObject<O>,
    O: AsRef<super::DeviceInner>,
{
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &*self.inner
    }
}
|
|
|
|
// Transparent mutable access to the wrapped value.
impl<T, O> DerefMut for DeviceObject<T, O>
where
    T: traits::ExternallyManagedObject<O>,
    O: AsRef<super::DeviceInner>,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.inner
    }
}
|
|
|
|
mod impls {
    use crate::device::{DeviceInner, DevicePools, GpuAllocation};

    use super::*;

    impl<T: AsRef<DeviceInner>> traits::ExternallyManagedObject<T> for vk::Semaphore {
        unsafe fn destroy(self, device: &T) {
            // SAFETY: single destruction guaranteed by the trait contract.
            unsafe {
                device.as_ref().raw.destroy_semaphore(self, None);
            }
        }
    }

    impl<T: AsRef<DeviceInner>> traits::ExternallyManagedObject<T> for GpuAllocation {
        unsafe fn destroy(self, device: &T) {
            // A failed free is ignored — nothing useful to do with the
            // error during destruction.
            _ = device.as_ref().alloc2.lock().free(self);
        }
    }

    // A semaphore "destroyed" against the pools is actually recycled: it is
    // pushed back into the binary-semaphore pool for reuse.
    impl traits::ExternallyManagedObject<DevicePools> for vk::Semaphore {
        unsafe fn destroy(self, owner: &DevicePools) {
            owner
                .binary_semaphores
                .push(crate::sync::BinarySemaphore(self));
        }
    }

    // Every raw Vulkan handle can be debug-named through the device.
    impl<T> traits::DebugNameable for T
    where
        T: vk::Handle + Copy,
    {
        fn debug_name(&self, device: &super::DeviceInner, name: &str) {
            // NOTE(review): relies on the handle belonging to `device`
            // (`debug_name_object`'s safety contract) — upheld by callers.
            unsafe {
                device.debug_name_object(*self, name);
            }
        }
    }
}
|
|
|
|
// Compile-time examples of the supported owner/object combinations; this is
// never executed (`summon` would panic if called at runtime).
#[allow(dead_code)]
#[cfg(test)]
fn asdf() {
    use crate::device::{DevicePools, GpuAllocation};
    // Conjures a value of any type; diverges if actually evaluated.
    fn summon<T>() -> T {
        unimplemented!()
    }

    let _inner_ref: DeviceObject<vk::Semaphore, &DeviceInner> = DeviceObject::new_debug_named(
        summon::<&DeviceInner>(),
        summon::<vk::Semaphore>(),
        Some("my semaphore"),
    );

    let _device_owned: DeviceObject<vk::Semaphore, super::Device> =
        DeviceObject::new_debug_named(
            summon::<super::Device>(),
            summon::<vk::Semaphore>(),
            Some("my other semaphore"),
        );

    let _allocation: DeviceObject<GpuAllocation, Arc<super::DeviceInner>> = DeviceObject::new(
        summon::<Arc<super::DeviceInner>>(),
        summon::<GpuAllocation>(),
    );

    let _pool_owned: ExternallyManagedObject<vk::Semaphore, DevicePools> =
        ExternallyManagedObject::new(summon::<vk::Semaphore>(), summon::<DevicePools>());
}
|
|
}
|