// vidya/crates/renderer/src/images.rs (1538 lines, 47 KiB, Rust)

use std::{
borrow::Cow,
mem::ManuallyDrop,
ops::{Add, Sub},
sync::Arc,
};
use crate::{
device::{
Allocation, AllocationStrategy, DeviceInner, DeviceObject, QueueFlags,
asdf::traits::ExternallyManagedObject,
},
swapchain::Swapchain,
util::weak_vec::WeakVec,
};
use super::Device;
use ash::vk::{self, Handle};
use gpu_allocator::vulkan::{AllocationCreateDesc, AllocationScheme};
use parking_lot::Mutex;
/// Creation parameters for an [`Image`]: mirrors the fields of
/// `vk::ImageCreateInfo`, plus allocator settings and an optional debug name.
#[derive(Clone)]
pub struct ImageDesc {
    pub flags: vk::ImageCreateFlags,
    /// Debug-only label; attached to the Vulkan object and the allocation.
    pub name: Option<Cow<'static, str>>,
    pub format: vk::Format,
    /// 1D / 2D / 3D image type.
    pub kind: vk::ImageType,
    pub mip_levels: u32,
    pub array_layers: u32,
    pub samples: vk::SampleCountFlags,
    pub extent: vk::Extent3D,
    pub tiling: vk::ImageTiling,
    pub usage: vk::ImageUsageFlags,
    /// Queue families that will access the image; more than one family
    /// selects `vk::SharingMode::CONCURRENT` at creation time.
    pub queue_families: QueueFlags,
    /// Initial layout passed to `vkCreateImage`.
    pub layout: vk::ImageLayout,
    /// Where the backing memory should live (GPU-only, CPU-to-GPU, ...).
    pub mem_location: gpu_allocator::MemoryLocation,
    /// Whether the allocator sub-allocates or makes a dedicated allocation.
    pub alloc_scheme: AllocationStrategy,
}
impl ImageDesc {
pub fn mip_level_range(&self) -> MipRange {
MipRange {
start: 0,
end: self.mip_levels,
}
}
pub fn layer_range(&self) -> MipRange {
MipRange {
start: 0,
end: self.array_layers,
}
}
}
/// Manual `Hash` over every field except `name` (debug metadata) and
/// `alloc_scheme`. This stays consistent with the `Hash`/`Eq` law: the manual
/// `PartialEq` below ignores `alloc_scheme` too, and omitting a field from the
/// hash never makes equal values hash differently.
impl std::hash::Hash for ImageDesc {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.flags.hash(state);
        self.format.hash(state);
        self.kind.hash(state);
        self.mip_levels.hash(state);
        self.array_layers.hash(state);
        self.samples.hash(state);
        self.extent.hash(state);
        self.tiling.hash(state);
        self.usage.hash(state);
        self.queue_families.hash(state);
        self.layout.hash(state);
        self.mem_location.hash(state);
    }
}
impl Eq for ImageDesc {}
/// Field-wise equality, deliberately excluding `alloc_scheme`.
/// Note that `name` IS compared here even though the `Hash` impl skips it.
impl PartialEq for ImageDesc {
    fn eq(&self, other: &Self) -> bool {
        self.flags == other.flags
            && self.name == other.name
            && self.format == other.format
            && self.kind == other.kind
            && self.mip_levels == other.mip_levels
            && self.array_layers == other.array_layers
            && self.samples == other.samples
            && self.extent == other.extent
            && self.tiling == other.tiling
            && self.usage == other.usage
            && self.queue_families == other.queue_families
            && self.layout == other.layout
            && self.mem_location == other.mem_location
    }
}
/// Debug formatting listing every field (including `alloc_scheme`, which the
/// `Hash`/`PartialEq` impls skip).
///
/// Fix: the impl previously declared `impl<'a>` with an unused lifetime
/// parameter, which is rejected by rustc (E0207: the lifetime parameter is not
/// constrained by the impl trait, self type, or predicates).
impl std::fmt::Debug for ImageDesc {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ImageDesc")
            .field("flags", &self.flags)
            .field("name", &self.name)
            .field("format", &self.format)
            .field("kind", &self.kind)
            .field("mip_levels", &self.mip_levels)
            .field("array_layers", &self.array_layers)
            .field("samples", &self.samples)
            .field("extent", &self.extent)
            .field("tiling", &self.tiling)
            .field("usage", &self.usage)
            .field("queue_families", &self.queue_families)
            .field("layout", &self.layout)
            .field("mem_location", &self.mem_location)
            .field("alloc_scheme", &self.alloc_scheme)
            .finish()
    }
}
impl Default for ImageDesc {
fn default() -> Self {
Self {
flags: Default::default(),
name: Default::default(),
format: Default::default(),
kind: vk::ImageType::TYPE_2D,
samples: vk::SampleCountFlags::TYPE_1,
mip_levels: 1,
array_layers: 1,
extent: Default::default(),
tiling: vk::ImageTiling::OPTIMAL,
usage: Default::default(),
queue_families: QueueFlags::empty(),
layout: vk::ImageLayout::UNDEFINED,
mem_location: gpu_allocator::MemoryLocation::Unknown,
alloc_scheme: AllocationStrategy::AllocatorManaged,
}
}
}
/// Where an image handle came from, which determines who destroys it.
#[derive(Debug)]
enum ImageInner {
    /// Borrowed from a swapchain: the swapchain owns and destroys the handle.
    Swapchain(vk::Image, Device),
    /// Created and allocated by us: destroyed via `DeviceObject`'s cleanup.
    Allocated(DeviceObject<vk::Image>, Allocation),
}
/// Hooks `vk::Image` into the generic device-object cleanup machinery.
impl<T: AsRef<DeviceInner>> ExternallyManagedObject<T> for vk::Image {
    unsafe fn destroy(self, device: &T) {
        // SAFETY: caller guarantees the handle is valid, belongs to `device`,
        // and is no longer in use by the GPU.
        unsafe {
            device.as_ref().raw.destroy_image(self, None);
        }
    }
}
impl ImageInner {
fn image(&self) -> vk::Image {
match self {
Self::Swapchain(image, _) => *image,
Self::Allocated(image, _) => **image,
}
}
fn device(&self) -> &Device {
match self {
Self::Swapchain(_, device) => device,
Self::Allocated(image, _) => image.device(),
}
}
fn allocation(&self) -> Option<&Allocation> {
match self {
Self::Swapchain(_, _) => None,
Self::Allocated(_, alloc) => Some(alloc),
}
}
fn allocation_mut(&mut self) -> Option<&mut Allocation> {
match self {
Self::Swapchain(_, _) => None,
Self::Allocated(_, alloc) => Some(alloc),
}
}
}
/// A Vulkan image plus its descriptor and (optionally) its memory allocation.
#[derive(Debug)]
pub struct Image {
    image: ImageInner,
    /// The parameters the image was created with; used for validation when
    /// creating views.
    desc: ImageDesc,
    /// Views created from this image (weakly held; currently unused).
    #[allow(dead_code)]
    views: Mutex<WeakVec<ImageView>>,
}
impl Image {
    /// Creates a new image together with a freshly allocated, device-owned
    /// memory block, and binds the two.
    pub fn new(device: Device, desc: ImageDesc) -> crate::Result<Self> {
        let (image, requirements) = Self::new_raw(device.clone(), &desc)?;
        let alloc = device.alloc2.lock().allocate(&AllocationCreateDesc {
            name: desc.name.as_deref().unwrap_or(""),
            requirements,
            location: desc.mem_location,
            // the allocator treats linear- and optimal-tiled resources
            // differently, so tell it which one this is.
            linear: desc.tiling == vk::ImageTiling::LINEAR,
            allocation_scheme: match desc.alloc_scheme {
                AllocationStrategy::AllocatorManaged => AllocationScheme::GpuAllocatorManaged,
                AllocationStrategy::Dedicated => AllocationScheme::DedicatedImage(image),
            },
        })?;
        Self::new_with_allocation_unchecked(
            device.clone(),
            image,
            Allocation::Owned(DeviceObject::new(device, alloc)),
            desc,
        )
    }
    /// Binds `allocation` to `image` and wraps both, WITHOUT checking that the
    /// allocation satisfies the image's memory requirements (hence
    /// "unchecked"; `new_with_allocation` performs the checks).
    fn new_with_allocation_unchecked(
        device: Device,
        image: vk::Image,
        allocation: Allocation,
        desc: ImageDesc,
    ) -> crate::Result<Self> {
        // bind memory
        if let Some(alloc) = allocation.allocation() {
            unsafe {
                device
                    .raw
                    .bind_image_memory(image, alloc.memory(), alloc.offset())?;
            }
        }
        Ok(Self {
            image: ImageInner::Allocated(
                DeviceObject::new_debug_named(device, image, desc.name.clone()),
                allocation,
            ),
            desc,
            views: Default::default(),
        })
    }
    /// Creates a new image bound to a caller-provided allocation, validating
    /// first that the allocation is large enough and lives in a memory type
    /// the image accepts.
    ///
    /// NOTE(review): an `Allocation` whose `allocation()` is `None` reports
    /// size 0 here and therefore always fails validation — confirm that is
    /// the intended behavior for externally managed memory.
    pub fn new_with_allocation(
        device: Device,
        allocation: Allocation,
        desc: ImageDesc,
    ) -> crate::Result<Self> {
        let (image, requirements) = Self::new_raw(device.clone(), &desc)?;
        // validate allocation
        let alloc_size = allocation
            .allocation()
            .map(|alloc| alloc.size())
            .unwrap_or(0);
        if alloc_size < requirements.size {
            tracing::error!(
                "allocation size {} is smaller than image memory requirements {}",
                alloc_size,
                requirements.size
            );
            return Err(crate::Error::Unspecified);
        }
        // `memory_type_bits` is a bitmask of acceptable memory type indices;
        // the allocation's type index must be one of them.
        if allocation
            .allocation()
            .map(|alloc| 1 << alloc.memory_type_index())
            .unwrap_or(0)
            & requirements.memory_type_bits
            == 0
        {
            return Err(crate::Error::Unspecified);
        }
        Self::new_with_allocation_unchecked(device, image, allocation, desc)
    }
    /// Wraps a swapchain-owned `vk::Image`. No allocation is attached, and the
    /// handle is never destroyed by us — the swapchain owns it.
    pub fn from_swapchain_image(image: vk::Image, swapchain: &Swapchain) -> Self {
        Self {
            image: ImageInner::Swapchain(image, swapchain.swapchain.device().clone()),
            // Synthesize a descriptor that mirrors the swapchain configuration.
            desc: ImageDesc {
                format: swapchain.config.format,
                kind: vk::ImageType::TYPE_2D,
                mip_levels: 1,
                array_layers: 1,
                samples: vk::SampleCountFlags::TYPE_1,
                extent: vk::Extent3D {
                    width: swapchain.config.extent.width,
                    height: swapchain.config.extent.height,
                    depth: 1,
                },
                tiling: vk::ImageTiling::OPTIMAL,
                usage: swapchain.config.usage,
                queue_families: QueueFlags::PRESENT,
                layout: vk::ImageLayout::UNDEFINED,
                mem_location: gpu_allocator::MemoryLocation::GpuOnly,
                ..Default::default()
            },
            views: Default::default(),
        }
    }
    /// Creates the raw `vk::Image` and returns it together with its memory
    /// requirements. Validates the requested extent against device limits
    /// before creating.
    fn new_raw(
        device: Device,
        desc: &ImageDesc,
    ) -> crate::Result<(vk::Image, vk::MemoryRequirements)> {
        tracing::trace!("new image with desc={desc:?}");
        let ImageDesc {
            flags,
            format,
            kind,
            mip_levels,
            array_layers,
            samples,
            extent,
            tiling,
            usage,
            queue_families,
            layout,
            ..
        } = desc;
        // Concurrent sharing is only needed when more than one queue family
        // will touch the image; exclusive is cheaper otherwise.
        let queue_families = device.queues.family_indices(*queue_families);
        let sharing_mode = if queue_families.len() > 1 {
            vk::SharingMode::CONCURRENT
        } else {
            vk::SharingMode::EXCLUSIVE
        };
        let info = &vk::ImageCreateInfo::default()
            .flags(*flags)
            .image_type(*kind)
            .format(*format)
            .extent(*extent)
            .samples(*samples)
            .initial_layout(*layout)
            .tiling(*tiling)
            .usage(*usage)
            .sharing_mode(sharing_mode)
            .queue_family_indices(&queue_families)
            .array_layers(*array_layers)
            .mip_levels(*mip_levels);
        // validate
        let limits = &device.adapter.properties.core.limits;
        let max_dim = match *kind {
            vk::ImageType::TYPE_1D => limits.max_image_dimension1_d,
            vk::ImageType::TYPE_2D => limits.max_image_dimension2_d,
            vk::ImageType::TYPE_3D => limits.max_image_dimension3_d,
            // vk::ImageType is an open enum; only the three variants above are
            // constructed by this crate.
            _ => unreachable!(),
        };
        if extent.width > max_dim || extent.height > max_dim || extent.depth > max_dim {
            tracing::error!(
                "image extent {extent:?} exceeds device limits (max dimension: {max_dim})"
            );
            return Err(crate::Error::ImageTooLarge {
                width: extent.width,
                height: extent.height,
                max_size: max_dim,
            });
        }
        let image = unsafe { device.raw.create_image(&info, None)? };
        let requirements = unsafe { device.raw.get_image_memory_requirements(image) };
        Ok((image, requirements))
    }
    /// Raw Vulkan image handle.
    pub fn raw(&self) -> vk::Image {
        self.image.image()
    }
}
impl Eq for Image {}
impl PartialEq for Image {
    /// Two `Image`s are equal iff they wrap the same raw Vulkan handle.
    fn eq(&self, other: &Self) -> bool {
        self.raw() == other.raw()
    }
}
impl Image {
    /// The image's pixel format.
    pub fn format(&self) -> vk::Format {
        self.desc.format
    }
    /// Raw Vulkan image handle (same as [`Image::raw`]).
    pub fn image(&self) -> vk::Image {
        self.image.image()
    }
    /// Full 3D extent of the image.
    pub fn size(&self) -> vk::Extent3D {
        self.desc.extent
    }
    /// Width/height of the image, discarding depth.
    pub fn extent_2d(&self) -> vk::Extent2D {
        vk::Extent2D {
            width: self.desc.extent.width,
            height: self.desc.extent.height,
        }
    }
    pub fn width(&self) -> u32 {
        self.desc.extent.width
    }
    pub fn height(&self) -> u32 {
        self.desc.extent.height
    }
    pub fn depth(&self) -> u32 {
        self.desc.extent.depth
    }
    /// Backing allocation; `None` for swapchain-owned images.
    pub fn allocation(&self) -> Option<&Allocation> {
        self.image.allocation()
    }
    pub fn allocation_mut(&mut self) -> Option<&mut Allocation> {
        self.image.allocation_mut()
    }
    // /// technically, this ImageView belongs to the image and is managed by it.
    // pub fn get_view(&self, desc: ImageViewDesc) -> VkResult<vk::ImageView> {
    //     use std::collections::hash_map::Entry::*;
    //     match self.views.lock().entry(desc.hash_eq_copy()) {
    //         Occupied(occupied) => Ok(*occupied.get()),
    //         Vacant(vacant) => {
    //             let view = unsafe {
    //                 let create_info = vk::ImageViewCreateInfo::default()
    //                     .flags(desc.flags)
    //                     .image(self.image())
    //                     .view_type(vk::ImageViewType::TYPE_2D)
    //                     .format(desc.format)
    //                     .components(desc.components)
    //                     .subresource_range(
    //                         vk::ImageSubresourceRange::default()
    //                             .aspect_mask(desc.aspect)
    //                             .base_mip_level(desc.mip_range.0)
    //                             .level_count(desc.mip_range.count())
    //                             .base_array_layer(desc.layer_range.0)
    //                             .layer_count(desc.layer_range.count()),
    //                     );
    //                 self.device().dev().create_image_view(&create_info, None)?
    //             };
    //             Ok(*vacant.insert(view))
    //         }
    //     }
    // }
    /// Creates an [`ImageView`] over this image after validating the
    /// requested kind, mip range, and format against the image's descriptor.
    ///
    /// Returns `ManuallyDrop` — the caller decides when the view is released.
    pub fn create_view(
        self: &Arc<Self>,
        mut desc: ImageViewDesc,
    ) -> crate::Result<ManuallyDrop<ImageView>> {
        // validate
        if !view_kind_compatible(self.desc.kind, desc.kind) {
            tracing::error!(
                "image view kind {:?} is not compatible with image kind {:?}",
                desc.kind,
                self.desc.kind
            );
            return Err(crate::Error::IncompatibleImageViewKind {
                image_kind: self.desc.kind,
                view_kind: desc.kind,
            });
        }
        // update imageview desc to make sure the mip range and layer ranges don't contain `vk::REMAINING_MIP_LEVELS`.
        desc.mip_range.set_max_end(self.desc.mip_levels);
        desc.layer_range.set_max_end(self.desc.array_layers);
        // NOTE(review): only mip_range is bounds-checked below; layer_range is
        // resolved but never validated against array_layers — confirm intended.
        if !MipRange::from(..self.desc.mip_levels).contains(&desc.mip_range) {
            tracing::error!(
                "image view mip range {:?} exceeds image mip levels {}",
                desc.mip_range,
                self.desc.mip_levels
            );
            return Err(crate::Error::Todo(
                "image view mip range exceeds image mip levels",
            ));
        }
        // An UNDEFINED view format means "inherit the image's format".
        if desc.format == vk::Format::UNDEFINED {
            desc.format = self.desc.format;
        }
        if !validate_image_view_format(&self.desc, desc.format) {
            tracing::error!(
                "image view format {:?} is not compatible with image format {:?}",
                desc.format,
                self.desc.format
            );
            return Err(crate::Error::Todo(
                "image view format is not compatible with image format",
            ));
        }
        tracing::trace!(
            "new image view with desc={desc:?} for image {:x}",
            self.image().as_raw()
        );
        let create_info = vk::ImageViewCreateInfo::default()
            .flags(desc.flags)
            .image(self.image())
            .view_type(desc.kind)
            .format(desc.format)
            .components(desc.components)
            .subresource_range(
                vk::ImageSubresourceRange::default()
                    .aspect_mask(desc.aspect)
                    .base_mip_level(desc.mip_range.start)
                    .level_count(desc.mip_range.len())
                    .base_array_layer(desc.layer_range.start)
                    .layer_count(desc.layer_range.len()),
            );
        let device = self.image.device();
        let view = unsafe { device.raw.create_image_view(&create_info, None)? };
        Ok(ManuallyDrop::new(ImageView {
            view: DeviceObject::new_debug_named(device.clone(), view, desc.name.clone()),
            desc,
            image: self.clone(),
        }))
    }
}
/// A view format is acceptable when it matches the image's own format, or —
/// if the image was created with `MUTABLE_FORMAT` — when both formats fall in
/// the same format compatibility class.
fn validate_image_view_format(image: &ImageDesc, view_format: vk::Format) -> bool {
    if image.format == view_format {
        return true;
    }
    // `FormatClass::from` is only reached for mutable-format images, matching
    // the original evaluation order.
    image.flags.contains(vk::ImageCreateFlags::MUTABLE_FORMAT)
        && FormatClass::from(image.format) == FormatClass::from(view_format)
}
/// Whether a view of `view_kind` may be created over an image of `image_kind`.
///
/// NOTE(review): 3D images only allow 2D/2D-array views when created with the
/// `2D_ARRAY_COMPATIBLE` flag; that flag is not checked here — confirm callers
/// guarantee it.
fn view_kind_compatible(image_kind: vk::ImageType, view_kind: vk::ImageViewType) -> bool {
    use vk::ImageType as IT;
    use vk::ImageViewType as VT;
    matches!(
        (image_kind, view_kind),
        (IT::TYPE_1D, VT::TYPE_1D | VT::TYPE_1D_ARRAY)
            | (IT::TYPE_2D, VT::TYPE_2D | VT::TYPE_2D_ARRAY | VT::CUBE | VT::CUBE_ARRAY)
            | (IT::TYPE_3D, VT::TYPE_2D | VT::TYPE_2D_ARRAY | VT::TYPE_3D)
    )
}
/// Creation parameters for an [`ImageView`], mirroring `vk::ImageViewCreateInfo`.
#[derive(Debug, Default, Clone)]
pub struct ImageViewDesc {
    /// Debug-only label; excluded from `Hash`/`PartialEq`.
    pub name: Option<Cow<'static, str>>,
    pub flags: vk::ImageViewCreateFlags,
    pub kind: vk::ImageViewType,
    /// `vk::Format::UNDEFINED` means "inherit the image's format" (resolved in
    /// `Image::create_view`).
    pub format: vk::Format,
    pub components: vk::ComponentMapping,
    pub aspect: vk::ImageAspectFlags,
    pub mip_range: MipRange,
    // `MipRange` doubles as a generic start..end range, here for array layers.
    pub layer_range: MipRange,
}
impl ImageViewDesc {
pub fn color_2d() -> Self {
Self {
kind: vk::ImageViewType::TYPE_2D,
aspect: vk::ImageAspectFlags::COLOR,
mip_range: MipRange { start: 0, end: 1 },
layer_range: MipRange { start: 0, end: 1 },
..Default::default()
}
}
pub fn with_format(self, format: vk::Format) -> Self {
Self { format, ..self }
}
pub fn with_mip_range<R: core::ops::RangeBounds<u32>>(self, range: R) -> Self {
Self {
mip_range: range.into(),
..self
}
}
pub fn with_layer_range<R: core::ops::RangeBounds<u32>>(self, range: R) -> Self {
Self {
layer_range: range.into(),
..self
}
}
pub fn with_aspect(self, aspect: vk::ImageAspectFlags) -> Self {
Self { aspect, ..self }
}
pub fn with_name(self, name: impl Into<Cow<'static, str>>) -> Self {
Self {
name: Some(name.into()),
..self
}
}
#[allow(dead_code)]
pub(crate) fn hash_eq_copy(&self) -> Self {
let &Self {
flags,
kind,
format,
components,
aspect,
mip_range,
layer_range,
..
} = self;
Self {
flags,
name: None,
kind,
format,
components,
aspect,
mip_range,
layer_range,
}
}
}
/// Unsigned 3D size, interchangeable with `vk::Extent3D`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Extent {
    pub width: u32,
    pub height: u32,
    pub depth: u32,
}
impl Extent {
    /// Reinterprets this extent as a signed offset.
    /// NOTE(review): components above `i32::MAX` wrap negative via `as` — confirm
    /// callers never exceed that range.
    pub fn as_offset(self) -> Offset {
        let Self { width, height, depth } = self;
        Offset {
            x: width as i32,
            y: height as i32,
            z: depth as i32,
        }
    }
    /// Reinterprets a signed offset as an extent.
    /// NOTE(review): negative components wrap to large values via `as` — confirm
    /// callers only pass non-negative offsets.
    pub fn from_offset(offset: Offset) -> Self {
        let Offset { x, y, z } = offset;
        Self {
            width: x as u32,
            height: y as u32,
            depth: z as u32,
        }
    }
    /// Component-wise minimum.
    pub fn min(self, other: Self) -> Self {
        Self {
            width: Ord::min(self.width, other.width),
            height: Ord::min(self.height, other.height),
            depth: Ord::min(self.depth, other.depth),
        }
    }
    /// Component-wise maximum.
    pub fn max(self, other: Self) -> Self {
        Self {
            width: Ord::max(self.width, other.width),
            height: Ord::max(self.height, other.height),
            depth: Ord::max(self.depth, other.depth),
        }
    }
}
impl Sub<Offset> for Extent {
    type Output = Self;
    /// Shifts the extent down by `rhs`, clamping each component to `u32` range.
    ///
    /// Fix: the previous implementation cast each signed component straight to
    /// `u32` before `saturating_sub`, so a NEGATIVE offset component wrapped to
    /// a huge value and forced the result to 0 instead of growing the extent.
    /// Computing in `i64` and clamping preserves the old behavior for
    /// non-negative offsets and handles negative ones correctly.
    fn sub(self, rhs: Offset) -> Self::Output {
        let clamp = |v: i64| v.clamp(0, u32::MAX as i64) as u32;
        Self {
            width: clamp(self.width as i64 - rhs.x as i64),
            height: clamp(self.height as i64 - rhs.y as i64),
            depth: clamp(self.depth as i64 - rhs.z as i64),
        }
    }
}
impl Add<Offset> for Extent {
    type Output = Self;
    /// Shifts the extent up by `rhs`, clamping each component to `u32` range.
    ///
    /// Fix: the previous implementation cast each signed component straight to
    /// `u32` before `saturating_add`, so a NEGATIVE offset component wrapped to
    /// a huge value and forced the result to `u32::MAX` instead of shrinking
    /// the extent. Computing in `i64` and clamping preserves the old behavior
    /// for non-negative offsets and handles negative ones correctly.
    fn add(self, rhs: Offset) -> Self::Output {
        let clamp = |v: i64| v.clamp(0, u32::MAX as i64) as u32;
        Self {
            width: clamp(self.width as i64 + rhs.x as i64),
            height: clamp(self.height as i64 + rhs.y as i64),
            depth: clamp(self.depth as i64 + rhs.z as i64),
        }
    }
}
impl From<Extent> for vk::Extent3D {
fn from(extent: Extent) -> Self {
Self {
width: extent.width,
height: extent.height,
depth: extent.depth,
}
}
}
impl From<Extent> for (u32, u32, u32) {
fn from(extent: Extent) -> Self {
(extent.width, extent.height, extent.depth)
}
}
impl From<vk::Extent3D> for Extent {
fn from(extent: vk::Extent3D) -> Self {
Self {
width: extent.width,
height: extent.height,
depth: extent.depth,
}
}
}
impl From<(u32, u32, u32)> for Extent {
fn from((width, height, depth): (u32, u32, u32)) -> Self {
Self {
width,
height,
depth,
}
}
}
/// Signed 3D offset, interchangeable with `vk::Offset3D`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct Offset {
    pub x: i32,
    pub y: i32,
    pub z: i32,
}
impl Offset {
    /// Component-wise minimum.
    pub fn min(self, other: Self) -> Self {
        Self {
            x: Ord::min(self.x, other.x),
            y: Ord::min(self.y, other.y),
            z: Ord::min(self.z, other.z),
        }
    }
    /// Component-wise maximum.
    pub fn max(self, other: Self) -> Self {
        Self {
            x: Ord::max(self.x, other.x),
            y: Ord::max(self.y, other.y),
            z: Ord::max(self.z, other.z),
        }
    }
}
/// Component-wise saturating addition of two offsets.
impl Add for Offset {
    type Output = Self;
    fn add(self, other: Self) -> Self {
        Self {
            x: i32::saturating_add(self.x, other.x),
            y: i32::saturating_add(self.y, other.y),
            z: i32::saturating_add(self.z, other.z),
        }
    }
}
/// Component-wise saturating subtraction of two offsets.
impl Sub for Offset {
    type Output = Self;
    fn sub(self, other: Self) -> Self {
        Self {
            x: i32::saturating_sub(self.x, other.x),
            y: i32::saturating_sub(self.y, other.y),
            z: i32::saturating_sub(self.z, other.z),
        }
    }
}
impl From<Offset> for vk::Offset3D {
fn from(offset: Offset) -> Self {
Self {
x: offset.x,
y: offset.y,
z: offset.z,
}
}
}
impl From<Offset> for (i32, i32, i32) {
fn from(offset: Offset) -> Self {
(offset.x, offset.y, offset.z)
}
}
impl From<vk::Offset3D> for Offset {
fn from(offset: vk::Offset3D) -> Self {
Self {
x: offset.x,
y: offset.y,
z: offset.z,
}
}
}
impl From<(i32, i32, i32)> for Offset {
fn from((x, y, z): (i32, i32, i32)) -> Self {
Self { x, y, z }
}
}
/// `start..end` range of mip levels or array layers.
///
/// Half-open: `start` is included, `end` is excluded. `end` may hold the
/// Vulkan "remaining" sentinel until resolved via `set_max_end`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct MipRange {
    start: u32,
    end: u32,
}
impl Default for MipRange {
    /// `0..REMAINING`, i.e. "everything".
    ///
    /// Note: `vk::REMAINING_ARRAY_LAYERS` and `vk::REMAINING_MIP_LEVELS` are
    /// both `u32::MAX`, so this sentinel is recognized by `set_max_end`
    /// regardless of whether the range describes mips or layers.
    fn default() -> Self {
        Self {
            start: 0,
            end: vk::REMAINING_ARRAY_LAYERS,
        }
    }
}
impl MipRange {
    /// Creates the half-open range `start..end`.
    pub fn new(start: u32, end: u32) -> Self {
        Self { start, end }
    }
    /// By-value version of [`set_max_end`](Self::set_max_end).
    pub fn with_max_end(mut self, end: u32) -> Self {
        self.set_max_end(end);
        self
    }
    /// Resolves a `vk::REMAINING_MIP_LEVELS` sentinel end to the concrete `end`.
    /// Ranges with an already-concrete end are left untouched.
    pub fn set_max_end(&mut self, end: u32) {
        if self.end == vk::REMAINING_MIP_LEVELS {
            self.end = end;
        }
    }
    /// Number of levels/layers in the range.
    ///
    /// NOTE(review): underflows (debug panic) when `end < start`; ranges
    /// produced by `range()` are clamped so this should not occur — confirm.
    pub fn len(&self) -> u32 {
        self.end - self.start
    }
    /// `true` when the range selects nothing. (Added to pair with `len`,
    /// per clippy's `len_without_is_empty`.)
    pub fn is_empty(&self) -> bool {
        self.start >= self.end
    }
    pub fn start(&self) -> u32 {
        self.start
    }
    pub fn end(&self) -> u32 {
        self.end
    }
    /// Returns the intersection of this range with another range.
    /// If the ranges do not intersect, returns an empty range.
    pub fn range(&self, other: &Self) -> Self {
        Self {
            // clamp into `self` so disjoint inputs collapse to an empty range
            start: self.start.max(other.start).min(self.end),
            end: self.end.min(other.end).max(self.start),
        }
    }
    /// Iterator over the individual level/layer indices.
    pub fn iter(&self) -> core::range::RangeIter<u32> {
        self.into_iter()
    }
    /// Whether this range fits inside an image with `total_mips` levels;
    /// a sentinel end is accepted (it resolves to the image's count).
    pub fn fits_in(&self, total_mips: u32) -> bool {
        self.start < total_mips && (self.end == vk::REMAINING_MIP_LEVELS || self.end <= total_mips)
    }
    /// Whether `other` lies entirely within `self`.
    pub fn contains(&self, other: &Self) -> bool {
        self.start <= other.start && self.end >= other.end
    }
    /// Whether the two ranges share at least one index.
    pub fn intersects(&self, other: &Self) -> bool {
        self.start < other.end && self.end > other.start
    }
    /// Returns the union of this range with another range.
    /// The union of two ranges is the smallest range that contains both ranges.
    /// Note that since the union of two ranges may contain values that are not
    /// in either range, this function does not satisfy the properties of a
    /// mathematical union operation.
    pub fn union(&self, other: &Self) -> Self {
        Self {
            start: self.start.min(other.start),
            end: self.end.max(other.end),
        }
    }
}
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign};
/// Generates operator impls for `MipRange` in all four owned/borrowed
/// combinations.
///
/// Two arms:
/// - `fn: $method` — forwards a binary operator (e.g. `BitOr`) to an existing
///   by-reference method, generating `T op T`, `T op &T`, `&T op &T`, `&T op T`.
/// - `λ: $method` — forwards a compound-assignment operator (e.g.
///   `BitOrAssign`) to a closure, generating the owned and borrowed RHS forms.
macro_rules! impl_op {
    (impl $trait:ident {$fn:ident} for $ty:ident { fn: $method:ident }) => {
        impl $trait for $ty {
            type Output = $ty;
            fn $fn(self, rhs: $ty) -> Self::Output {
                self.$method(&rhs)
            }
        }
        impl $trait<&$ty> for $ty {
            type Output = $ty;
            fn $fn(self, rhs: &$ty) -> Self::Output {
                self.$method(rhs)
            }
        }
        impl $trait<&$ty> for &$ty {
            type Output = $ty;
            fn $fn(self, rhs: &$ty) -> Self::Output {
                self.$method(rhs)
            }
        }
        impl $trait<$ty> for &$ty {
            type Output = $ty;
            fn $fn(self, rhs: $ty) -> Self::Output {
                self.$method(&rhs)
            }
        }
    };
    (impl $trait:ident {$fn:ident} for $ty:ident { λ: $method:expr }) => {
        impl $trait for $ty {
            fn $fn(&mut self, rhs: $ty) {
                $method(self, &rhs)
            }
        }
        impl $trait<&$ty> for $ty {
            fn $fn(&mut self, rhs: &$ty) {
                $method(self, rhs)
            }
        }
    };
}
// `|` / `|=` = smallest covering range (union); `&` / `&=` = intersection.
impl_op!(impl BitOr { bitor } for MipRange { fn: union });
impl_op!(impl BitOrAssign { bitor_assign } for MipRange { λ: |this: &mut MipRange, other: &MipRange| {*this = this.union(other)} });
impl_op!(impl BitAnd { bitand } for MipRange { fn: range });
impl_op!(impl BitAndAssign { bitand_assign } for MipRange { λ: |this: &mut MipRange, other: &MipRange| {*this = this.range(other)} });
/// Iterates the individual mip/layer indices in `start..end`.
///
/// Uses the unstable `core::range` API (`new_range_api` feature), so this
/// crate presumably builds on nightly.
impl IntoIterator for MipRange {
    type Item = u32;
    type IntoIter = core::range::RangeIter<u32>;
    fn into_iter(self) -> Self::IntoIter {
        core::range::Range {
            start: self.start,
            end: self.end,
        }
        .into_iter()
    }
}
/// Converts any `u32` range expression (`a..b`, `a..=b`, `..b`, `a..`, `..`)
/// into a half-open `MipRange`. An unbounded end maps to the
/// `vk::REMAINING_MIP_LEVELS` sentinel, to be resolved later via `set_max_end`.
impl<R: core::ops::RangeBounds<u32>> From<R> for MipRange {
    fn from(value: R) -> Self {
        use std::ops::Bound;
        let start = match value.start_bound() {
            Bound::Included(&v) => v,
            Bound::Excluded(&v) => v + 1,
            Bound::Unbounded => 0,
        };
        // Fix: the previous version stored `end - start` (a COUNT) in `end`,
        // so any range with a non-zero start and a bounded end (e.g. `1..3`)
        // produced a too-short range (`MipRange { start: 1, end: 2 }`). `end`
        // must be the exclusive upper bound itself.
        let end = match value.end_bound() {
            Bound::Included(&v) => v + 1,
            Bound::Excluded(&v) => v,
            Bound::Unbounded => vk::REMAINING_MIP_LEVELS,
        };
        Self { start, end }
    }
}
/// Manual `Hash` over every field except `name` (debug metadata), matching
/// the manual `PartialEq` below and `ImageViewDesc::hash_eq_copy`.
impl std::hash::Hash for ImageViewDesc {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.flags.hash(state);
        self.kind.hash(state);
        self.format.hash(state);
        // components are hashed field-by-field as a tuple — presumably because
        // `vk::ComponentMapping` itself does not implement `Hash`; confirm.
        (
            self.components.r,
            self.components.g,
            self.components.b,
            self.components.a,
        )
            .hash(state);
        self.aspect.hash(state);
        self.layer_range.hash(state);
        self.mip_range.hash(state);
    }
}
impl Eq for ImageViewDesc {}
impl PartialEq for ImageViewDesc {
fn eq(&self, other: &Self) -> bool {
self.flags == other.flags
&& self.kind == other.kind
&& self.format == other.format
&& (
self.components.r,
self.components.g,
self.components.b,
self.components.a,
) == (
other.components.r,
other.components.g,
other.components.b,
other.components.a,
)
&& self.aspect == other.aspect
&& self.mip_range == other.mip_range
&& self.layer_range == other.layer_range
}
}
/// A `vk::ImageView` that keeps its parent [`Image`] alive via `Arc`.
#[derive(Debug)]
pub struct ImageView {
    view: DeviceObject<vk::ImageView>,
    /// Parameters the view was created with.
    #[allow(dead_code)]
    desc: ImageViewDesc,
    /// Parent image; held so the view never outlives it.
    image: Arc<Image>,
}
impl ImageView {
    /// Raw Vulkan image-view handle.
    pub fn raw(&self) -> vk::ImageView {
        *self.view
    }
    /// The image this view was created from.
    pub fn image(&self) -> &Arc<Image> {
        &self.image
    }
}
/// Hooks `vk::ImageView` into the generic device-object cleanup machinery.
impl<T: AsRef<DeviceInner>> ExternallyManagedObject<T> for vk::ImageView {
    unsafe fn destroy(self, device: &T) {
        // SAFETY: caller guarantees the handle is valid, belongs to `device`,
        // and is no longer in use by the GPU.
        unsafe {
            device.as_ref().raw.destroy_image_view(self, None);
        }
    }
}
/// Presumably the source/destination queue family indices of a queue
/// ownership transfer barrier — confirm at use sites.
pub struct QueueOwnership {
    pub src: u32,
    pub dst: u32,
}
/// Every mip level and array layer, with NO aspect selected — the caller is
/// presumably expected to fill in `aspect_mask` (Vulkan requires at least one
/// aspect); confirm at use sites.
pub const SUBRESOURCERANGE_ALL: vk::ImageSubresourceRange = vk::ImageSubresourceRange {
    aspect_mask: vk::ImageAspectFlags::empty(),
    base_mip_level: 0,
    level_count: vk::REMAINING_MIP_LEVELS,
    base_array_layer: 0,
    layer_count: vk::REMAINING_ARRAY_LAYERS,
};
/// Every mip level and array layer of the COLOR aspect.
pub const SUBRESOURCERANGE_COLOR_ALL: vk::ImageSubresourceRange = vk::ImageSubresourceRange {
    aspect_mask: vk::ImageAspectFlags::COLOR,
    base_mip_level: 0,
    level_count: vk::REMAINING_MIP_LEVELS,
    base_array_layer: 0,
    layer_count: vk::REMAINING_ARRAY_LAYERS,
};
// copilot generated from spec:
/// Format compatibility class, following the "Compatible Formats" tables of
/// the Vulkan specification. Used by `validate_image_view_format`: two
/// formats may alias in a `MUTABLE_FORMAT` image when they share a class.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum FormatClass {
    // uncompressed color, grouped by texel size
    Bits8,
    Bits16,
    Alpha8,
    Bits24,
    Bits32,
    Bits48,
    Bits64,
    Bits96,
    Bits128,
    Bits192,
    Bits256,
    // depth/stencil
    D16,
    D24,
    D32,
    S8,
    D16S8,
    D24S8,
    D32S8,
    // block-compressed: BCn
    Bc1Rgb,
    Bc1Rgba,
    Bc2,
    Bc3,
    Bc4,
    Bc5,
    Bc6h,
    Bc7,
    // block-compressed: ETC2 / EAC
    Etc2Rgb,
    Etc2Rgba,
    Etc2EacRgba,
    EacR,
    EacRg,
    // block-compressed: ASTC 2D, by block footprint
    Astc4x4,
    Astc5x4,
    Astc5x5,
    Astc6x5,
    Astc6x6,
    Astc8x5,
    Astc8x6,
    Astc8x8,
    Astc10x5,
    Astc10x6,
    Astc10x8,
    Astc10x10,
    Astc12x10,
    Astc12x12,
    // multi-planar / packed YCbCr
    YuvG8B8G8R8_422,
    YuvB8G8R8G8_422,
    YuvG8B8R8Triplane420,
    YuvG8B8R8Biplane420,
    YuvG8B8R8Triplane422,
    YuvG8B8R8Biplane422,
    YuvG8B8R8Triplane444,
    YuvG10X6B10X6R10X6Triplane420,
    YuvG10X6B10X6R10X6Biplane420,
    YuvG10X6B10X6R10X6Triplane422,
    YuvG10X6B10X6R10X6Biplane422,
    YuvG10X6B10X6R10X6Triplane444,
    YuvG12X4B12X4R12X4Triplane420,
    YuvG12X4B12X4R12X4Biplane420,
    YuvG12X4B12X4R12X4Triplane422,
    YuvG12X4B12X4R12X4Biplane422,
    YuvG12X4B12X4R12X4Triplane444,
    YuvG16B16R16Triplane420,
    YuvG16B16R16Biplane420,
    YuvG16B16R16Triplane422,
    YuvG16B16R16Biplane422,
    YuvG16B16R16Triplane444,
    YuvG8B8R8Biplane444,
    YuvG10X6B10X6R10X6Biplane444,
    YuvG12X4B12X4R12X4Biplane444,
    YuvG16B16R16Biplane444,
    // 64-bit packed wide-channel groups
    Bits64R10G10B10A10,
    Bits64G10B10G10R10_422,
    Bits64B10G10R10G10_422,
    Bits64R12G12B12A12,
    Bits64G12B12G12R12_422,
    Bits64B12G12R12G12_422,
    Bits64G16B16G16R16_422,
    Bits64B16G16R16G16_422,
    Bits64R14G14B14A14,
    // PVRTC (IMG extension)
    Pvrtc1_2bpp,
    Pvrtc1_4bpp,
    Pvrtc2_2bpp,
    Pvrtc2_4bpp,
    // ASTC 3D (EXT) — no mapping arms below yet (commented out in `From`)
    Astc3x3x3,
    Astc4x3x3,
    Astc4x4x3,
    Astc4x4x4,
    Astc5x4x4,
    Astc5x5x4,
    Astc5x5x5,
    Astc6x5x5,
    Astc6x6x5,
    Astc6x6x6,
    // 14-bit 2-plane (ARM) — no mapping arms below yet
    YuvG14X2B14X2R14X2Biplane420,
    YuvG14X2B14X2R14X2Biplane422,
}
/// Maps a `vk::Format` to its compatibility class, following the Vulkan
/// spec's "Compatible Formats" tables. Commented-out arms are formats from
/// extensions not enabled in the current `ash` bindings.
impl From<vk::Format> for FormatClass {
    fn from(format: vk::Format) -> Self {
        use vk::Format as F;
        match format {
            // 8-bit
            F::R4G4_UNORM_PACK8
            | F::R8_UNORM
            | F::R8_SNORM
            | F::R8_USCALED
            | F::R8_SSCALED
            // | F::R8_BOOL_ARM
            // | F::R8_SFLOAT_FPENCODING_FLOAT8E4M3_ARM
            // | F::R8_SFLOAT_FPENCODING_FLOAT8E5M2_ARM
            | F::R8_UINT
            | F::R8_SINT
            | F::R8_SRGB => FormatClass::Bits8,
            // 16-bit
            F::A1B5G5R5_UNORM_PACK16_KHR
            | F::R10X6_UNORM_PACK16
            | F::R12X4_UNORM_PACK16
            | F::A4R4G4B4_UNORM_PACK16
            | F::A4B4G4R4_UNORM_PACK16
            // | F::R10X6_UINT_PACK16_ARM
            // | F::R12X4_UINT_PACK16_ARM
            // | F::R14X2_UINT_PACK16_ARM
            // | F::R14X2_UNORM_PACK16_ARM
            // | F::R16_SFLOAT_FPENCODING_BFLOAT16_ARM
            | F::R4G4B4A4_UNORM_PACK16
            | F::B4G4R4A4_UNORM_PACK16
            | F::R5G6B5_UNORM_PACK16
            | F::B5G6R5_UNORM_PACK16
            | F::R5G5B5A1_UNORM_PACK16
            | F::B5G5R5A1_UNORM_PACK16
            | F::A1R5G5B5_UNORM_PACK16
            | F::R8G8_UNORM
            | F::R8G8_SNORM
            | F::R8G8_USCALED
            | F::R8G8_SSCALED
            | F::R8G8_UINT
            | F::R8G8_SINT
            | F::R8G8_SRGB
            | F::R16_UNORM
            | F::R16_SNORM
            | F::R16_USCALED
            | F::R16_SSCALED
            | F::R16_UINT
            | F::R16_SINT
            | F::R16_SFLOAT => FormatClass::Bits16,
            // 8-bit alpha
            F::A8_UNORM_KHR => FormatClass::Alpha8,
            // 24-bit
            F::R8G8B8_UNORM
            | F::R8G8B8_SNORM
            | F::R8G8B8_USCALED
            | F::R8G8B8_SSCALED
            | F::R8G8B8_UINT
            | F::R8G8B8_SINT
            | F::R8G8B8_SRGB
            | F::B8G8R8_UNORM
            | F::B8G8R8_SNORM
            | F::B8G8R8_USCALED
            | F::B8G8R8_SSCALED
            | F::B8G8R8_UINT
            | F::B8G8R8_SINT
            | F::B8G8R8_SRGB => FormatClass::Bits24,
            // 32-bit
            F::R10X6G10X6_UNORM_2PACK16
            | F::R12X4G12X4_UNORM_2PACK16
            // | F::R16G16_SFIXED5_NV
            // | F::R10X6G10X6_UINT_2PACK16_ARM
            // | F::R12X4G12X4_UINT_2PACK16_ARM
            // | F::R14X2G14X2_UINT_2PACK16_ARM
            // | F::R14X2G14X2_UNORM_2PACK16_ARM
            | F::R8G8B8A8_UNORM
            | F::R8G8B8A8_SNORM
            | F::R8G8B8A8_USCALED
            | F::R8G8B8A8_SSCALED
            | F::R8G8B8A8_UINT
            | F::R8G8B8A8_SINT
            | F::R8G8B8A8_SRGB
            | F::B8G8R8A8_UNORM
            | F::B8G8R8A8_SNORM
            | F::B8G8R8A8_USCALED
            | F::B8G8R8A8_SSCALED
            | F::B8G8R8A8_UINT
            | F::B8G8R8A8_SINT
            | F::B8G8R8A8_SRGB
            | F::A8B8G8R8_UNORM_PACK32
            | F::A8B8G8R8_SNORM_PACK32
            | F::A8B8G8R8_USCALED_PACK32
            | F::A8B8G8R8_SSCALED_PACK32
            | F::A8B8G8R8_UINT_PACK32
            | F::A8B8G8R8_SINT_PACK32
            | F::A8B8G8R8_SRGB_PACK32
            | F::A2R10G10B10_UNORM_PACK32
            | F::A2R10G10B10_SNORM_PACK32
            | F::A2R10G10B10_USCALED_PACK32
            | F::A2R10G10B10_SSCALED_PACK32
            | F::A2R10G10B10_UINT_PACK32
            | F::A2R10G10B10_SINT_PACK32
            | F::A2B10G10R10_UNORM_PACK32
            | F::A2B10G10R10_SNORM_PACK32
            | F::A2B10G10R10_USCALED_PACK32
            | F::A2B10G10R10_SSCALED_PACK32
            | F::A2B10G10R10_UINT_PACK32
            | F::A2B10G10R10_SINT_PACK32
            | F::R16G16_UNORM
            | F::R16G16_SNORM
            | F::R16G16_USCALED
            | F::R16G16_SSCALED
            | F::R16G16_UINT
            | F::R16G16_SINT
            | F::R16G16_SFLOAT
            | F::R32_UINT
            | F::R32_SINT
            | F::R32_SFLOAT
            | F::B10G11R11_UFLOAT_PACK32
            | F::E5B9G9R9_UFLOAT_PACK32 => FormatClass::Bits32,
            // 48-bit
            F::R16G16B16_UNORM
            | F::R16G16B16_SNORM
            | F::R16G16B16_USCALED
            | F::R16G16B16_SSCALED
            | F::R16G16B16_UINT
            | F::R16G16B16_SINT
            | F::R16G16B16_SFLOAT => FormatClass::Bits48,
            // 64-bit
            F::R16G16B16A16_UNORM
            | F::R16G16B16A16_SNORM
            | F::R16G16B16A16_USCALED
            | F::R16G16B16A16_SSCALED
            | F::R16G16B16A16_UINT
            | F::R16G16B16A16_SINT
            | F::R16G16B16A16_SFLOAT
            | F::R32G32_UINT
            | F::R32G32_SINT
            | F::R32G32_SFLOAT
            | F::R64_UINT
            | F::R64_SINT
            | F::R64_SFLOAT => FormatClass::Bits64,
            // 96-bit
            F::R32G32B32_UINT | F::R32G32B32_SINT | F::R32G32B32_SFLOAT => FormatClass::Bits96,
            // 128-bit
            F::R32G32B32A32_UINT
            | F::R32G32B32A32_SINT
            | F::R32G32B32A32_SFLOAT
            | F::R64G64_UINT
            | F::R64G64_SINT
            | F::R64G64_SFLOAT => FormatClass::Bits128,
            // 192-bit
            F::R64G64B64_UINT | F::R64G64B64_SINT | F::R64G64B64_SFLOAT => FormatClass::Bits192,
            // 256-bit
            F::R64G64B64A64_UINT | F::R64G64B64A64_SINT | F::R64G64B64A64_SFLOAT => {
                FormatClass::Bits256
            }
            // Depth / Stencil
            F::D16_UNORM => FormatClass::D16,
            F::X8_D24_UNORM_PACK32 => FormatClass::D24,
            F::D32_SFLOAT => FormatClass::D32,
            F::S8_UINT => FormatClass::S8,
            F::D16_UNORM_S8_UINT => FormatClass::D16S8,
            F::D24_UNORM_S8_UINT => FormatClass::D24S8,
            F::D32_SFLOAT_S8_UINT => FormatClass::D32S8,
            // BCn
            F::BC1_RGB_UNORM_BLOCK | F::BC1_RGB_SRGB_BLOCK => FormatClass::Bc1Rgb,
            F::BC1_RGBA_UNORM_BLOCK | F::BC1_RGBA_SRGB_BLOCK => FormatClass::Bc1Rgba,
            F::BC2_UNORM_BLOCK | F::BC2_SRGB_BLOCK => FormatClass::Bc2,
            F::BC3_UNORM_BLOCK | F::BC3_SRGB_BLOCK => FormatClass::Bc3,
            F::BC4_UNORM_BLOCK | F::BC4_SNORM_BLOCK => FormatClass::Bc4,
            F::BC5_UNORM_BLOCK | F::BC5_SNORM_BLOCK => FormatClass::Bc5,
            F::BC6H_UFLOAT_BLOCK | F::BC6H_SFLOAT_BLOCK => FormatClass::Bc6h,
            F::BC7_UNORM_BLOCK | F::BC7_SRGB_BLOCK => FormatClass::Bc7,
            // ETC2 / EAC
            F::ETC2_R8G8B8_UNORM_BLOCK | F::ETC2_R8G8B8_SRGB_BLOCK => FormatClass::Etc2Rgb,
            F::ETC2_R8G8B8A1_UNORM_BLOCK | F::ETC2_R8G8B8A1_SRGB_BLOCK => FormatClass::Etc2Rgba,
            F::ETC2_R8G8B8A8_UNORM_BLOCK | F::ETC2_R8G8B8A8_SRGB_BLOCK => FormatClass::Etc2EacRgba,
            F::EAC_R11_UNORM_BLOCK | F::EAC_R11_SNORM_BLOCK => FormatClass::EacR,
            F::EAC_R11G11_UNORM_BLOCK | F::EAC_R11G11_SNORM_BLOCK => FormatClass::EacRg,
            // ASTC 2D
            F::ASTC_4X4_SFLOAT_BLOCK | F::ASTC_4X4_UNORM_BLOCK | F::ASTC_4X4_SRGB_BLOCK => {
                FormatClass::Astc4x4
            }
            F::ASTC_5X4_SFLOAT_BLOCK | F::ASTC_5X4_UNORM_BLOCK | F::ASTC_5X4_SRGB_BLOCK => {
                FormatClass::Astc5x4
            }
            F::ASTC_5X5_SFLOAT_BLOCK | F::ASTC_5X5_UNORM_BLOCK | F::ASTC_5X5_SRGB_BLOCK => {
                FormatClass::Astc5x5
            }
            F::ASTC_6X5_SFLOAT_BLOCK | F::ASTC_6X5_UNORM_BLOCK | F::ASTC_6X5_SRGB_BLOCK => {
                FormatClass::Astc6x5
            }
            F::ASTC_6X6_SFLOAT_BLOCK | F::ASTC_6X6_UNORM_BLOCK | F::ASTC_6X6_SRGB_BLOCK => {
                FormatClass::Astc6x6
            }
            F::ASTC_8X5_SFLOAT_BLOCK | F::ASTC_8X5_UNORM_BLOCK | F::ASTC_8X5_SRGB_BLOCK => {
                FormatClass::Astc8x5
            }
            F::ASTC_8X6_SFLOAT_BLOCK | F::ASTC_8X6_UNORM_BLOCK | F::ASTC_8X6_SRGB_BLOCK => {
                FormatClass::Astc8x6
            }
            F::ASTC_8X8_SFLOAT_BLOCK | F::ASTC_8X8_UNORM_BLOCK | F::ASTC_8X8_SRGB_BLOCK => {
                FormatClass::Astc8x8
            }
            F::ASTC_10X5_SFLOAT_BLOCK | F::ASTC_10X5_UNORM_BLOCK | F::ASTC_10X5_SRGB_BLOCK => {
                FormatClass::Astc10x5
            }
            F::ASTC_10X6_SFLOAT_BLOCK | F::ASTC_10X6_UNORM_BLOCK | F::ASTC_10X6_SRGB_BLOCK => {
                FormatClass::Astc10x6
            }
            F::ASTC_10X8_SFLOAT_BLOCK | F::ASTC_10X8_UNORM_BLOCK | F::ASTC_10X8_SRGB_BLOCK => {
                FormatClass::Astc10x8
            }
            F::ASTC_10X10_SFLOAT_BLOCK | F::ASTC_10X10_UNORM_BLOCK | F::ASTC_10X10_SRGB_BLOCK => {
                FormatClass::Astc10x10
            }
            F::ASTC_12X10_SFLOAT_BLOCK | F::ASTC_12X10_UNORM_BLOCK | F::ASTC_12X10_SRGB_BLOCK => {
                FormatClass::Astc12x10
            }
            F::ASTC_12X12_SFLOAT_BLOCK | F::ASTC_12X12_UNORM_BLOCK | F::ASTC_12X12_SRGB_BLOCK => {
                FormatClass::Astc12x12
            }
            // Packed/YCbCr-ish (as listed)
            F::G8B8G8R8_422_UNORM => FormatClass::YuvG8B8G8R8_422,
            F::B8G8R8G8_422_UNORM => FormatClass::YuvB8G8R8G8_422,
            F::G8_B8_R8_3PLANE_420_UNORM => FormatClass::YuvG8B8R8Triplane420,
            F::G8_B8R8_2PLANE_420_UNORM => FormatClass::YuvG8B8R8Biplane420,
            F::G8_B8_R8_3PLANE_422_UNORM => FormatClass::YuvG8B8R8Triplane422,
            F::G8_B8R8_2PLANE_422_UNORM => FormatClass::YuvG8B8R8Biplane422,
            F::G8_B8_R8_3PLANE_444_UNORM => FormatClass::YuvG8B8R8Triplane444,
            F::G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16 => {
                FormatClass::YuvG10X6B10X6R10X6Triplane420
            }
            F::G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16 => {
                FormatClass::YuvG10X6B10X6R10X6Biplane420
            }
            F::G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16 => {
                FormatClass::YuvG10X6B10X6R10X6Triplane422
            }
            F::G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16 => {
                FormatClass::YuvG10X6B10X6R10X6Biplane422
            }
            F::G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16 => {
                FormatClass::YuvG10X6B10X6R10X6Triplane444
            }
            F::G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16 => {
                FormatClass::YuvG12X4B12X4R12X4Triplane420
            }
            F::G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16 => {
                FormatClass::YuvG12X4B12X4R12X4Biplane420
            }
            F::G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16 => {
                FormatClass::YuvG12X4B12X4R12X4Triplane422
            }
            F::G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16 => {
                FormatClass::YuvG12X4B12X4R12X4Biplane422
            }
            F::G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16 => {
                FormatClass::YuvG12X4B12X4R12X4Triplane444
            }
            F::G16_B16_R16_3PLANE_420_UNORM => FormatClass::YuvG16B16R16Triplane420,
            F::G16_B16R16_2PLANE_420_UNORM => FormatClass::YuvG16B16R16Biplane420,
            F::G16_B16_R16_3PLANE_422_UNORM => FormatClass::YuvG16B16R16Triplane422,
            F::G16_B16R16_2PLANE_422_UNORM => FormatClass::YuvG16B16R16Biplane422,
            F::G16_B16_R16_3PLANE_444_UNORM => FormatClass::YuvG16B16R16Triplane444,
            // 2-plane 444 (additional section)
            F::G8_B8R8_2PLANE_444_UNORM => FormatClass::YuvG8B8R8Biplane444,
            F::G10X6_B10X6R10X6_2PLANE_444_UNORM_3PACK16 => {
                FormatClass::YuvG10X6B10X6R10X6Biplane444
            }
            F::G12X4_B12X4R12X4_2PLANE_444_UNORM_3PACK16 => {
                FormatClass::YuvG12X4B12X4R12X4Biplane444
            }
            F::G16_B16R16_2PLANE_444_UNORM => FormatClass::YuvG16B16R16Biplane444,
            // 64-bit special grouped headings from the markdown
            F::R10X6G10X6B10X6A10X6_UNORM_4PACK16 /* | F::R10X6G10X6B10X6A10X6_UINT_4PACK16_ARM */
            => {
                FormatClass::Bits64R10G10B10A10
            }
            F::G10X6B10X6G10X6R10X6_422_UNORM_4PACK16 => FormatClass::Bits64G10B10G10R10_422,
            F::B10X6G10X6R10X6G10X6_422_UNORM_4PACK16 => FormatClass::Bits64B10G10R10G10_422,
            F::R12X4G12X4B12X4A12X4_UNORM_4PACK16 /*| F::R12X4G12X4B12X4A12X4_UINT_4PACK16_ARM */
            => {
                FormatClass::Bits64R12G12B12A12
            }
            F::G12X4B12X4G12X4R12X4_422_UNORM_4PACK16 => FormatClass::Bits64G12B12G12R12_422,
            F::B12X4G12X4R12X4G12X4_422_UNORM_4PACK16 => FormatClass::Bits64B12G12R12G12_422,
            F::G16B16G16R16_422_UNORM => FormatClass::Bits64G16B16G16R16_422,
            F::B16G16R16G16_422_UNORM => FormatClass::Bits64B16G16R16G16_422,
            // F::R14X2G14X2B14X2A14X2_UINT_4PACK16_ARM
            // | F::R14X2G14X2B14X2A14X2_UNORM_4PACK16_ARM => FormatClass::Bits64_R14G14B14A14,
            // PVRTC
            F::PVRTC1_2BPP_UNORM_BLOCK_IMG | F::PVRTC1_2BPP_SRGB_BLOCK_IMG => {
                FormatClass::Pvrtc1_2bpp
            }
            F::PVRTC1_4BPP_UNORM_BLOCK_IMG | F::PVRTC1_4BPP_SRGB_BLOCK_IMG => {
                FormatClass::Pvrtc1_4bpp
            }
            F::PVRTC2_2BPP_UNORM_BLOCK_IMG | F::PVRTC2_2BPP_SRGB_BLOCK_IMG => {
                FormatClass::Pvrtc2_2bpp
            }
            F::PVRTC2_4BPP_UNORM_BLOCK_IMG | F::PVRTC2_4BPP_SRGB_BLOCK_IMG => {
                FormatClass::Pvrtc2_4bpp
            }
            // ASTC 3D (EXT)
            // F::ASTC_3X3X3_UNORM_BLOCK_EXT
            // | F::ASTC_3X3X3_SRGB_BLOCK_EXT
            // | F::ASTC_3X3X3_SFLOAT_BLOCK_EXT => FormatClass::Astc3x3x3,
            // F::ASTC_4X3X3_UNORM_BLOCK_EXT
            // | F::ASTC_4X3X3_SRGB_BLOCK_EXT
            // | F::ASTC_4X3X3_SFLOAT_BLOCK_EXT => FormatClass::Astc4x3x3,
            // F::ASTC_4X4X3_UNORM_BLOCK_EXT
            // | F::ASTC_4X4X3_SRGB_BLOCK_EXT
            // | F::ASTC_4X4X3_SFLOAT_BLOCK_EXT => FormatClass::Astc4x4x3,
            // F::ASTC_4X4X4_UNORM_BLOCK_EXT
            // | F::ASTC_4X4X4_SRGB_BLOCK_EXT
            // | F::ASTC_4X4X4_SFLOAT_BLOCK_EXT => FormatClass::Astc4x4x4,
            // F::ASTC_5X4X4_UNORM_BLOCK_EXT
            // | F::ASTC_5X4X4_SRGB_BLOCK_EXT
            // | F::ASTC_5X4X4_SFLOAT_BLOCK_EXT => FormatClass::Astc5x4x4,
            // F::ASTC_5X5X4_UNORM_BLOCK_EXT
            // | F::ASTC_5X5X4_SRGB_BLOCK_EXT
            // | F::ASTC_5X5X4_SFLOAT_BLOCK_EXT => FormatClass::Astc5x5x4,
            // F::ASTC_5X5X5_UNORM_BLOCK_EXT
            // | F::ASTC_5X5X5_SRGB_BLOCK_EXT
            // | F::ASTC_5X5X5_SFLOAT_BLOCK_EXT => FormatClass::Astc5x5x5,
            // F::ASTC_6X5X5_UNORM_BLOCK_EXT
            // | F::ASTC_6X5X5_SRGB_BLOCK_EXT
            // | F::ASTC_6X5X5_SFLOAT_BLOCK_EXT => FormatClass::Astc6x5x5,
            // F::ASTC_6X6X5_UNORM_BLOCK_EXT
            // | F::ASTC_6X6X5_SRGB_BLOCK_EXT
            // | F::ASTC_6X6X5_SFLOAT_BLOCK_EXT => FormatClass::Astc6x6x5,
            // F::ASTC_6X6X6_UNORM_BLOCK_EXT
            // | F::ASTC_6X6X6_SRGB_BLOCK_EXT
            // | F::ASTC_6X6X6_SFLOAT_BLOCK_EXT => FormatClass::Astc6x6x6,
            // // 14-bit 2-plane
            // F::G14X2_B14X2R14X2_2PLANE_420_UNORM_3PACK16_ARM => {
            //     FormatClass::YuvG14X2_B14X2R14X2_2Plane_420
            // }
            // F::G14X2_B14X2R14X2_2PLANE_422_UNORM_3PACK16_ARM => {
            //     FormatClass::YuvG14X2_B14X2R14X2_2Plane_422
            // }
            // NOTE(review): this panic is reachable from
            // `validate_image_view_format` for any format not listed above
            // (vendor-extension formats in particular) — consider returning a
            // dedicated "unknown" class instead of aborting.
            _ => todo!(),
        }
    }
}
/// Maps a format to the image aspects it contains: depth-only formats get
/// `DEPTH`, combined depth-stencil formats get `DEPTH | STENCIL`, `S8_UINT`
/// gets `STENCIL`, and every other format is treated as `COLOR`.
pub fn format_to_aspect_mask(format: vk::Format) -> vk::ImageAspectFlags {
    use vk::Format as F;
    use vk::ImageAspectFlags as A;
    match format {
        F::D16_UNORM | F::X8_D24_UNORM_PACK32 | F::D32_SFLOAT => A::DEPTH,
        F::D16_UNORM_S8_UINT | F::D24_UNORM_S8_UINT | F::D32_SFLOAT_S8_UINT => {
            A::DEPTH | A::STENCIL
        }
        F::S8_UINT => A::STENCIL,
        _ => A::COLOR,
    }
}