vidya/crates/renderer/src/buffers.rs

172 lines
5 KiB
Rust

use std::borrow::Cow;
use ash::vk;
use gpu_allocator::vulkan::AllocationScheme;
use crate::{
Device,
device::{Allocation, AllocationStrategy, DeviceObject, QueueFlags},
};
#[derive(Clone)]
pub struct BufferDesc {
pub flags: vk::BufferCreateFlags,
pub name: Option<Cow<'static, str>>,
pub size: u64,
pub usage: vk::BufferUsageFlags,
pub queue_families: QueueFlags,
pub mem_location: gpu_allocator::MemoryLocation,
pub alloc_scheme: AllocationStrategy,
}
impl std::hash::Hash for BufferDesc {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.flags.hash(state);
self.size.hash(state);
self.usage.hash(state);
self.queue_families.hash(state);
self.mem_location.hash(state);
}
}
impl std::fmt::Debug for BufferDesc {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("BufferDesc")
.field("flags", &self.flags)
.field("name", &self.name)
.field("size", &self.size)
.field("usage", &self.usage)
.field("queue_families", &self.queue_families)
.field("mem_location", &self.mem_location)
.field("alloc_scheme", &self.alloc_scheme)
.finish()
}
}
impl Eq for BufferDesc {}
impl PartialEq for BufferDesc {
fn eq(&self, other: &Self) -> bool {
self.flags == other.flags
// for hashmaps, `Eq` may be more strict than `Hash`
&& self.name == other.name
&& self.size == other.size
&& self.usage == other.usage
&& self.queue_families == other.queue_families
&& self.mem_location == other.mem_location
}
}
impl Default for BufferDesc {
fn default() -> Self {
Self {
flags: Default::default(),
name: Default::default(),
size: Default::default(),
usage: Default::default(),
queue_families: QueueFlags::empty(),
mem_location: gpu_allocator::MemoryLocation::Unknown,
alloc_scheme: AllocationStrategy::default(),
}
}
}
#[derive(Debug)]
pub struct Buffer {
buffer: DeviceObject<vk::Buffer>,
pub desc: BufferDesc,
alloc: Allocation,
}
impl Eq for Buffer {}
impl PartialEq for Buffer {
fn eq(&self, other: &Self) -> bool {
*self.buffer == *other.buffer
}
}
impl Buffer {
pub fn new(device: Device, desc: BufferDesc) -> crate::Result<Self> {
let (buffer, requirements) = Self::new_raw(device.clone(), &desc)?;
let alloc =
device
.alloc2
.lock()
.allocate(&gpu_allocator::vulkan::AllocationCreateDesc {
name: desc.name.as_deref().unwrap_or_default(),
requirements,
location: desc.mem_location,
linear: true,
allocation_scheme: match desc.alloc_scheme {
AllocationStrategy::AllocatorManaged => {
AllocationScheme::GpuAllocatorManaged
}
AllocationStrategy::Dedicated => AllocationScheme::DedicatedBuffer(buffer),
},
})?;
unsafe {
device
.raw
.bind_buffer_memory(buffer, alloc.memory(), alloc.offset())?;
}
Ok(Self {
buffer: DeviceObject::new_debug_named(device.clone(), buffer, desc.name.clone()),
desc,
alloc: Allocation::Owned(DeviceObject::new(device, alloc)),
})
}
fn new_raw(
device: Device,
desc: &BufferDesc,
) -> crate::Result<(vk::Buffer, vk::MemoryRequirements)> {
let queue_families = device.queues.family_indices(desc.queue_families);
let sharing_mode = if queue_families.len() > 1 {
vk::SharingMode::CONCURRENT
} else {
vk::SharingMode::EXCLUSIVE
};
let create_info = vk::BufferCreateInfo::default()
.size(desc.size)
.usage(desc.usage)
.queue_family_indices(&queue_families)
.sharing_mode(sharing_mode);
let buffer = unsafe { device.dev().create_buffer(&create_info, None)? };
let mem_reqs = unsafe { device.dev().get_buffer_memory_requirements(buffer) };
Ok((buffer, mem_reqs))
}
pub fn map(&mut self) -> Option<&[u8]> {
if let Some(alloc) = self.alloc.allocation() {
alloc
.mapped_slice()
.map(|slice| &slice[..self.desc.size as usize])
} else {
None
}
}
pub fn map_mut(&mut self) -> Option<&mut [u8]> {
if let Some(alloc) = self.alloc.allocation_mut() {
alloc
.mapped_slice_mut()
.map(|slice| &mut slice[..self.desc.size as usize])
} else {
None
}
}
pub fn raw(&self) -> vk::Buffer {
*self.buffer
}
pub fn len(&self) -> u64 {
self.desc.size
}
}