Compare commits

..

No commits in common. "fdfc74c6681aa069ebd8642d5726a4483c81bf70" and "ea3c24ec46ea2f07d06765c6b8572387f9ce14db" have entirely different histories.

14 changed files with 706 additions and 2952 deletions

2
.gitignore vendored
View file

@ -1,2 +0,0 @@
/target/
/Cargo.lock

View file

@ -14,18 +14,13 @@ ash-window = "0.13.0"
glam = {version = "0.29.0", features = ["bytemuck"]}
thiserror = "2.0"
tracing = "0.1.40"
tracing-subscriber = {version ="0.3.18", features = ["env-filter"]}
tracing-subscriber = "0.3.18"
vk-mem = "0.4.0"
vk-sync = "0.1.6"
tinyvec = "1.8"
rand = "0.8.5"
bitflags = "2.6"
petgraph = "0.7"
tokio = "1.42.0"
indexmap = "2"
itertools = "0.14.0"
tokio = "1.42"
futures = "0.3"
smol = "2.0"
rayon = "1.10"

View file

@ -23,7 +23,6 @@ struct WinitState {
window_attrs: WindowAttributes,
windows2: BTreeMap<WindowId, WindowState>,
renderer: Renderer<WindowId>,
last_redraw: std::time::Instant,
}
impl WinitState {
@ -39,7 +38,6 @@ impl WinitState {
.with_inner_size(LogicalSize::new(Self::BASE_WIDTH, Self::BASE_HEIGHT)),
// TODO: pass down this error and add some kind of error handling UI or dump
renderer: Renderer::new(display.as_raw()).expect("renderer"),
last_redraw: std::time::Instant::now(),
}
}
@ -57,12 +55,10 @@ impl WinitState {
fn handle_draw_request(&mut self, window_id: WindowId) {
_ = window_id;
tracing::debug!(
info!(
window_id = u64::from(window_id),
"TODO: implement draw request {}ms",
self.last_redraw.elapsed().as_millis()
"TODO: implement draw request"
);
self.last_redraw = std::time::Instant::now();
if let Some(window) = self.windows2.get_mut(&window_id) {
// egui
@ -70,17 +66,14 @@ impl WinitState {
window.demo_app.ui(&window.egui_platform.context());
let output = window.egui_platform.end_pass(Some(&window.window));
// self.renderer
// .draw_egui(&window.egui_platform.context(), output)
// .unwrap();
let egui_state = self
.renderer
.draw_egui(&window.egui_platform.context(), output)
.unwrap();
// rendering
self.renderer
.debug_draw_egui(&window_id, &window.egui_platform.context(), output, || {
window.window.pre_present_notify();
})
.inspect_err(|err| {
tracing::error!("error encountered while drawing: {err}");
.debug_draw(&window_id, || { // window.window.pre_present_notify()
})
.expect("drawing");
window.window.request_redraw();
@ -141,17 +134,16 @@ impl WinitState {
impl ApplicationHandler for WinitState {
fn resumed(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) {
tracing::debug!("winit::resumed");
tracing::info!("winit::resumed");
self.create_window(event_loop);
}
fn about_to_wait(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) {
tracing::trace!("winit::about_to_wait");
tracing::info!("winit::about_to_wait");
for (&window, &resize) in self.last_resize_events.clone().iter() {
self.handle_final_resize(window, resize);
}
self.last_resize_events.clear();
if self.windows2.is_empty() {

View file

@ -6,7 +6,7 @@ edition = "2021"
[dependencies]
tinyvec = {workspace = true}
rand = {workspace = true}
# tokio = {workspace = true, features = ["rt", "sync"]}
tokio = {workspace = true, features = ["rt", "sync"]}
dyn-clone = "1"
anyhow = "1.0.89"
ash = "0.38.0"
@ -14,21 +14,16 @@ ash-window = "0.13.0"
glam = {workspace = true}
thiserror = {workspace = true}
tracing = "0.1.40"
tracing-subscriber = "0.3.18"
vk-mem = "0.4.0"
vk-sync = "0.1.6"
crossbeam = "0.8.4"
parking_lot = "0.12.3"
smol.workspace = true
bitflags.workspace = true
petgraph.workspace = true
itertools.workspace = true
indexmap.workspace = true
futures.workspace = true
bytemuck = { version = "1.21.0", features = ["derive"] }
tracing-test = "0.2.5"
raw-window-handle = { workspace = true }
egui = { workspace = true , features = ["bytemuck"]}
egui_winit_platform = { workspace = true }
[dev-dependencies]
tracing-test = "0.2.5"
bytemuck = { version = "1.21.0", features = ["derive"] }
indexmap = "2.7.0"

View file

@ -1,89 +1,34 @@
use std::{
borrow::Cow,
ops::{Deref, DerefMut},
sync::Arc,
};
use ash::{prelude::VkResult, vk};
use itertools::Itertools;
use vk_mem::Alloc;
use crate::{
define_device_owned_handle,
device::{DeviceOwned, QueueFlags},
Device,
};
#[derive(Clone)]
pub struct BufferDesc {
pub flags: vk::BufferCreateFlags,
pub name: Option<Cow<'static, str>>,
pub size: u64,
pub usage: vk::BufferUsageFlags,
pub queue_families: QueueFlags,
pub mem_usage: vk_mem::MemoryUsage,
pub alloc_flags: vk_mem::AllocationCreateFlags,
}
impl std::fmt::Debug for BufferDesc {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("BufferDesc")
.field("flags", &self.flags)
.field("name", &self.name)
.field("size", &self.size)
.field("usage", &self.usage)
.field("queue_families", &self.queue_families)
.field("mem_usage", &self.mem_usage)
.field_with("alloc_flags", |f| {
write!(
f,
"{}",
self.alloc_flags
.iter_names()
.map(|(name, _)| name)
.format(" | ")
)
})
.finish()
}
}
impl Default for BufferDesc {
fn default() -> Self {
Self {
flags: Default::default(),
name: Default::default(),
size: Default::default(),
usage: Default::default(),
queue_families: QueueFlags::empty(),
alloc_flags: vk_mem::AllocationCreateFlags::empty(),
mem_usage: vk_mem::MemoryUsage::Auto,
}
}
}
use crate::{define_device_owned_handle, device::DeviceOwned, Device};
define_device_owned_handle! {
#[derive(Debug)]
pub Buffer(vk::Buffer) {
alloc: vk_mem::Allocation,
usage: vk::BufferUsageFlags,
size: u64,
} => |this| unsafe {
this.device().clone().alloc().destroy_buffer(this.handle(), &mut this.alloc);
}
}
impl Eq for Buffer {}
impl PartialEq for Buffer {
fn eq(&self, other: &Self) -> bool {
self.inner == other.inner
}
}
impl Buffer {
pub fn new(device: Device, desc: BufferDesc) -> VkResult<Self> {
let queue_families = device.queue_families().family_indices(desc.queue_families);
pub fn new(
device: Device,
size: usize,
usage: vk::BufferUsageFlags,
queue_families: &[u32],
memory_usage: vk_mem::MemoryUsage,
alloc_flags: vk_mem::AllocationCreateFlags,
name: Option<std::borrow::Cow<'static, str>>,
) -> VkResult<Arc<Self>> {
let sharing_mode = if queue_families.len() > 1 {
vk::SharingMode::CONCURRENT
} else {
@ -93,24 +38,28 @@ impl Buffer {
let (buffer, allocation) = unsafe {
device.alloc().create_buffer(
&vk::BufferCreateInfo::default()
.size(desc.size)
.usage(desc.usage)
.queue_family_indices(&queue_families)
.size(size as u64)
.usage(usage)
.queue_family_indices(queue_families)
.sharing_mode(sharing_mode),
&vk_mem::AllocationCreateInfo {
flags: desc.alloc_flags,
usage: desc.mem_usage,
flags: alloc_flags,
usage: memory_usage,
..Default::default()
},
)?
};
Ok(Self::construct(
device, buffer, desc.name, allocation, desc.size,
)?)
Ok(Arc::new(Self::construct(
device,
buffer,
name,
allocation,
usage,
size as u64,
)?))
}
#[allow(dead_code)]
pub fn map_arc(self: &mut Arc<Self>) -> VkResult<MappedBuffer<'_>> {
Arc::get_mut(self).map(Self::map).unwrap()
}

View file

@ -1,9 +1,9 @@
use std::sync::{atomic::AtomicU8, Arc};
use std::{future::Future, marker::PhantomData, sync::Arc};
use crate::{
define_device_owned_handle,
buffers::Buffer,
device::DeviceOwned,
images::{Image, QueueOwnership},
images::{Image2D, QueueOwnership},
pipeline::{Pipeline, PipelineLayout},
sync::{self, FenceFuture},
util::{self, FormatExt, MutexExt},
@ -13,7 +13,6 @@ use super::{Device, Queue};
use ash::{prelude::*, vk};
use parking_lot::Mutex;
#[derive(Debug)]
pub struct SingleUseCommandPool {
device: Device,
pool: Mutex<vk::CommandPool>,
@ -45,16 +44,6 @@ impl SingleUseCommandPool {
}))
}
#[allow(dead_code)]
pub fn reset(&self) -> VkResult<()> {
unsafe {
self.pool.with_locked(|pool| {
self.device
.dev()
.reset_command_pool(*pool, vk::CommandPoolResetFlags::empty())
})
}
}
pub fn alloc(self: &Arc<Self>) -> VkResult<SingleUseCommand> {
SingleUseCommand::new(self.device.clone(), self.clone())
}
@ -69,122 +58,24 @@ impl SingleUseCommandPool {
}
}
pub trait CommandBuffer: DeviceOwned<vk::CommandBuffer> {
fn queue(&self) -> &Queue;
}
impl CommandBuffer for SingleUseCommand {
fn queue(&self) -> &Queue {
&self.pool.queue
}
}
pub struct CommandList<T: CommandBuffer>(pub Vec<T>);
impl<T: CommandBuffer> CommandList<T> {
/// all commands in list must be allocated from the same queue.
pub fn submit<'a>(
&'a self,
wait: Option<(vk::Semaphore, vk::PipelineStageFlags)>,
signal: Option<vk::Semaphore>,
fence: Arc<sync::Fence>,
) -> VkResult<FenceFuture<'a>> {
if self.0.is_empty() {
//exit
}
let buffers = self.0.iter().map(|cmd| cmd.handle()).collect::<Vec<_>>();
let mut info = vk::SubmitInfo::default().command_buffers(&buffers);
if let Some((sema, stage)) = wait.as_ref() {
info = info
.wait_dst_stage_mask(core::slice::from_ref(stage))
.wait_semaphores(core::slice::from_ref(sema));
}
if let Some(signal) = signal.as_ref() {
info = info.signal_semaphores(core::slice::from_ref(signal));
}
self.0[0].queue().with_locked(|queue| unsafe {
self.0[0]
.device()
.dev()
.queue_submit(queue, &[info], fence.fence())
})?;
Ok(FenceFuture::<'a>::new(fence))
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[repr(u8)]
pub enum CommandBufferState {
Initial = 0,
Recording,
Executable,
Pending,
Invalid,
}
#[derive(Debug)]
#[repr(transparent)]
struct CommandBufferState2(AtomicU8);
impl CommandBufferState2 {
fn initial() -> Self {
Self(AtomicU8::new(CommandBufferState::Initial as u8))
}
fn recording() -> Self {
Self(AtomicU8::new(CommandBufferState::Recording as u8))
}
fn state(&self) -> CommandBufferState {
let value = self.0.load(std::sync::atomic::Ordering::Relaxed);
unsafe { *<*const _>::from(&value).cast::<CommandBufferState>() }
}
fn is_initial(&self) -> bool {
self.0.load(std::sync::atomic::Ordering::Relaxed) == 0
}
fn set_executable(&self) {
self.0.store(
CommandBufferState::Executable as u8,
std::sync::atomic::Ordering::Relaxed,
);
}
fn set_recording(&self) {
self.0.store(
CommandBufferState::Recording as u8,
std::sync::atomic::Ordering::Relaxed,
);
}
fn set_pending(&self) {
self.0.store(
CommandBufferState::Pending as u8,
std::sync::atomic::Ordering::Relaxed,
);
}
fn set_invalid(&self) {
self.0.store(
CommandBufferState::Invalid as u8,
std::sync::atomic::Ordering::Relaxed,
);
}
}
define_device_owned_handle! {
#[derive(Debug)]
pub SingleUseCommand(vk::CommandBuffer) {
pool: Arc<SingleUseCommandPool>,
state: CommandBufferState2,
} => |this| unsafe {
this.pool
.pool
.with_locked(|&pool| this.device().dev().free_command_buffers(pool, &[this.handle()]))
}
pub struct SingleUseCommand {
device: Device,
pool: Arc<SingleUseCommandPool>,
buffer: vk::CommandBuffer,
}
impl !Sync for SingleUseCommand {}
impl Drop for SingleUseCommand {
fn drop(&mut self) {
unsafe {
self.pool
.pool
.with_locked(|&pool| self.device.dev().free_command_buffers(pool, &[self.buffer]))
};
}
}
impl SingleUseCommand {
pub fn new(device: Device, pool: Arc<SingleUseCommandPool>) -> VkResult<Self> {
let buffer = unsafe {
@ -202,26 +93,16 @@ impl SingleUseCommand {
buffer
};
Self::construct(device, buffer, None, pool, CommandBufferState2::recording())
}
pub fn state(&self) -> CommandBufferState {
self.state.state()
}
pub fn end(&self) -> VkResult<()> {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.inner.dev().dev().end_command_buffer(self.handle())?;
}
self.state.set_executable();
Ok(())
Ok(Self {
device,
pool,
buffer,
})
}
/// Safety: commandbuffer must not be accessed from multiple threads at the same time
pub unsafe fn buffer(&self) -> vk::CommandBuffer {
self.handle()
self.buffer
}
//pub fn copy_buffer_to_image(&self, image: &Image2D, buffer: &Buffer, )
@ -238,7 +119,6 @@ impl SingleUseCommand {
new_layout: vk::ImageLayout,
queue_ownership_op: Option<QueueOwnership>,
) {
assert_eq!(self.state(), CommandBufferState::Recording);
let (src_family, dst_family) = queue_ownership_op
.map(|t| (t.src, t.dst))
.unwrap_or((vk::QUEUE_FAMILY_IGNORED, vk::QUEUE_FAMILY_IGNORED));
@ -263,8 +143,8 @@ impl SingleUseCommand {
.new_layout(new_layout);
unsafe {
self.device().dev().cmd_pipeline_barrier2(
self.handle(),
self.device.dev().cmd_pipeline_barrier2(
self.buffer,
&vk::DependencyInfo::default()
.dependency_flags(vk::DependencyFlags::BY_REGION)
.image_memory_barriers(core::slice::from_ref(&barrier)),
@ -274,15 +154,14 @@ impl SingleUseCommand {
pub fn blit_images(
&self,
src: &Image,
src: &Image2D,
src_region: util::Rect2D,
dst: &Image,
dst: &Image2D,
dst_region: util::Rect2D,
) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device().dev().cmd_blit_image(
self.buffer(),
self.device.dev().cmd_blit_image(
self.buffer,
src.image(),
vk::ImageLayout::TRANSFER_SRC_OPTIMAL,
dst.image(),
@ -312,45 +191,18 @@ impl SingleUseCommand {
layout: vk::ImageLayout,
regions: &[vk::BufferImageCopy],
) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device().dev().cmd_copy_buffer_to_image(
self.handle(),
buffer,
image,
layout,
regions,
);
self.device
.dev()
.cmd_copy_buffer_to_image(self.buffer, buffer, image, layout, regions);
}
}
pub fn copy_buffers(&self, src: vk::Buffer, dst: vk::Buffer, regions: &[vk::BufferCopy]) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device()
self.device
.dev()
.cmd_copy_buffer(self.handle(), src, dst, regions);
}
}
#[allow(dead_code)]
pub fn copy_images(
&self,
src: vk::Image,
src_layout: vk::ImageLayout,
dst: vk::Image,
dst_layout: vk::ImageLayout,
regions: &[vk::ImageCopy],
) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device().dev().cmd_copy_image(
self.handle(),
src,
src_layout,
dst,
dst_layout,
regions,
);
.cmd_copy_buffer(self.buffer, src, dst, regions);
}
}
@ -362,7 +214,6 @@ impl SingleUseCommand {
color: crate::Rgba,
subresources: &[vk::ImageSubresourceRange],
) {
assert_eq!(self.state(), CommandBufferState::Recording);
let clear_colors = match format.get_component_kind() {
crate::util::FormatComponentKind::Float => vk::ClearColorValue {
float32: color.into_f32(),
@ -376,8 +227,8 @@ impl SingleUseCommand {
};
unsafe {
self.device().dev().cmd_clear_color_image(
self.handle(),
self.device.dev().cmd_clear_color_image(
self.buffer,
image,
layout,
&clear_colors,
@ -387,26 +238,23 @@ impl SingleUseCommand {
}
pub fn begin_rendering(&self, rendering_info: vk::RenderingInfo<'_>) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device()
self.device
.dev()
.cmd_begin_rendering(self.buffer(), &rendering_info);
}
}
pub fn set_viewport(&self, viewports: &[vk::Viewport]) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device()
self.device
.dev()
.cmd_set_viewport(self.buffer(), 0, viewports);
}
}
pub fn set_scissors(&self, scissors: &[vk::Rect2D]) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device()
self.device
.dev()
.cmd_set_scissor(self.buffer(), 0, scissors);
}
@ -418,10 +266,9 @@ impl SingleUseCommand {
offset: u32,
bytes: &[u8],
) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device().dev().cmd_push_constants(
self.handle(),
self.device.dev().cmd_push_constants(
self.buffer,
layout.handle(),
stage,
offset,
@ -430,9 +277,8 @@ impl SingleUseCommand {
}
}
pub fn bind_pipeline(&self, pipeline: &Pipeline) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device().dev().cmd_bind_pipeline(
self.device.dev().cmd_bind_pipeline(
self.buffer(),
pipeline.bind_point(),
pipeline.handle(),
@ -440,17 +286,15 @@ impl SingleUseCommand {
}
}
pub fn bind_vertices(&self, buffer: vk::Buffer, offset: u64) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device()
self.device
.dev()
.cmd_bind_vertex_buffers(self.buffer(), 0, &[buffer], &[offset]);
}
}
pub fn bind_indices(&self, buffer: vk::Buffer, offset: u64, kind: vk::IndexType) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device()
self.device
.dev()
.cmd_bind_index_buffer(self.buffer(), buffer, offset, kind);
}
@ -462,10 +306,9 @@ impl SingleUseCommand {
bind_point: vk::PipelineBindPoint,
descriptor_sets: &[vk::DescriptorSet],
) {
assert_eq!(self.state(), CommandBufferState::Recording);
use crate::device::DeviceOwned;
unsafe {
self.device().dev().cmd_bind_descriptor_sets(
self.device.dev().cmd_bind_descriptor_sets(
self.buffer(),
bind_point,
layout.handle(),
@ -475,8 +318,6 @@ impl SingleUseCommand {
);
}
}
#[allow(dead_code)]
pub fn draw_indexed(
&self,
indices: u32,
@ -485,9 +326,8 @@ impl SingleUseCommand {
vertex_offset: i32,
instance_offset: u32,
) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device().dev().cmd_draw_indexed(
self.device.dev().cmd_draw_indexed(
self.buffer(),
indices,
instances,
@ -499,9 +339,8 @@ impl SingleUseCommand {
}
pub fn draw_indexed_indirect(&self, buffer: vk::Buffer, offset: u64, count: u32, stride: u32) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device().dev().cmd_draw_indexed_indirect(
self.device.dev().cmd_draw_indexed_indirect(
self.buffer(),
buffer,
offset,
@ -512,9 +351,8 @@ impl SingleUseCommand {
}
pub fn end_rendering(&self) {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe {
self.device().dev().cmd_end_rendering(self.buffer());
self.device.dev().cmd_end_rendering(self.buffer());
}
}
@ -524,10 +362,9 @@ impl SingleUseCommand {
signal: Option<vk::Semaphore>,
fence: Option<vk::Fence>,
) -> VkResult<()> {
assert_eq!(self.state(), CommandBufferState::Recording);
unsafe { self.device().dev().end_command_buffer(self.handle())? };
unsafe { self.device.dev().end_command_buffer(self.buffer)? };
let buffers = [self.handle()];
let buffers = [self.buffer];
let mut submit_info = vk::SubmitInfo::default().command_buffers(&buffers);
if let Some(semaphore) = signal.as_ref() {
@ -544,11 +381,9 @@ impl SingleUseCommand {
let fence = fence.unwrap_or(vk::Fence::null());
self.pool.queue().with_locked(|queue| unsafe {
self.device()
.dev()
.queue_submit(queue, &[submit_info], fence)
self.device.dev().queue_submit(queue, &[submit_info], fence)
})?;
tracing::trace!(
tracing::info!(
"submitted queue {:?} and fence {:?}",
self.pool.queue(),
fence
@ -563,20 +398,35 @@ impl SingleUseCommand {
signal: Option<vk::Semaphore>,
fence: Arc<sync::Fence>,
) -> VkResult<FenceFuture<'a>> {
let device = self.device.clone();
self.submit_fence(wait, signal, Some(fence.fence()))?;
Ok(FenceFuture::new(fence))
Ok(unsafe { FenceFuture::new(fence) })
}
#[allow(dead_code)]
pub fn submit_blocking(
self,
wait: Option<(vk::Semaphore, vk::PipelineStageFlags)>,
signal: Option<vk::Semaphore>,
) -> VkResult<()> {
let fence = Arc::new(sync::Fence::create(self.device().clone())?);
let fence = Arc::new(sync::Fence::create(self.device.clone())?);
let future = self.submit_async(wait, signal, fence)?;
future.block()?;
future.block();
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
async fn async_submit(cmd: SingleUseCommand, queue: Queue) {
cmd.submit_async(
None,
None,
Arc::new(sync::Fence::create(cmd.device.clone()).unwrap()),
)
.unwrap()
.await;
}
}

View file

@ -1,10 +1,16 @@
use std::{borrow::Cow, collections::BTreeMap, ops::Deref, sync::Arc};
use std::{
borrow::Cow,
collections::{BTreeMap, HashMap},
ops::Deref,
sync::Arc,
};
use ash::{
khr,
prelude::VkResult,
vk::{self, Handle},
};
use parking_lot::Mutex;
use tinyvec::{array_vec, ArrayVec};
use crate::{sync, Instance, PhysicalDevice, Queue};
@ -40,41 +46,6 @@ impl DeviceQueueFamilies {
pub fn transfer_familty(&self) -> u32 {
self.transfer.0
}
pub fn family_indices(&self, flags: QueueFlags) -> ArrayVec<[u32; 4]> {
let mut indices = array_vec!([u32; 4]);
if flags.contains(QueueFlags::GRAPHICS) {
indices.push(self.graphics_familty());
}
if flags.contains(QueueFlags::PRESENT) {
indices.push(self.present_familty());
}
if flags.contains(QueueFlags::ASYNC_COMPUTE) {
indices.push(self.async_compute_familty());
}
if flags.contains(QueueFlags::TRANSFER) {
indices.push(self.transfer_familty());
}
let unique_len = indices.partition_dedup().0.len();
indices.drain(unique_len..);
indices
}
}
bitflags::bitflags! {
#[repr(transparent)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct QueueFlags: u32 {
const GRAPHICS = 1 << 0;
const PRESENT = 1 << 1;
const ASYNC_COMPUTE = 1 << 2;
const TRANSFER = 1 << 3;
const NONE = 0;
const PRESENT_GRAPHICS = 1 << 0 | 1 << 1;
}
}
#[repr(transparent)]
@ -97,7 +68,6 @@ impl Drop for DeviceWrapper {
}
}
#[allow(unused)]
pub struct DeviceInner {
alloc: vk_mem::Allocator,
device: DeviceWrapper,
@ -188,7 +158,7 @@ impl Device {
let alloc_info =
vk_mem::AllocatorCreateInfo::new(&instance.instance, &device, physical.pdev);
let alloc = vk_mem::Allocator::new(alloc_info)?;
let alloc = unsafe { vk_mem::Allocator::new(alloc_info)? };
DeviceInner {
device: DeviceWrapper(device.clone()),
@ -221,9 +191,6 @@ impl Device {
pub fn dev(&self) -> &ash::Device {
&self.0.device
}
pub fn instance(&self) -> &Arc<Instance> {
&self.0.instance
}
pub fn swapchain(&self) -> &khr::swapchain::Device {
&self.0.swapchain
}
@ -309,7 +276,6 @@ impl AsRef<ash::Device> for Device {
}
}
#[allow(dead_code)]
pub struct DeviceAndQueues {
pub(crate) device: Device,
pub(crate) main_queue: Queue,
@ -333,32 +299,18 @@ impl AsRef<ash::khr::swapchain::Device> for DeviceAndQueues {
pub struct DeviceOwnedDebugObject<T> {
device: Device,
object: T,
#[cfg(debug_assertions)]
name: Option<Cow<'static, str>>,
}
impl<T: Eq> Eq for DeviceOwnedDebugObject<T> {}
impl<T: PartialEq> PartialEq for DeviceOwnedDebugObject<T> {
fn eq(&self, other: &Self) -> bool {
std::sync::Arc::ptr_eq(&self.device.0, &other.device.0) && self.object == other.object
}
}
impl<T: std::fmt::Debug + vk::Handle + Copy> std::fmt::Debug for DeviceOwnedDebugObject<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut fmt = f.debug_struct(core::any::type_name::<T>());
fmt.field_with("device", |f| {
write!(f, "0x{:x}", self.device.0.device.handle().as_raw())
})
.field_with("handle", |f| write!(f, "0x{:x}", &self.object.as_raw()));
#[cfg(debug_assertions)]
{
fmt.field("name", &self.name);
}
fmt.finish()
f.debug_struct(core::any::type_name::<T>())
.field_with("device", |f| {
write!(f, "0x{:x}", self.device.0.device.handle().as_raw())
})
.field_with("handle", |f| write!(f, "0x{:x}", &self.object.as_raw()))
.field("name", &self.name)
.finish()
}
}
@ -372,13 +324,12 @@ impl<T> DeviceOwnedDebugObject<T> {
T: vk::Handle + Copy,
{
if let Some(name) = name.as_ref() {
device.debug_name_object(object, name)?;
device.debug_name_object(object, name);
}
Ok(Self {
device,
object,
#[cfg(debug_assertions)]
name,
})
}
@ -444,7 +395,6 @@ macro_rules! define_device_owned_handle {
$(
impl Drop for $ty {
fn drop(&mut self) {
#[allow(unused_mut)]
let mut $this = self;
$dtor
}

View file

@ -1,737 +0,0 @@
use std::{collections::BTreeMap, sync::Arc};
use ash::{prelude::VkResult, vk};
use indexmap::IndexMap;
use crate::{
buffers::{Buffer, BufferDesc},
device::{self, DeviceOwned},
images::{Image, ImageDesc, ImageViewDesc},
render_graph::{
buffer_barrier, image_barrier, Access, Barrier, GraphResourceDesc, GraphResourceId,
PassDesc, RecordFn, RenderContext, RenderGraph,
},
texture,
util::Rect2D,
EguiState,
};
pub fn egui_pre_pass(
dev: &device::Device,
rg: &mut RenderGraph,
textures: &mut crate::texture::TextureManager,
egui_state: &mut EguiState,
output: &egui::FullOutput,
) -> VkResult<()> {
// allocate resource ids for textures in tessellated list (imported from texture manager)
// define accesses for resource ids
if output.textures_delta.set.is_empty() {
return Ok(());
}
// create textures for new egui textures
for (egui_id, delta) in output
.textures_delta
.set
.iter()
.filter(|(_, image)| image.is_whole())
{
tracing::trace!("creating texture image for egui image {egui_id:?}");
let image = Image::new(
dev.clone(),
ImageDesc {
name: Some(format!("egui-texture-{egui_id:?}").into()),
format: vk::Format::R8G8B8A8_UNORM,
extent: vk::Extent3D {
width: delta.image.width() as u32,
height: delta.image.height() as u32,
depth: 1,
},
usage: vk::ImageUsageFlags::SAMPLED | vk::ImageUsageFlags::TRANSFER_DST,
mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
..Default::default()
},
)?;
let tid = textures.insert_image(Arc::new(image));
if let Some(old) = egui_state.textures.insert(
*egui_id,
crate::EguiTextureInfo {
id: tid,
options: delta.options,
},
) {
textures.remove_texture(old.id);
}
}
// calculate size for staging buffer.
// calculate size for staging image.
let (staging_size, image_size) = output.textures_delta.set.iter().fold(
(0usize, glam::UVec2::ZERO),
|(mut buffer, mut image), (_id, delta)| {
let bytes = delta.image.height() * delta.image.width() * delta.image.bytes_per_pixel();
image = image.max(glam::uvec2(
delta.image.width() as u32,
delta.image.height() as u32,
));
buffer = buffer + bytes;
(buffer, image)
},
);
tracing::trace!(
staging_size,
"creating staging buffer for uploading egui textures"
);
let mut staging_buffer = Buffer::new(
dev.clone(),
BufferDesc {
name: Some("egui-prepass-staging-buffer".into()),
size: staging_size as u64,
usage: vk::BufferUsageFlags::TRANSFER_SRC,
queue_families: device::QueueFlags::empty(),
mem_usage: vk_mem::MemoryUsage::AutoPreferHost,
alloc_flags: vk_mem::AllocationCreateFlags::MAPPED
| vk_mem::AllocationCreateFlags::HOST_ACCESS_SEQUENTIAL_WRITE
| vk_mem::AllocationCreateFlags::STRATEGY_FIRST_FIT,
..Default::default()
},
)?;
tracing::trace!("creating staging image for uploading egui textures with dims={image_size:?}");
let staging_image = Arc::new(Image::new(
dev.clone(),
ImageDesc {
name: Some("egui-prepass-staging-buffer".into()),
format: vk::Format::R8G8B8A8_UNORM,
extent: vk::Extent3D {
width: image_size.x,
height: image_size.y,
depth: 1,
},
usage: vk::ImageUsageFlags::TRANSFER_SRC | vk::ImageUsageFlags::TRANSFER_DST,
queue_families: device::QueueFlags::empty(),
mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
..Default::default()
},
)?);
let aliased_images = {
tracing::trace!("mmap-ing staging buffer");
let mut staging_map = staging_buffer.map()?;
let mut offset = 0;
let aliased_images = output
.textures_delta
.set
.iter()
.map(|(id, delta)| {
let bytes =
delta.image.height() * delta.image.width() * delta.image.bytes_per_pixel();
let mem = &mut staging_map[offset..offset + bytes];
match &delta.image {
egui::ImageData::Color(arc) => {
let slice = unsafe {
core::slice::from_raw_parts(
arc.pixels.as_ptr().cast::<u8>(),
arc.pixels.len() * size_of::<egui::Color32>(),
)
};
mem[..slice.len()].copy_from_slice(slice);
}
egui::ImageData::Font(font_image) => {
for (i, c) in font_image.srgba_pixels(None).enumerate() {
let bytes = c.to_array();
mem[i * 4..(i + 1) * 4].copy_from_slice(&bytes);
}
}
}
let old_offset = offset;
offset += bytes;
let pos = delta.pos.unwrap_or_default();
let rect = Rect2D::new_from_size(
glam::ivec2(pos[0] as i32, pos[1] as i32),
glam::ivec2(delta.image.width() as i32, delta.image.height() as i32),
);
(*id, (old_offset, bytes, rect))
})
.collect::<BTreeMap<_, _>>();
// let tessellated = egui.tessellate(output.shapes, output.pixels_per_point);
aliased_images
};
let textures = output
.textures_delta
.set
.iter()
.filter_map(|(egui_id, _)| {
egui_state
.lookup_texture(*egui_id)
.and_then(|tid| textures.get_texture(tid))
.map(|img| (*egui_id, img))
})
.map(|(id, img)| {
(
id,
rg.import_image(
img,
Access {
layout: Some(vk::ImageLayout::GENERAL),
..Access::undefined()
},
),
)
})
.collect::<BTreeMap<_, _>>();
let staging_buffer = rg.import_buffer(Arc::new(staging_buffer), Access::undefined());
let staging_image = rg.import_image(staging_image, Access::undefined());
let record = Box::new({
let textures = textures.clone();
move |ctx: &RenderContext| -> crate::Result<()> {
let staging_image = ctx.get_image(staging_image).unwrap().clone();
let staging_buffer = ctx.get_buffer(staging_buffer).unwrap();
for (id, (offset, _, rect)) in aliased_images {
tracing::trace!(
"record-prepass: fetching alias of prepass staging image id={id:?}"
);
let alias = unsafe {
staging_image.get_alias(ImageDesc {
name: Some(format!("egui-prepass-staging-aliased-{id:?}v").into()),
format: vk::Format::R8G8B8A8_UNORM,
extent: vk::Extent3D {
width: rect.width() as u32,
height: rect.height() as u32,
depth: 1,
},
usage: vk::ImageUsageFlags::TRANSFER_SRC
| vk::ImageUsageFlags::TRANSFER_DST,
queue_families: device::QueueFlags::empty(),
..Default::default()
})?
};
let texture = textures.get(&id).and_then(|id| ctx.get_image(*id)).unwrap();
let image: Barrier = image_barrier(
alias.handle(),
alias.format(),
Access {
stage: vk::PipelineStageFlags2::NONE,
mask: vk::AccessFlags2::empty(),
layout: Some(vk::ImageLayout::UNDEFINED),
},
Access {
stage: vk::PipelineStageFlags2::TRANSFER,
mask: vk::AccessFlags2::TRANSFER_WRITE,
layout: Some(vk::ImageLayout::TRANSFER_DST_OPTIMAL),
},
None,
)
.into();
unsafe {
ctx.device
.dev()
.cmd_pipeline_barrier2(ctx.cmd.buffer(), &((&image).into()));
}
ctx.cmd.copy_buffer_to_image(
staging_buffer.handle(),
alias.handle(),
vk::ImageLayout::TRANSFER_DST_OPTIMAL,
&[vk::BufferImageCopy {
buffer_offset: offset as u64,
buffer_row_length: alias.width(),
buffer_image_height: alias.height(),
image_subresource: vk::ImageSubresourceLayers::default()
.aspect_mask(vk::ImageAspectFlags::COLOR)
.base_array_layer(0)
.mip_level(0)
.layer_count(1),
image_offset: vk::Offset3D { x: 0, y: 0, z: 0 },
image_extent: alias.size(),
}],
);
let from_barrier = image_barrier(
alias.handle(),
alias.format(),
Access {
stage: vk::PipelineStageFlags2::TRANSFER,
mask: vk::AccessFlags2::TRANSFER_WRITE,
layout: Some(vk::ImageLayout::TRANSFER_DST_OPTIMAL),
},
Access {
stage: vk::PipelineStageFlags2::TRANSFER,
mask: vk::AccessFlags2::TRANSFER_READ,
layout: Some(vk::ImageLayout::TRANSFER_SRC_OPTIMAL),
},
None,
);
let to_barrier = image_barrier(
texture.handle(),
texture.format(),
Access {
stage: vk::PipelineStageFlags2::NONE,
mask: vk::AccessFlags2::empty(),
layout: Some(vk::ImageLayout::GENERAL),
},
Access {
stage: vk::PipelineStageFlags2::TRANSFER,
mask: vk::AccessFlags2::TRANSFER_WRITE,
layout: Some(vk::ImageLayout::TRANSFER_DST_OPTIMAL),
},
None,
);
unsafe {
ctx.device.dev().cmd_pipeline_barrier2(
ctx.cmd.buffer(),
&vk::DependencyInfo::default()
.image_memory_barriers(&[from_barrier, to_barrier]),
);
}
ctx.cmd.copy_images(
alias.handle(),
vk::ImageLayout::TRANSFER_SRC_OPTIMAL,
texture.handle(),
vk::ImageLayout::TRANSFER_DST_OPTIMAL,
&[vk::ImageCopy {
src_subresource: vk::ImageSubresourceLayers::default()
.aspect_mask(vk::ImageAspectFlags::COLOR)
.base_array_layer(0)
.mip_level(0)
.layer_count(1),
src_offset: vk::Offset3D { x: 0, y: 0, z: 0 },
dst_subresource: vk::ImageSubresourceLayers::default()
.aspect_mask(vk::ImageAspectFlags::COLOR)
.base_array_layer(0)
.mip_level(0)
.layer_count(1),
dst_offset: vk::Offset3D {
x: rect.top_left().x,
y: rect.top_left().y,
z: 0,
},
extent: alias.size(),
}],
);
let image: Barrier = image_barrier(
texture.handle(),
texture.format(),
Access {
stage: vk::PipelineStageFlags2::TRANSFER,
mask: vk::AccessFlags2::TRANSFER_WRITE,
layout: Some(vk::ImageLayout::TRANSFER_DST_OPTIMAL),
},
Access {
stage: vk::PipelineStageFlags2::ALL_COMMANDS,
mask: vk::AccessFlags2::empty(),
layout: Some(vk::ImageLayout::GENERAL),
},
None,
)
.into();
unsafe {
ctx.device
.dev()
.cmd_pipeline_barrier2(ctx.cmd.buffer(), &((&image).into()));
}
}
Ok(())
}
});
rg.add_pass(PassDesc {
reads: [
(
staging_buffer,
Access {
stage: vk::PipelineStageFlags2::TRANSFER,
mask: vk::AccessFlags2::TRANSFER_READ,
..Access::undefined()
},
),
(staging_image, Access::undefined()),
]
.to_vec(),
writes: textures
.iter()
.map(|(_, id)| {
(
*id,
Access {
layout: Some(vk::ImageLayout::GENERAL),
..Access::undefined()
},
)
})
.collect(),
record,
});
Ok(())
}
// fn egui_pass()
/// Record an egui render pass into `rg`, drawing `output` onto `target`.
///
/// Tessellates the egui output, uploads vertex/index/indirect-draw data via a
/// single staging buffer, binds all referenced textures into the egui
/// descriptor set, and records one indexed-indirect draw per egui mesh.
///
/// Returns the ids of textures freed by `output.textures_delta` — presumably
/// the caller releases them once the frame is done (TODO confirm).
pub fn egui_pass(
    dev: &device::Device,
    rg: &mut RenderGraph,
    texture_handler: &mut crate::texture::TextureManager,
    samplers: &mut crate::SamplerCache,
    egui_state: &mut EguiState,
    egui: &egui::Context,
    output: egui::FullOutput,
    target: GraphResourceId,
) -> VkResult<Vec<texture::TextureId>> {
    let draw_data = egui.tessellate(output.shapes, output.pixels_per_point);
    // GPU-side vertex layout; must match the egui pipeline's vertex input.
    #[repr(C)]
    #[derive(Debug, Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
    struct Vertex {
        pos: glam::Vec2,
        uv: glam::Vec2,
        color: egui::epaint::Color32,
    }
    // Transparent wrapper so indirect commands can be byte-cast for upload.
    #[repr(transparent)]
    #[derive(Debug, Clone, Copy)]
    struct DrawCall(vk::DrawIndexedIndirectCommand);
    unsafe impl bytemuck::Zeroable for DrawCall {}
    unsafe impl bytemuck::Pod for DrawCall {}
    let mut vertices = Vec::new();
    let mut indices = Vec::new();
    let mut draw_calls = Vec::new();
    let mut textures = IndexMap::new();
    let mut textures_indices = Vec::new();
    // Flatten every mesh into one shared vertex/index stream, with one
    // indirect draw per mesh and a per-draw texture slot index.
    // NOTE(review): `draw.clip_rect` is ignored and the scissor below covers
    // the whole target — confirm clipping is not needed.
    for draw in draw_data {
        let egui::epaint::Primitive::Mesh(mesh) = draw.primitive else {
            continue;
        };
        draw_calls.push(DrawCall(vk::DrawIndexedIndirectCommand {
            index_count: mesh.indices.len() as u32,
            instance_count: 1,
            first_index: indices.len() as u32,
            vertex_offset: vertices.len() as i32,
            first_instance: 0,
        }));
        vertices.extend(mesh.vertices.iter().map(|v| Vertex {
            pos: glam::vec2(v.pos.x, v.pos.y),
            uv: glam::vec2(v.uv.x, v.uv.y),
            color: v.color,
        }));
        indices.extend(mesh.indices);
        let texture = egui_state.textures.get(&mesh.texture_id).cloned().unwrap();
        if !textures.contains_key(&texture.id) {
            textures.insert(texture.id, texture);
        }
        // IndexMap preserves insertion order, so this index is stable and
        // matches the descriptor-array layout written below.
        let idx = textures.get_index_of(&texture.id).unwrap();
        textures_indices.push(idx as u32);
    }
    let num_draw_calls = draw_calls.len();
    let vertices_size = vertices.len() * size_of::<Vertex>();
    let indices_size = indices.len() * size_of::<u32>();
    let draw_calls_size = draw_calls.len() * size_of::<vk::DrawIndexedIndirectCommand>();
    // Upload everything through one host-visible staging buffer laid out as
    // [vertices | indices | draw calls]; destinations are device-local.
    let (draw_staging, vertices, indices, draw_calls, texture_ids) = {
        let staging_size = vertices_size + indices_size + draw_calls_size;
        let mut staging = Buffer::new(
            dev.clone(),
            BufferDesc {
                name: Some("egui-draw-staging".into()),
                size: staging_size as u64,
                usage: vk::BufferUsageFlags::TRANSFER_SRC,
                mem_usage: vk_mem::MemoryUsage::AutoPreferHost,
                alloc_flags: vk_mem::AllocationCreateFlags::MAPPED
                    | vk_mem::AllocationCreateFlags::HOST_ACCESS_SEQUENTIAL_WRITE
                    | vk_mem::AllocationCreateFlags::STRATEGY_FIRST_FIT,
                ..Default::default()
            },
        )?;
        {
            // Write the three sections back-to-back into the mapping.
            let mut map = staging.map()?;
            let (st_vertices, rest) = map.split_at_mut(vertices_size);
            let (st_indices, st_drawcalls) = rest.split_at_mut(indices_size);
            st_vertices.copy_from_slice(bytemuck::cast_slice(&vertices));
            st_indices.copy_from_slice(bytemuck::cast_slice(&indices));
            st_drawcalls.copy_from_slice(bytemuck::cast_slice(&draw_calls));
        }
        let staging = rg.import_buffer(Arc::new(staging), Access::undefined());
        let vertices = rg.add_resource(GraphResourceDesc::Buffer(BufferDesc {
            name: Some("egui-draw-vertices".into()),
            size: vertices_size as u64,
            usage: vk::BufferUsageFlags::TRANSFER_DST | vk::BufferUsageFlags::VERTEX_BUFFER,
            mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
            ..Default::default()
        }));
        let indices = rg.add_resource(GraphResourceDesc::Buffer(BufferDesc {
            name: Some("egui-draw-indices".into()),
            size: indices_size as u64,
            usage: vk::BufferUsageFlags::TRANSFER_DST | vk::BufferUsageFlags::INDEX_BUFFER,
            mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
            ..Default::default()
        }));
        let draw_calls = rg.add_resource(GraphResourceDesc::Buffer(BufferDesc {
            name: Some("egui-draw-draw_calls".into()),
            size: draw_calls_size as u64,
            usage: vk::BufferUsageFlags::TRANSFER_DST | vk::BufferUsageFlags::INDIRECT_BUFFER,
            mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
            ..Default::default()
        }));
        // Per-draw texture indices, written directly through a mapping.
        let mut texture_ids = Buffer::new(
            dev.clone(),
            BufferDesc {
                name: Some("egui-draw-texture_ids".into()),
                size: (textures_indices.len() * size_of::<u32>()) as u64,
                usage: vk::BufferUsageFlags::STORAGE_BUFFER,
                mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
                alloc_flags: vk_mem::AllocationCreateFlags::HOST_ACCESS_SEQUENTIAL_WRITE,
                ..Default::default()
            },
        )?;
        {
            let mut map = texture_ids.map()?;
            map.copy_from_slice(bytemuck::cast_slice(&textures_indices));
        }
        (staging, vertices, indices, draw_calls, texture_ids)
    };
    // One combined-image-sampler descriptor per referenced texture, in the
    // same order as the `textures` IndexMap (and thus `textures_indices`).
    let descriptor_infos = textures
        .values()
        .map(|entry| {
            let texture = texture_handler.get_texture(entry.id).unwrap();
            let info = vk::DescriptorImageInfo {
                sampler: samplers.get_sampler(entry.into_sampler_desc()).unwrap(),
                image_view: texture
                    .get_view(ImageViewDesc {
                        kind: vk::ImageViewType::TYPE_2D,
                        format: texture.format(),
                        aspect: vk::ImageAspectFlags::COLOR,
                        mip_range: (0..1).into(),
                        layer_range: (0..1).into(),
                        ..Default::default()
                    })
                    .unwrap(),
                image_layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
            };
            info
        })
        .collect::<Vec<_>>();
    let uniform_info = vk::DescriptorBufferInfo {
        buffer: texture_ids.buffer(),
        offset: 0,
        range: texture_ids.len(),
    };
    // Descriptor writes: the texture array plus the texture-index buffer.
    let descriptor_writes = descriptor_infos
        .iter()
        .enumerate()
        .map(|(i, info)| {
            vk::WriteDescriptorSet::default()
                .image_info(core::slice::from_ref(info))
                .descriptor_count(1)
                .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)
                .dst_set(egui_state.descriptor_set)
                .dst_binding(EguiState::TEXTURE_BINDING)
                .dst_array_element(i as u32)
        })
        .chain(core::iter::once({
            vk::WriteDescriptorSet::default()
                .buffer_info(core::slice::from_ref(&uniform_info))
                .descriptor_count(1)
                .dst_binding(EguiState::UNIFORM_BINDING)
                .descriptor_type(vk::DescriptorType::STORAGE_BUFFER)
                .dst_array_element(0)
                .dst_set(egui_state.descriptor_set)
        }))
        .collect::<Vec<_>>();
    unsafe {
        dev.dev().update_descriptor_sets(&descriptor_writes, &[]);
    }
    // Ids of textures egui no longer needs after this frame (returned).
    let to_remove_tex_ids = output
        .textures_delta
        .free
        .iter()
        .filter_map(|id| egui_state.textures.get(id).cloned())
        .map(|entry| entry.id)
        .collect::<Vec<_>>();
    // The pass samples every referenced texture and reads/writes the target.
    let reads = textures
        .keys()
        .filter_map(|id| {
            texture_handler
                .get_texture(*id)
                .map(|img| (rg.import_image(img, Access::general()), Access::general()))
        })
        .chain([(target, Access::color_attachment_read_write())])
        .collect::<Vec<_>>();
    let writes = [(target, Access::color_attachment_write_only())].to_vec();
    let record: Box<RecordFn> = Box::new({
        let pipeline = egui_state.pipeline.clone();
        let pipeline_layout = egui_state.pipeline_layout.clone();
        let descriptor_set = egui_state.descriptor_set;
        move |ctx: &RenderContext| -> crate::Result<()> {
            let cmd = &ctx.cmd;
            let staging = ctx.get_buffer(draw_staging).unwrap();
            let vertices = ctx.get_buffer(vertices).unwrap();
            let indices = ctx.get_buffer(indices).unwrap();
            let draw_calls = ctx.get_buffer(draw_calls).unwrap();
            let target = ctx.get_image(target).unwrap();
            // Copy each staging section into its device-local buffer.
            cmd.copy_buffers(
                staging.buffer(),
                vertices.buffer(),
                &[vk::BufferCopy {
                    src_offset: 0,
                    dst_offset: 0,
                    size: vertices_size as u64,
                }],
            );
            cmd.copy_buffers(
                staging.buffer(),
                indices.buffer(),
                &[vk::BufferCopy {
                    src_offset: vertices_size as u64,
                    dst_offset: 0,
                    size: indices_size as u64,
                }],
            );
            cmd.copy_buffers(
                staging.buffer(),
                draw_calls.buffer(),
                &[vk::BufferCopy {
                    src_offset: (vertices_size + indices_size) as u64,
                    dst_offset: 0,
                    size: draw_calls_size as u64,
                }],
            );
            // Make the uploads visible to vertex/index/indirect consumption.
            let barriers = [
                buffer_barrier(
                    vertices.handle(),
                    0,
                    vertices.len(),
                    Access::transfer_write(),
                    Access::vertex_read(),
                    None,
                ),
                buffer_barrier(
                    indices.handle(),
                    0,
                    indices.len(),
                    Access::transfer_write(),
                    Access::index_read(),
                    None,
                ),
                buffer_barrier(
                    draw_calls.handle(),
                    0,
                    draw_calls.len(),
                    Access::transfer_write(),
                    Access::indirect_read(),
                    None,
                ),
            ];
            unsafe {
                ctx.device.dev().cmd_pipeline_barrier2(
                    cmd.buffer(),
                    &vk::DependencyInfo::default().buffer_memory_barriers(&barriers),
                );
            }
            // Draw over the target's existing contents (LOAD, not CLEAR).
            let color_attachment = &vk::RenderingAttachmentInfo::default()
                .image_layout(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL)
                .image_view(target.get_view(ImageViewDesc {
                    kind: vk::ImageViewType::TYPE_2D,
                    format: target.format(),
                    aspect: vk::ImageAspectFlags::COLOR,
                    ..Default::default()
                })?)
                .load_op(vk::AttachmentLoadOp::LOAD)
                .store_op(vk::AttachmentStoreOp::STORE);
            cmd.begin_rendering(
                vk::RenderingInfo::default()
                    .color_attachments(core::slice::from_ref(color_attachment))
                    .layer_count(1)
                    .render_area(vk::Rect2D::default().extent(target.extent_2d())),
            );
            cmd.set_scissors(&[vk::Rect2D::default()
                .offset(vk::Offset2D::default())
                .extent(target.extent_2d())]);
            cmd.set_viewport(&[vk::Viewport::default()
                .x(0.0)
                .y(0.0)
                .min_depth(0.0)
                .max_depth(1.0)
                .width(target.width() as f32)
                .height(target.height() as f32)]);
            cmd.bind_pipeline(&pipeline);
            cmd.bind_indices(indices.buffer(), 0, vk::IndexType::UINT32);
            cmd.bind_vertices(vertices.buffer(), 0);
            // Push the target size as raw f32 bit patterns — presumably the
            // vertex shader uses it for screen→NDC mapping (TODO confirm).
            cmd.push_constants(
                &pipeline_layout,
                vk::ShaderStageFlags::VERTEX,
                0,
                bytemuck::cast_slice(
                    &[target.width() as f32, target.height() as f32].map(|f| f.to_bits()),
                ),
            );
            cmd.bind_descriptor_sets(
                &pipeline_layout,
                vk::PipelineBindPoint::GRAPHICS,
                &[descriptor_set],
            );
            // One indirect command per egui mesh, recorded above.
            cmd.draw_indexed_indirect(
                draw_calls.buffer(),
                0,
                num_draw_calls as u32,
                size_of::<vk::DrawIndexedIndirectCommand>() as u32,
            );
            cmd.end_rendering();
            Ok(())
        }
    });
    rg.add_pass(PassDesc {
        reads,
        writes,
        record,
    });
    Ok(to_remove_tex_ids)
}

View file

@ -1,415 +1,11 @@
use std::{borrow::Cow, collections::HashMap, sync::Arc};
use std::{borrow::Cow, sync::Arc};
use crate::{
define_device_owned_handle,
device::{DeviceOwned, QueueFlags},
};
use crate::{buffers::Buffer, define_device_owned_handle, device::DeviceOwned};
use super::Device;
use super::{Device, Queue};
use ash::{prelude::*, vk};
use itertools::Itertools;
use parking_lot::Mutex;
use vk_mem::Alloc;
/// Full description of an image to create: shape/format/usage plus the
/// vk-mem allocation parameters.
#[derive(Clone)]
pub struct ImageDesc {
    pub flags: vk::ImageCreateFlags,
    /// Optional debug name attached to the created image.
    pub name: Option<Cow<'static, str>>,
    pub format: vk::Format,
    pub kind: vk::ImageType,
    pub mip_levels: u32,
    pub array_layers: u32,
    pub samples: vk::SampleCountFlags,
    pub extent: vk::Extent3D,
    pub tiling: vk::ImageTiling,
    pub usage: vk::ImageUsageFlags,
    /// Queue families that may use the image; more than one family implies
    /// CONCURRENT sharing at creation time.
    pub queue_families: QueueFlags,
    /// Initial layout requested at creation.
    pub layout: vk::ImageLayout,
    pub mem_usage: vk_mem::MemoryUsage,
    pub alloc_flags: vk_mem::AllocationCreateFlags,
}
/// Manual `Hash`: `alloc_flags` is hashed via its raw bits (presumably the
/// flags type itself lacks a `Hash` impl — hence the hand-written impl);
/// every other field hashes directly. Must stay in sync with `PartialEq`.
impl std::hash::Hash for ImageDesc {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.flags.hash(state);
        self.name.hash(state);
        self.format.hash(state);
        self.kind.hash(state);
        self.mip_levels.hash(state);
        self.array_layers.hash(state);
        self.samples.hash(state);
        self.extent.hash(state);
        self.tiling.hash(state);
        self.usage.hash(state);
        self.queue_families.hash(state);
        self.layout.hash(state);
        self.mem_usage.hash(state);
        // hashed by raw bits, mirroring the manual `eq`
        self.alloc_flags.bits().hash(state);
    }
}
impl Eq for ImageDesc {}

impl PartialEq for ImageDesc {
    /// Field-wise equality. `alloc_flags` is compared by raw bits, mirroring
    /// the manual `Hash` implementation; all fields participate.
    fn eq(&self, other: &Self) -> bool {
        let shape_eq = self.format == other.format
            && self.kind == other.kind
            && self.extent == other.extent
            && self.mip_levels == other.mip_levels
            && self.array_layers == other.array_layers
            && self.samples == other.samples;
        let usage_eq = self.flags == other.flags
            && self.tiling == other.tiling
            && self.usage == other.usage
            && self.queue_families == other.queue_families
            && self.layout == other.layout;
        let alloc_eq = self.mem_usage == other.mem_usage
            && self.alloc_flags.bits() == other.alloc_flags.bits();
        shape_eq && usage_eq && alloc_eq && self.name == other.name
    }
}
/// Mirrors the derived `Debug` format, except `alloc_flags` is rendered as a
/// `A | B | …` list of flag names.
///
/// Fix: the original `impl<'a>` declared a lifetime parameter that is never
/// used and is unconstrained by the trait or self type (E0207); removed.
impl std::fmt::Debug for ImageDesc {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ImageDesc")
            .field("flags", &self.flags)
            .field("name", &self.name)
            .field("format", &self.format)
            .field("kind", &self.kind)
            .field("mip_levels", &self.mip_levels)
            .field("array_layers", &self.array_layers)
            .field("samples", &self.samples)
            .field("extent", &self.extent)
            .field("tiling", &self.tiling)
            .field("usage", &self.usage)
            .field("queue_families", &self.queue_families)
            .field("layout", &self.layout)
            .field("mem_usage", &self.mem_usage)
            .field_with("alloc_flags", |f| {
                write!(
                    f,
                    "{}",
                    self.alloc_flags
                        .iter_names()
                        .map(|(name, _)| name)
                        .format(" | ")
                )
            })
            .finish()
    }
}
impl Default for ImageDesc {
    /// A nameless, single-sample, single-mip, single-layer 2D image with
    /// optimal tiling, undefined layout, and automatic memory placement.
    fn default() -> Self {
        Self {
            flags: vk::ImageCreateFlags::default(),
            name: None,
            format: vk::Format::default(),
            kind: vk::ImageType::TYPE_2D,
            mip_levels: 1,
            array_layers: 1,
            samples: vk::SampleCountFlags::TYPE_1,
            extent: vk::Extent3D::default(),
            tiling: vk::ImageTiling::OPTIMAL,
            usage: vk::ImageUsageFlags::default(),
            queue_families: QueueFlags::empty(),
            layout: vk::ImageLayout::UNDEFINED,
            mem_usage: vk_mem::MemoryUsage::Auto,
            alloc_flags: vk_mem::AllocationCreateFlags::empty(),
        }
    }
}
// `Image` wraps a `vk::Image` together with its optional vk-mem allocation,
// a cache of created views, a cache of memory-aliasing sibling images, and an
// optional parent whose allocation this image aliases. The trailing closure
// is presumably the macro's drop hook (confirm against
// `define_device_owned_handle!`): it destroys cached views and then the image
// (and its allocation, if owned) — unless this is a swapchain image, which
// the swapchain owns and destroys itself.
define_device_owned_handle! {
    #[derive(Debug)]
    pub Image(vk::Image) {
        alloc: Option<vk_mem::Allocation>,
        size: vk::Extent3D,
        format: vk::Format,
        views: Mutex<HashMap<ImageViewDesc, vk::ImageView>>,
        aliases: Mutex<HashMap<ImageDesc, Arc<Image>>>,
        parent: Option<Arc<Image>>,
        is_swapchain_image:bool,
    } => |this| if !this.is_swapchain_image {
        unsafe {
            // destroy any cached image views first
            for &view in this.views.lock().values() {
                this.inner.dev().dev().destroy_image_view(view, None);
            }
            let handle = this.handle();
            let dev = this.device().clone();
            if let Some(alloc) = this.alloc.as_mut() {
                // destroy image handle and allocation
                dev.alloc().destroy_image(handle, alloc);
            } else {
                // destroy image handle
                dev.dev().destroy_image(handle, None);
            }
        }
    }
}
impl Eq for Image {}
impl PartialEq for Image {
    /// Two `Image`s are equal iff their inner handle wrappers compare equal.
    fn eq(&self, other: &Self) -> bool {
        self.inner == other.inner
    }
}
impl Image {
pub fn new(device: Device, desc: ImageDesc) -> VkResult<Self> {
tracing::trace!("allocate new image with desc={desc:?}");
let ImageDesc {
flags,
name,
format,
kind,
mip_levels,
array_layers,
samples,
extent,
tiling,
usage,
queue_families,
layout,
mem_usage,
alloc_flags,
} = desc;
let queue_families = device.queue_families().family_indices(queue_families);
let sharing_mode = if queue_families.len() > 1 {
vk::SharingMode::CONCURRENT
} else {
vk::SharingMode::EXCLUSIVE
};
let info = &vk::ImageCreateInfo::default()
.flags(flags)
.image_type(kind)
.format(format)
.extent(extent)
.samples(samples)
.initial_layout(layout)
.tiling(tiling)
.usage(usage)
.sharing_mode(sharing_mode)
.queue_family_indices(&queue_families)
.array_layers(array_layers)
.mip_levels(mip_levels);
let alloc_info = &vk_mem::AllocationCreateInfo {
usage: mem_usage,
flags: alloc_flags,
..Default::default()
};
let (handle, alloc) = unsafe { device.alloc().create_image(info, alloc_info)? };
Self::construct(
device,
handle,
name,
Some(alloc),
extent,
format,
Mutex::new(HashMap::new()),
Mutex::new(HashMap::new()),
None, // aliased
false,
)
}
pub unsafe fn from_swapchain_image(
device: Device,
image: vk::Image,
name: Option<Cow<'static, str>>,
extent: vk::Extent3D,
format: vk::Format,
) -> Result<Image, vk::Result> {
Self::construct(
device,
image,
name,
None,
extent,
format,
Mutex::new(HashMap::new()),
Mutex::new(HashMap::new()),
None,
true,
)
}
pub fn format(&self) -> vk::Format {
self.format
}
pub fn image(&self) -> vk::Image {
self.handle()
}
pub fn size(&self) -> vk::Extent3D {
self.size
}
pub fn extent_2d(&self) -> vk::Extent2D {
vk::Extent2D {
width: self.size.width,
height: self.size.height,
}
}
pub fn width(&self) -> u32 {
self.size.width
}
pub fn height(&self) -> u32 {
self.size.height
}
pub fn depth(&self) -> u32 {
self.size.depth
}
fn get_parent(self: &Arc<Self>) -> Arc<Image> {
self.parent.clone().unwrap_or_else(|| self.clone())
}
fn get_alloc(&self) -> Option<&vk_mem::Allocation> {
self.alloc
.as_ref()
.or(self.parent.as_ref().and_then(|image| image.get_alloc()))
}
pub unsafe fn get_alias(self: &Arc<Self>, desc: ImageDesc) -> VkResult<Arc<Self>> {
self.get_parent().get_alias_inner(desc)
}
unsafe fn get_alias_inner(self: Arc<Self>, desc: ImageDesc) -> VkResult<Arc<Image>> {
use std::collections::hash_map::Entry::*;
match self.aliases.lock().entry(desc.clone()) {
Occupied(occupied) => Ok(occupied.get().clone()),
Vacant(vacant) => {
let ImageDesc {
flags,
name,
format,
kind,
mip_levels,
array_layers,
samples,
extent,
tiling,
usage,
queue_families,
layout,
..
} = desc;
let queue_families = self
.device()
.queue_families()
.family_indices(queue_families);
let sharing_mode = if queue_families.len() > 1 {
vk::SharingMode::CONCURRENT
} else {
vk::SharingMode::EXCLUSIVE
};
let info = &vk::ImageCreateInfo::default()
.flags(flags)
.image_type(kind)
.format(format)
.extent(extent)
.samples(samples)
.initial_layout(layout)
.tiling(tiling)
.usage(usage)
.sharing_mode(sharing_mode)
.queue_family_indices(&queue_families)
.array_layers(array_layers)
.mip_levels(mip_levels);
let alloc = self
.get_alloc()
.expect("no alloc associated with image. is this the framebuffer?");
let image = unsafe {
let image = self.device().dev().create_image(info, None)?;
let req = self.device().dev().get_image_memory_requirements(image);
if self.device().alloc().get_allocation_info(alloc).size < req.size {
return Err(vk::Result::ERROR_MEMORY_MAP_FAILED);
}
self.device().alloc().bind_image_memory(alloc, image)?;
image
};
let parent = self.parent.clone().unwrap_or(self.clone());
let alias = Self::construct(
self.device().clone(),
image,
name,
None,
extent,
format,
Mutex::new(HashMap::new()),
Mutex::new(HashMap::new()),
Some(parent.clone()),
parent.is_swapchain_image,
)?;
Ok(vacant.insert(Arc::new(alias)).clone())
}
}
}
/// technically, this ImageView belongs to the image and is managed by it.
pub fn get_view(&self, desc: ImageViewDesc) -> VkResult<vk::ImageView> {
use std::collections::hash_map::Entry::*;
match self.views.lock().entry(desc.hash_eq_copy()) {
Occupied(occupied) => Ok(*occupied.get()),
Vacant(vacant) => {
let view = unsafe {
let create_info = vk::ImageViewCreateInfo::default()
.flags(desc.flags)
.image(self.image())
.view_type(vk::ImageViewType::TYPE_2D)
.format(desc.format)
.components(desc.components)
.subresource_range(
vk::ImageSubresourceRange::default()
.aspect_mask(desc.aspect)
.base_mip_level(desc.mip_range.0)
.level_count(desc.mip_range.count())
.base_array_layer(desc.layer_range.0)
.layer_count(desc.layer_range.count()),
);
self.device().dev().create_image_view(&create_info, None)?
};
Ok(*vacant.insert(view))
}
}
}
pub fn create_view(&self, desc: ImageViewDesc) -> VkResult<ImageView> {
let create_info = vk::ImageViewCreateInfo::default()
.flags(desc.flags)
.image(self.image())
.view_type(vk::ImageViewType::TYPE_2D)
.format(desc.format)
.components(desc.components)
.subresource_range(
vk::ImageSubresourceRange::default()
.aspect_mask(desc.aspect)
.base_mip_level(desc.mip_range.0)
.level_count(desc.mip_range.count())
.base_array_layer(desc.layer_range.0)
.layer_count(desc.layer_range.count()),
);
let view = unsafe { self.device().dev().create_image_view(&create_info, None)? };
ImageView::construct(self.device().clone(), view, desc.name)
}
}
#[derive(Debug, Default, Clone)]
pub struct ImageViewDesc {
pub flags: vk::ImageViewCreateFlags,
@ -422,32 +18,6 @@ pub struct ImageViewDesc {
pub layer_range: MipRange,
}
impl ImageViewDesc {
    /// A copy of this desc with `name` cleared — the canonical form used as a
    /// view-cache key, so the debug name never affects hashing or equality.
    pub fn hash_eq_copy(&self) -> Self {
        Self {
            name: None,
            flags: self.flags,
            kind: self.kind,
            format: self.format,
            components: self.components,
            aspect: self.aspect,
            mip_range: self.mip_range,
            layer_range: self.layer_range,
        }
    }
}
/// A span of mip levels or array layers, built from a `Range<u32>` via
/// `Into` (e.g. `(0..1).into()`); `.0` is the base and `count()` yields the
/// level/layer count. NOTE(review): exact meaning of the second field is
/// defined by the `MipRange` impl elsewhere — confirm before constructing
/// the tuple directly.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct MipRange(u32, u32);
@ -521,6 +91,133 @@ impl PartialEq for ImageViewDesc {
}
}
/// A standalone, exclusively-owned 2D image plus its vk-mem allocation.
/// Unlike [`Image`], it caches no views and supports no memory aliasing;
/// `Drop` destroys both image and allocation.
#[derive(Debug)]
pub struct Image2D {
    device: Device,
    size: vk::Extent2D,
    mip_levels: u32,
    format: vk::Format,
    image: vk::Image,
    allocation: vk_mem::Allocation,
    // Debug name supplied at creation, if any.
    name: Option<String>,
}
impl Drop for Image2D {
    /// Destroys the Vulkan image and frees its vk-mem allocation.
    fn drop(&mut self) {
        tracing::debug!("destroying image {:?}", self);
        unsafe {
            self.device
                .alloc()
                .destroy_image(self.image, &mut self.allocation);
        }
    }
}
impl Image2D {
    /// Create an exclusively-owned 2D image with the given mips/layers,
    /// allocated through vk-mem. If `name` is provided it is attached via
    /// VK_EXT_debug_utils.
    ///
    /// Fix: the RAII wrapper is now constructed before the fallible
    /// debug-name call, so a failure there destroys the image via `Drop`
    /// instead of leaking image + allocation (as the original early `?` did).
    pub fn new_exclusive(
        device: &Device,
        extent: vk::Extent2D,
        mip_levels: u32,
        array_layers: u32,
        format: vk::Format,
        tiling: vk::ImageTiling,
        usage: vk::ImageUsageFlags,
        memory_usage: vk_mem::MemoryUsage,
        alloc_flags: vk_mem::AllocationCreateFlags,
        name: Option<&str>,
    ) -> VkResult<Arc<Self>> {
        let create_info = vk::ImageCreateInfo::default()
            .array_layers(array_layers)
            .mip_levels(mip_levels)
            .extent(vk::Extent3D {
                width: extent.width,
                height: extent.height,
                depth: 1,
            })
            .image_type(vk::ImageType::TYPE_2D)
            .format(format)
            .tiling(tiling)
            .initial_layout(vk::ImageLayout::UNDEFINED)
            .usage(usage)
            .sharing_mode(vk::SharingMode::EXCLUSIVE)
            .samples(vk::SampleCountFlags::TYPE_1);
        let alloc_info = vk_mem::AllocationCreateInfo {
            usage: memory_usage,
            flags: alloc_flags,
            ..Default::default()
        };
        let (image, allocation) =
            unsafe { device.alloc().create_image(&create_info, &alloc_info)? };
        // Build the wrapper first: any error below drops `this`, which
        // destroys the image and frees the allocation.
        let this = Arc::new(Self {
            size: extent,
            mip_levels,
            format,
            device: device.clone(),
            image,
            allocation,
            name: name.map(|s| s.to_owned()),
        });
        if let Some(name) = name {
            let info = device.alloc().get_allocation_info(&this.allocation);
            let name = std::ffi::CString::new(name).unwrap_or(c"invalid name".to_owned());
            unsafe {
                // NOTE(review): the debug name is attached to the backing
                // `device_memory` handle, not the image — confirm intent.
                device.debug_utils().set_debug_utils_object_name(
                    &vk::DebugUtilsObjectNameInfoEXT::default()
                        .object_handle(info.device_memory)
                        .object_name(&name),
                )?;
            }
        }
        Ok(this)
    }

    /// The image's pixel format.
    pub fn format(&self) -> vk::Format {
        self.format
    }

    /// A clone of the owning device handle.
    pub fn device(&self) -> Device {
        self.device.clone()
    }

    /// Create a fresh (uncached) view over this image.
    pub fn view(&self, desc: ImageViewDesc) -> VkResult<ImageView> {
        // `desc.kind` is ignored: this type is 2D-only by construction.
        let create_info = vk::ImageViewCreateInfo::default()
            .flags(desc.flags)
            .image(self.image())
            .view_type(vk::ImageViewType::TYPE_2D)
            .format(desc.format)
            .components(desc.components)
            .subresource_range(
                vk::ImageSubresourceRange::default()
                    .aspect_mask(desc.aspect)
                    .base_mip_level(desc.mip_range.0)
                    .level_count(desc.mip_range.count())
                    .base_array_layer(desc.layer_range.0)
                    .layer_count(desc.layer_range.count()),
            );
        let view = unsafe { self.device.dev().create_image_view(&create_info, None)? };
        ImageView::construct(self.device.clone(), view, desc.name)
    }

    /// The raw Vulkan image handle.
    pub fn image(&self) -> vk::Image {
        self.image
    }

    /// Width/height in pixels.
    pub fn size(&self) -> vk::Extent2D {
        self.size
    }

    pub fn width(&self) -> u32 {
        self.size.width
    }

    pub fn height(&self) -> u32 {
        self.size.height
    }
}
define_device_owned_handle! {
#[derive(Debug)]
pub ImageView(vk::ImageView) {} => |this| unsafe {
@ -533,13 +230,40 @@ pub struct QueueOwnership {
pub dst: u32,
}
/// Subresource range covering every mip level and array layer.
/// NOTE(review): `aspect_mask` is empty — callers presumably OR in the
/// aspect they need (compare `SUBRESOURCERANGE_COLOR_ALL`); confirm before
/// using this constant as-is.
pub const SUBRESOURCERANGE_ALL: vk::ImageSubresourceRange = vk::ImageSubresourceRange {
    aspect_mask: vk::ImageAspectFlags::empty(),
    base_mip_level: 0,
    level_count: vk::REMAINING_MIP_LEVELS,
    base_array_layer: 0,
    layer_count: vk::REMAINING_ARRAY_LAYERS,
};
/// Build a `vk::ImageMemoryBarrier2` covering every mip level and array
/// layer of `image` for the given `aspects`.
///
/// `queue_ownership_op`: `Some` encodes a queue-family ownership transfer;
/// `None` leaves both family indices as `QUEUE_FAMILY_IGNORED`.
pub fn image_barrier<'a>(
    image: vk::Image,
    aspects: vk::ImageAspectFlags,
    src_stage: vk::PipelineStageFlags2,
    src_access: vk::AccessFlags2,
    dst_stage: vk::PipelineStageFlags2,
    dst_access: vk::AccessFlags2,
    old_layout: vk::ImageLayout,
    new_layout: vk::ImageLayout,
    queue_ownership_op: Option<QueueOwnership>,
) -> vk::ImageMemoryBarrier2<'a> {
    let (src_family, dst_family) = match queue_ownership_op {
        Some(op) => (op.src, op.dst),
        None => (vk::QUEUE_FAMILY_IGNORED, vk::QUEUE_FAMILY_IGNORED),
    };
    let whole_image = vk::ImageSubresourceRange::default()
        .aspect_mask(aspects)
        .base_mip_level(0)
        .level_count(vk::REMAINING_MIP_LEVELS)
        .base_array_layer(0)
        .layer_count(vk::REMAINING_ARRAY_LAYERS);
    vk::ImageMemoryBarrier2::default()
        .image(image)
        .subresource_range(whole_image)
        .src_stage_mask(src_stage)
        .src_access_mask(src_access)
        .src_queue_family_index(src_family)
        .dst_stage_mask(dst_stage)
        .dst_access_mask(dst_access)
        .dst_queue_family_index(dst_family)
        .old_layout(old_layout)
        .new_layout(new_layout)
}
pub const SUBRESOURCERANGE_COLOR_ALL: vk::ImageSubresourceRange = vk::ImageSubresourceRange {
aspect_mask: vk::ImageAspectFlags::COLOR,

File diff suppressed because it is too large Load diff

View file

@ -45,6 +45,15 @@ pub enum PipelineDesc<'a> {
Graphics(GraphicsPipelineDesc<'a>),
}
impl PipelineDesc<'_> {
    /// Consume the descriptor and return its optional debug name, regardless
    /// of pipeline kind. (Takes `self` by value, so the rest of the desc is
    /// dropped.)
    fn name(self) -> Option<Cow<'static, str>> {
        match self {
            PipelineDesc::Compute(desc) => desc.name,
            PipelineDesc::Graphics(desc) => desc.name,
        }
    }
}
#[derive(Debug)]
pub struct ComputePipelineDesc<'a> {
pub flags: vk::PipelineCreateFlags,
@ -252,17 +261,10 @@ impl DescriptorPool {
.set_layouts(&layouts);
let sets = unsafe { self.device().dev().allocate_descriptor_sets(&info)? };
for (&set, desc) in sets.iter().zip(descs) {
if let Some(name) = desc.name.as_ref() {
self.device().debug_name_object(set, &name)?;
}
}
Ok(sets)
}
// pub fn free(&self) {}
#[allow(dead_code)]
pub fn reset(&self) -> VkResult<()> {
unsafe {
self.device()
@ -509,13 +511,7 @@ impl Pipeline {
name = desc.name;
bind_point = vk::PipelineBindPoint::COMPUTE;
let info = &vk::ComputePipelineCreateInfo::default()
.flags(desc.flags)
.layout(desc.layout.handle())
.base_pipeline_handle(
desc.base_pipeline
.map(|p| p.handle())
.unwrap_or(vk::Pipeline::null()),
)
.stage(desc.shader_stage.into_create_info());
unsafe {
@ -585,12 +581,11 @@ impl Pipeline {
});
let multisample = desc.multisample.map(|state| {
let info = vk::PipelineMultisampleStateCreateInfo::default()
let mut info = vk::PipelineMultisampleStateCreateInfo::default()
.flags(state.flags)
.min_sample_shading(state.min_sample_shading)
.rasterization_samples(state.rasterization_samples)
.sample_mask(state.sample_mask)
.sample_shading_enable(state.sample_shading_enable)
.alpha_to_coverage_enable(state.alpha_to_coverage_enable)
.alpha_to_one_enable(state.alpha_to_one_enable);
@ -598,7 +593,7 @@ impl Pipeline {
});
let color_blend = desc.color_blend.map(|state| {
let info = vk::PipelineColorBlendStateCreateInfo::default()
let mut info = vk::PipelineColorBlendStateCreateInfo::default()
.flags(state.flags)
.attachments(state.attachments)
.blend_constants(state.blend_constants)
@ -636,7 +631,7 @@ impl Pipeline {
});
let dynamic = desc.dynamic.map(|state| {
let info = vk::PipelineDynamicStateCreateInfo::default()
let mut info = vk::PipelineDynamicStateCreateInfo::default()
.flags(state.flags)
.dynamic_states(state.dynamic_states);
@ -644,7 +639,7 @@ impl Pipeline {
});
let mut rendering = desc.rendering.map(|state| {
let info = vk::PipelineRenderingCreateInfo::default()
let mut info = vk::PipelineRenderingCreateInfo::default()
.color_attachment_formats(state.color_formats)
.depth_attachment_format(state.depth_format.unwrap_or_default())
.stencil_attachment_format(state.stencil_format.unwrap_or_default());

View file

@ -1,864 +1,55 @@
#![allow(dead_code)]
use std::hash::Hash;
use std::{
collections::{BTreeMap, BTreeSet},
fmt::Debug,
sync::Arc,
};
use crate::{
buffers::{Buffer, BufferDesc},
commands, def_monotonic_id,
device::{self, DeviceOwned},
images::{self, Image, ImageDesc},
sync,
util::{self, Rgba, WithLifetime},
SwapchainFrame,
};
use crate::util::hash_f32;
use ash::vk;
use itertools::Itertools;
use petgraph::{
graph::NodeIndex,
visit::{EdgeRef, IntoNodeReferences, NodeRef},
};
def_monotonic_id!(pub GraphResourceId);
/// Description of a graph-owned resource, realized when the graph resolves.
#[derive(Debug, Clone)]
pub enum GraphResourceDesc {
    Image(ImageDesc),
    Buffer(BufferDesc),
}
/// A realized render-graph resource. `Framebuffer`/`Imported*` variants are
/// shared with code outside the graph; `Image`/`Buffer` are graph-owned.
#[derive(Debug, PartialEq, Eq)]
pub enum GraphResource {
    /// The swapchain frame this graph ultimately renders into.
    Framebuffer(Arc<SwapchainFrame>),
    ImportedImage(Arc<Image>),
    ImportedBuffer(Arc<Buffer>),
    Image(Arc<Image>),
    Buffer(Buffer),
}
#[derive(Debug, Clone, Copy)]
pub enum LoadOp {
pub struct Rgba(pub [f32; 4]);
impl std::hash::Hash for Rgba {
    /// Hash each channel through `hash_f32` (f32 has no `Hash` impl), in
    /// r, g, b, a order.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        for channel in self.0 {
            hash_f32(state, channel);
        }
    }
}
impl Rgba {
    /// Build a color from its four `f32` channels in r, g, b, a order.
    pub fn new(r: f32, g: f32, b: f32, a: f32) -> Self {
        Self([r, g, b, a])
    }

    /// Each channel clamped to `[0, 1]` and scaled to an integer in `[0, 255]`.
    pub fn into_u32(&self) -> [u32; 4] {
        self.0.map(|channel| (channel.clamp(0.0, 1.0) * 255.0) as u32)
    }

    /// The raw channel values, unchanged.
    pub fn into_f32(&self) -> [f32; 4] {
        self.0
    }

    /// Channels remapped from `[0, 1]` to `[-1, 1]` (signed-normalized).
    pub fn into_snorm(&self) -> [f32; 4] {
        self.0.map(|channel| (channel - 0.5) * 2.0)
    }

    /// Channels clamped to `[0, 1]`, scaled to `[0, 255]`, then shifted down
    /// into `[-128, 127]`.
    pub fn into_i32(&self) -> [i32; 4] {
        self.0.map(|channel| (channel.clamp(0.0, 1.0) * 255.0) as i32 - 128)
    }
}
/// How an attachment's previous contents are treated when a pass begins.
enum LoadOp {
    /// Clear to the given color.
    Clear(Rgba),
    /// Preserve the existing contents.
    Load,
    /// Contents may be discarded.
    DontCare,
}
#[derive(Debug, Clone, Copy)]
pub enum StoreOp {
/// Whether a pass's attachment results are kept after the pass ends.
enum StoreOp {
    /// Results may be discarded.
    DontCare,
    /// Persist results for later use.
    Store,
}
/// Everything a pass's record callback needs: the device, the command buffer
/// being recorded into, and the realized graph resources keyed by id.
pub struct RenderContext<'a> {
    pub device: device::Device,
    pub cmd: commands::SingleUseCommand,
    pub resources: &'a BTreeMap<GraphResourceId, GraphResource>,
}
impl RenderContext<'_> {
    /// Look up `id` and return its backing image, if the resource is
    /// image-like (owned, imported, or the framebuffer).
    pub fn get_image(&self, id: GraphResourceId) -> Option<&Arc<Image>> {
        match self.resources.get(&id)? {
            GraphResource::ImportedImage(image) => Some(image),
            GraphResource::Image(image) => Some(image),
            GraphResource::Framebuffer(frame) => Some(&frame.image),
            _ => None,
        }
    }

    /// Look up `id` and return its backing buffer, if the resource is
    /// buffer-like (owned or imported).
    pub fn get_buffer(&self, id: GraphResourceId) -> Option<&Buffer> {
        match self.resources.get(&id)? {
            GraphResource::ImportedBuffer(buffer) => Some(buffer.as_ref()),
            GraphResource::Buffer(buffer) => Some(buffer),
            _ => None,
        }
    }
}
/// A synchronization scope for a resource: pipeline stage(s), access mask,
/// and — for images — the layout the resource is expected to be in.
/// Buffer-only accesses leave `layout` as `None` (see `vertex_read` et al.).
#[derive(Debug, Clone, Copy, Default)]
pub struct Access {
    pub stage: vk::PipelineStageFlags2,
    pub mask: vk::AccessFlags2,
    pub layout: Option<vk::ImageLayout>,
}
impl core::ops::BitOr for Access {
    type Output = Self;

    /// Union of two accesses: stages and masks are OR-ed together.
    ///
    /// Panics if the two sides disagree on the image layout, since accesses
    /// with different layouts cannot be merged into one.
    fn bitor(self, rhs: Self) -> Self::Output {
        assert_eq!(self.layout, rhs.layout);
        Self {
            stage: self.stage | rhs.stage,
            mask: self.mask | rhs.mask,
            layout: self.layout,
        }
    }
}
impl Access {
pub fn undefined() -> Self {
Self {
stage: vk::PipelineStageFlags2::NONE,
mask: vk::AccessFlags2::empty(),
layout: Some(vk::ImageLayout::UNDEFINED),
}
}
pub fn general() -> Self {
Self {
stage: vk::PipelineStageFlags2::NONE,
mask: vk::AccessFlags2::empty(),
layout: Some(vk::ImageLayout::GENERAL),
}
}
pub fn transfer_read() -> Self {
Self {
stage: vk::PipelineStageFlags2::TRANSFER,
mask: vk::AccessFlags2::TRANSFER_READ,
layout: Some(vk::ImageLayout::TRANSFER_SRC_OPTIMAL),
}
}
pub fn transfer_write() -> Self {
Self {
stage: vk::PipelineStageFlags2::TRANSFER,
mask: vk::AccessFlags2::TRANSFER_WRITE,
layout: Some(vk::ImageLayout::TRANSFER_DST_OPTIMAL),
}
}
pub fn vertex_read() -> Self {
Self {
stage: vk::PipelineStageFlags2::VERTEX_ATTRIBUTE_INPUT,
mask: vk::AccessFlags2::VERTEX_ATTRIBUTE_READ,
layout: None,
}
}
pub fn index_read() -> Self {
Self {
stage: vk::PipelineStageFlags2::INDEX_INPUT,
mask: vk::AccessFlags2::INDEX_READ,
layout: None,
}
}
pub fn indirect_read() -> Self {
Self {
stage: vk::PipelineStageFlags2::DRAW_INDIRECT,
mask: vk::AccessFlags2::INDIRECT_COMMAND_READ,
layout: None,
}
}
pub fn color_attachment_read_only() -> Self {
Self {
stage: vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
mask: vk::AccessFlags2::COLOR_ATTACHMENT_READ,
layout: Some(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL),
}
}
pub fn color_attachment_write_only() -> Self {
Self {
stage: vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
mask: vk::AccessFlags2::COLOR_ATTACHMENT_WRITE,
layout: Some(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL),
}
}
pub fn color_attachment_read_write() -> Self {
Self {
stage: vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
mask: vk::AccessFlags2::COLOR_ATTACHMENT_WRITE
| vk::AccessFlags2::COLOR_ATTACHMENT_READ,
layout: Some(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL),
}
}
pub fn present() -> Self {
Self {
stage: vk::PipelineStageFlags2::NONE,
mask: vk::AccessFlags2::empty(),
layout: Some(vk::ImageLayout::PRESENT_SRC_KHR),
}
}
}
/// Pass-recording callback: receives the realized resources and command
/// buffer through [`RenderContext`]; consumed exactly once (`FnOnce`).
pub type RecordFn = dyn FnOnce(&RenderContext) -> crate::Result<()> + Send;
/// Declarative description of one render-graph pass: which resources it
/// reads/writes (with what access), and the closure that records it.
pub struct PassDesc {
    /// this pass performs `Access` read on `GraphResourceId`.
    /// some `GraphResourceId` may occur multiple times.
    pub reads: Vec<(GraphResourceId, Access)>,
    /// this pass performs `Access` write on `GraphResourceId`.
    /// some `GraphResourceId` may occur multiple times.
    pub writes: Vec<(GraphResourceId, Access)>,
    /// Records this pass's commands; called once when the graph resolves.
    pub record: Box<RecordFn>,
}
impl Default for PassDesc {
fn default() -> Self {
Self {
reads: Default::default(),
writes: Default::default(),
record: Box::new(|_| Ok(())),
}
}
}
impl Debug for PassDesc {
    /// `record` is an opaque closure, so only the access lists are shown.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("PassDesc")
            .field("reads", &self.reads)
            // fix: label was "write", inconsistent with the field name and
            // with the plural "reads" above.
            .field("writes", &self.writes)
            .finish_non_exhaustive()
    }
}
def_monotonic_id!(pub RenderGraphPassId);
// Non-imported resources remain `RenderGraphResourceDesc`s because they may be
// able to be aliased.
// This should be dual to liveness/register allocation in a compiler.
// Dummy-impl is just allocating every resource_desc itself. 5head-impl is trying
// to find resource_descs which are eq, but whose liveness doesn't overlap.
#[derive(Debug)]
pub struct RenderGraph {
    /// Descriptions of graph-owned resources, realized at resolve time.
    resource_descs: BTreeMap<GraphResourceId, GraphResourceDesc>,
    /// Realized and imported resources, keyed by the same id space.
    resources: BTreeMap<GraphResourceId, GraphResource>,
    /// Access state recorded when each resource was added or imported.
    accesses: BTreeMap<GraphResourceId, Access>,
    /// Passes in registration order.
    pass_descs: Vec<PassDesc>,
    /// the rendergraph produces these resources. Any passes on which these
    /// outputs do not depend are pruned.
    outputs: Vec<GraphResourceId>,
}
impl RenderGraph {
pub fn new() -> Self {
Self {
resource_descs: BTreeMap::new(),
resources: BTreeMap::new(),
pass_descs: Vec::new(),
accesses: BTreeMap::new(),
outputs: Vec::new(),
}
}
/// Register a graph-owned resource to be created at resolve time.
/// Its access state starts as `undefined`.
pub fn add_resource(&mut self, desc: GraphResourceDesc) -> GraphResourceId {
    let id = GraphResourceId::new();
    self.resource_descs.insert(id, desc);
    self.accesses.insert(id, Access::undefined());
    id
}
/// Mark `id` as a graph output; passes that no output depends on are pruned.
///
/// Implements the former `TODO: dedup` — repeated calls with the same id are
/// now harmless and record the output only once.
pub fn mark_as_output(&mut self, id: GraphResourceId) {
    if !self.outputs.contains(&id) {
        self.outputs.push(id);
    }
}
/// Import an externally-owned resource, recording its current access state.
/// Importing a resource that compares equal to one already present returns
/// the existing id instead of creating a duplicate.
pub fn import_resource(&mut self, res: GraphResource, access: Access) -> GraphResourceId {
    let existing = self
        .resources
        .iter()
        .find_map(|(&id, resident)| (resident == &res).then_some(id));
    match existing {
        Some(id) => id,
        None => {
            let id = GraphResourceId::new();
            self.resources.insert(id, res);
            self.accesses.insert(id, access);
            id
        }
    }
}
pub fn import_image(&mut self, image: Arc<Image>, access: Access) -> GraphResourceId {
let res = GraphResource::ImportedImage(image);
self.import_resource(res, access)
}
pub fn import_buffer(&mut self, buffer: Arc<Buffer>, access: Access) -> GraphResourceId {
let res = GraphResource::ImportedBuffer(buffer);
self.import_resource(res, access)
}
pub fn import_framebuffer(&mut self, frame: Arc<SwapchainFrame>) -> GraphResourceId {
let id = GraphResourceId::new();
self.resources.insert(id, GraphResource::Framebuffer(frame));
self.mark_as_output(id);
id
}
pub fn add_pass(&mut self, pass: PassDesc) {
self.pass_descs.push(pass);
}
// https://blog.traverseresearch.nl/render-graph-101-f42646255636
// https://github.com/EmbarkStudios/kajiya/blob/main/crates/lib/kajiya-rg/src/graph.rs
// https://themaister.net/blog/2017/08/15/render-graphs-and-vulkan-a-deep-dive/
pub fn resolve(
&mut self,
device: device::Device,
) -> crate::Result<WithLifetime<'_, commands::CommandList<commands::SingleUseCommand>>> {
// create internal resources:
for (&id, desc) in self.resource_descs.iter() {
tracing::trace!("creating resource {id:?} with {desc:?}");
match desc.clone() {
GraphResourceDesc::Image(image_desc) => {
self.resources.insert(
id,
GraphResource::Image(Arc::new(Image::new(device.clone(), image_desc)?)),
);
}
GraphResourceDesc::Buffer(buffer_desc) => {
self.resources.insert(
id,
GraphResource::Buffer(Buffer::new(device.clone(), buffer_desc)?),
);
}
}
}
let mut dag = petgraph::stable_graph::StableDiGraph::new();
#[derive(Debug, Clone, Copy)]
enum PassNode {
First,
Pass(usize),
Last,
}
let root = dag.add_node(PassNode::First);
let mut last_write = self
.resources
.keys()
.filter_map(|id| self.accesses.get(id).map(|access| (*id, (root, *access))))
.collect::<BTreeMap<_, _>>();
// insert edges between write->read edges of 2 passes
for (i, pass) in self.pass_descs.iter().enumerate() {
let node = dag.add_node(PassNode::Pass(i));
let mut read_accesses = BTreeMap::new();
for (rid, access) in &pass.reads {
read_accesses
.entry(*rid)
.and_modify(|a| {
// a single pass must not read one image twice with different layouts
*a = *a | *access;
})
.or_insert(*access);
}
for (rid, after) in read_accesses {
if let Some(&(other, before)) = last_write.get(&rid) {
tracing::trace!("adding edge between {other:?} and {node:?} for {rid:?} with ({before:?} -> {after:?})");
dag.add_edge(other, node, (rid, (before, after)));
}
}
let mut write_accesses = BTreeMap::new();
for (rid, access) in &pass.writes {
write_accesses
.entry(*rid)
.and_modify(|a| {
// a single pass must not write one image twice with different layouts
*a = *a | *access;
})
.or_insert(*access);
}
for (rid, after) in write_accesses {
last_write.insert(rid, (node, after));
}
}
// pseudo pass for tracking outputs
let output = dag.add_node(PassNode::Last);
for (id, (node, access)) in self
.outputs
.iter()
.filter_map(|&id| last_write.get(&id).cloned().map(|node| (id, node)))
{
dag.add_edge(
node,
output,
(
id,
(
access,
// make output writes available
Access {
stage: vk::PipelineStageFlags2::NONE,
mask: vk::AccessFlags2::empty(),
..access
},
),
),
);
}
// prune dead nodes
loop {
let sinks = dag
.externals(petgraph::Direction::Outgoing)
.filter(|idx| idx != &output)
.collect::<Vec<_>>();
if sinks.is_empty() {
break;
}
for sink in sinks {
dag.remove_node(sink);
}
}
// handle layout additional transitions
let edges = dag
.node_references()
.map(|(source, _)| {
let mut per_resourcelayout_multimap: BTreeMap<
(GraphResourceId, Option<vk::ImageLayout>),
Vec<(Access, NodeIndex)>,
> = BTreeMap::new();
let mut resources = BTreeSet::new();
dag.edges_directed(source, petgraph::Direction::Outgoing)
.for_each(|edge| {
let (rid, (_, after)) = edge.weight();
let target = edge.target();
let key = (*rid, after.layout);
let item = (*after, target);
resources.insert(*rid);
per_resourcelayout_multimap
.entry(key)
.and_modify(|list| list.push(item))
.or_insert(vec![item]);
});
let mut edges = vec![];
for resource in resources {
for (a, b) in per_resourcelayout_multimap
.range(
(resource, None)
..=(resource, Some(vk::ImageLayout::from_raw(i32::MAX))),
)
.tuple_windows()
{
let a = a.1;
let b = b.1;
// create new edge between all members of (a) and (b).
// topological mapping will fold all transitions into one.
for i in 0..a.len().max(b.len()) {
let from = a.get(i).unwrap_or(a.last().unwrap());
let to = b.get(i).unwrap_or(b.last().unwrap());
let edge = ((from.1, to.1), (resource, (from.0, to.0)));
edges.push(edge);
}
}
}
edges
})
.flatten()
.collect::<Vec<_>>();
for ((from, to), edge) in edges {
tracing::trace!(
"adding additional edge between {from:?} and {to:?} for {:?} with ({:?} -> {:?})",
edge.0,
edge.1 .0,
edge.1 .1
);
dag.add_edge(from, to, edge);
}
// #[cfg(any(debug_assertions, test))]
// std::fs::write(
// "render_graph.dot",
// &format!(
// "{:?}",
// petgraph::dot::Dot::with_attr_getters(
// &dag,
// &[],
// &|_graph, edgeref| { format!("label = \"{:?}\"", edgeref.weight()) },
// &|_graph, noderef| { format!("label = \"Pass({:?})\"", noderef.weight()) }
// )
// ),
// )
// .expect("writing render_graph repr");
let mut topological_map = Vec::new();
let mut top_dag = dag.clone();
// create topological map of DAG from sink to source
loop {
let (sinks, passes): (Vec<_>, Vec<_>) = top_dag
.externals(petgraph::Direction::Outgoing)
.filter(|&id| id != root)
.filter_map(|id| top_dag.node_weight(id).cloned().map(|idx| (id, idx)))
.unzip();
if sinks.is_empty() {
break;
}
let mut barriers = BTreeMap::new();
for &sink in &sinks {
top_dag
.edges_directed(sink, petgraph::Direction::Incoming)
.for_each(|edge| {
let (rid, (before, after)) = edge.weight();
barriers
.entry(*rid)
.and_modify(|(from, to)| {
*from = *from | *before;
*to = *to | *after;
})
.or_insert((*before, *after));
});
top_dag.remove_node(sink);
}
topological_map.push((passes, barriers));
}
// I don't think this can currently happen with the way passes are added.
top_dag.remove_node(root);
if top_dag.node_count() > 0 {
eprintln!("dag: {top_dag:?}");
panic!("dag is cyclic!");
}
let pool =
commands::SingleUseCommandPool::new(device.clone(), device.graphics_queue().clone())?;
let resources = &self.resources;
let cmds = topological_map
.iter()
.rev()
.map(|(set, accesses)| {
let pool = pool.clone();
let device = device.clone();
let passes = set
.into_iter()
.filter_map(|pass| {
if let &PassNode::Pass(i) = pass {
Some(i)
} else {
None
}
})
.map(|i| core::mem::take(&mut self.pass_descs[i]))
.collect::<Vec<_>>();
let cmd = pool.alloc()?;
// transitions
for (&id, &(from, to)) in accesses.iter() {
Self::transition_resource(
resources.get(&id).unwrap(),
device.dev(),
unsafe { &cmd.buffer() },
from,
to,
);
}
let ctx = RenderContext {
device,
cmd,
resources,
};
for pass in passes {
(pass.record)(&ctx)?;
}
ctx.cmd.end()?;
crate::Result::Ok(ctx.cmd)
})
.collect::<crate::Result<Vec<_>>>()?;
let cmd_list = commands::CommandList(cmds);
// let future = cmd_list.submit(None, None, Arc::new(sync::Fence::create(device.clone())?))?;
// future.block()?;
// let outputs = self
// .outputs
// .iter()
// .filter_map(|id| self.resources.remove(id).map(|res| (*id, res)))
// .collect::<BTreeMap<_, _>>();
Ok(WithLifetime::new(cmd_list))
}
pub fn get_outputs(&mut self) -> BTreeMap<GraphResourceId, GraphResource> {
let outputs = self
.outputs
.iter()
.filter_map(|id| self.resources.remove(id).map(|res| (*id, res)))
.collect::<BTreeMap<_, _>>();
outputs
}
pub fn transition_resource(
res: &GraphResource,
dev: &ash::Device,
cmd: &vk::CommandBuffer,
from: Access,
to: Access,
) {
let barrier: Barrier = match res {
GraphResource::Framebuffer(arc) => {
image_barrier(arc.image.handle(), arc.image.format(), from, to, None).into()
}
GraphResource::ImportedImage(arc) => {
image_barrier(arc.handle(), arc.format(), from, to, None).into()
}
GraphResource::ImportedBuffer(arc) => {
buffer_barrier(arc.handle(), 0, arc.len(), from, to, None).into()
}
GraphResource::Image(image) => {
image_barrier(image.handle(), image.format(), from, to, None).into()
}
GraphResource::Buffer(buffer) => {
buffer_barrier(buffer.handle(), 0, buffer.len(), from, to, None).into()
}
};
unsafe {
dev.cmd_pipeline_barrier2(*cmd, &((&barrier).into()));
}
}
fn transition_resource_to(
accesses: &mut BTreeMap<GraphResourceId, Access>,
resources: &BTreeMap<GraphResourceId, GraphResource>,
dev: &ash::Device,
cmd: &vk::CommandBuffer,
id: GraphResourceId,
to: Access,
) {
let old_access = accesses.get(&id);
let res = resources.get(&id);
if let (Some(&old_access), Some(res)) = (old_access, res) {
Self::transition_resource(res, dev, cmd, old_access, to);
accesses.insert(id, to);
}
}
}
/// A single Vulkan synchronization-2 memory barrier, for either an image
/// (carries a layout transition) or a buffer.
pub enum Barrier {
    Image(vk::ImageMemoryBarrier2<'static>),
    Buffer(vk::BufferMemoryBarrier2<'static>),
}
impl<'a> From<&'a Barrier> for vk::DependencyInfo<'a> {
    /// Wraps a single barrier in a `DependencyInfo` suitable for
    /// `vkCmdPipelineBarrier2`.
    fn from(value: &'a Barrier) -> Self {
        match value {
            Barrier::Image(barrier) => {
                vk::DependencyInfo::default().image_memory_barriers(core::slice::from_ref(barrier))
            }
            Barrier::Buffer(barrier) => {
                vk::DependencyInfo::default().buffer_memory_barriers(core::slice::from_ref(barrier))
            }
        }
    }
}
impl From<vk::ImageMemoryBarrier2<'static>> for Barrier {
fn from(value: vk::ImageMemoryBarrier2<'static>) -> Self {
Self::Image(value)
}
}
impl From<vk::BufferMemoryBarrier2<'static>> for Barrier {
fn from(value: vk::BufferMemoryBarrier2<'static>) -> Self {
Self::Buffer(value)
}
}
/// Builds a buffer memory barrier for the byte range `[offset, offset+size)`
/// transitioning from the `before` access state to `after`.
///
/// `queue_families` optionally requests a `(src, dst)` queue-family ownership
/// transfer; `None` means no transfer (`VK_QUEUE_FAMILY_IGNORED` on both sides).
pub fn buffer_barrier(
    buffer: vk::Buffer,
    offset: u64,
    size: u64,
    before: Access,
    after: Access,
    queue_families: Option<(u32, u32)>,
) -> vk::BufferMemoryBarrier2<'static> {
    let (src_family, dst_family) =
        queue_families.unwrap_or((vk::QUEUE_FAMILY_IGNORED, vk::QUEUE_FAMILY_IGNORED));
    vk::BufferMemoryBarrier2::default()
        .buffer(buffer)
        .offset(offset)
        .size(size)
        .src_stage_mask(before.stage)
        .src_access_mask(before.mask)
        .dst_stage_mask(after.stage)
        .dst_access_mask(after.mask)
        .src_queue_family_index(src_family)
        .dst_queue_family_index(dst_family)
}
pub fn image_barrier(
image: vk::Image,
struct AttachmentInfo {
size: glam::UVec2,
format: vk::Format,
before_access: Access,
after_access: Access,
queue_families: Option<(u32, u32)>,
) -> vk::ImageMemoryBarrier2<'static> {
vk::ImageMemoryBarrier2::default()
.src_access_mask(before_access.mask)
.src_stage_mask(before_access.stage)
.dst_access_mask(after_access.mask)
.dst_stage_mask(after_access.stage)
.image(image)
.old_layout(before_access.layout.unwrap_or_default())
.new_layout(after_access.layout.unwrap_or_default())
.subresource_range(vk::ImageSubresourceRange {
aspect_mask: util::image_aspect_from_format(format),
..images::SUBRESOURCERANGE_ALL
})
.src_queue_family_index(
queue_families
.map(|(src, _)| src)
.unwrap_or(vk::QUEUE_FAMILY_IGNORED),
)
.dst_queue_family_index(
queue_families
.map(|(_, dst)| dst)
.unwrap_or(vk::QUEUE_FAMILY_IGNORED),
)
load: LoadOp,
store: StoreOp,
}
// #[cfg(test)]
// mod tests {
// use super::*;
// macro_rules! def_dummy_pass {
// ($name:ident: {$queue:path, $layout_in:path, $layout_out:path}) => {
// #[derive(Debug, Clone)]
// struct $name(Vec<RenderGraphResourceId>, Vec<RenderGraphResourceId>);
// impl Pass for $name {
// fn get_read_resource_access(&self, _id: RenderGraphResourceId) -> ResourceAccess {
// ResourceAccess::default()
// }
// fn get_write_resource_access(&self, _id: RenderGraphResourceId) -> ResourceAccess {
// ResourceAccess::default()
// }
// fn get_queue_capability_requirements(&self) -> device::QueueFlags {
// $queue
// }
// fn get_read_dependencies<'a>(
// &'a self,
// ) -> Box<dyn Iterator<Item = RenderGraphResourceId> + 'a> {
// Box::new(self.0.iter().cloned())
// }
// fn get_write_dependencies<'a>(
// &'a self,
// ) -> Box<dyn Iterator<Item = RenderGraphResourceId> + 'a> {
// Box::new(self.1.iter().cloned())
// }
// fn record(&self, _ctx: &RenderContext) -> crate::Result<()> {
// Ok(())
// }
// }
// };
// }
// def_dummy_pass!(DepthPass: {
// device::QueueFlags::GRAPHICS,
// vk::ImageLayout::DEPTH_ATTACHMENT_OPTIMAL,
// vk::ImageLayout::DEPTH_ATTACHMENT_OPTIMAL});
// def_dummy_pass!(RenderPass: {
// device::QueueFlags::GRAPHICS,
// vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
// vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL});
// def_dummy_pass!(AsyncPass: {
// device::QueueFlags::ASYNC_COMPUTE,
// vk::ImageLayout::UNDEFINED,
// vk::ImageLayout::GENERAL});
// def_dummy_pass!(PostProcessPass: {
// device::QueueFlags::ASYNC_COMPUTE,
// vk::ImageLayout::GENERAL,
// vk::ImageLayout::GENERAL});
// def_dummy_pass!(PresentPass: {
// device::QueueFlags::PRESENT,
// vk::ImageLayout::PRESENT_SRC_KHR,
// vk::ImageLayout::UNDEFINED});
// def_dummy_pass!(DepthVisualisationPass: {
// device::QueueFlags::ASYNC_COMPUTE,
// vk::ImageLayout::GENERAL,
// vk::ImageLayout::UNDEFINED});
// #[test]
// fn resolve_graph() {
// let mut graph = RenderGraph::new();
// let gbuffer = graph.add_resource(RenderGraphResourceDesc::Image(ImageDesc {
// ..Default::default()
// }));
// let depth_image = graph.add_resource(RenderGraphResourceDesc::Image(ImageDesc {
// ..Default::default()
// }));
// let depth_visualisation = graph.add_resource(RenderGraphResourceDesc::Image(ImageDesc {
// ..Default::default()
// }));
// let compute_buffer = graph.add_resource(RenderGraphResourceDesc::Buffer(BufferDesc {
// ..Default::default()
// }));
// graph.add_pass(DepthPass(vec![depth_image], vec![depth_image]));
// graph.add_pass(DepthVisualisationPass(
// vec![depth_image, depth_visualisation],
// vec![depth_visualisation],
// ));
// graph.add_pass(AsyncPass(vec![compute_buffer], vec![compute_buffer]));
// graph.add_pass(RenderPass(
// vec![depth_image, compute_buffer, gbuffer],
// vec![gbuffer],
// ));
// graph.add_pass(PostProcessPass(vec![gbuffer], vec![gbuffer]));
// graph.mark_as_output(gbuffer);
// graph.mark_as_output(depth_image);
// // graph.resolve();
// }
// }
pub fn clear_pass(rg: &mut RenderGraph, color: Rgba, target: GraphResourceId) {
let reads = [(target, Access::transfer_write())].to_vec();
let writes = [(target, Access::transfer_write())].to_vec();
let record: Box<RecordFn> = Box::new({
move |ctx| {
let target = ctx.get_image(target).unwrap();
let cmd = &ctx.cmd;
cmd.clear_color_image(
target.handle(),
target.format(),
vk::ImageLayout::TRANSFER_DST_OPTIMAL,
color,
&[images::SUBRESOURCERANGE_COLOR_ALL],
);
Ok(())
}
});
rg.add_pass(PassDesc {
reads,
writes,
record,
});
struct Texture {
texture: vk::Image,
}
/// Appends a pass that declares `target` in the present access state on both
/// its read and write sides; recording does nothing, so the graph only emits
/// the barrier transitioning `target` for presentation.
pub fn present_pass(rg: &mut RenderGraph, target: GraphResourceId) {
    let noop: Box<RecordFn> = Box::new(|_| Ok(()));
    rg.add_pass(PassDesc {
        reads: vec![(target, Access::present())],
        writes: vec![(target, Access::present())],
        record: noop,
    });
}
pub struct RenderGraph {}

View file

@ -24,7 +24,7 @@ enum SyncPrimitive {
Fence(Arc<Fence>),
// actually, I think this is an awful idea because I would have to hold a
// lock on all queues.
// DeviceIdle(Device),
DeviceIdle(Device),
}
impl SyncThreadpool {
@ -64,15 +64,16 @@ impl SyncThreadpool {
impl SyncThread {
fn run(self, barrier: Arc<std::sync::Barrier>) {
tracing::trace!("spawned new sync thread");
tracing::info!("spawned new sync thread");
barrier.wait();
while let Ok((sync, waker)) = self.rx.recv_timeout(self.thread_dies_after) {
tracing::trace!("received ({:?}, {:?})", sync, waker);
tracing::info!("received ({:?}, {:?})", sync, waker);
loop {
let wait_result = match &sync {
SyncPrimitive::Fence(fence) => {
fence.wait_on(Some(self.timeout))
} // SyncPrimitive::DeviceIdle(device) => device.wait_idle(),
}
SyncPrimitive::DeviceIdle(device) => device.wait_idle(),
};
match wait_result {
@ -104,15 +105,14 @@ impl SyncThreadpool {
};
let barrier = Arc::new(std::sync::Barrier::new(2));
let _ = std::thread::Builder::new()
std::thread::Builder::new()
.name(format!("fence-waiter-{tid}"))
.spawn({
let barrier = barrier.clone();
move || {
thread.run(barrier);
}
})
.expect("sync-threadpool waiter thread failed to spawn.");
});
barrier.wait();
Some(())
}
@ -127,6 +127,8 @@ impl SyncThreadpool {
}
fn spawn_waiter(&self, fence: Arc<Fence>, waker: std::task::Waker) {
use std::sync::atomic::Ordering;
let mut msg = (SyncPrimitive::Fence(fence), waker);
while let Err(err) = self.channel.0.try_send(msg) {
match err {
@ -135,7 +137,7 @@ impl SyncThreadpool {
self.try_spawn_thread();
}
crossbeam::channel::TrySendError::Disconnected(_) => {
tracing::error!("sync-threadpool channel disconnected?");
//tracing::error!("sync-threadpool channel disconnected?");
unreachable!()
}
}
@ -180,7 +182,6 @@ impl Fence {
))
}
}
#[allow(dead_code)]
pub fn create_signaled(dev: Device) -> VkResult<Fence> {
unsafe {
Ok(Self::new(
@ -220,7 +221,6 @@ impl AsRef<vk::Fence> for Fence {
}
}
#[allow(dead_code)]
impl Semaphore {
pub fn new(device: Device) -> VkResult<Self> {
let mut type_info =
@ -260,7 +260,6 @@ pub struct FenceFuture<'a> {
impl FenceFuture<'_> {
/// Unsafe because `fence` must not be destroyed while this future is live.
#[allow(dead_code)]
pub unsafe fn from_fence(device: Device, fence: vk::Fence) -> Self {
Self {
fence: Arc::new(Fence::new(device, fence)),
@ -287,7 +286,7 @@ impl Future for FenceFuture<'_> {
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
if self.fence.is_signaled() {
tracing::trace!("fence ({:?}) is signaled", self.fence);
tracing::info!("fence ({:?}) is signaled", self.fence);
_ = self.fence.reset();
std::task::Poll::Ready(())
} else {

View file

@ -1,12 +1,12 @@
use std::ops::{Deref, DerefMut};
use std::{borrow::Cow, ops::Deref};
use ash::vk;
#[macro_export]
macro_rules! def_monotonic_id {
($vis:vis $ty:ident) => {
($ty:ident) => {
#[derive(Copy, Clone, Hash, Eq, PartialEq, PartialOrd, Ord, Debug)]
$vis struct $ty(::core::num::NonZero<u32>);
pub struct $ty(::core::num::NonZero<u32>);
impl $ty {
pub fn new() -> Self {
@ -43,7 +43,6 @@ impl<'a, T: 'a> MutexExt<'a, T> for parking_lot::Mutex<T> {
}
}
#[allow(dead_code)]
pub trait FormatExt {
fn get_component_kind(&self) -> FormatComponentKind;
fn is_f32(&self) -> bool;
@ -281,6 +280,7 @@ impl Rect2D {
}
}
pub fn new_from_size(pos: glam::IVec2, size: glam::IVec2) -> Self {
use glam::ivec2;
Self {
top_left: pos,
bottom_right: pos + size,
@ -301,23 +301,6 @@ impl Rect2D {
},
]
}
#[allow(dead_code)]
pub fn top_left(&self) -> glam::IVec2 {
self.top_left
}
#[allow(dead_code)]
pub fn size(&self) -> glam::IVec2 {
self.bottom_right - self.top_left
}
#[allow(dead_code)]
pub fn width(&self) -> i32 {
self.bottom_right.x - self.top_left.x
}
#[allow(dead_code)]
pub fn height(&self) -> i32 {
self.bottom_right.y - self.top_left.y
}
}
pub fn eq_f32(lhs: f32, rhs: f32) -> bool {
@ -340,77 +323,3 @@ pub fn hash_f32<H: std::hash::Hasher>(state: &mut H, f: f32) {
std::num::FpCategory::Subnormal | std::num::FpCategory::Normal => f.to_bits().hash(state),
}
}
pub fn timed<T, F: FnOnce() -> T>(label: &str, f: F) -> T {
let now = std::time::Instant::now();
let out = f();
tracing::info!("{label}: {}ms", now.elapsed().as_micros() as f32 / 1e3);
out
}
#[derive(Debug, Clone, Copy)]
pub struct Rgba(pub [f32; 4]);
impl std::hash::Hash for Rgba {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.0.map(|f| hash_f32(state, f));
}
}
#[allow(dead_code)]
impl Rgba {
pub fn new(r: f32, g: f32, b: f32, a: f32) -> Self {
Self([r, g, b, a])
}
pub fn into_u32(&self) -> [u32; 4] {
self.0.map(|f| (f.clamp(0.0, 1.0) * 255.0) as u32)
}
pub fn into_f32(&self) -> [f32; 4] {
self.0
}
pub fn into_snorm(&self) -> [f32; 4] {
self.0.map(|f| (f - 0.5) * 2.0)
}
pub fn into_i32(&self) -> [i32; 4] {
self.0.map(|f| (f.clamp(0.0, 1.0) * 255.0) as i32 - 128)
}
}
#[allow(dead_code)]
pub fn image_aspect_from_format(format: vk::Format) -> vk::ImageAspectFlags {
use vk::{Format, ImageAspectFlags};
match format {
Format::D32_SFLOAT | Format::X8_D24_UNORM_PACK32 | Format::D16_UNORM => {
ImageAspectFlags::DEPTH
}
Format::S8_UINT => ImageAspectFlags::STENCIL,
Format::D32_SFLOAT_S8_UINT | Format::D16_UNORM_S8_UINT | Format::D24_UNORM_S8_UINT => {
ImageAspectFlags::DEPTH | ImageAspectFlags::STENCIL
}
_ => ImageAspectFlags::COLOR,
}
}
#[repr(transparent)]
pub struct WithLifetime<'a, T>(T, std::marker::PhantomData<&'a ()>);
impl<T> WithLifetime<'_, T> {
pub fn new(t: T) -> Self {
Self(t, std::marker::PhantomData)
}
}
impl<'a, T: 'a> Deref for WithLifetime<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<'a, T: 'a> DerefMut for WithLifetime<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}