// vidya/crates/renderer/src/render_graph.rs
#![allow(dead_code)]
use std::{
collections::{BTreeMap, BTreeSet},
fmt::Debug,
sync::Arc,
};
use crate::{
buffers::{Buffer, BufferDesc},
commands, def_monotonic_id,
device::{self, DeviceOwned},
images::{self, Image, ImageDesc},
sync,
util::{self, Rgba, WithLifetime},
SwapchainFrame,
};
use ash::vk;
use itertools::Itertools;
use petgraph::{
graph::NodeIndex,
visit::{EdgeRef, IntoNodeReferences, NodeRef},
};
def_monotonic_id!(pub GraphResourceId);
#[derive(Debug, Clone)]
pub enum GraphResourceDesc {
Image(ImageDesc),
Buffer(BufferDesc),
}
#[derive(Debug, PartialEq, Eq)]
pub enum GraphResource {
Framebuffer(Arc<SwapchainFrame>),
ImportedImage(Arc<Image>),
ImportedBuffer(Arc<Buffer>),
Image(Arc<Image>),
Buffer(Buffer),
}
#[derive(Debug, Clone, Copy)]
pub enum LoadOp {
Clear(Rgba),
Load,
DontCare,
}
#[derive(Debug, Clone, Copy)]
pub enum StoreOp {
DontCare,
Store,
}
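/// Per-pass recording context handed to `PassDesc::record`: the device, the
/// single-use command buffer to record into, and the realized graph resources.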
pub struct RenderContext<'a> {
pub device: device::Device,
pub cmd: commands::SingleUseCommand,
pub resources: &'a BTreeMap<GraphResourceId, GraphResource>,
}
impl RenderContext<'_> {
pub fn get_image(&self, id: GraphResourceId) -> Option<&Arc<Image>> {
self.resources.get(&id).and_then(|res| match res {
GraphResource::ImportedImage(arc) => Some(arc),
GraphResource::Image(image) => Some(image),
GraphResource::Framebuffer(fb) => Some(&fb.image),
_ => None,
})
}
pub fn get_buffer(&self, id: GraphResourceId) -> Option<&Buffer> {
self.resources.get(&id).and_then(|res| match res {
GraphResource::ImportedBuffer(arc) => Some(arc.as_ref()),
GraphResource::Buffer(buffer) => Some(buffer),
_ => None,
})
}
}
#[derive(Debug, Clone, Copy, Default)]
pub struct Access {
pub stage: vk::PipelineStageFlags2,
pub mask: vk::AccessFlags2,
pub layout: Option<vk::ImageLayout>,
}
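// Merges two accesses to the same resource within one pass: stages and access
// masks are unioned; the image layouts (if any) must agree.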
impl core::ops::BitOr for Access {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
assert_eq!(self.layout, rhs.layout);
Self {
stage: self.stage | rhs.stage,
mask: self.mask | rhs.mask,
layout: self.layout,
}
}
}
impl Access {
pub fn undefined() -> Self {
Self {
stage: vk::PipelineStageFlags2::NONE,
mask: vk::AccessFlags2::empty(),
layout: Some(vk::ImageLayout::UNDEFINED),
}
}
pub fn general() -> Self {
Self {
stage: vk::PipelineStageFlags2::NONE,
mask: vk::AccessFlags2::empty(),
layout: Some(vk::ImageLayout::GENERAL),
}
}
pub fn transfer_read() -> Self {
Self {
stage: vk::PipelineStageFlags2::TRANSFER,
mask: vk::AccessFlags2::TRANSFER_READ,
layout: Some(vk::ImageLayout::TRANSFER_SRC_OPTIMAL),
}
}
pub fn transfer_write() -> Self {
Self {
stage: vk::PipelineStageFlags2::TRANSFER,
mask: vk::AccessFlags2::TRANSFER_WRITE,
layout: Some(vk::ImageLayout::TRANSFER_DST_OPTIMAL),
}
}
pub fn vertex_read() -> Self {
Self {
stage: vk::PipelineStageFlags2::VERTEX_ATTRIBUTE_INPUT,
mask: vk::AccessFlags2::VERTEX_ATTRIBUTE_READ,
layout: None,
}
}
pub fn index_read() -> Self {
Self {
stage: vk::PipelineStageFlags2::INDEX_INPUT,
mask: vk::AccessFlags2::INDEX_READ,
layout: None,
}
}
pub fn indirect_read() -> Self {
Self {
stage: vk::PipelineStageFlags2::DRAW_INDIRECT,
mask: vk::AccessFlags2::INDIRECT_COMMAND_READ,
layout: None,
}
}
pub fn color_attachment_read_only() -> Self {
Self {
stage: vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
mask: vk::AccessFlags2::COLOR_ATTACHMENT_READ,
layout: Some(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL),
}
}
pub fn color_attachment_write_only() -> Self {
Self {
stage: vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
mask: vk::AccessFlags2::COLOR_ATTACHMENT_WRITE,
layout: Some(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL),
}
}
pub fn color_attachment_read_write() -> Self {
Self {
stage: vk::PipelineStageFlags2::COLOR_ATTACHMENT_OUTPUT,
mask: vk::AccessFlags2::COLOR_ATTACHMENT_WRITE
| vk::AccessFlags2::COLOR_ATTACHMENT_READ,
layout: Some(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL),
}
}
pub fn present() -> Self {
Self {
stage: vk::PipelineStageFlags2::NONE,
mask: vk::AccessFlags2::empty(),
layout: Some(vk::ImageLayout::PRESENT_SRC_KHR),
}
}
}
pub type RecordFn = dyn FnOnce(&RenderContext) -> crate::Result<()> + Send;
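/// Describes a single render-graph pass: the resources it reads and writes,
/// with the `Access` each use requires, plus a closure that records the pass's
/// commands into the provided `RenderContext`.
///
/// A minimal sketch of filling one in by hand (the `src`/`dst` ids are
/// illustrative; compare `clear_pass` at the bottom of this file):
///
/// ```ignore
/// let pass = PassDesc {
///     reads: vec![(src, Access::transfer_read())],
///     writes: vec![(dst, Access::transfer_write())],
///     record: Box::new(move |ctx| {
///         let _src = ctx.get_buffer(src).unwrap();
///         let _dst = ctx.get_image(dst).unwrap();
///         // record the actual copy through ctx.cmd here
///         Ok(())
///     }),
/// };
/// ```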
pub struct PassDesc {
/// Resources this pass reads, each paired with the `Access` that read requires.
/// A `GraphResourceId` may occur multiple times.
pub reads: Vec<(GraphResourceId, Access)>,
/// Resources this pass writes, each paired with the `Access` that write requires.
/// A `GraphResourceId` may occur multiple times.
pub writes: Vec<(GraphResourceId, Access)>,
pub record: Box<RecordFn>,
}
impl Default for PassDesc {
fn default() -> Self {
Self {
reads: Default::default(),
writes: Default::default(),
record: Box::new(|_| Ok(())),
}
}
}
impl Debug for PassDesc {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("PassDesc")
.field("reads", &self.reads)
.field("write", &self.writes)
.finish_non_exhaustive()
}
}
def_monotonic_id!(pub RenderGraphPassId);
// Non-imported resources remain `GraphResourceDesc`s because they may be
// aliasable. This is dual to liveness/register allocation in a compiler:
// the dummy impl just allocates every resource desc itself, while a smarter
// impl would find descs that are equal but whose liveness doesn't overlap
// and back them with a single allocation.
#[derive(Debug)]
pub struct RenderGraph {
resource_descs: BTreeMap<GraphResourceId, GraphResourceDesc>,
resources: BTreeMap<GraphResourceId, GraphResource>,
accesses: BTreeMap<GraphResourceId, Access>,
pass_descs: Vec<PassDesc>,
/// The render graph produces these resources; any pass that none of these
/// outputs (transitively) depends on is pruned.
outputs: Vec<GraphResourceId>,
}
impl RenderGraph {
pub fn new() -> Self {
Self {
resource_descs: BTreeMap::new(),
resources: BTreeMap::new(),
pass_descs: Vec::new(),
accesses: BTreeMap::new(),
outputs: Vec::new(),
}
}
pub fn add_resource(&mut self, desc: GraphResourceDesc) -> GraphResourceId {
let id = GraphResourceId::new();
self.resource_descs.insert(id, desc);
self.accesses.insert(id, Access::undefined());
id
}
pub fn mark_as_output(&mut self, id: GraphResourceId) {
// TODO: dedup
self.outputs.push(id);
}
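/// Imports an externally owned resource along with its current access state.
/// Importing a resource that compares equal to one already imported returns
/// the existing id instead of registering it twice.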
pub fn import_resource(&mut self, res: GraphResource, access: Access) -> GraphResourceId {
if let Some((&id, _)) = self
.resources
.iter()
.find(|(_, resident)| **resident == res)
{
id
} else {
let id = GraphResourceId::new();
self.resources.insert(id, res);
self.accesses.insert(id, access);
id
}
}
pub fn import_image(&mut self, image: Arc<Image>, access: Access) -> GraphResourceId {
let res = GraphResource::ImportedImage(image);
self.import_resource(res, access)
}
pub fn import_buffer(&mut self, buffer: Arc<Buffer>, access: Access) -> GraphResourceId {
let res = GraphResource::ImportedBuffer(buffer);
self.import_resource(res, access)
}
pub fn import_framebuffer(&mut self, frame: Arc<SwapchainFrame>) -> GraphResourceId {
let id = GraphResourceId::new();
self.resources.insert(id, GraphResource::Framebuffer(frame));
self.mark_as_output(id);
id
}
pub fn add_pass(&mut self, pass: PassDesc) {
self.pass_descs.push(pass);
}
// https://blog.traverseresearch.nl/render-graph-101-f42646255636
// https://github.com/EmbarkStudios/kajiya/blob/main/crates/lib/kajiya-rg/src/graph.rs
// https://themaister.net/blog/2017/08/15/render-graphs-and-vulkan-a-deep-dive/
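/// Resolves the graph into recorded command buffers:
/// 1. realize every internal `GraphResourceDesc` as an image or buffer,
/// 2. build a DAG with an edge from each resource's last writer to its
///    subsequent readers (plus write-after-read edges),
/// 3. prune passes that no marked output depends on,
/// 4. add extra edges so reads requiring different image layouts are
///    transitioned in sequence,
/// 5. peel the DAG into batches from sink to source, merging the barriers
///    each batch needs, then record the batches in source-to-sink order with
///    those barriers emitted first.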
pub fn resolve(
&mut self,
device: device::Device,
) -> crate::Result<WithLifetime<'_, commands::CommandList<commands::SingleUseCommand>>> {
// create internal resources:
for (&id, desc) in self.resource_descs.iter() {
tracing::trace!("creating resource {id:?} with {desc:?}");
match desc.clone() {
GraphResourceDesc::Image(image_desc) => {
self.resources.insert(
id,
GraphResource::Image(Arc::new(Image::new(device.clone(), image_desc)?)),
);
}
GraphResourceDesc::Buffer(buffer_desc) => {
self.resources.insert(
id,
GraphResource::Buffer(Buffer::new(device.clone(), buffer_desc)?),
);
}
}
}
#[derive(Debug, Clone, Copy)]
enum PassNode {
First,
Pass(usize),
Last,
}
let mut dag = petgraph::stable_graph::StableDiGraph::new();
let root = dag.add_node(PassNode::First);
let mut last_write: BTreeMap<GraphResourceId, (NodeIndex, Access)> = self
.resources
.keys()
.filter_map(|id| self.accesses.get(id).map(|access| (*id, (root, *access))))
.collect::<BTreeMap<_, _>>();
let mut last_read: BTreeMap<GraphResourceId, (NodeIndex, Access)> = BTreeMap::new();
// TODO: rewrite edge finding properly.
// Detecting whether this graph is cyclic is actually non-trivial: a pass might
// read both resource 1 and resource 2, where resource 2 is the product of
// another pass writing to resource 1. That could be resolved by copying
// resource 1 before the writing pass.
// tl;dr: write-after-read makes this all more complicated.
// Below: insert an edge from a resource's last writer to each later reader.
for (i, pass) in self.pass_descs.iter().enumerate() {
let node = dag.add_node(PassNode::Pass(i));
let mut read_accesses = BTreeMap::new();
for (rid, access) in &pass.reads {
read_accesses
.entry(*rid)
.and_modify(|a| {
// a single pass must not read one image twice with different layouts
*a = *a | *access;
})
.or_insert(*access);
}
for (rid, after) in read_accesses {
if let Some(&(other, before)) = last_write.get(&rid) {
tracing::trace!("adding edge between {other:?} and {node:?} for {rid:?} with ({before:?} -> {after:?})");
dag.add_edge(other, node, (rid, (before, after)));
}
last_read.insert(rid, (node, after));
}
let mut write_accesses = BTreeMap::new();
for (rid, access) in &pass.writes {
write_accesses
.entry(*rid)
.and_modify(|a| {
// a single pass must not write one image twice with different layouts
*a = *a | *access;
})
.or_insert(*access);
}
for (rid, after) in write_accesses {
last_write.insert(rid, (node, after));
if let Some(&(other, read)) = last_read.get(&rid)
&& other != node
{
tracing::trace!("adding edge between {other:?} and {node:?} for {rid:?} with ({read:?} -> {after:?}) (WaR)");
dag.add_edge(other, node, (rid, (read, after)));
}
}
}
// pseudo pass for tracking outputs
let output = dag.add_node(PassNode::Last);
for (id, (node, access)) in self
.outputs
.iter()
.filter_map(|&id| last_write.get(&id).cloned().map(|node| (id, node)))
{
dag.add_edge(
node,
output,
(
id,
(
access,
// make output writes available
Access {
stage: vk::PipelineStageFlags2::NONE,
mask: vk::AccessFlags2::empty(),
..access
},
),
),
);
}
// prune dead nodes
loop {
let sinks = dag
.externals(petgraph::Direction::Outgoing)
.filter(|idx| idx != &output)
.collect::<Vec<_>>();
if sinks.is_empty() {
break;
}
for sink in sinks {
dag.remove_node(sink);
}
}
// Handle additional layout transitions: when a writer's result is read with
// different image layouts, group this node's outgoing edges per
// (resource, layout) and chain edges between consecutive groups so the
// resource is transitioned through each required layout in order.
let edges = dag
.node_references()
.map(|(source, _)| {
let mut per_resourcelayout_multimap: BTreeMap<
(GraphResourceId, Option<vk::ImageLayout>),
Vec<(Access, NodeIndex)>,
> = BTreeMap::new();
let mut resources = BTreeSet::new();
dag.edges_directed(source, petgraph::Direction::Outgoing)
.for_each(|edge| {
let (rid, (_, after)) = edge.weight();
let target = edge.target();
let key = (*rid, after.layout);
let item = (*after, target);
resources.insert(*rid);
per_resourcelayout_multimap
.entry(key)
.and_modify(|list| list.push(item))
.or_insert(vec![item]);
});
let mut edges = vec![];
for resource in resources {
for (a, b) in per_resourcelayout_multimap
.range(
(resource, None)
..=(resource, Some(vk::ImageLayout::from_raw(i32::MAX))),
)
.tuple_windows()
{
let a = a.1;
let b = b.1;
// Create edges pairing the members of group (a) with the members of group (b);
// the topological mapping below folds these transitions into one barrier.
for i in 0..a.len().max(b.len()) {
let from = a.get(i).unwrap_or(a.last().unwrap());
let to = b.get(i).unwrap_or(b.last().unwrap());
let edge = ((from.1, to.1), (resource, (from.0, to.0)));
edges.push(edge);
}
}
}
edges
})
.flatten()
.collect::<Vec<_>>();
for ((from, to), edge) in edges {
tracing::trace!(
"adding additional edge between {from:?} and {to:?} for {:?} with ({:?} -> {:?})",
edge.0,
edge.1 .0,
edge.1 .1
);
dag.add_edge(from, to, edge);
}
// #[cfg(any(debug_assertions, test))]
// std::fs::write(
// "render_graph.dot",
// &format!(
// "{:?}",
// petgraph::dot::Dot::with_attr_getters(
// &dag,
// &[],
// &|_graph, edgeref| { format!("label = \"{:?}\"", edgeref.weight()) },
// &|_graph, noderef| { format!("label = \"Pass({:?})\"", noderef.weight()) }
// )
// ),
// )
// .expect("writing render_graph repr");
let mut topological_map = Vec::new();
let mut top_dag = dag.clone();
// Build a topological map of the DAG from sink to source: each entry is a
// batch of passes that can be recorded together, plus the merged barriers
// that must be issued before them. The map is consumed in reverse below.
loop {
let (sinks, passes): (Vec<_>, Vec<_>) = top_dag
.externals(petgraph::Direction::Outgoing)
.filter(|&id| id != root)
.filter_map(|id| top_dag.node_weight(id).cloned().map(|idx| (id, idx)))
.unzip();
if sinks.is_empty() {
break;
}
let mut barriers = BTreeMap::new();
for &sink in &sinks {
top_dag
.edges_directed(sink, petgraph::Direction::Incoming)
.for_each(|edge| {
let (rid, (before, after)) = edge.weight();
barriers
.entry(*rid)
.and_modify(|(from, to)| {
*from = *from | *before;
*to = *to | *after;
})
.or_insert((*before, *after));
});
top_dag.remove_node(sink);
}
topological_map.push((passes, barriers));
}
// Any nodes remaining at this point (other than the root) would mean the
// graph was cyclic. I don't think this can currently happen with the way
// passes are added.
top_dag.remove_node(root);
if top_dag.node_count() > 0 {
eprintln!("dag: {top_dag:?}");
panic!("dag is cyclic!");
}
let pool =
commands::SingleUseCommandPool::new(device.clone(), device.graphics_queue().clone())?;
let resources = &self.resources;
let cmds = topological_map
.iter()
.rev()
.map(|(set, accesses)| {
let pool = pool.clone();
let device = device.clone();
let passes = set
.into_iter()
.filter_map(|pass| {
if let &PassNode::Pass(i) = pass {
Some(i)
} else {
None
}
})
.map(|i| core::mem::take(&mut self.pass_descs[i]))
.collect::<Vec<_>>();
let cmd = pool.alloc()?;
// transitions
for (&id, &(from, to)) in accesses.iter() {
Self::transition_resource(
resources.get(&id).unwrap(),
device.dev(),
unsafe { &cmd.buffer() },
from,
to,
);
}
let ctx = RenderContext {
device,
cmd,
resources,
};
for pass in passes {
(pass.record)(&ctx)?;
}
ctx.cmd.end()?;
crate::Result::Ok(ctx.cmd)
})
.collect::<crate::Result<Vec<_>>>()?;
let cmd_list = commands::CommandList(cmds);
// let future = cmd_list.submit(None, None, Arc::new(sync::Fence::create(device.clone())?))?;
// future.block()?;
// let outputs = self
// .outputs
// .iter()
// .filter_map(|id| self.resources.remove(id).map(|res| (*id, res)))
// .collect::<BTreeMap<_, _>>();
Ok(WithLifetime::new(cmd_list))
}
pub fn get_outputs(&mut self) -> BTreeMap<GraphResourceId, GraphResource> {
let outputs = self
.outputs
.iter()
.filter_map(|id| self.resources.remove(id).map(|res| (*id, res)))
.collect::<BTreeMap<_, _>>();
outputs
}
pub fn transition_resource(
res: &GraphResource,
dev: &ash::Device,
cmd: &vk::CommandBuffer,
from: Access,
to: Access,
) {
let barrier: Barrier = match res {
GraphResource::Framebuffer(arc) => {
image_barrier(arc.image.handle(), arc.image.format(), from, to, None).into()
}
GraphResource::ImportedImage(arc) => {
image_barrier(arc.handle(), arc.format(), from, to, None).into()
}
GraphResource::ImportedBuffer(arc) => {
buffer_barrier(arc.handle(), 0, arc.len(), from, to, None).into()
}
GraphResource::Image(image) => {
image_barrier(image.handle(), image.format(), from, to, None).into()
}
GraphResource::Buffer(buffer) => {
buffer_barrier(buffer.handle(), 0, buffer.len(), from, to, None).into()
}
};
unsafe {
dev.cmd_pipeline_barrier2(*cmd, &((&barrier).into()));
}
}
fn transition_resource_to(
accesses: &mut BTreeMap<GraphResourceId, Access>,
resources: &BTreeMap<GraphResourceId, GraphResource>,
dev: &ash::Device,
cmd: &vk::CommandBuffer,
id: GraphResourceId,
to: Access,
) {
let old_access = accesses.get(&id);
let res = resources.get(&id);
if let (Some(&old_access), Some(res)) = (old_access, res) {
Self::transition_resource(res, dev, cmd, old_access, to);
accesses.insert(id, to);
}
}
}
pub enum Barrier {
Image(vk::ImageMemoryBarrier2<'static>),
Buffer(vk::BufferMemoryBarrier2<'static>),
}
impl<'a> From<&'a Barrier> for vk::DependencyInfo<'a> {
fn from(value: &'a Barrier) -> Self {
let info = vk::DependencyInfo::default();
let info = match value {
Barrier::Image(barrier) => info.image_memory_barriers(core::slice::from_ref(barrier)),
Barrier::Buffer(barrier) => info.buffer_memory_barriers(core::slice::from_ref(barrier)),
};
info
}
}
impl From<vk::ImageMemoryBarrier2<'static>> for Barrier {
fn from(value: vk::ImageMemoryBarrier2<'static>) -> Self {
Self::Image(value)
}
}
impl From<vk::BufferMemoryBarrier2<'static>> for Barrier {
fn from(value: vk::BufferMemoryBarrier2<'static>) -> Self {
Self::Buffer(value)
}
}
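/// Builds a `VkBufferMemoryBarrier2` for the given buffer range from a
/// `before`/`after` access pair. `None` for `queue_families` uses
/// `VK_QUEUE_FAMILY_IGNORED` on both sides, i.e. no ownership transfer.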
pub fn buffer_barrier(
buffer: vk::Buffer,
offset: u64,
size: u64,
before: Access,
after: Access,
queue_families: Option<(u32, u32)>,
) -> vk::BufferMemoryBarrier2<'static> {
vk::BufferMemoryBarrier2::default()
.buffer(buffer)
.offset(offset)
.size(size)
.src_access_mask(before.mask)
.src_stage_mask(before.stage)
.dst_access_mask(after.mask)
.dst_stage_mask(after.stage)
.src_queue_family_index(
queue_families
.map(|(src, _)| src)
.unwrap_or(vk::QUEUE_FAMILY_IGNORED),
)
.dst_queue_family_index(
queue_families
.map(|(_, dst)| dst)
.unwrap_or(vk::QUEUE_FAMILY_IGNORED),
)
}
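/// Builds a `VkImageMemoryBarrier2` covering all subresources of `image` from
/// a `before_access`/`after_access` pair. Accesses without a layout fall back
/// to `UNDEFINED`; `None` for `queue_families` means no ownership transfer.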
pub fn image_barrier(
image: vk::Image,
format: vk::Format,
before_access: Access,
after_access: Access,
queue_families: Option<(u32, u32)>,
) -> vk::ImageMemoryBarrier2<'static> {
vk::ImageMemoryBarrier2::default()
.src_access_mask(before_access.mask)
.src_stage_mask(before_access.stage)
.dst_access_mask(after_access.mask)
.dst_stage_mask(after_access.stage)
.image(image)
.old_layout(before_access.layout.unwrap_or_default())
.new_layout(after_access.layout.unwrap_or_default())
.subresource_range(vk::ImageSubresourceRange {
aspect_mask: util::image_aspect_from_format(format),
..images::SUBRESOURCERANGE_ALL
})
.src_queue_family_index(
queue_families
.map(|(src, _)| src)
.unwrap_or(vk::QUEUE_FAMILY_IGNORED),
)
.dst_queue_family_index(
queue_families
.map(|(_, dst)| dst)
.unwrap_or(vk::QUEUE_FAMILY_IGNORED),
)
}
// #[cfg(test)]
// mod tests {
// use super::*;
// macro_rules! def_dummy_pass {
// ($name:ident: {$queue:path, $layout_in:path, $layout_out:path}) => {
// #[derive(Debug, Clone)]
// struct $name(Vec<RenderGraphResourceId>, Vec<RenderGraphResourceId>);
// impl Pass for $name {
// fn get_read_resource_access(&self, _id: RenderGraphResourceId) -> ResourceAccess {
// ResourceAccess::default()
// }
// fn get_write_resource_access(&self, _id: RenderGraphResourceId) -> ResourceAccess {
// ResourceAccess::default()
// }
// fn get_queue_capability_requirements(&self) -> device::QueueFlags {
// $queue
// }
// fn get_read_dependencies<'a>(
// &'a self,
// ) -> Box<dyn Iterator<Item = RenderGraphResourceId> + 'a> {
// Box::new(self.0.iter().cloned())
// }
// fn get_write_dependencies<'a>(
// &'a self,
// ) -> Box<dyn Iterator<Item = RenderGraphResourceId> + 'a> {
// Box::new(self.1.iter().cloned())
// }
// fn record(&self, _ctx: &RenderContext) -> crate::Result<()> {
// Ok(())
// }
// }
// };
// }
// def_dummy_pass!(DepthPass: {
// device::QueueFlags::GRAPHICS,
// vk::ImageLayout::DEPTH_ATTACHMENT_OPTIMAL,
// vk::ImageLayout::DEPTH_ATTACHMENT_OPTIMAL});
// def_dummy_pass!(RenderPass: {
// device::QueueFlags::GRAPHICS,
// vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
// vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL});
// def_dummy_pass!(AsyncPass: {
// device::QueueFlags::ASYNC_COMPUTE,
// vk::ImageLayout::UNDEFINED,
// vk::ImageLayout::GENERAL});
// def_dummy_pass!(PostProcessPass: {
// device::QueueFlags::ASYNC_COMPUTE,
// vk::ImageLayout::GENERAL,
// vk::ImageLayout::GENERAL});
// def_dummy_pass!(PresentPass: {
// device::QueueFlags::PRESENT,
// vk::ImageLayout::PRESENT_SRC_KHR,
// vk::ImageLayout::UNDEFINED});
// def_dummy_pass!(DepthVisualisationPass: {
// device::QueueFlags::ASYNC_COMPUTE,
// vk::ImageLayout::GENERAL,
// vk::ImageLayout::UNDEFINED});
// #[test]
// fn resolve_graph() {
// let mut graph = RenderGraph::new();
// let gbuffer = graph.add_resource(RenderGraphResourceDesc::Image(ImageDesc {
// ..Default::default()
// }));
// let depth_image = graph.add_resource(RenderGraphResourceDesc::Image(ImageDesc {
// ..Default::default()
// }));
// let depth_visualisation = graph.add_resource(RenderGraphResourceDesc::Image(ImageDesc {
// ..Default::default()
// }));
// let compute_buffer = graph.add_resource(RenderGraphResourceDesc::Buffer(BufferDesc {
// ..Default::default()
// }));
// graph.add_pass(DepthPass(vec![depth_image], vec![depth_image]));
// graph.add_pass(DepthVisualisationPass(
// vec![depth_image, depth_visualisation],
// vec![depth_visualisation],
// ));
// graph.add_pass(AsyncPass(vec![compute_buffer], vec![compute_buffer]));
// graph.add_pass(RenderPass(
// vec![depth_image, compute_buffer, gbuffer],
// vec![gbuffer],
// ));
// graph.add_pass(PostProcessPass(vec![gbuffer], vec![gbuffer]));
// graph.mark_as_output(gbuffer);
// graph.mark_as_output(depth_image);
// // graph.resolve();
// }
// }
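/// Adds a pass that clears `target` (which must resolve to an image) to
/// `color` via a clear-color-image command in `TRANSFER_DST_OPTIMAL` layout.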
pub fn clear_pass(rg: &mut RenderGraph, color: Rgba, target: GraphResourceId) {
// listing `target` in `reads` with transfer-write access is what makes
// resolve() emit the transition into TRANSFER_DST_OPTIMAL before this pass.
let reads = vec![(target, Access::transfer_write())];
let writes = vec![(target, Access::transfer_write())];
let record: Box<RecordFn> = Box::new({
move |ctx| {
let target = ctx.get_image(target).unwrap();
let cmd = &ctx.cmd;
cmd.clear_color_image(
target.handle(),
target.format(),
vk::ImageLayout::TRANSFER_DST_OPTIMAL,
color,
&[images::SUBRESOURCERANGE_COLOR_ALL],
);
Ok(())
}
});
rg.add_pass(PassDesc {
reads,
writes,
record,
});
}
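/// Adds a pass that records no commands; it exists only so the graph
/// transitions `target` into `PRESENT_SRC_KHR` before presentation.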
pub fn present_pass(rg: &mut RenderGraph, target: GraphResourceId) {
let record: Box<RecordFn> = Box::new(|_| Ok(()));
let reads = vec![(target, Access::present())];
let writes = vec![(target, Access::present())];
rg.add_pass(PassDesc {
reads,
writes,
record,
});
}
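// A minimal end-to-end sketch of driving this module. `example_clear_and_present`
// is illustrative and unused by the rest of the crate: it assumes the caller
// already has a `device::Device` and an acquired `SwapchainFrame`, and it leaves
// submission of the returned command list to the caller.
fn example_clear_and_present(
    device: device::Device,
    frame: Arc<SwapchainFrame>,
    clear_color: Rgba,
) -> crate::Result<()> {
    let mut graph = RenderGraph::new();
    // importing the framebuffer also marks it as a graph output
    let target = graph.import_framebuffer(frame);
    // clear the frame, then transition it for presentation
    clear_pass(&mut graph, clear_color, target);
    present_pass(&mut graph, target);
    // resolve() records the command buffers; the WithLifetime wrapper ties the
    // returned command list to the graph's resources
    let _cmds = graph.resolve(device)?;
    Ok(())
}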