// vidya/crates/renderer/src/lib.rs
// (3327 lines, 127 KiB, Rust)

#![feature(
c_str_module,
closure_lifetime_binder,
let_chains,
negative_impls,
map_try_insert,
debug_closure_helpers,
slice_partition_dedup
)]
use std::{
borrow::Borrow,
collections::{BTreeMap, BTreeSet, HashMap},
ffi::{CStr, CString},
fmt::Debug,
hint::black_box,
marker::PhantomData,
sync::{
atomic::{AtomicU32, AtomicU64},
Arc,
},
};
use egui::Color32;
use indexmap::IndexMap;
use parking_lot::{Mutex, MutexGuard, RwLock};
use ash::{
khr,
prelude::VkResult,
vk::{self, Handle},
Entry,
};
use dyn_clone::DynClone;
use rand::{Rng, SeedableRng};
use raw_window_handle::RawDisplayHandle;
mod buffers;
mod commands;
mod device;
#[path = "egui.rs"]
mod egui_pass;
mod images;
mod pipeline;
mod render_graph;
mod sync;
mod util;
use device::{Device, DeviceOwned, DeviceQueueFamilies};
mod texture {
    //! Book-keeping for images exposed to the renderer as textures.
    use std::{collections::BTreeMap, sync::Arc};
    use crate::{def_monotonic_id, images::Image, Device};

    def_monotonic_id!(pub TextureId);

    /// Owns the id -> image table. Ids are handed out monotonically via
    /// `TextureId::new()`.
    pub struct TextureManager {
        pub textures: BTreeMap<TextureId, Arc<Image>>,
        // presumably kept so textures never outlive the device — TODO confirm
        #[allow(unused)]
        dev: Device,
    }

    impl TextureManager {
        /// Creates an empty manager bound to `dev`.
        pub fn new(dev: Device) -> Self {
            Self {
                textures: BTreeMap::new(),
                dev,
            }
        }

        /// Registers `image` under a freshly allocated id and returns that id.
        pub fn insert_image(&mut self, image: Arc<Image>) -> TextureId {
            let id = TextureId::new();
            self.insert_image_with_id(id, image);
            id
        }

        /// Registers `image` under a caller-chosen id, replacing any previous
        /// entry for that id.
        pub fn insert_image_with_id(&mut self, id: TextureId, image: Arc<Image>) {
            self.textures.insert(id, image);
        }

        /// Removes the mapping for `id`, returning the image if one was present.
        pub fn remove_texture(&mut self, id: TextureId) -> Option<Arc<Image>> {
            self.textures.remove(&id)
        }

        /// Returns a cheap `Arc` clone of the image registered under `id`.
        pub fn get_texture(&self, id: TextureId) -> Option<Arc<Image>> {
            self.textures.get(&id).cloned()
        }
    }
}
use util::Rgba;
/// Top-level error type for the renderer crate.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    // acquire/present reported a suboptimal swapchain; callers should recreate it
    #[error("Swapchain suboptimal.")]
    SuboptimalSwapchain,
    // failed to load the Vulkan shared library / entry points
    #[error(transparent)]
    LoadingError(#[from] ash::LoadingError),
    // any raw VkResult error code propagated from an ash call
    #[error(transparent)]
    Result(#[from] ash::vk::Result),
    // a fixed-size Vulkan string field did not contain a NUL terminator
    #[error(transparent)]
    CStrError(#[from] core::ffi::c_str::FromBytesUntilNulError),
    // an interior NUL was found while building a CString (e.g. app name)
    #[error(transparent)]
    NulError(#[from] std::ffi::NulError),
    // device selection found no physical device satisfying the requirements
    #[error("No Physical Device found.")]
    NoPhysicalDevice,
    #[error(transparent)]
    Io(#[from] std::io::Error),
}
/// Crate-local result alias; shadows `core::result::Result` on purpose.
type Result<T> = core::result::Result<T, Error>;
/// Borrowed list of NUL-terminated name pointers, in the form Vulkan's
/// `enabled_*_names` fields expect. The phantom lifetime ties the raw
/// pointers to the `CStr`s they were taken from.
struct VkNameList<'a> {
    names: Vec<*const i8>,
    _pd: PhantomData<&'a ()>,
}

impl<'a> VkNameList<'a> {
    /// Collects the raw pointer of every `CStr` in `strs`.
    fn from_strs(strs: &[&'a CStr]) -> Self {
        let mut names = Vec::with_capacity(strs.len());
        for s in strs {
            names.push(s.as_ptr());
        }
        Self {
            names,
            _pd: PhantomData,
        }
    }
}
/// Name plus minimum spec version of a requested device extension.
#[derive(Debug, Clone)]
#[allow(dead_code)]
struct DeviceExtension<'a> {
    name: &'a core::ffi::CStr,
    version: u32,
}
/// Builds a `vk::ExtensionProperties` from an extension name and spec version.
///
/// Panics (via `unwrap`) if `name` does not fit the fixed-size
/// `extension_name` field — fine for the compile-time `NAME` constants this
/// is called with. NOTE(review): "extention" is a typo, kept because callers
/// throughout the file use this name.
fn make_extention_properties(name: &CStr, version: u32) -> vk::ExtensionProperties {
    vk::ExtensionProperties::default()
        .spec_version(version)
        .extension_name(name)
        .unwrap()
}
/// returns true if lhs and rhs have the same name and lhs spec_version is less
/// than or equal to rhs spec_version
#[allow(dead_code)]
fn compatible_extension_properties(
    lhs: &vk::ExtensionProperties,
    rhs: &vk::ExtensionProperties,
) -> bool {
    match (lhs.extension_name_as_c_str(), rhs.extension_name_as_c_str()) {
        (Ok(lhs_name), Ok(rhs_name)) => {
            lhs_name == rhs_name && lhs.spec_version <= rhs.spec_version
        }
        // a name field that is not valid NUL-terminated text never matches
        _ => false,
    }
}
/// Clonable handle to a mutex-guarded `vk::Queue`, paired with the index of
/// the queue family it was created from.
#[derive(Clone, Debug)]
pub struct Queue(Arc<Mutex<vk::Queue>>, u32);

impl Queue {
    /// Fetches queue `index` of queue family `family` from `device`.
    fn new(device: &ash::Device, family: u32, index: u32) -> Self {
        let raw = unsafe { device.get_device_queue(family, index) };
        Self(Arc::new(Mutex::new(raw)), family)
    }

    /// Index of the queue family this queue belongs to.
    pub fn family(&self) -> u32 {
        self.1
    }

    /// Runs `map` with the raw queue while the lock is held; the lock is
    /// released before this returns.
    pub fn with_locked<T, F: FnOnce(vk::Queue) -> T>(&self, map: F) -> T {
        map(*self.0.lock())
    }

    /// Locks the queue for the lifetime of the returned guard.
    pub fn lock(&self) -> MutexGuard<'_, vk::Queue> {
        self.0.lock()
    }
}
/// Marker trait: a feature struct that can be chained onto
/// `PhysicalDeviceFeatures2` and also printed/sent across threads.
pub trait ExtendsDeviceFeatures2Debug:
    vk::ExtendsPhysicalDeviceFeatures2 + Debug + Send + Sync
{
}
/// Marker trait: a property struct that can be chained onto
/// `PhysicalDeviceProperties2`; `DynClone` lets boxed trait objects be cloned.
pub trait ExtendsDeviceProperties2Debug:
    vk::ExtendsPhysicalDeviceProperties2 + Debug + DynClone + Send + Sync
{
}
// blanket impls: anything satisfying the bounds automatically implements the marker
impl<T: vk::ExtendsPhysicalDeviceFeatures2 + Debug + Send + Sync> ExtendsDeviceFeatures2Debug
    for T
{
}
impl<T: vk::ExtendsPhysicalDeviceProperties2 + Debug + DynClone + Send + Sync>
    ExtendsDeviceProperties2Debug for T
{
}
/// A set of Vulkan features + extensions, used both to express what the
/// application requires and to hold what a physical device supports
/// (compare the two with `compatible_with`).
#[derive(Default, Debug)]
pub struct PhysicalDeviceFeatures {
    // Vulkan API version (as from `vk::make_api_version`)
    pub version: u32,
    pub physical_features_10: vk::PhysicalDeviceFeatures,
    // 1.1/1.2/1.3 feature structs; `None` means "not requested / not queried"
    pub physical_features_11: Option<vk::PhysicalDeviceVulkan11Features<'static>>,
    pub physical_features_12: Option<vk::PhysicalDeviceVulkan12Features<'static>>,
    pub physical_features_13: Option<vk::PhysicalDeviceVulkan13Features<'static>>,
    // extension feature structs chained into `features2()`
    pub extra_features: Vec<Box<dyn ExtendsDeviceFeatures2Debug>>,
    // extensions requested/supported, with their spec versions
    pub device_extensions: Vec<vk::ExtensionProperties>,
}
impl PhysicalDeviceFeatures {
    /// Sets the Vulkan API version associated with this feature set.
    fn version(self, version: u32) -> Self {
        Self { version, ..self }
    }

    /// Default feature set with the 1.1/1.2/1.3 structs present (all flags
    /// off) so that `features2()` chains them for querying or enabling.
    fn all_default() -> Self {
        Self::default()
            .features11(Default::default())
            .features12(Default::default())
            .features13(Default::default())
    }

    /// Queries everything `pdev` supports: core 1.0–1.3 features plus the
    /// complete device extension list.
    fn query(instance: &ash::Instance, pdev: vk::PhysicalDevice) -> Result<Self> {
        let mut this = Self::all_default();
        let mut features2 = this.features2();
        let features = unsafe {
            // fills the chained 1.1/1.2/1.3 structs in place via the pNext chain
            instance.get_physical_device_features2(pdev, &mut features2);
            features2.features
        };
        this = this.features10(features);
        let extensions = unsafe { instance.enumerate_device_extension_properties(pdev)? };
        this = this.device_extensions(extensions);
        Ok(this)
    }

    /// Replaces the core Vulkan 1.0 feature flags.
    fn features10(self, physical_features_10: vk::PhysicalDeviceFeatures) -> Self {
        Self {
            physical_features_10,
            ..self
        }
    }

    /// Replaces the Vulkan 1.1 feature struct.
    fn features11(self, physical_features_11: vk::PhysicalDeviceVulkan11Features<'static>) -> Self {
        Self {
            physical_features_11: Some(physical_features_11),
            ..self
        }
    }

    /// Replaces the Vulkan 1.2 feature struct.
    fn features12(self, physical_features_12: vk::PhysicalDeviceVulkan12Features<'static>) -> Self {
        Self {
            physical_features_12: Some(physical_features_12),
            ..self
        }
    }

    /// Replaces the Vulkan 1.3 feature struct.
    fn features13(self, physical_features_13: vk::PhysicalDeviceVulkan13Features<'static>) -> Self {
        Self {
            physical_features_13: Some(physical_features_13),
            ..self
        }
    }

    /// Replaces the extension list wholesale.
    fn device_extensions(self, device_extensions: Vec<vk::ExtensionProperties>) -> Self {
        Self {
            device_extensions,
            ..self
        }
    }

    /// Requests one extension that has no associated feature struct.
    #[allow(dead_code)]
    fn with_extension2(mut self, ext: vk::ExtensionProperties) -> Self {
        self.device_extensions.push(ext);
        self
    }

    /// Requests several extensions that have no associated feature structs.
    fn with_extensions2<I: IntoIterator<Item = vk::ExtensionProperties>>(
        mut self,
        exts: I,
    ) -> Self {
        self.device_extensions.extend(exts);
        self
    }

    /// Requests an extension together with its feature struct; the struct is
    /// chained into `features2()` so the extension's features get enabled.
    fn with_extension<F>(mut self, ext: vk::ExtensionProperties, features: F) -> Self
    where
        F: ExtendsDeviceFeatures2Debug + 'static,
    {
        self.extra_features.push(Box::new(features));
        self.device_extensions.push(ext);
        self
    }

    /// Builds a `PhysicalDeviceFeatures2` whose pNext chain references the
    /// 1.1/1.2/1.3 structs (when present) and every extra feature struct.
    /// The chain borrows `self` mutably so queries can fill it in place.
    fn features2(&mut self) -> vk::PhysicalDeviceFeatures2<'_> {
        let mut features2 =
            vk::PhysicalDeviceFeatures2::default().features(self.physical_features_10);
        if let Some(ref mut features11) = self.physical_features_11 {
            features2 = features2.push_next(features11);
        }
        if let Some(ref mut features12) = self.physical_features_12 {
            features2 = features2.push_next(features12);
        }
        if let Some(ref mut features13) = self.physical_features_13 {
            features2 = features2.push_next(features13);
        }
        for features in self.extra_features.iter_mut() {
            features2 = features2.push_next(Box::as_mut(features));
        }
        features2
    }

    /// True if this set lists an extension with the same name as `e` and a
    /// spec version of at least `e.spec_version`.
    fn supports_extension(&self, e: &vk::ExtensionProperties) -> bool {
        self.device_extensions.iter().any(|ext| {
            ext.extension_name_as_c_str() == e.extension_name_as_c_str()
                && ext.spec_version >= e.spec_version
        })
    }

    /// True if `device` (a queried feature set) satisfies every extension and
    /// feature flag requested in `self`.
    ///
    /// Extensions are matched by name with the device's spec version allowed
    /// to be newer, mirroring `supports_extension`. (Previously this required
    /// an exact spec-version match, wrongly rejecting devices that report a
    /// newer version of a requested extension.) Feature structs that are
    /// `None` on either side are treated as compatible.
    fn compatible_with(&self, device: &Self) -> bool {
        let supports_extensions = self
            .device_extensions
            .iter()
            .all(|ext| device.supports_extension(ext));
        supports_extensions
            && utils::eq_device_features10(
                &utils::bitand_device_features10(
                    &self.physical_features_10,
                    &device.physical_features_10,
                ),
                &self.physical_features_10,
            )
            && self
                .physical_features_11
                .zip(device.physical_features_11)
                .map(|(a, b)| {
                    utils::eq_device_features11(&utils::bitand_device_features11(&a, &b), &a)
                })
                .unwrap_or(true)
            && self
                .physical_features_12
                .zip(device.physical_features_12)
                .map(|(a, b)| {
                    utils::eq_device_features12(&utils::bitand_device_features12(&a, &b), &a)
                })
                .unwrap_or(true)
            && self
                .physical_features_13
                .zip(device.physical_features_13)
                .map(|(a, b)| {
                    utils::eq_device_features13(&utils::bitand_device_features13(&a, &b), &a)
                })
                .unwrap_or(true)
    }
}
/// Queried device properties: core 1.0 properties plus the chained
/// 1.1/1.2/1.3 property structs and any extension property structs.
#[derive(Debug, Default)]
struct PhysicalDeviceProperties {
    base: vk::PhysicalDeviceProperties,
    vk11: vk::PhysicalDeviceVulkan11Properties<'static>,
    vk12: vk::PhysicalDeviceVulkan12Properties<'static>,
    vk13: vk::PhysicalDeviceVulkan13Properties<'static>,
    // extension property structs chained into `properties2()`
    extra_properties: Vec<Box<dyn ExtendsDeviceProperties2Debug>>,
}
impl PhysicalDeviceProperties {
    /// Queries `pdev`, filling `base` and — through the pNext chain — the
    /// 1.1/1.2/1.3 and extra property structs in place.
    fn query(&mut self, instance: &ash::Instance, pdev: vk::PhysicalDevice) {
        let mut chain = self.properties2();
        unsafe {
            instance.get_physical_device_properties2(pdev, &mut chain);
            self.base = chain.properties;
        }
    }

    /// Replaces the extension property structs wholesale (builder style).
    fn extra_properties(
        mut self,
        extra_properties: Vec<Box<dyn ExtendsDeviceProperties2Debug>>,
    ) -> Self {
        self.extra_properties = extra_properties;
        self
    }

    /// Adds one extension property struct to the query chain.
    #[allow(dead_code)]
    fn with_properties<F>(mut self, properties: F) -> Self
    where
        F: ExtendsDeviceProperties2Debug + 'static,
    {
        self.extra_properties.push(Box::new(properties));
        self
    }

    /// Builds a `PhysicalDeviceProperties2` whose pNext chain points at the
    /// 1.1/1.2/1.3 structs and every extra property struct.
    fn properties2(&mut self) -> vk::PhysicalDeviceProperties2<'_> {
        let mut chain = vk::PhysicalDeviceProperties2::default()
            .properties(self.base)
            .push_next(&mut self.vk11)
            .push_next(&mut self.vk12)
            .push_next(&mut self.vk13);
        for extra in self.extra_properties.iter_mut() {
            chain = chain.push_next(Box::as_mut(extra));
        }
        chain
    }
}
/// A chosen physical device together with its queue-family selection and
/// queried properties.
#[derive(Debug)]
pub struct PhysicalDevice {
    pdev: vk::PhysicalDevice,
    queue_families: DeviceQueueFamilies,
    properties: PhysicalDeviceProperties,
}
/// Instance-level Vulkan state: loaded entry points, the instance itself,
/// and the debug-utils and surface instance-extension tables.
pub struct Instance {
    entry: Entry,
    instance: ash::Instance,
    debug_utils: ash::ext::debug_utils::Instance,
    debug_utils_messenger: vk::DebugUtilsMessengerEXT,
    surface: ash::khr::surface::Instance,
}
impl Drop for Instance {
    fn drop(&mut self) {
        // only the messenger is destroyed explicitly here;
        // NOTE(review): the VkInstance itself does not appear to be destroyed
        // in this impl — confirm it is destroyed elsewhere (or intentionally leaked)
        unsafe {
            self.debug_utils
                .destroy_debug_utils_messenger(self.debug_utils_messenger, None);
        }
    }
}
// borrow the raw ash instance for APIs that take `impl AsRef<ash::Instance>`
impl AsRef<ash::Instance> for Instance {
    fn as_ref(&self) -> &ash::Instance {
        &self.instance
    }
}
// borrow the VK_KHR_surface function table
impl AsRef<ash::khr::surface::Instance> for Instance {
    fn as_ref(&self) -> &ash::khr::surface::Instance {
        &self.surface
    }
}
/// Mutex-guarded raw `vk::SwapchainKHR`, so the handle can be read or
/// replaced behind shared references.
#[derive(Debug)]
struct SwapchainHandle(Mutex<vk::SwapchainKHR>);

impl SwapchainHandle {
    /// Wraps an existing raw swapchain handle.
    ///
    /// # Safety
    /// `swapchain` must be a valid swapchain handle owned by the caller.
    unsafe fn from_handle(swapchain: vk::SwapchainKHR) -> SwapchainHandle {
        Self(Mutex::new(swapchain))
    }

    /// Locks the handle for the lifetime of the returned guard.
    fn lock(&self) -> MutexGuard<'_, vk::SwapchainKHR> {
        self.0.lock()
    }

    /// Runs `f` with the raw handle while the lock is held.
    fn with_locked<T, F: FnOnce(vk::SwapchainKHR) -> T>(&self, f: F) -> T {
        f(*self.0.lock())
    }
}
/// A live swapchain plus everything needed to drive it: the presented-to
/// surface, cached images/views, and per-frame synchronization objects.
#[derive(Debug)]
pub struct Swapchain {
    device: Device,
    // has a strong ref to the surface because the surface may not outlive the swapchain
    surface: Arc<Surface>,
    swapchain: SwapchainHandle,
    #[allow(unused)]
    present_mode: vk::PresentModeKHR,
    #[allow(unused)]
    color_space: vk::ColorSpaceKHR,
    // pixel format of the swapchain images
    format: vk::Format,
    images: Vec<Arc<images::Image>>,
    // one cached view per image, indexed in step with `images`
    image_views: Vec<vk::ImageView>,
    extent: vk::Extent2D,
    // the surface's minimum image count; in-flight frame count is derived as
    // images.len() - min_image_count (see max_in_flight_images)
    min_image_count: u32,
    // sync objects:
    // we need two semaphores per each image, one acquire-semaphore and one release-semaphore.
    // semaphores must be unique to each frame and cannot be reused per swapchain.
    acquire_semaphores: Vec<vk::Semaphore>,
    release_semaphores: Vec<vk::Semaphore>,
    // one fence per in-flight frame, to synchronize image acquisition
    fences: Vec<Arc<sync::Fence>>,
    // next in-flight frame slot to use, wraps modulo max_in_flight_images()
    current_frame: AtomicU32,
    // for khr_present_id/khr_present_wait
    present_id: AtomicU64,
}
impl Drop for Swapchain {
    fn drop(&mut self) {
        unsafe {
            // wait until the present queue has drained before destroying
            // anything the GPU may still be using (best effort: error ignored)
            _ = self.device.wait_queue_idle(self.device.present_queue());
            tracing::debug!("dropping swapchain {:?}", self.swapchain);
            // destroy the views first, then the swapchain that owns the images
            for view in &self.image_views {
                self.device.dev().destroy_image_view(*view, None);
            }
            self.swapchain.with_locked(|swapchain| {
                self.device.swapchain().destroy_swapchain(swapchain, None)
            });
            // finally, the per-frame semaphores
            for &semaphore in self
                .acquire_semaphores
                .iter()
                .chain(&self.release_semaphores)
            {
                self.device.dev().destroy_semaphore(semaphore, None);
            }
        }
    }
}
/// Picks the swapchain extent: the surface's `current_extent` when the
/// surface reports one, otherwise `fallback` clamped into the surface's
/// allowed range. (A `current_extent.width` of `u32::MAX` is Vulkan's
/// convention for "extent is determined by the swapchain".)
fn current_extent_or_clamped(
    caps: &vk::SurfaceCapabilitiesKHR,
    fallback: vk::Extent2D,
) -> vk::Extent2D {
    if caps.current_extent.width != u32::MAX {
        return caps.current_extent;
    }
    let min = caps.min_image_extent;
    let max = caps.max_image_extent;
    vk::Extent2D {
        width: fallback.width.clamp(min.width, max.width),
        height: fallback.height.clamp(min.height, max.height),
    }
}
/// Parameters chosen from the surface's capabilities, used to create a
/// swapchain (see `get_swapchain_params_from_surface`).
struct SwapchainParams {
    present_mode: vk::PresentModeKHR,
    format: vk::Format,
    color_space: vk::ColorSpaceKHR,
    /// the number of images to request from the device
    image_count: u32,
    /// the minimum number of images the surface permits
    min_image_count: u32,
    extent: vk::Extent2D,
}
/// One acquired swapchain image plus the semaphores that guard its use.
#[derive(Debug)]
#[must_use = "This struct represents an acquired image from the swapchain and
must be presented in order to free resources on the device."]
pub struct SwapchainFrame {
    // keeps the swapchain alive while the frame is outstanding
    pub swapchain: Arc<Swapchain>,
    // index of the image within the swapchain
    pub index: u32,
    pub image: Arc<images::Image>,
    pub format: vk::Format,
    pub view: vk::ImageView,
    // signaled when the image is ready to be rendered to
    pub acquire: vk::Semaphore,
    // to be signaled by the renderer before presenting
    pub release: vk::Semaphore,
}
impl Eq for SwapchainFrame {}
impl PartialEq for SwapchainFrame {
    // frames are identified by their image slot, not by their sync objects
    fn eq(&self, other: &Self) -> bool {
        self.index == other.index && self.image == other.image
    }
}
impl SwapchainFrame {
    /// Presents this frame on its swapchain, optionally waiting on `wait`
    /// (consumes the frame; see `Swapchain::present`).
    fn present(self, wait: Option<vk::Semaphore>) -> Result<()> {
        self.swapchain.clone().present(self, wait)
    }
}
impl Swapchain {
    // extra images requested beyond the surface's minimum, to allow frames in flight
    const PREFERRED_IMAGES_IN_FLIGHT: u32 = 3;
    /// Derives swapchain creation parameters from the surface's capabilities:
    /// present mode (MAILBOX preferred, FIFO fallback — FIFO is always
    /// available), surface format (8-bit RGBA/BGRA UNORM + sRGB nonlinear
    /// preferred), image count, and extent.
    fn get_swapchain_params_from_surface(
        instance: &Arc<Instance>,
        surface: vk::SurfaceKHR,
        pdev: vk::PhysicalDevice,
        requested_extent: Option<vk::Extent2D>,
    ) -> Result<SwapchainParams> {
        let caps = unsafe {
            instance
                .surface
                .get_physical_device_surface_capabilities(pdev, surface)?
        };
        let formats = unsafe {
            instance
                .surface
                .get_physical_device_surface_formats(pdev, surface)?
        };
        let present_modes = unsafe {
            instance
                .surface
                .get_physical_device_surface_present_modes(pdev, surface)?
        };
        let present_mode = present_modes
            .iter()
            .find(|&mode| mode == &vk::PresentModeKHR::MAILBOX)
            .cloned()
            .unwrap_or(vk::PresentModeKHR::FIFO);
        // score: matching format is worth 10, matching color space 1, so a
        // UNORM format always beats a merely-sRGB one
        let format = formats
            .iter()
            .max_by_key(|&&format| {
                let is_rgba_unorm = format.format == vk::Format::R8G8B8A8_UNORM
                    || format.format == vk::Format::B8G8R8A8_UNORM;
                let is_srgb = format.color_space == vk::ColorSpaceKHR::SRGB_NONLINEAR;
                is_rgba_unorm as u8 * 10 + is_srgb as u8
            })
            .or(formats.first())
            .cloned()
            .expect("no surface format available!");
        // 0 here means no limit
        let max_image_count = core::num::NonZero::new(caps.max_image_count)
            .map(|n| n.get())
            .unwrap_or(u32::MAX);
        // we want PREFERRED_IMAGES_IN_FLIGHT images acquired at the same time,
        let image_count =
            (caps.min_image_count + Self::PREFERRED_IMAGES_IN_FLIGHT).min(max_image_count);
        let extent = current_extent_or_clamped(
            &caps,
            requested_extent.unwrap_or(vk::Extent2D::default().width(1).height(1)),
        );
        Ok(SwapchainParams {
            present_mode,
            format: format.format,
            color_space: format.color_space,
            image_count,
            extent,
            min_image_count: caps.min_image_count,
        })
    }
    /// Creates a fresh swapchain (no predecessor) for `surface` at `extent`.
    pub fn new(
        device: Device,
        surface: Arc<Surface>,
        pdev: vk::PhysicalDevice,
        extent: vk::Extent2D,
    ) -> Result<Self> {
        Self::create(device, surface, pdev, Some(extent), None)
    }
    /// Creates a swapchain (optionally replacing `old_swapchain`), wraps its
    /// images, caches one view per image, and allocates the per-frame sync
    /// objects (acquire/release semaphores and fences).
    fn create(
        device: Device,
        surface: Arc<Surface>,
        pdev: vk::PhysicalDevice,
        extent: Option<vk::Extent2D>,
        old_swapchain: Option<&SwapchainHandle>,
    ) -> Result<Self> {
        let SwapchainParams {
            present_mode,
            format,
            color_space,
            image_count,
            min_image_count,
            extent,
        } = Self::get_swapchain_params_from_surface(
            device.instance(),
            surface.surface,
            pdev,
            extent,
        )?;
        let (swapchain, images) = {
            // keep the old handle locked while it is passed as oldSwapchain
            let lock = old_swapchain.as_ref().map(|handle| handle.lock());
            Self::create_vkswapchainkhr(
                &device,
                surface.surface,
                &device.queue_families().swapchain_family_indices(),
                extent,
                lock.as_ref().map(|lock| **lock),
                present_mode,
                format,
                color_space,
                image_count,
            )
        }?;
        // wrap raw images; the inspect pre-creates each image's view
        // (presumably caching it inside Image — result is discarded here)
        let images = images
            .iter()
            .enumerate()
            .map(|(i, image)| unsafe {
                images::Image::from_swapchain_image(
                    device.clone(),
                    *image,
                    Some(format!("swapchain-{swapchain:?}-image-{i}").into()),
                    vk::Extent3D {
                        width: extent.width,
                        height: extent.height,
                        depth: 1,
                    },
                    format,
                )
                .inspect(|img| {
                    img.get_view(images::ImageViewDesc {
                        name: Some(format!("swapchain-{swapchain:?}-image-{i}-view").into()),
                        kind: vk::ImageViewType::TYPE_2D,
                        format,
                        aspect: vk::ImageAspectFlags::COLOR,
                        ..Default::default()
                    });
                })
                .map(|img| Arc::new(img))
            })
            .collect::<VkResult<Vec<_>>>()?;
        // fetch the (cached) view handles, same descriptor as above
        let image_views = images
            .iter()
            .enumerate()
            .map(|(i, image)| {
                image.get_view(images::ImageViewDesc {
                    name: Some(format!("swapchain-{swapchain:?}-image-{i}-view").into()),
                    kind: vk::ImageViewType::TYPE_2D,
                    format,
                    aspect: vk::ImageAspectFlags::COLOR,
                    ..Default::default()
                })
            })
            .collect::<VkResult<Vec<_>>>()?;
        let num_images = images.len() as u32;
        // NOTE(review): if the device returns exactly min_image_count images
        // this is 0, and acquire_image's modulo would panic — confirm the
        // surface always grants extra images
        let inflight_frames = num_images - min_image_count;
        // one acquire semaphore per in-flight frame, debug-named in debug builds
        let acquire_semaphores = {
            (0..inflight_frames)
                .map(|i| unsafe {
                    device
                        .dev()
                        .create_semaphore(&vk::SemaphoreCreateInfo::default(), None)
                        .inspect(|r| {
                            #[cfg(debug_assertions)]
                            {
                                device
                                    .debug_name_object(
                                        *r,
                                        &format!(
                                            "semaphore-{:x}_{i}-acquire",
                                            swapchain.0.lock().as_raw()
                                        ),
                                    )
                                    .unwrap();
                            }
                        })
                })
                .collect::<VkResult<Vec<_>>>()?
        };
        // matching release semaphores
        let release_semaphores = {
            (0..inflight_frames)
                .map(|i| unsafe {
                    device
                        .dev()
                        .create_semaphore(&vk::SemaphoreCreateInfo::default(), None)
                        .inspect(|r| {
                            #[cfg(debug_assertions)]
                            {
                                device
                                    .debug_name_object(
                                        *r,
                                        &format!(
                                            "semaphore-{:x}_{i}-release",
                                            swapchain.0.lock().as_raw()
                                        ),
                                    )
                                    .unwrap();
                            }
                        })
                })
                .collect::<VkResult<Vec<_>>>()?
        };
        // one fence per in-flight frame, waited on in acquire_image
        let fences = {
            (0..inflight_frames)
                .map(|i| {
                    Ok(Arc::new(sync::Fence::create(device.clone()).inspect(
                        |r| {
                            #[cfg(debug_assertions)]
                            {
                                device
                                    .debug_name_object(
                                        r.fence(),
                                        &format!("fence-{:x}_{i}", swapchain.0.lock().as_raw()),
                                    )
                                    .unwrap();
                            }
                        },
                    )?))
                })
                .collect::<VkResult<Vec<_>>>()?
        };
        tracing::trace!("fences: {fences:?}");
        Ok(Self {
            device,
            surface,
            swapchain,
            present_mode,
            color_space,
            format,
            images,
            image_views,
            min_image_count,
            extent,
            acquire_semaphores,
            release_semaphores,
            fences,
            current_frame: AtomicU32::new(0),
            // present ids must start at 1 (0 is reserved by the extension)
            present_id: AtomicU64::new(1),
        })
    }
    /// How many frames may be in flight at once: the images beyond the
    /// surface-required minimum.
    pub fn max_in_flight_images(&self) -> u32 {
        self.num_images() - self.min_image_count
    }
    /// Total number of images in the swapchain.
    pub fn num_images(&self) -> u32 {
        self.images.len() as u32
    }
    /// Builds a replacement swapchain on the same surface/device, passing the
    /// current handle as `oldSwapchain`; `None` extent keeps the surface size.
    fn recreate(&self, extent: Option<vk::Extent2D>) -> Result<Self> {
        Self::create(
            self.device.clone(),
            self.surface.clone(),
            self.device.phy(),
            extent,
            Some(&self.swapchain),
        )
    }
    /// returns a future yielding the frame, and true if the swapchain is
    /// suboptimal and should be recreated.
    fn acquire_image(
        self: Arc<Self>,
    ) -> impl std::future::Future<Output = VkResult<(SwapchainFrame, bool)>> {
        // claim the next in-flight slot; the closure never returns None so
        // unwrap cannot fail. NOTE(review): Release/Relaxed orderings here —
        // confirm intended, slot claim only needs atomicity of the counter
        let frame = self
            .current_frame
            .fetch_update(
                std::sync::atomic::Ordering::Release,
                std::sync::atomic::Ordering::Relaxed,
                |i| Some((i + 1) % self.max_in_flight_images()),
            )
            .unwrap() as usize;
        tracing::trace!(frame, "acquiring image for frame {frame}");
        async move {
            let fence = self.fences[frame].clone();
            let acquire = self.acquire_semaphores[frame];
            let release = self.release_semaphores[frame];
            // spawn on threadpool because it might block.
            let (idx, suboptimal) = smol::unblock({
                let this = self.clone();
                let fence = fence.clone();
                move || unsafe {
                    this.swapchain.with_locked(|swapchain| {
                        this.device.swapchain().acquire_next_image(
                            swapchain,
                            u64::MAX,
                            acquire,
                            fence.fence(),
                        )
                    })
                }
            })
            .await?;
            // wait for image to become available.
            sync::FenceFuture::new(fence.clone()).await;
            let idx = idx as usize;
            let image = self.images[idx].clone();
            let view = self.image_views[idx];
            Ok((
                SwapchainFrame {
                    index: idx as u32,
                    swapchain: self.clone(),
                    format: self.format,
                    image,
                    view,
                    acquire,
                    release,
                },
                suboptimal,
            ))
        }
    }
    /// Queues `frame` for presentation, waiting on `wait` (the renderer's
    /// release semaphore) if provided. Locks the swapchain handle first, then
    /// the present queue, for the duration of the submit.
    fn present(&self, frame: SwapchainFrame, wait: Option<vk::Semaphore>) -> Result<()> {
        let swpchain = self.swapchain.lock();
        let queue = self.device.present_queue().lock();
        let wait_semaphores = wait
            .as_ref()
            .map(|sema| core::slice::from_ref(sema))
            .unwrap_or_default();
        // TODO: make this optional for devices with no support for present_wait/present_id
        let present_id = self
            .present_id
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        let mut present_id =
            vk::PresentIdKHR::default().present_ids(core::slice::from_ref(&present_id));
        let present_info = vk::PresentInfoKHR::default()
            .image_indices(core::slice::from_ref(&frame.index))
            .swapchains(core::slice::from_ref(&swpchain))
            .wait_semaphores(wait_semaphores)
            .push_next(&mut present_id);
        // call winits pre_present_notify here
        unsafe {
            self.device
                .swapchain()
                .queue_present(*queue, &present_info)?;
        }
        Ok(())
    }
    /// Creates the raw `VkSwapchainKHR` and fetches its images. Sharing mode
    /// is EXCLUSIVE when a single queue family uses the swapchain, else
    /// CONCURRENT across `queue_families`. In debug builds the handle gets a
    /// debug-utils name derived from the surface and a global counter.
    fn create_vkswapchainkhr(
        device: &Device,
        surface: vk::SurfaceKHR,
        queue_families: &[u32],
        image_extent: vk::Extent2D,
        old_swapchain: Option<vk::SwapchainKHR>,
        present_mode: vk::PresentModeKHR,
        image_format: vk::Format,
        image_color_space: vk::ColorSpaceKHR,
        image_count: u32,
    ) -> Result<(SwapchainHandle, Vec<vk::Image>)> {
        let create_info = vk::SwapchainCreateInfoKHR::default()
            .surface(surface)
            .present_mode(present_mode)
            .image_color_space(image_color_space)
            .image_format(image_format)
            .min_image_count(image_count)
            .image_usage(vk::ImageUsageFlags::TRANSFER_DST | vk::ImageUsageFlags::COLOR_ATTACHMENT)
            .image_array_layers(1)
            .image_extent(image_extent)
            .image_sharing_mode(if queue_families.len() <= 1 {
                vk::SharingMode::EXCLUSIVE
            } else {
                vk::SharingMode::CONCURRENT
            })
            .queue_family_indices(queue_families)
            .pre_transform(vk::SurfaceTransformFlagsKHR::IDENTITY)
            .composite_alpha(vk::CompositeAlphaFlagsKHR::OPAQUE)
            .old_swapchain(old_swapchain.unwrap_or(vk::SwapchainKHR::null()))
            .clipped(true);
        let (swapchain, images) = unsafe {
            let swapchain = device.swapchain().create_swapchain(&create_info, None)?;
            #[cfg(debug_assertions)]
            {
                let name = CString::new(format!(
                    "swapchain-{}_{}",
                    surface.as_raw(),
                    SWAPCHAIN_COUNT.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
                ))
                .unwrap();
                // naming is best-effort; failure is ignored
                _ = device.debug_utils().set_debug_utils_object_name(
                    &vk::DebugUtilsObjectNameInfoEXT::default()
                        .object_handle(swapchain)
                        .object_name(&name),
                );
            }
            let images = device.swapchain().get_swapchain_images(swapchain)?;
            (SwapchainHandle::from_handle(swapchain), images)
        };
        Ok((swapchain, images))
    }
}
// global counter used only to build unique debug names for swapchains
static SWAPCHAIN_COUNT: AtomicU64 = AtomicU64::new(0);
/// A window (or headless) surface; holds the instance alive because the
/// surface must be destroyed through it.
pub struct Surface {
    instance: Arc<Instance>,
    surface: vk::SurfaceKHR,
}
impl Debug for Surface {
    // omit `instance` — only the raw handle is interesting in logs
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Surface")
            .field("surface", &self.surface)
            .finish()
    }
}
impl Surface {
    /// Creates an off-screen surface via VK_EXT_headless_surface (no window).
    #[allow(dead_code)]
    fn headless(instance: Arc<Instance>) -> Result<Self> {
        unsafe {
            let headless_instance =
                ash::ext::headless_surface::Instance::new(&instance.entry, &instance.instance);
            let surface = headless_instance
                .create_headless_surface(&vk::HeadlessSurfaceCreateInfoEXT::default(), None)?;
            Ok(Self { instance, surface })
        }
    }
    /// Creates a surface for a window, dispatching on the platform's
    /// raw display/window handles via `ash_window`.
    fn create(
        instance: Arc<Instance>,
        display_handle: RawDisplayHandle,
        window_handle: raw_window_handle::RawWindowHandle,
    ) -> Result<Self> {
        let surface = unsafe {
            ash_window::create_surface(
                &instance.entry,
                &instance.instance,
                display_handle,
                window_handle,
                None,
            )?
        };
        Ok(Self { instance, surface })
    }
}
impl Drop for Surface {
    // destroy through the owning instance's VK_KHR_surface table
    fn drop(&mut self) {
        unsafe {
            self.instance.surface.destroy_surface(self.surface, None);
        }
    }
}
/// Memoizes samplers by descriptor so identical descriptors share a single
/// `vk::Sampler`.
struct SamplerCache {
    device: Device,
    samplers: HashMap<pipeline::SamplerDesc, pipeline::Sampler>,
}

impl SamplerCache {
    /// Creates an empty cache bound to `device`.
    pub fn new(device: Device) -> SamplerCache {
        Self {
            samplers: HashMap::new(),
            device,
        }
    }

    /// Returns the sampler for `desc`, creating and caching it on first use.
    pub fn get_sampler(&mut self, desc: pipeline::SamplerDesc) -> VkResult<vk::Sampler> {
        use std::collections::hash_map::Entry;
        match self.samplers.entry(desc) {
            Entry::Occupied(occupied) => Ok(occupied.get().handle()),
            Entry::Vacant(vacant) => {
                let sampler = pipeline::Sampler::new(self.device.clone(), vacant.key())?;
                Ok(vacant.insert(sampler).handle())
            }
        }
    }
}
/// Root renderer context: the instance, the chosen logical device, and the
/// shared sampler cache.
pub struct Vulkan {
    instance: Arc<Instance>,
    device: Device,
    samplers: SamplerCache,
}
impl Drop for Vulkan {
    fn drop(&mut self) {
        // drain all GPU work before members (device, instance) are dropped;
        // best effort — the error is deliberately ignored
        unsafe {
            _ = self.device.dev().device_wait_idle();
        }
    }
}
impl Vulkan {
const VALIDATION_LAYER_NAME: &'static core::ffi::CStr = c"VK_LAYER_KHRONOS_validation";
#[allow(unused)]
const RENDERDOC_LAYER_NAME: &'static core::ffi::CStr = c"VK_LAYER_RENDERDOC_Capture";
#[allow(unused)]
const NSIGHT_TRACE_LAYER_NAME: &'static core::ffi::CStr =
c"VK_LAYER_NV_GPU_Trace_release_public_2021_4_2";
#[allow(unused)]
const NSIGHT_INTERCEPTION_LAYER_NAME: &'static core::ffi::CStr =
c"VK_LAYER_NV_nomad_release_public_2021_4_2";
    /// Bootstraps the whole Vulkan stack: loads the entry points, creates the
    /// instance (with validation + debug-utils in debug builds), declares the
    /// required device features/extensions, picks a physical device, and
    /// creates the logical device.
    ///
    /// `display_handle` is `None` for headless operation; extra layers and
    /// instance extensions may be injected by the caller.
    pub fn new(
        app_name: &str,
        instance_layers: &[&CStr],
        instance_extensions: &[&CStr],
        display_handle: Option<RawDisplayHandle>,
    ) -> Result<Self> {
        let entry = unsafe { ash::Entry::load()? };
        let app_name = CString::new(app_name)?;
        let app_info = vk::ApplicationInfo::default()
            .api_version(vk::make_api_version(0, 1, 3, 0))
            .application_name(&app_name)
            .engine_name(c"PrimalGame")
            .application_version(0)
            .engine_version(0);
        // TODO: make this a flag somewhere to enable or disable validation layers
        // DEBUG LAYERS/VALIDATION
        // opt in to best-practices and synchronization validation
        let validation_settings = [
            vk::LayerSettingEXT::default()
                .layer_name(Self::VALIDATION_LAYER_NAME)
                .setting_name(c"VK_KHRONOS_VALIDATION_VALIDATE_BEST_PRACTICES")
                .ty(vk::LayerSettingTypeEXT::BOOL32)
                .values(&[1]),
            vk::LayerSettingEXT::default()
                .layer_name(Self::VALIDATION_LAYER_NAME)
                .setting_name(c"VK_KHRONOS_VALIDATION_VALIDATE_BEST_PRACTICES_AMD")
                .ty(vk::LayerSettingTypeEXT::BOOL32)
                .values(&[1]),
            vk::LayerSettingEXT::default()
                .layer_name(Self::VALIDATION_LAYER_NAME)
                .setting_name(c"VK_KHRONOS_VALIDATION_VALIDATE_SYNC")
                .ty(vk::LayerSettingTypeEXT::BOOL32)
                .values(&[1]),
        ];
        let mut validation_info =
            vk::LayerSettingsCreateInfoEXT::default().settings(&validation_settings);
        // requested layers + the validation layer in debug builds
        let layers = Self::get_layers(
            &entry,
            instance_layers
                .into_iter()
                .chain(&[
                    #[cfg(debug_assertions)]
                    Self::VALIDATION_LAYER_NAME,
                ])
                .cloned(),
        )
        .unwrap();
        // requested extensions + debug-utils and layer-settings; unsupported
        // ones are reported, not fatal
        let (extensions, unsupported_extensions) = Self::get_extensions(
            &entry,
            &layers,
            instance_extensions
                .into_iter()
                .chain([ash::ext::debug_utils::NAME, ash::ext::layer_settings::NAME].iter())
                .cloned(),
            display_handle,
        )?;
        if !unsupported_extensions.is_empty() {
            tracing::error!(
                "extensions were requested but not supported by instance: {:?}",
                unsupported_extensions
            );
        }
        let layers = VkNameList::from_strs(&layers);
        let extensions = VkNameList::from_strs(&extensions);
        let create_info = vk::InstanceCreateInfo::default()
            .application_info(&app_info)
            .enabled_extension_names(&extensions.names)
            .enabled_layer_names(&layers.names)
            .push_next(&mut validation_info);
        let instance = unsafe { entry.create_instance(&create_info, None)? };
        // route validation output through `debug::debug_callback`
        let debug_info = vk::DebugUtilsMessengerCreateInfoEXT::default()
            .message_severity(
                vk::DebugUtilsMessageSeverityFlagsEXT::ERROR
                    | vk::DebugUtilsMessageSeverityFlagsEXT::WARNING
                    | vk::DebugUtilsMessageSeverityFlagsEXT::INFO,
            )
            .message_type(
                vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
                    | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION
                    | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE,
            )
            .pfn_user_callback(Some(debug::debug_callback));
        let debug_utils_instance = ash::ext::debug_utils::Instance::new(&entry, &instance);
        let debug_utils_messenger =
            unsafe { debug_utils_instance.create_debug_utils_messenger(&debug_info, None)? };
        let surface_instance = ash::khr::surface::Instance::new(&entry, &instance);
        let instance = Arc::new(Instance {
            instance,
            debug_utils: debug_utils_instance,
            debug_utils_messenger,
            surface: surface_instance,
            entry,
        });
        // features/extensions the renderer requires from the device:
        // bindless-style descriptor indexing, dynamic rendering, sync2,
        // mesh shaders, present-id/wait, u8 indices, swapchain, spirv-1.4
        let features = PhysicalDeviceFeatures::all_default()
            .version(vk::make_api_version(0, 1, 3, 0))
            .features10(
                vk::PhysicalDeviceFeatures::default()
                    .sampler_anisotropy(true)
                    .multi_draw_indirect(true),
            )
            .features11(vk::PhysicalDeviceVulkan11Features::default().shader_draw_parameters(true))
            .features12(
                vk::PhysicalDeviceVulkan12Features::default()
                    .shader_int8(true)
                    .runtime_descriptor_array(true)
                    .descriptor_binding_partially_bound(true)
                    .shader_sampled_image_array_non_uniform_indexing(true)
                    .descriptor_binding_sampled_image_update_after_bind(true)
                    .storage_buffer8_bit_access(true),
            )
            .features13(
                vk::PhysicalDeviceVulkan13Features::default()
                    .dynamic_rendering(true)
                    .maintenance4(true)
                    .synchronization2(true),
            )
            .with_extension(
                make_extention_properties(
                    ash::ext::mesh_shader::NAME,
                    ash::ext::mesh_shader::SPEC_VERSION,
                ),
                vk::PhysicalDeviceMeshShaderFeaturesEXT::default()
                    .mesh_shader(true)
                    .task_shader(true),
            )
            .with_extension(
                make_extention_properties(
                    ash::khr::present_id::NAME,
                    ash::khr::present_id::SPEC_VERSION,
                ),
                vk::PhysicalDevicePresentIdFeaturesKHR::default().present_id(true),
            )
            .with_extension(
                make_extention_properties(
                    ash::khr::present_wait::NAME,
                    ash::khr::present_wait::SPEC_VERSION,
                ),
                vk::PhysicalDevicePresentWaitFeaturesKHR::default().present_wait(true),
            )
            .with_extension(
                make_extention_properties(
                    ash::ext::index_type_uint8::NAME,
                    ash::ext::index_type_uint8::SPEC_VERSION,
                ),
                vk::PhysicalDeviceIndexTypeUint8FeaturesEXT::default().index_type_uint8(true),
            )
            .with_extensions2([
                make_extention_properties(khr::swapchain::NAME, khr::swapchain::SPEC_VERSION),
                make_extention_properties(khr::spirv_1_4::NAME, khr::spirv_1_4::SPEC_VERSION),
            ]);
        // Consider this: switching physical device in game?
        // anything above this point is device agnostic, everything below would have to be recreated
        // additionally, pdev would have to be derived from a device and not a scoring function.
        let pdev = Self::choose_physical_device(
            &instance,
            display_handle,
            &features,
            vec![Box::new(
                vk::PhysicalDeviceMeshShaderPropertiesEXT::default(),
            )],
        )?;
        tracing::debug!("pdev: {pdev:?}");
        let device = Device::new(instance.clone(), pdev, features)?;
        Ok(Self {
            instance,
            samplers: SamplerCache::new(device.clone()),
            device,
        })
    }
    /// Asks the platform-specific surface extension whether `queue_family`
    /// of `pdev` can present to the display behind `display_handle`.
    ///
    /// Panics (`todo!`/`panic!`) on platforms not yet implemented: XCB, DRM,
    /// and anything outside Xlib/Wayland/Windows.
    fn queue_family_supports_presentation(
        instance: &Instance,
        pdev: vk::PhysicalDevice,
        queue_family: u32,
        display_handle: RawDisplayHandle,
    ) -> bool {
        unsafe {
            match display_handle {
                RawDisplayHandle::Xlib(display) => {
                    let surface =
                        ash::khr::xlib_surface::Instance::new(&instance.entry, &instance.instance);
                    surface.get_physical_device_xlib_presentation_support(
                        pdev,
                        queue_family,
                        display.display.unwrap().as_ptr() as _,
                        // visual ID parameter; NOTE(review): `screen` is passed
                        // where a VisualID is expected — confirm correctness
                        display.screen as _,
                    )
                    //todo!("xlib")
                }
                RawDisplayHandle::Xcb(_xcb_display_handle) => todo!("xcb"),
                RawDisplayHandle::Wayland(wayland_display_handle) => {
                    let surface = ash::khr::wayland_surface::Instance::new(
                        &instance.entry,
                        &instance.instance,
                    );
                    surface.get_physical_device_wayland_presentation_support(
                        pdev,
                        queue_family,
                        wayland_display_handle.display.cast().as_mut(),
                    )
                }
                RawDisplayHandle::Drm(_) => {
                    todo!()
                }
                RawDisplayHandle::Windows(_) => {
                    // win32 needs no display pointer, only the queue family
                    ash::khr::win32_surface::Instance::new(&instance.entry, &instance.instance)
                        .get_physical_device_win32_presentation_support(pdev, queue_family)
                }
                _ => panic!("unsupported platform"),
            }
        }
    }
/// Picks one queue per role (graphics, async compute, transfer, present)
/// from `pdev`'s queue families.
///
/// Fallback order: compute and transfer fall back to the graphics queue;
/// present falls back to the graphics queue when the graphics family itself
/// can present (sharing that queue) or when no present-capable family exists.
fn select_pdev_queue_families(
    instance: &Instance,
    display_handle: Option<RawDisplayHandle>,
    pdev: vk::PhysicalDevice,
) -> DeviceQueueFamilies {
    let queue_families = unsafe {
        instance
            .instance
            .get_physical_device_queue_family_properties(pdev)
    };
    // Mutable bookkeeping for one family: `num_queues` is decremented as
    // queues are claimed so a family cannot be oversubscribed by `find_*`.
    struct QueueFamily {
        num_queues: u32,
        is_present: bool,
        is_compute: bool,
        is_graphics: bool,
        is_transfer: bool,
    }
    impl QueueFamily {
        #[allow(dead_code)]
        fn is_graphics_and_compute(&self) -> bool {
            self.is_compute && self.is_graphics
        }
    }
    struct QueueFamilies(Vec<QueueFamily>);
    impl QueueFamilies {
        // Claims a queue from the first family with remaining capacity that
        // satisfies `pred`.
        fn find_first<F>(&mut self, mut pred: F) -> Option<u32>
        where
            F: FnMut(&QueueFamily) -> bool,
        {
            if let Some((q, family)) = self
                .0
                .iter_mut()
                .enumerate()
                .filter(|(_, family)| family.num_queues > 0)
                .find(|(_, family)| pred(family))
            {
                family.num_queues -= 1;
                Some(q as u32)
            } else {
                None
            }
        }
        // Claims a queue from the family with the highest score returned by
        // `pred`; families scored `None` are skipped.
        fn find_best<F>(&mut self, mut pred: F) -> Option<u32>
        where
            F: FnMut(&QueueFamily) -> Option<u32>,
        {
            let (_, q, family) = self
                .0
                .iter_mut()
                .enumerate()
                .filter_map(|(i, family)| {
                    if family.num_queues == 0 {
                        return None;
                    }
                    pred(family).map(|score| (score, i, family))
                })
                .max_by_key(|(score, _, _)| *score)?;
            family.num_queues -= 1;
            Some(q as u32)
        }
    }
    let mut queue_families = QueueFamilies(
        queue_families
            .iter()
            .enumerate()
            .map(|(i, family)| {
                let q = i as u32;
                let is_graphics = family.queue_flags.contains(vk::QueueFlags::GRAPHICS);
                let is_compute = family.queue_flags.contains(vk::QueueFlags::COMPUTE);
                // graphics/compute queues implicitly support transfer even
                // when the TRANSFER bit is not advertised
                let is_transfer = family.queue_flags.contains(vk::QueueFlags::TRANSFER)
                    || is_compute
                    || is_graphics;
                let is_present = display_handle
                    .map(|display_handle| {
                        Self::queue_family_supports_presentation(
                            instance,
                            pdev,
                            q,
                            display_handle,
                        )
                    })
                    .unwrap_or(false);
                QueueFamily {
                    num_queues: family.queue_count,
                    is_compute,
                    is_graphics,
                    is_present,
                    is_transfer,
                }
            })
            .collect::<Vec<_>>(),
    );
    let graphics = queue_families
        .find_best(|family| {
            if !family.is_graphics {
                return None;
            }
            // a queue with Graphics+Compute is guaranteed to exist
            Some(family.is_compute as u32 * 2 + family.is_present as u32)
        })
        .unwrap();
    // find present queue first because it is rather more important than a secondary compute queue
    let present = if queue_families.0[graphics as usize].is_present {
        // the graphics queue itself can present: leave `present` unset so
        // the `unwrap_or(graphics)` below shares that queue instead of
        // claiming a second one from the same family.
        None
    } else if let Some(present) = queue_families.find_first(|family| family.is_present) {
        Some(present)
    } else if display_handle.is_none() {
        // in this case the graphics queue will be used by default
        tracing::info!("no present queue available, using graphics queue as fallback for headless_surface");
        Some(graphics)
    } else {
        // fix: this warning previously also fired via an eagerly evaluated
        // `Option::or({ … })` block whenever the graphics queue could present
        // or a dedicated present family had been found; it now fires only
        // when a display exists but no family at all can present.
        tracing::warn!("no present queue available, this is unexpected!");
        None
    };
    let async_compute = queue_families.find_first(|family| family.is_compute);
    let transfer = queue_families.find_first(|family| family.is_transfer);
    // Deduplicate families and hand each role a (family, index-in-family)
    // pair; `helper` counts how many queues each family will allocate.
    let mut unique_families = BTreeMap::<u32, u32>::new();
    let mut helper = |family: u32| {
        use std::collections::btree_map::Entry;
        let index = match unique_families.entry(family) {
            Entry::Vacant(vacant_entry) => {
                vacant_entry.insert(1);
                0
            }
            Entry::Occupied(mut occupied_entry) => {
                let idx = occupied_entry.get_mut();
                *idx += 1;
                *idx - 1
            }
        };
        (family, index)
    };
    let graphics = helper(graphics);
    let async_compute = async_compute.map(|f| helper(f)).unwrap_or(graphics);
    let transfer = transfer.map(|f| helper(f)).unwrap_or(async_compute);
    let present = present.map(|f| helper(f)).unwrap_or(graphics);
    let families = unique_families
        .into_iter()
        .filter(|&(_family, count)| count > 0)
        .collect::<Vec<_>>();
    // family of each queue, of which one is allocated for each queue, with
    // graphics being the fallback queue for compute and transfer, and
    // present possibly being `None`, in which case it is Graphics
    let queues = DeviceQueueFamilies {
        families,
        graphics,
        async_compute,
        transfer,
        present,
    };
    queues
}
/// Enumerates physical devices and returns the best one that satisfies
/// `requirements` (API version, features/extensions), preferring discrete
/// GPUs over integrated, virtual and CPU implementations.
///
/// # Errors
/// Returns [`Error::NoPhysicalDevice`] when no device passes the filters.
fn choose_physical_device(
    instance: &Instance,
    display_handle: Option<RawDisplayHandle>,
    requirements: &PhysicalDeviceFeatures,
    extra_properties: Vec<Box<dyn ExtendsDeviceProperties2Debug>>,
) -> Result<PhysicalDevice> {
    let pdevs = unsafe { instance.instance.enumerate_physical_devices()? };
    let (pdev, properties) = pdevs
        .into_iter()
        .map(|pdev| {
            // query properties (plus any caller-supplied extension structs)
            // for every candidate device
            let mut props = PhysicalDeviceProperties::default().extra_properties(
                extra_properties
                    .iter()
                    .map(|b| dyn_clone::clone_box(&**b))
                    .collect::<Vec<_>>(),
            );
            props.query(&instance.instance, pdev);
            (pdev, props)
        })
        // filter devices which dont support the version of Vulkan we are requesting
        .filter(|(_, props)| props.base.api_version >= requirements.version)
        // filter devices which don't support the device extensions we
        // are requesting
        // TODO: figure out a way to fall back to some
        // device which doesn't support all of the extensions.
        .filter(|(pdev, _)| {
            let query_features =
                PhysicalDeviceFeatures::query(&instance.instance, *pdev).unwrap();
            requirements.compatible_with(&query_features)
        })
        .max_by_key(|(_, props)| {
            let score = match props.base.device_type {
                vk::PhysicalDeviceType::DISCRETE_GPU => 5,
                vk::PhysicalDeviceType::INTEGRATED_GPU => 4,
                vk::PhysicalDeviceType::VIRTUAL_GPU => 3,
                vk::PhysicalDeviceType::CPU => 2,
                vk::PhysicalDeviceType::OTHER => 1,
                // `vk::PhysicalDeviceType` is an open enum (a newtype over
                // i32), so a driver may legally report a value outside the
                // constants above; score it lowest instead of panicking
                // (was `unreachable!()`).
                _ => 0,
            };
            // score based on limits or other properties
            score
        })
        .ok_or(Error::NoPhysicalDevice)?;
    Ok(PhysicalDevice {
        queue_families: Self::select_pdev_queue_families(instance, display_handle, pdev),
        pdev,
        properties,
    })
}
/// Collects instance extension properties advertised by the Vulkan core
/// instance and additionally by each of the given `layers`.
///
/// Enumeration failures for individual sources are silently skipped, so the
/// result is a best-effort union (duplicates are possible).
fn get_available_extensions(
    entry: &ash::Entry,
    layers: &[&CStr],
) -> Result<Vec<ash::vk::ExtensionProperties>> {
    unsafe {
        let mut extensions = Vec::new();
        // core instance extensions first …
        if let Ok(props) = entry.enumerate_instance_extension_properties(None) {
            extensions.extend(props);
        }
        // … then extensions provided by each requested layer
        for &layer in layers {
            if let Ok(props) = entry.enumerate_instance_extension_properties(Some(layer)) {
                extensions.extend(props);
            }
        }
        Ok(extensions)
    }
}
/// returns a tuple of supported/enabled extensions and unsupported/requested extensions
fn get_extensions<'a>(
    entry: &ash::Entry,
    layers: &[&'a CStr],
    extensions: impl Iterator<Item = &'a CStr> + 'a,
    display_handle: Option<RawDisplayHandle>,
) -> Result<(Vec<&'a CStr>, Vec<&'a CStr>)> {
    unsafe {
        // names actually advertised by the instance/layers
        let available = Self::get_available_extensions(entry, layers)?;
        let available_names = available
            .iter()
            .filter_map(|props| props.extension_name_as_c_str().ok())
            .collect::<BTreeSet<_>>();
        let mut supported = Vec::new();
        let mut unsupported = Vec::new();
        // route a requested extension into the supported or unsupported bucket
        let mut sort_extension = |ext: &'a CStr| {
            if available_names.contains(ext) {
                supported.push(ext);
            } else {
                unsupported.push(ext);
            }
        };
        for extension in extensions {
            sort_extension(extension);
        }
        // extensions the windowing system requires for surface creation
        let required_for_window = display_handle
            .map(|display_handle| ash_window::enumerate_required_extensions(display_handle))
            .unwrap_or(Ok(&[]))?;
        for &raw_name in required_for_window {
            sort_extension(core::ffi::CStr::from_ptr(raw_name));
        }
        Ok((supported, unsupported))
    }
}
/// Filters `wants_layers` down to the layers the Vulkan implementation
/// actually provides.
///
/// On success returns the supported layers; on failure returns the pair
/// `(supported, unsupported)`. Any enumeration or name-decoding failure is
/// treated as "every requested layer is unsupported".
fn get_layers<'a>(
    entry: &ash::Entry,
    wants_layers: impl Iterator<Item = &'a CStr> + 'a,
) -> core::result::Result<Vec<&'a CStr>, (Vec<&'a CStr>, Vec<&'a CStr>)> {
    unsafe {
        let wanted = wants_layers.collect::<Vec<_>>();
        let properties = entry
            .enumerate_instance_layer_properties()
            .map_err(|_| (Vec::<&'a CStr>::new(), wanted.clone()))?;
        // gather the advertised layer names; bail out wholesale if any name
        // fails to decode (mirrors collecting into Result<BTreeSet, _>)
        let mut available = BTreeSet::new();
        for props in &properties {
            match props.layer_name_as_c_str() {
                Ok(name) => {
                    available.insert(name);
                }
                Err(_) => return Err((Vec::<&'a CStr>::new(), wanted.clone())),
            }
        }
        // split requests into supported / unsupported, preserving order
        let (supported, unsupported): (Vec<_>, Vec<_>) = wanted
            .into_iter()
            .partition(|layer| available.contains(layer));
        if unsupported.is_empty() {
            Ok(supported)
        } else {
            Err((supported, unsupported))
        }
    }
}
}
use raw_window_handle::RawWindowHandle;
/// Per-OS-window rendering state: the Vulkan surface for the window plus the
/// swapchain currently attached to it.
pub struct WindowContext {
    window_handle: RawWindowHandle,
    surface: Arc<Surface>,
    // this mutex is for guarding the swapchain against being replaced
    // underneath WindowContext's functions
    current_swapchain: RwLock<Arc<Swapchain>>,
}
impl Drop for WindowContext {
    /// Drains all pending GPU work before the surface and swapchain are torn
    /// down; the result of the wait is deliberately ignored.
    fn drop(&mut self) {
        let swapchain = self.current_swapchain.read();
        unsafe {
            _ = swapchain.device.dev().device_wait_idle();
        }
    }
}
// SAFETY: NOTE(review) — `RawWindowHandle` contains raw platform pointers and
// is not `Send`/`Sync` on its own; these impls assert the handle is never
// dereferenced in a thread-unsafe way. Confirm against each platform backend.
unsafe impl Send for WindowContext {}
unsafe impl Sync for WindowContext {}
// A `WindowContext`'s identity is its raw window handle: `Borrow`,
// `PartialEq`/`Eq` and `Hash` all delegate to it, so contexts can live in
// hash maps keyed (or looked up) by `RawWindowHandle`.
impl Borrow<RawWindowHandle> for WindowContext {
    fn borrow(&self) -> &RawWindowHandle {
        &self.window_handle
    }
}
impl PartialEq for WindowContext {
    fn eq(&self, other: &Self) -> bool {
        self.window_handle == other.window_handle
    }
}
impl Eq for WindowContext {}
impl core::hash::Hash for WindowContext {
    // must hash the same field `eq` compares, keeping Hash/Eq consistent
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.window_handle.hash(state);
    }
}
impl WindowContext {
    /// Creates the Vulkan surface for `window_handle` and an initial
    /// swapchain of the given `extent`.
    fn new(
        instance: Arc<Instance>,
        device: Device,
        extent: vk::Extent2D,
        window_handle: raw_window_handle::RawWindowHandle,
        display: RawDisplayHandle,
    ) -> Result<Self> {
        let surface = Arc::new(Surface::create(instance.clone(), display, window_handle)?);
        let swapchain = Arc::new(Swapchain::new(
            device.clone(),
            surface.clone(),
            device.phy(),
            extent,
        )?);
        Ok(Self {
            window_handle,
            surface,
            current_swapchain: RwLock::new(swapchain),
        })
    }
    /// spawns a task that continuously requests images from the current
    /// swapchain, sending them to a channel. returns the receiver of the
    /// channel, and a handle to the task, allowing for cancellation.
    fn images(
        self: Arc<Self>,
    ) -> (
        smol::channel::Receiver<SwapchainFrame>,
        smol::Task<std::result::Result<(), Error>>,
    ) {
        // bounded(8) applies backpressure: acquisition stalls once eight
        // frames sit unconsumed in the channel
        let (tx, rx) = smol::channel::bounded(8);
        let task = smol::spawn(async move {
            loop {
                let frame = self.acquire_image().await?;
                tx.send(frame)
                    .await
                    .expect("channel closed on swapchain acquiring frame");
            }
        });
        (rx, task)
    }
    /// Acquires the next frame from the current swapchain; when the
    /// swapchain reports itself suboptimal it is recreated (at most once
    /// here), and the already-acquired frame is still returned.
    async fn acquire_image(&self) -> Result<SwapchainFrame> {
        // clone swapchain to keep it alive
        let swapchain = self.current_swapchain.read().clone();
        let (frame, suboptimal) = swapchain.clone().acquire_image().await?;
        if suboptimal {
            let mut lock = self.current_swapchain.write();
            // only recreate our swapchain if it is still same, or else it might have already been recreated.
            if Arc::ptr_eq(&swapchain, &lock) {
                *lock = Arc::new(lock.recreate(None)?);
            }
        }
        Ok(frame)
    }
    /// Replaces the swapchain, e.g. after a window resize; `None` keeps the
    /// previous extent.
    pub fn recreate_with(&self, extent: Option<vk::Extent2D>) -> Result<()> {
        let mut swapchain = self.current_swapchain.write();
        *swapchain = Arc::new(swapchain.recreate(extent)?);
        Ok(())
    }
}
/// Long-lived Vulkan resources for rendering egui output.
#[derive(Debug)]
pub struct EguiState {
    // maps egui texture ids to our texture manager ids + sampler options
    textures: HashMap<egui::TextureId, EguiTextureInfo>,
    #[allow(unused)]
    descriptor_pool: pipeline::DescriptorPool,
    // single set holding the texture array and the texture-id storage buffer
    descriptor_set: vk::DescriptorSet,
    #[allow(unused)]
    descriptor_layout: pipeline::DescriptorSetLayout,
    pipeline_layout: Arc<pipeline::PipelineLayout>,
    pipeline: Arc<pipeline::Pipeline>,
    // per-frame draw data produced by `draw_egui`, consumed by `debug_draw`
    render_state: Option<EguiRenderState>,
}
/// GPU buffers holding one frame of tessellated egui geometry, plus the
/// textures scheduled to be freed once that frame has been drawn.
#[derive(Debug)]
struct EguiRenderState {
    vertices: buffers::Buffer,
    indices: buffers::Buffer,
    // indirect draw commands, one per egui mesh
    draw_calls: buffers::Buffer,
    // storage buffer mapping draw index -> slot in the texture array
    texture_ids: buffers::Buffer,
    textures_to_free: Vec<texture::TextureId>,
    num_draw_calls: usize,
}
/// Links an egui-side texture to its renderer-side texture id together with
/// the sampling options egui requested for it.
#[derive(Debug, Clone, Copy)]
struct EguiTextureInfo {
    id: texture::TextureId,
    options: egui::epaint::textures::TextureOptions,
}
impl EguiTextureInfo {
    /// Translates egui's texture sampling options into the renderer's
    /// sampler description (same wrap mode on all three axes, no LOD clamp).
    fn into_sampler_desc(&self) -> pipeline::SamplerDesc {
        // egui filters map 1:1 onto Vulkan filters
        fn to_vk_filter(filter: egui::TextureFilter) -> vk::Filter {
            match filter {
                egui::TextureFilter::Nearest => vk::Filter::NEAREST,
                egui::TextureFilter::Linear => vk::Filter::LINEAR,
            }
        }
        let address_mode = match self.options.wrap_mode {
            egui::TextureWrapMode::ClampToEdge => vk::SamplerAddressMode::CLAMP_TO_EDGE,
            egui::TextureWrapMode::Repeat => vk::SamplerAddressMode::REPEAT,
            egui::TextureWrapMode::MirroredRepeat => vk::SamplerAddressMode::MIRRORED_REPEAT,
        };
        let mipmap_mode = match self.options.mipmap_mode {
            Some(egui::TextureFilter::Linear) => vk::SamplerMipmapMode::LINEAR,
            Some(egui::TextureFilter::Nearest) => vk::SamplerMipmapMode::NEAREST,
            None => Default::default(),
        };
        pipeline::SamplerDesc {
            min_filter: to_vk_filter(self.options.minification),
            mag_filter: to_vk_filter(self.options.magnification),
            mipmap_mode,
            address_u: address_mode,
            address_v: address_mode,
            address_w: address_mode,
            max_lod: vk::LOD_CLAMP_NONE,
            ..Default::default()
        }
    }
}
impl EguiState {
    // descriptor binding indices, must match the egui shaders
    const TEXTURE_BINDING: u32 = 0;
    const UNIFORM_BINDING: u32 = 1;
    /// Builds all static Vulkan objects needed to render egui: a descriptor
    /// pool/layout/set (a small texture array plus one storage buffer), a
    /// pipeline layout with a 128-byte vertex push-constant range, and the
    /// egui graphics pipeline loaded from precompiled SPIR-V shaders.
    fn new(device: Device) -> Result<Self> {
        // one set: up to 10 combined image samplers + 1 storage buffer
        let descriptor_pool = pipeline::DescriptorPool::new(
            device.clone(),
            pipeline::DescriptorPoolDesc {
                flags: vk::DescriptorPoolCreateFlags::UPDATE_AFTER_BIND,
                name: Some("egui-descriptorpool".into()),
                sizes: &[
                    vk::DescriptorPoolSize {
                        ty: vk::DescriptorType::COMBINED_IMAGE_SAMPLER,
                        descriptor_count: 10,
                    },
                    vk::DescriptorPoolSize {
                        ty: vk::DescriptorType::STORAGE_BUFFER,
                        descriptor_count: 1,
                    },
                ],
                max_sets: 1,
                ..Default::default()
            },
        )?;
        // the texture array is PARTIALLY_BOUND + UPDATE_AFTER_BIND so slots
        // can be left empty and rewritten each frame
        let descriptor_layout = pipeline::DescriptorSetLayout::new(
            device.clone(),
            pipeline::DescriptorSetLayoutDesc {
                flags: vk::DescriptorSetLayoutCreateFlags::UPDATE_AFTER_BIND_POOL,
                name: Some("egui-descriptor-layout".into()),
                bindings: &[
                    pipeline::DescriptorSetLayoutBindingDesc {
                        binding: Self::TEXTURE_BINDING,
                        count: 10,
                        kind: vk::DescriptorType::COMBINED_IMAGE_SAMPLER,
                        stage: vk::ShaderStageFlags::FRAGMENT,
                        flags: Some(
                            vk::DescriptorBindingFlags::PARTIALLY_BOUND
                                | vk::DescriptorBindingFlags::UPDATE_AFTER_BIND,
                        ),
                    },
                    pipeline::DescriptorSetLayoutBindingDesc {
                        binding: Self::UNIFORM_BINDING,
                        count: 1,
                        kind: vk::DescriptorType::STORAGE_BUFFER,
                        stage: vk::ShaderStageFlags::FRAGMENT,
                        flags: None,
                    },
                ],
            },
        )?;
        let sets = descriptor_pool.allocate(&[pipeline::DescriptorSetAllocDesc {
            name: None,
            layout: &descriptor_layout,
        }])?;
        // 128 bytes of vertex push constants (screen size lives at offset 0)
        let pipeline_layout = pipeline::PipelineLayout::new(
            device.clone(),
            pipeline::PipelineLayoutDesc {
                descriptor_set_layouts: &[&descriptor_layout],
                push_constant_ranges: &[vk::PushConstantRange {
                    offset: 0,
                    size: 128,
                    stage_flags: vk::ShaderStageFlags::VERTEX,
                }],
                name: Some("egui-pipeline-layout".into()),
            },
        )?;
        // NOTE(review): shader paths are relative to the process working
        // directory — confirm this holds for every launch configuration.
        let frag_shader = pipeline::ShaderModule::new_from_path(
            device.clone(),
            "crates/renderer/shaders/egui_frag.spv",
        )?;
        let vert_shader = pipeline::ShaderModule::new_from_path(
            device.clone(),
            "crates/renderer/shaders/egui_vert.spv",
        )?;
        let pipeline = pipeline::Pipeline::new(
            device.clone(),
            pipeline::PipelineDesc::Graphics(pipeline::GraphicsPipelineDesc {
                flags: Default::default(),
                name: Some("egui-pipeline".into()),
                shader_stages: &[
                    pipeline::ShaderStageDesc {
                        flags: vk::PipelineShaderStageCreateFlags::empty(),
                        module: &frag_shader,
                        stage: vk::ShaderStageFlags::FRAGMENT,
                        entry: c"main".into(),
                    },
                    pipeline::ShaderStageDesc {
                        flags: vk::PipelineShaderStageCreateFlags::empty(),
                        module: &vert_shader,
                        stage: vk::ShaderStageFlags::VERTEX,
                        entry: c"main".into(),
                    },
                ],
                render_pass: None,
                layout: &pipeline_layout,
                subpass: None,
                base_pipeline: None,
                // matches the egui vertex layout used by draw_egui:
                // pos (vec2) + uv (vec2) + color (rgba8) = 20 bytes
                vertex_input: Some(pipeline::VertexInputState {
                    bindings: &[vk::VertexInputBindingDescription {
                        binding: 0,
                        stride: 20,
                        input_rate: vk::VertexInputRate::VERTEX,
                    }],
                    attributes: &[
                        vk::VertexInputAttributeDescription {
                            location: 0,
                            binding: 0,
                            format: vk::Format::R32G32_SFLOAT,
                            offset: 0,
                        },
                        vk::VertexInputAttributeDescription {
                            location: 1,
                            binding: 0,
                            format: vk::Format::R32G32_SFLOAT,
                            offset: 8,
                        },
                        vk::VertexInputAttributeDescription {
                            location: 2,
                            binding: 0,
                            format: vk::Format::R8G8B8A8_UNORM,
                            offset: 16,
                        },
                    ],
                }),
                input_assembly: Some(pipeline::InputAssemblyState {
                    topology: vk::PrimitiveTopology::TRIANGLE_LIST,
                    primitive_restart: false,
                }),
                tessellation: None,
                // viewport and scissor are dynamic (set per frame below)
                viewport: Some(pipeline::ViewportState {
                    num_viewports: 1,
                    num_scissors: 1,
                    ..Default::default()
                }),
                rasterization: Some(pipeline::RasterizationState {
                    cull_mode: vk::CullModeFlags::NONE,
                    ..Default::default()
                }),
                multisample: Some(pipeline::MultisampleState {
                    ..Default::default()
                }),
                depth_stencil: Some(pipeline::DepthStencilState {
                    depth: Some(pipeline::DepthState {
                        write_enable: false,
                        compare_op: Some(vk::CompareOp::LESS),
                        bounds: Some(pipeline::DepthBounds { min: 0.0, max: 1.0 }),
                    }),
                    ..Default::default()
                }),
                // premultiplied-alpha blending, as egui's output expects
                color_blend: Some(pipeline::ColorBlendState {
                    attachments: &[vk::PipelineColorBlendAttachmentState::default()
                        .color_write_mask(vk::ColorComponentFlags::RGBA)
                        .blend_enable(true)
                        .color_blend_op(vk::BlendOp::ADD)
                        .src_color_blend_factor(vk::BlendFactor::ONE)
                        .dst_color_blend_factor(vk::BlendFactor::ONE_MINUS_SRC_ALPHA)
                        .alpha_blend_op(vk::BlendOp::ADD)
                        .src_alpha_blend_factor(vk::BlendFactor::ONE)
                        .dst_alpha_blend_factor(vk::BlendFactor::ONE_MINUS_SRC_ALPHA)],
                    ..Default::default()
                }),
                rendering: Some(pipeline::RenderingState {
                    color_formats: &[vk::Format::R8G8B8A8_UNORM],
                    ..Default::default()
                }),
                dynamic: Some(pipeline::DynamicState {
                    dynamic_states: &[vk::DynamicState::VIEWPORT, vk::DynamicState::SCISSOR],
                    ..Default::default()
                }),
            }),
        )?;
        Ok(Self {
            textures: HashMap::new(),
            descriptor_pool,
            descriptor_layout,
            descriptor_set: sets[0],
            pipeline: Arc::new(pipeline),
            pipeline_layout: Arc::new(pipeline_layout),
            render_state: None,
        })
    }
    /// Maps an egui texture id to the renderer's texture id, if registered.
    fn lookup_texture(&self, id: egui::epaint::TextureId) -> Option<texture::TextureId> {
        self.textures.get(&id).map(|entry| entry.id)
    }
}
/// Top-level renderer: owns the Vulkan context, texture storage, egui state
/// and one `WindowContext` per window (keyed by `W`).
pub struct Renderer<W> {
    pub texture_handler: texture::TextureManager,
    pub egui_state: EguiState,
    // thinkw: want renderer linked with display? then no (real) headless
    display: RawDisplayHandle,
    pub window_contexts: HashMap<W, WindowContext>,
    vulkan: Vulkan,
}
pub use vk::Extent2D;
impl<W> Renderer<W> {
    /// Initializes the Vulkan context for `display` along with the
    /// renderer-wide texture and egui state; window contexts are added
    /// afterwards via `new_window_context`.
    pub fn new(display: RawDisplayHandle) -> Result<Self> {
        let vulkan = Vulkan::new("Vidya", &[], &[], Some(display))?;
        Ok(Self {
            texture_handler: texture::TextureManager::new(vulkan.device.clone()),
            egui_state: EguiState::new(vulkan.device.clone())?,
            vulkan,
            display,
            window_contexts: HashMap::new(),
        })
    }
    /// Prepares one frame of egui output for drawing: uploads texture
    /// deltas, tessellates `output.shapes` into device-local vertex/index/
    /// indirect-draw buffers, rewrites the egui descriptor set, and stores
    /// everything as this frame's `EguiRenderState` (consumed by
    /// `debug_draw`). Blocks on a fence until the GPU copies complete.
    pub fn draw_egui(&mut self, ctx: &egui::Context, output: egui::FullOutput) -> Result<()> {
        let pool = commands::SingleUseCommandPool::new(
            self.vulkan.device.clone(),
            self.vulkan.device.graphics_queue().clone(),
        )
        .unwrap();
        let cmd = pool.alloc().unwrap();
        // 1) texture deltas: copy pixels into a host staging buffer, then
        // into a fresh device image; keep (staging, image) pairs alive until
        // after submission.
        let cmd_objects = output
            .textures_delta
            .set
            .iter()
            .map(|(egui_id, delta)| {
                // 4 bytes per pixel (RGBA8)
                let size = delta.image.size();
                let byte_size = size[0] * size[1] * 4;
                let mut staging = buffers::Buffer::new(
                    self.vulkan.device.clone(),
                    buffers::BufferDesc {
                        name: Some(format!("egui-{egui_id:?}-staging-buf").into()),
                        size: byte_size as u64,
                        usage: vk::BufferUsageFlags::TRANSFER_SRC,
                        mem_usage: vk_mem::MemoryUsage::AutoPreferHost,
                        alloc_flags: vk_mem::AllocationCreateFlags::MAPPED
                            | vk_mem::AllocationCreateFlags::HOST_ACCESS_SEQUENTIAL_WRITE
                            | vk_mem::AllocationCreateFlags::STRATEGY_FIRST_FIT,
                        ..Default::default()
                    },
                )
                .expect("staging buffer");
                {
                    let mut mem = staging.map().expect("mapping staging buffer");
                    match &delta.image {
                        egui::ImageData::Color(arc) => {
                            // Color32 pixels are already RGBA8; reinterpret
                            // the pixel slice as raw bytes and copy directly
                            let slice = unsafe {
                                core::slice::from_raw_parts(
                                    arc.pixels.as_ptr().cast::<u8>(),
                                    arc.pixels.len() * size_of::<Color32>(),
                                )
                            };
                            mem[..slice.len()].copy_from_slice(slice);
                        }
                        egui::ImageData::Font(font_image) => {
                            // font coverage is expanded to sRGBA per pixel
                            for (i, c) in font_image.srgba_pixels(None).enumerate() {
                                let bytes = c.to_array();
                                mem[i * 4..(i + 1) * 4].copy_from_slice(&bytes);
                            }
                        }
                    }
                }
                // whole deltas become the texture itself (sampled later);
                // partial deltas are blitted into an existing texture, so
                // the temporary image is only ever a transfer source
                let sampled = if delta.is_whole() {
                    vk::ImageUsageFlags::SAMPLED
                } else {
                    vk::ImageUsageFlags::TRANSFER_SRC
                };
                let texture = Arc::new(
                    images::Image::new(
                        self.vulkan.device.clone(),
                        images::ImageDesc {
                            name: Some(format!("egui-texture-{egui_id:?}").into()),
                            format: vk::Format::R8G8B8A8_UNORM,
                            extent: vk::Extent3D {
                                width: delta.image.width() as u32,
                                height: delta.image.height() as u32,
                                depth: 1,
                            },
                            usage: sampled | vk::ImageUsageFlags::TRANSFER_DST,
                            mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
                            ..Default::default()
                        },
                    )
                    .expect("image creation"),
                );
                // UNDEFINED -> TRANSFER_DST before the buffer-to-image copy
                cmd.image_barrier(
                    texture.image(),
                    vk::ImageAspectFlags::COLOR,
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::empty(),
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::TRANSFER_WRITE,
                    vk::ImageLayout::UNDEFINED,
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    None,
                );
                cmd.copy_buffer_to_image(
                    staging.buffer(),
                    texture.image(),
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    &[vk::BufferImageCopy {
                        buffer_offset: 0,
                        buffer_row_length: delta.image.width() as u32,
                        buffer_image_height: delta.image.height() as u32,
                        image_subresource: vk::ImageSubresourceLayers::default()
                            .aspect_mask(vk::ImageAspectFlags::COLOR)
                            .base_array_layer(0)
                            .mip_level(0)
                            .layer_count(1),
                        image_offset: vk::Offset3D { x: 0, y: 0, z: 0 },
                        image_extent: vk::Extent3D {
                            width: texture.size().width,
                            height: texture.size().height,
                            depth: 1,
                        },
                    }],
                );
                // register a renderer-side id for previously unseen textures
                let id = self.egui_state.lookup_texture(*egui_id).unwrap_or_else(|| {
                    let id = texture::TextureId::new();
                    self.egui_state.textures.insert(
                        *egui_id,
                        EguiTextureInfo {
                            id,
                            options: delta.options,
                        },
                    );
                    id
                });
                if let Some(pos) = delta.pos {
                    // partial update: blit the freshly uploaded image into
                    // the matching region of the already-registered texture
                    // SAFETY: must exist because image is not whole.
                    // NOTE(review): this unwrap relies on egui always sending
                    // a whole delta before any partial one for a texture.
                    let existing_texture = self.texture_handler.textures.get(&id).cloned().unwrap();
                    cmd.image_barrier(
                        texture.image(),
                        vk::ImageAspectFlags::COLOR,
                        vk::PipelineStageFlags2::TRANSFER,
                        vk::AccessFlags2::TRANSFER_WRITE,
                        vk::PipelineStageFlags2::TRANSFER,
                        vk::AccessFlags2::TRANSFER_READ,
                        vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                        vk::ImageLayout::TRANSFER_SRC_OPTIMAL,
                        None,
                    );
                    cmd.image_barrier(
                        existing_texture.handle(),
                        vk::ImageAspectFlags::COLOR,
                        vk::PipelineStageFlags2::empty(),
                        vk::AccessFlags2::empty(),
                        vk::PipelineStageFlags2::TRANSFER,
                        vk::AccessFlags2::TRANSFER_WRITE,
                        vk::ImageLayout::UNDEFINED,
                        vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                        None,
                    );
                    cmd.blit_images(
                        &texture,
                        util::Rect2D::new(0, 0, texture.width() as i32, texture.height() as i32),
                        &existing_texture,
                        util::Rect2D::new_from_size(
                            glam::ivec2(pos[0] as i32, pos[1] as i32),
                            glam::ivec2(texture.width() as i32, texture.height() as i32),
                        ),
                    );
                    cmd.image_barrier(
                        existing_texture.handle(),
                        vk::ImageAspectFlags::COLOR,
                        vk::PipelineStageFlags2::TRANSFER,
                        vk::AccessFlags2::TRANSFER_WRITE,
                        vk::PipelineStageFlags2::FRAGMENT_SHADER,
                        vk::AccessFlags2::SHADER_SAMPLED_READ,
                        vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                        vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
                        None,
                    );
                } else {
                    // whole update: the new image becomes the texture itself
                    cmd.image_barrier(
                        texture.handle(),
                        vk::ImageAspectFlags::COLOR,
                        vk::PipelineStageFlags2::TRANSFER,
                        vk::AccessFlags2::TRANSFER_WRITE,
                        vk::PipelineStageFlags2::FRAGMENT_SHADER,
                        vk::AccessFlags2::SHADER_SAMPLED_READ,
                        vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                        vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
                        None,
                    );
                    self.texture_handler
                        .insert_image_with_id(id, texture.clone());
                    tracing::debug!("new texture for egui: {egui_id:?} -> {id:?}");
                }
                (staging, texture)
            })
            .collect::<Vec<_>>();
        // 2) tessellate shapes into meshes and flatten them into vertex /
        // index / indirect-draw arrays plus a per-draw texture-slot index
        let draw_data = ctx.tessellate(output.shapes, output.pixels_per_point);
        // mirrors the 20-byte vertex layout declared in the egui pipeline
        #[repr(C)]
        #[derive(Debug, Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
        struct Vertex {
            pos: glam::Vec2,
            uv: glam::Vec2,
            color: egui::epaint::Color32,
        }
        #[repr(transparent)]
        #[derive(Debug, Clone, Copy)]
        struct DrawCall(vk::DrawIndexedIndirectCommand);
        // SAFETY: repr(transparent) over a plain-old-data Vulkan struct
        unsafe impl bytemuck::Zeroable for DrawCall {}
        unsafe impl bytemuck::Pod for DrawCall {}
        let mut vertices = Vec::new();
        let mut indices = Vec::new();
        let mut draw_calls = Vec::new();
        let mut textures = IndexMap::new();
        let mut textures_indices = Vec::new();
        for draw in draw_data {
            let egui::epaint::Primitive::Mesh(mesh) = draw.primitive else {
                continue;
            };
            // one indirect draw per mesh, offset into the shared buffers
            draw_calls.push(DrawCall(vk::DrawIndexedIndirectCommand {
                index_count: mesh.indices.len() as u32,
                instance_count: 1,
                first_index: indices.len() as u32,
                vertex_offset: vertices.len() as i32,
                first_instance: 0,
            }));
            vertices.extend(mesh.vertices.iter().map(|v| Vertex {
                pos: glam::vec2(v.pos.x, v.pos.y),
                uv: glam::vec2(v.uv.x, v.uv.y),
                color: v.color,
            }));
            indices.extend(mesh.indices);
            let texture = self
                .egui_state
                .textures
                .get(&mesh.texture_id)
                .cloned()
                .unwrap();
            // deduplicate textures; the IndexMap slot index doubles as the
            // descriptor-array slot referenced per draw
            if !textures.contains_key(&texture.id) {
                textures.insert(texture.id, texture);
            }
            let idx = textures.get_index_of(&texture.id).unwrap();
            textures_indices.push(idx as u32);
        }
        let num_draw_calls = draw_calls.len();
        let device = self.vulkan.device.clone();
        // 3) allocate device-local buffers and record one staging upload
        // laid out as [vertices | indices | draw calls]
        let (draw_staging, vertices, indices, draw_calls, texture_ids) = {
            let vertices_size = vertices.len() * size_of::<Vertex>();
            let indices_size = indices.len() * size_of::<u32>();
            let draw_calls_size = draw_calls.len() * size_of::<vk::DrawIndexedIndirectCommand>();
            let staging_size = vertices_size + indices_size + draw_calls_size;
            let mut staging = buffers::Buffer::new(
                device.clone(),
                buffers::BufferDesc {
                    name: Some("egui-draw-staging".into()),
                    size: staging_size as u64,
                    usage: vk::BufferUsageFlags::TRANSFER_SRC,
                    mem_usage: vk_mem::MemoryUsage::AutoPreferHost,
                    alloc_flags: vk_mem::AllocationCreateFlags::MAPPED
                        | vk_mem::AllocationCreateFlags::HOST_ACCESS_SEQUENTIAL_WRITE
                        | vk_mem::AllocationCreateFlags::STRATEGY_FIRST_FIT,
                    ..Default::default()
                },
            )?;
            {
                let mut map = staging.map()?;
                let (st_vertices, rest) = map.split_at_mut(vertices_size);
                let (st_indices, st_drawcalls) = rest.split_at_mut(indices_size);
                st_vertices.copy_from_slice(bytemuck::cast_slice(&vertices));
                st_indices.copy_from_slice(bytemuck::cast_slice(&indices));
                st_drawcalls.copy_from_slice(bytemuck::cast_slice(&draw_calls));
            }
            let vertices = buffers::Buffer::new(
                device.clone(),
                buffers::BufferDesc {
                    name: Some("egui-draw-vertices".into()),
                    size: vertices_size as u64,
                    usage: vk::BufferUsageFlags::TRANSFER_DST | vk::BufferUsageFlags::VERTEX_BUFFER,
                    mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
                    ..Default::default()
                },
            )?;
            let indices = buffers::Buffer::new(
                device.clone(),
                buffers::BufferDesc {
                    name: Some("egui-draw-indices".into()),
                    size: indices_size as u64,
                    usage: vk::BufferUsageFlags::TRANSFER_DST | vk::BufferUsageFlags::INDEX_BUFFER,
                    mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
                    ..Default::default()
                },
            )?;
            let draw_calls = buffers::Buffer::new(
                device.clone(),
                buffers::BufferDesc {
                    name: Some("egui-draw-draw_calls".into()),
                    size: draw_calls_size as u64,
                    usage: vk::BufferUsageFlags::TRANSFER_DST
                        | vk::BufferUsageFlags::INDIRECT_BUFFER,
                    mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
                    ..Default::default()
                },
            )?;
            // copy each section of the staging buffer to its target buffer
            cmd.copy_buffers(
                staging.buffer(),
                vertices.buffer(),
                &[vk::BufferCopy {
                    src_offset: 0,
                    dst_offset: 0,
                    size: vertices_size as u64,
                }],
            );
            cmd.copy_buffers(
                staging.buffer(),
                indices.buffer(),
                &[vk::BufferCopy {
                    src_offset: vertices_size as u64,
                    dst_offset: 0,
                    size: indices_size as u64,
                }],
            );
            cmd.copy_buffers(
                staging.buffer(),
                draw_calls.buffer(),
                &[vk::BufferCopy {
                    src_offset: (vertices_size + indices_size) as u64,
                    dst_offset: 0,
                    size: draw_calls_size as u64,
                }],
            );
            // per-draw texture-slot indices, written via a host mapping
            let mut texture_ids = buffers::Buffer::new(
                device.clone(),
                buffers::BufferDesc {
                    name: Some("egui-draw-texture_ids".into()),
                    size: (textures_indices.len() * size_of::<u32>()) as u64,
                    usage: vk::BufferUsageFlags::STORAGE_BUFFER,
                    mem_usage: vk_mem::MemoryUsage::AutoPreferDevice,
                    alloc_flags: vk_mem::AllocationCreateFlags::HOST_ACCESS_SEQUENTIAL_WRITE,
                    ..Default::default()
                },
            )?;
            {
                let mut map = texture_ids.map()?;
                map.copy_from_slice(bytemuck::cast_slice(&textures_indices));
            }
            (staging, vertices, indices, draw_calls, texture_ids)
        };
        // 4) rewrite the descriptor set: one sampler+view per used texture
        // (array slot order matches the IndexMap order used above) plus the
        // texture-id storage buffer
        let descriptor_infos = textures
            .values()
            .map(|entry| {
                let texture = self.texture_handler.get_texture(entry.id).unwrap();
                let info = vk::DescriptorImageInfo {
                    sampler: self
                        .vulkan
                        .samplers
                        .get_sampler(entry.into_sampler_desc())
                        .unwrap(),
                    image_view: texture
                        .get_view(images::ImageViewDesc {
                            kind: vk::ImageViewType::TYPE_2D,
                            format: texture.format(),
                            aspect: vk::ImageAspectFlags::COLOR,
                            mip_range: (0..1).into(),
                            layer_range: (0..1).into(),
                            ..Default::default()
                        })
                        .unwrap(),
                    image_layout: vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
                };
                info
            })
            .collect::<Vec<_>>();
        let uniform_info = vk::DescriptorBufferInfo {
            buffer: texture_ids.buffer(),
            offset: 0,
            range: texture_ids.len(),
        };
        let descriptor_writes = descriptor_infos
            .iter()
            .enumerate()
            .map(|(i, info)| {
                vk::WriteDescriptorSet::default()
                    .image_info(core::slice::from_ref(info))
                    .descriptor_count(1)
                    .descriptor_type(vk::DescriptorType::COMBINED_IMAGE_SAMPLER)
                    .dst_set(self.egui_state.descriptor_set)
                    .dst_binding(EguiState::TEXTURE_BINDING)
                    .dst_array_element(i as u32)
            })
            .chain(core::iter::once({
                vk::WriteDescriptorSet::default()
                    .buffer_info(core::slice::from_ref(&uniform_info))
                    .descriptor_count(1)
                    .dst_binding(EguiState::UNIFORM_BINDING)
                    .descriptor_type(vk::DescriptorType::STORAGE_BUFFER)
                    .dst_array_element(0)
                    .dst_set(self.egui_state.descriptor_set)
            }))
            .collect::<Vec<_>>();
        unsafe {
            device.dev().update_descriptor_sets(&descriptor_writes, &[]);
        }
        // textures egui asked to free; actually removed after the next draw
        let to_remove_tex_ids = output
            .textures_delta
            .free
            .iter()
            .filter_map(|id| self.egui_state.textures.get(id).cloned())
            .map(|entry| entry.id)
            .collect::<Vec<_>>();
        self.egui_state.render_state = Some(EguiRenderState {
            vertices,
            indices,
            draw_calls,
            num_draw_calls,
            texture_ids,
            textures_to_free: to_remove_tex_ids,
        });
        // 5) submit and wait; `black_box` keeps the staging buffers and
        // temporary images alive until the GPU has consumed them
        let fence = Arc::new(sync::Fence::create(device.clone()).unwrap());
        let future = cmd.submit_async(None, None, fence).unwrap();
        future.block()?;
        black_box((cmd_objects, draw_staging));
        // free after drawing
        Ok(())
    }
    /// Draws one debug frame to `window`: clears the swapchain image to a
    /// color derived deterministically from the surface handle, renders any
    /// pending egui geometry from `draw_egui`, calls `pre_present_cb` just
    /// before presenting, then frees textures scheduled for removal.
    /// Blocks on the submission fence. No-op when `window` has no context.
    pub fn debug_draw<K, F: FnOnce()>(&mut self, window: &K, pre_present_cb: F) -> Result<()>
    where
        K: core::hash::Hash + Eq,
        W: core::hash::Hash + Eq + Borrow<K>,
    {
        let dev = self.vulkan.device.clone();
        let pool = commands::SingleUseCommandPool::new(dev.clone(), dev.graphics_queue().clone())?;
        if let Some(ctx) = self.window_contexts.get(window) {
            let (frame, suboptimal) =
                smol::block_on(ctx.current_swapchain.read().clone().acquire_image())?;
            // suboptimal is only logged here; recreation happens elsewhere
            if suboptimal {
                tracing::warn!(
                    "swapchain ({:?}) is suboptimal!",
                    ctx.current_swapchain.read().swapchain
                );
            }
            // seed from the surface handle so each window keeps a stable
            // debug clear color across frames
            let [r, g, b] = rand::prelude::StdRng::seed_from_u64(ctx.surface.surface.as_raw())
                .gen::<[f32; 3]>();
            let clear_color = Rgba([r, g, b, 1.0]);
            // take() so the egui frame data is consumed exactly once
            let egui_ctx = self.egui_state.render_state.take();
            let cmd = util::timed("record command buffer", || {
                let cmd = pool.alloc()?;
                // UNDEFINED -> TRANSFER_DST for the clear
                cmd.image_barrier(
                    frame.image.handle(),
                    vk::ImageAspectFlags::COLOR,
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::empty(),
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::TRANSFER_WRITE,
                    vk::ImageLayout::UNDEFINED,
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    None,
                );
                cmd.clear_color_image(
                    frame.image.handle(),
                    frame.format,
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    clear_color,
                    &[images::SUBRESOURCERANGE_COLOR_ALL],
                );
                // NOTE(review): this transition to COLOR_ATTACHMENT_OPTIMAL
                // uses FRAGMENT_SHADER/SHADER_WRITE as the destination scope;
                // COLOR_ATTACHMENT_OUTPUT/COLOR_ATTACHMENT_WRITE looks like
                // the intended pairing — confirm against validation layers.
                cmd.image_barrier(
                    frame.image.handle(),
                    vk::ImageAspectFlags::COLOR,
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::TRANSFER_WRITE,
                    vk::PipelineStageFlags2::FRAGMENT_SHADER,
                    vk::AccessFlags2::SHADER_WRITE,
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
                    None,
                );
                if let Some(ctx) = egui_ctx.as_ref() {
                    // texture_ids is only read through the descriptor set;
                    // this touch keeps it part of the render state's lifetime
                    _ = &ctx.texture_ids;
                    let color_attachment = &vk::RenderingAttachmentInfo::default()
                        .image_layout(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL)
                        .image_view(frame.view)
                        .load_op(vk::AttachmentLoadOp::LOAD)
                        .store_op(vk::AttachmentStoreOp::STORE);
                    cmd.begin_rendering(
                        vk::RenderingInfo::default()
                            .color_attachments(core::slice::from_ref(color_attachment))
                            .layer_count(1)
                            .render_area(vk::Rect2D::default().extent(frame.swapchain.extent)),
                    );
                    // dynamic state: full-extent scissor and viewport
                    cmd.set_scissors(&[vk::Rect2D::default()
                        .offset(vk::Offset2D::default())
                        .extent(frame.swapchain.extent)]);
                    cmd.set_viewport(&[vk::Viewport::default()
                        .x(0.0)
                        .y(0.0)
                        .min_depth(0.0)
                        .max_depth(1.0)
                        .width(frame.swapchain.extent.width as f32)
                        .height(frame.swapchain.extent.height as f32)]);
                    cmd.bind_pipeline(&self.egui_state.pipeline);
                    cmd.bind_indices(ctx.indices.buffer(), 0, vk::IndexType::UINT32);
                    cmd.bind_vertices(ctx.vertices.buffer(), 0);
                    // push the screen size (as f32 bit patterns) for the
                    // vertex shader's pixels-to-NDC transform
                    cmd.push_constants(
                        &self.egui_state.pipeline_layout,
                        vk::ShaderStageFlags::VERTEX,
                        0,
                        bytemuck::cast_slice(
                            &[
                                frame.swapchain.extent.width as f32,
                                frame.swapchain.extent.height as f32,
                            ]
                            .map(|f| f.to_bits()),
                        ),
                    );
                    cmd.bind_descriptor_sets(
                        &self.egui_state.pipeline_layout,
                        vk::PipelineBindPoint::GRAPHICS,
                        &[self.egui_state.descriptor_set],
                    );
                    cmd.draw_indexed_indirect(
                        ctx.draw_calls.buffer(),
                        0,
                        ctx.num_draw_calls as u32,
                        size_of::<vk::DrawIndexedIndirectCommand>() as u32,
                    );
                    cmd.end_rendering();
                }
                // transition to PRESENT_SRC for the presentation engine
                cmd.image_barrier(
                    frame.image.handle(),
                    vk::ImageAspectFlags::COLOR,
                    vk::PipelineStageFlags2::FRAGMENT_SHADER,
                    vk::AccessFlags2::SHADER_WRITE,
                    vk::PipelineStageFlags2::BOTTOM_OF_PIPE,
                    vk::AccessFlags2::empty(),
                    vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL,
                    vk::ImageLayout::PRESENT_SRC_KHR,
                    None,
                );
                Result::Ok(cmd)
            })?;
            // wait on the acquire semaphore, signal the release semaphore
            let future = cmd.submit_async(
                Some((frame.acquire, vk::PipelineStageFlags::TRANSFER)),
                Some(frame.release),
                Arc::new(sync::Fence::create(dev.clone())?),
            )?;
            // call pre_present_notify
            pre_present_cb();
            let wait = Some(frame.release);
            frame.present(wait)?;
            future.block()?;
            // the GPU is done (fence waited): safe to drop frame textures
            egui_ctx.map(|ctx| {
                for id in ctx.textures_to_free {
                    self.texture_handler.remove_texture(id);
                }
            });
        }
        Ok(())
    }
pub fn new_window_context(
&mut self,
extent: vk::Extent2D,
window_id: W,
window: raw_window_handle::WindowHandle,
) -> Result<()>
where
W: core::hash::Hash + Eq,
{
use std::collections::hash_map::Entry;
match self.window_contexts.entry(window_id) {
Entry::Vacant(entry) => {
let ctx = WindowContext::new(
self.vulkan.instance.clone(),
self.vulkan.device.clone(),
extent,
window.as_raw(),
self.display,
)?;
entry.insert(ctx);
}
_ => {}
}
Ok(())
}
}
mod debug {
    //! Routing of Vulkan `VK_EXT_debug_utils` messages into `tracing`.
    use ash::vk;
    use tracing::{event, Level};
    /// Converts a possibly-null, NUL-terminated C string into a lossy UTF-8
    /// `Cow`; a null pointer becomes the empty string.
    ///
    /// # Safety
    /// Non-null `str` must point to a valid NUL-terminated string that
    /// outlives the returned `Cow`'s borrow.
    unsafe fn str_from_raw_parts<'a>(str: *const i8) -> std::borrow::Cow<'a, str> {
        use std::{borrow::Cow, ffi};
        if str.is_null() {
            Cow::from("")
        } else {
            ffi::CStr::from_ptr(str).to_string_lossy()
        }
    }
    /// Callback registered with the debug-utils messenger; forwards each
    /// message to `tracing` at the level matching the Vulkan severity.
    /// Always returns `vk::FALSE` so the triggering call is never aborted.
    pub(super) unsafe extern "system" fn debug_callback(
        message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
        message_type: vk::DebugUtilsMessageTypeFlagsEXT,
        callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT<'_>,
        user_data: *mut core::ffi::c_void,
    ) -> vk::Bool32 {
        _ = user_data;
        let callback_data = *callback_data;
        let message_id_number = callback_data.message_id_number;
        let message_id_name = str_from_raw_parts(callback_data.p_message_id_name);
        let message = str_from_raw_parts(callback_data.p_message);
        // NOTE(review): this assumes exactly one severity bit is set per
        // callback invocation; any other combination hits `unreachable!()`.
        match message_severity {
            vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => {
                event!(target: "VK::DebugUtils", Level::ERROR, "{message_type:?} [{message_id_name}({message_id_number})]: {message}");
            }
            vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => {
                event!(target: "VK::DebugUtils", Level::TRACE, "{message_type:?} [{message_id_name}({message_id_number})]: {message}");
            }
            vk::DebugUtilsMessageSeverityFlagsEXT::INFO => {
                event!(target: "VK::DebugUtils", Level::INFO, "{message_type:?} [{message_id_name}({message_id_number})]: {message}");
            }
            vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => {
                event!(target: "VK::DebugUtils", Level::WARN, "{message_type:?} [{message_id_name}({message_id_number})]: {message}");
            }
            _ => unreachable!(),
        }
        vk::FALSE
    }
}
pub mod utils {
    #![allow(dead_code)]
    //! Helpers for comparing and intersecting Vulkan physical-device feature
    //! structs (core 1.0 through 1.3), plus small iterator utilities.
    use ash::vk;

    /// Iterator extension: split the items into two `Vec`s by a predicate.
    pub trait SplitCollect: Iterator {
        /// Collects items for which `pred` returns `true` into the first
        /// `Vec` and all other items into the second, preserving encounter
        /// order within each half.
        fn collect2<F>(self, pred: F) -> (Vec<Self::Item>, Vec<Self::Item>)
        where
            Self: Sized,
            F: FnMut(&Self::Item) -> bool,
        {
            // `Iterator::partition` is exactly this order-preserving split.
            self.partition(pred)
        }
    }

    /// Generates a pair of `pub fn`s — field-wise equality and field-wise
    /// bitwise-AND ("feature intersection") — for one feature struct.
    ///
    /// The field list must name every feature flag (`vk::Bool32`) of the
    /// struct and nothing else; in particular not `s_type`/`p_next`, which
    /// the AND function fills from `Default` and equality ignores. Keeping a
    /// single list per struct guarantees eq/AND can never drift apart.
    macro_rules! feature_ops {
        (
            $eq_fn:ident, $and_fn:ident, $param:ty, $ret:ty,
            [$($field:ident),+ $(,)?]
        ) => {
            /// Returns `true` iff every listed feature flag is equal between
            /// `lhs` and `rhs` (extension-chain pointers are ignored).
            pub fn $eq_fn(lhs: &$param, rhs: &$param) -> bool {
                true $(&& lhs.$field == rhs.$field)+
            }
            /// Returns the feature intersection: each listed flag is the
            /// bitwise AND of the two inputs; all other fields keep their
            /// `Default` values (correct `s_type`, null `p_next`).
            pub fn $and_fn(lhs: &$param, rhs: &$param) -> $ret {
                let mut out = <$ret>::default();
                $(out.$field = lhs.$field & rhs.$field;)+
                out
            }
        };
    }

    feature_ops!(
        eq_device_features10,
        bitand_device_features10,
        vk::PhysicalDeviceFeatures,
        vk::PhysicalDeviceFeatures,
        [
            robust_buffer_access,
            full_draw_index_uint32,
            image_cube_array,
            independent_blend,
            geometry_shader,
            tessellation_shader,
            sample_rate_shading,
            dual_src_blend,
            logic_op,
            multi_draw_indirect,
            draw_indirect_first_instance,
            depth_clamp,
            depth_bias_clamp,
            fill_mode_non_solid,
            depth_bounds,
            wide_lines,
            large_points,
            alpha_to_one,
            multi_viewport,
            sampler_anisotropy,
            texture_compression_etc2,
            texture_compression_astc_ldr,
            texture_compression_bc,
            occlusion_query_precise,
            pipeline_statistics_query,
            vertex_pipeline_stores_and_atomics,
            fragment_stores_and_atomics,
            shader_tessellation_and_geometry_point_size,
            shader_image_gather_extended,
            shader_storage_image_extended_formats,
            shader_storage_image_multisample,
            shader_storage_image_read_without_format,
            shader_storage_image_write_without_format,
            shader_uniform_buffer_array_dynamic_indexing,
            shader_sampled_image_array_dynamic_indexing,
            shader_storage_buffer_array_dynamic_indexing,
            shader_storage_image_array_dynamic_indexing,
            shader_clip_distance,
            shader_cull_distance,
            shader_float64,
            shader_int64,
            shader_int16,
            shader_resource_residency,
            shader_resource_min_lod,
            sparse_binding,
            sparse_residency_buffer,
            sparse_residency_image2_d,
            sparse_residency_image3_d,
            sparse_residency2_samples,
            sparse_residency4_samples,
            sparse_residency8_samples,
            sparse_residency16_samples,
            sparse_residency_aliased,
            variable_multisample_rate,
            inherited_queries,
        ]
    );

    feature_ops!(
        eq_device_features11,
        bitand_device_features11,
        vk::PhysicalDeviceVulkan11Features,
        vk::PhysicalDeviceVulkan11Features<'static>,
        [
            storage_buffer16_bit_access,
            uniform_and_storage_buffer16_bit_access,
            storage_push_constant16,
            storage_input_output16,
            multiview,
            multiview_geometry_shader,
            multiview_tessellation_shader,
            variable_pointers_storage_buffer,
            variable_pointers,
            protected_memory,
            sampler_ycbcr_conversion,
            shader_draw_parameters,
        ]
    );

    feature_ops!(
        eq_device_features12,
        bitand_device_features12,
        vk::PhysicalDeviceVulkan12Features,
        vk::PhysicalDeviceVulkan12Features<'static>,
        [
            sampler_mirror_clamp_to_edge,
            draw_indirect_count,
            storage_buffer8_bit_access,
            uniform_and_storage_buffer8_bit_access,
            storage_push_constant8,
            shader_buffer_int64_atomics,
            shader_shared_int64_atomics,
            shader_float16,
            shader_int8,
            descriptor_indexing,
            shader_input_attachment_array_dynamic_indexing,
            shader_uniform_texel_buffer_array_dynamic_indexing,
            shader_storage_texel_buffer_array_dynamic_indexing,
            shader_uniform_buffer_array_non_uniform_indexing,
            shader_sampled_image_array_non_uniform_indexing,
            shader_storage_buffer_array_non_uniform_indexing,
            shader_storage_image_array_non_uniform_indexing,
            shader_input_attachment_array_non_uniform_indexing,
            shader_uniform_texel_buffer_array_non_uniform_indexing,
            shader_storage_texel_buffer_array_non_uniform_indexing,
            descriptor_binding_uniform_buffer_update_after_bind,
            descriptor_binding_sampled_image_update_after_bind,
            descriptor_binding_storage_image_update_after_bind,
            descriptor_binding_storage_buffer_update_after_bind,
            descriptor_binding_uniform_texel_buffer_update_after_bind,
            descriptor_binding_storage_texel_buffer_update_after_bind,
            descriptor_binding_update_unused_while_pending,
            descriptor_binding_partially_bound,
            descriptor_binding_variable_descriptor_count,
            runtime_descriptor_array,
            sampler_filter_minmax,
            scalar_block_layout,
            imageless_framebuffer,
            uniform_buffer_standard_layout,
            shader_subgroup_extended_types,
            separate_depth_stencil_layouts,
            host_query_reset,
            timeline_semaphore,
            buffer_device_address,
            buffer_device_address_capture_replay,
            buffer_device_address_multi_device,
            vulkan_memory_model,
            vulkan_memory_model_device_scope,
            vulkan_memory_model_availability_visibility_chains,
            shader_output_viewport_index,
            shader_output_layer,
            subgroup_broadcast_dynamic_id,
        ]
    );

    feature_ops!(
        eq_device_features13,
        bitand_device_features13,
        vk::PhysicalDeviceVulkan13Features,
        vk::PhysicalDeviceVulkan13Features<'static>,
        [
            robust_image_access,
            inline_uniform_block,
            descriptor_binding_inline_uniform_block_update_after_bind,
            pipeline_creation_cache_control,
            private_data,
            shader_demote_to_helper_invocation,
            shader_terminate_invocation,
            subgroup_size_control,
            compute_full_subgroups,
            synchronization2,
            texture_compression_astc_hdr,
            shader_zero_initialize_workgroup_memory,
            dynamic_rendering,
            shader_integer_dot_product,
            maintenance4,
        ]
    );
}
#[cfg(test)]
mod test_swapchain {
    use super::*;

    /// Builds a minimal `Vulkan` instance plus a 1×1 headless window context,
    /// so swapchain plumbing can be exercised without a display server.
    fn create_headless_vk() -> Result<(Vulkan, WindowContext)> {
        let vulkan = Vulkan::new(
            "testing",
            &[],
            &[ash::ext::headless_surface::NAME, khr::surface::NAME],
            None,
        )?;
        let surface = Arc::new(Surface::headless(vulkan.instance.clone())?);
        let extent = vk::Extent2D::default().width(1).height(1);
        let swapchain = Arc::new(Swapchain::new(
            vulkan.device.clone(),
            surface.clone(),
            vulkan.device.phy(),
            extent,
        )?);
        // A dummy web handle is enough here: nothing dereferences it in the
        // headless path.
        let window_ctx = WindowContext {
            window_handle: RawWindowHandle::Web(raw_window_handle::WebWindowHandle::new(0)),
            surface,
            current_swapchain: RwLock::new(swapchain),
        };
        Ok((vulkan, window_ctx))
    }

    /// Pulls ~1000 frames from the async acquisition channel, presenting each
    /// and logging per-frame latency, then cancels the acquisition task.
    #[tracing_test::traced_test]
    #[test]
    fn async_swapchain_acquiring() {
        let (_vlk, ctx) = create_headless_vk().expect("init");
        let ctx = Arc::new(ctx);
        let (rx, handle) = ctx.clone().images();
        eprintln!("hello world!");
        // Same round count as the original counter loop: 1001 iterations.
        for _ in 0..=1000 {
            let start = std::time::Instant::now();
            let frame = rx.recv_blocking().expect("recv");
            _ = frame.present(None);
            tracing::info!("mspf: {:.3}ms", start.elapsed().as_micros() as f32 / 1e3);
        }
        smol::block_on(handle.cancel());
    }
}