2670 lines
102 KiB
Rust
2670 lines
102 KiB
Rust
#![feature(
|
|
c_str_module,
|
|
closure_lifetime_binder,
|
|
let_chains,
|
|
negative_impls,
|
|
map_try_insert
|
|
)]
|
|
#![allow(unused)]
|
|
use std::{
|
|
borrow::Borrow,
|
|
collections::{BTreeMap, BTreeSet, HashMap},
|
|
ffi::{CStr, CString},
|
|
fmt::Debug,
|
|
marker::PhantomData,
|
|
ops::Deref,
|
|
sync::{
|
|
atomic::{AtomicU32, AtomicU64},
|
|
Arc,
|
|
},
|
|
};
|
|
|
|
use egui::Color32;
|
|
use parking_lot::{Mutex, MutexGuard, RwLock};
|
|
|
|
use ash::{
|
|
khr,
|
|
prelude::VkResult,
|
|
vk::{self, Handle},
|
|
Entry,
|
|
};
|
|
use dyn_clone::DynClone;
|
|
use rand::{Rng, SeedableRng};
|
|
use raw_window_handle::{DisplayHandle, RawDisplayHandle};
|
|
use tinyvec::{array_vec, ArrayVec};
|
|
use tracing::info;
|
|
|
|
mod buffers;
|
|
mod commands;
|
|
mod device;
|
|
mod images;
|
|
mod render_graph;
|
|
mod sync;
|
|
mod util;
|
|
|
|
use device::{Device, DeviceAndQueues, DeviceQueueFamilies, WeakDevice};
|
|
|
|
mod texture {
    use std::{collections::BTreeMap, sync::Arc};

    use crate::{def_monotonic_id, images::Image2D, Device};

    // Monotonically increasing identifier for textures (see `def_monotonic_id!`).
    def_monotonic_id!(TextureId);

    /// Owns all loaded 2D textures, keyed by their `TextureId`.
    pub struct TextureManager {
        pub textures: BTreeMap<TextureId, Arc<Image2D>>,
        // NOTE(review): `dev` is not used by any method visible here —
        // presumably retained for future texture creation/destruction; confirm.
        dev: Device,
    }

    impl TextureManager {
        /// Creates an empty texture manager bound to `dev`.
        pub fn new(dev: Device) -> Self {
            Self {
                dev,
                textures: BTreeMap::new(),
            }
        }
    }
}
|
|
|
|
use render_graph::Rgba;
|
|
|
|
/// Top-level error type for this renderer module.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// The swapchain still works but no longer matches the surface exactly
    /// (e.g. after a resize) and should be recreated.
    #[error("Swapchain suboptimal.")]
    SuboptimalSwapchain,
    /// The Vulkan loader library could not be loaded at runtime.
    #[error(transparent)]
    LoadingError(#[from] ash::LoadingError),
    /// A raw `VkResult` error code returned by a Vulkan call.
    #[error(transparent)]
    Result(#[from] ash::vk::Result),
    /// A byte buffer expected to contain a NUL-terminated C string did not.
    #[error(transparent)]
    CStrError(#[from] core::ffi::c_str::FromBytesUntilNulError),
    /// A Rust string destined for C contained an interior NUL byte.
    #[error(transparent)]
    NulError(#[from] std::ffi::NulError),
    /// No physical device satisfied the selection criteria.
    #[error("No Physical Device found.")]
    NoPhysicalDevice,
}

/// Module-local result alias over [`Error`]; shadows `core::result::Result`
/// in type position within this file.
type Result<T> = core::result::Result<T, Error>;
|
|
|
|
/// A list of raw, NUL-terminated name pointers suitable for Vulkan create
/// infos. The lifetime parameter ties the pointers to the borrowed strings
/// they were taken from so they cannot dangle.
struct VkNameList<'a> {
    names: Vec<*const i8>,
    _pd: PhantomData<&'a ()>,
}

impl<'a> VkNameList<'a> {
    /// Collects the raw pointer of every string in `strs`.
    fn from_strs(strs: &[&'a CStr]) -> Self {
        let mut names = Vec::with_capacity(strs.len());
        for s in strs {
            names.push(s.as_ptr());
        }

        Self {
            names,
            _pd: PhantomData,
        }
    }
}
|
|
|
|
/// A device extension request: name plus the spec version.
// NOTE(review): no uses of this type are visible in this chunk — confirm it
// is still needed.
#[derive(Debug, Clone)]
struct DeviceExtension<'a> {
    name: &'a core::ffi::CStr,
    version: u32,
}
|
|
|
|
/// Builds an `ExtensionProperties` record with the given name and spec
/// version. Panics (via `unwrap`) if `name` does not fit into the fixed-size
/// extension-name buffer.
fn make_extention_properties(name: &CStr, version: u32) -> vk::ExtensionProperties {
    let props = vk::ExtensionProperties::default().spec_version(version);
    props.extension_name(name).unwrap()
}
|
|
|
|
/// returns true if lhs and rhs have the same name and lhs spec_version is less
|
|
/// than or equal to rhs spec_version
|
|
fn compatible_extension_properties(
|
|
lhs: &vk::ExtensionProperties,
|
|
rhs: &vk::ExtensionProperties,
|
|
) -> bool {
|
|
let Some(lhs_name) = lhs.extension_name_as_c_str().ok() else {
|
|
return false;
|
|
};
|
|
let Some(rhs_name) = rhs.extension_name_as_c_str().ok() else {
|
|
return false;
|
|
};
|
|
|
|
if lhs_name == rhs_name {
|
|
lhs.spec_version <= rhs.spec_version
|
|
} else {
|
|
false
|
|
}
|
|
}
|
|
|
|
#[derive(Clone, Debug)]
|
|
struct Queue(Arc<Mutex<vk::Queue>>, u32);
|
|
|
|
impl Queue {
|
|
fn new(device: &ash::Device, family: u32, index: u32) -> Self {
|
|
Self(
|
|
Arc::new(Mutex::new(unsafe {
|
|
device.get_device_queue(family, index)
|
|
})),
|
|
family,
|
|
)
|
|
}
|
|
|
|
pub fn family(&self) -> u32 {
|
|
self.1
|
|
}
|
|
|
|
pub fn with_locked<T, F: FnOnce(vk::Queue) -> T>(&self, map: F) -> T {
|
|
let lock = self.0.lock();
|
|
map(*lock)
|
|
}
|
|
|
|
pub fn lock(&self) -> MutexGuard<'_, vk::Queue> {
|
|
self.0.lock()
|
|
}
|
|
}
|
|
|
|
// Helper traits bundling the bounds required to store feature/property
// extension structs as boxed trait objects in `PhysicalDeviceFeatures` /
// `PhysicalDeviceProperties`.
trait ExtendsDeviceFeatures2Debug: vk::ExtendsPhysicalDeviceFeatures2 + Debug {}
trait ExtendsDeviceProperties2Debug:
    vk::ExtendsPhysicalDeviceProperties2 + Debug + DynClone + Send + Sync
{
}

// Blanket impls: anything satisfying the bounds can be used as the trait.
impl<T: vk::ExtendsPhysicalDeviceFeatures2 + Debug> ExtendsDeviceFeatures2Debug for T {}
impl<T: vk::ExtendsPhysicalDeviceProperties2 + Debug + DynClone + Send + Sync>
    ExtendsDeviceProperties2Debug for T
{
}
|
|
|
|
/// A set of Vulkan features and device extensions — either requested by the
/// application or queried from a physical device. Two sets can be compared
/// with `compatible_with`.
#[derive(Default, Debug)]
struct PhysicalDeviceFeatures {
    /// Vulkan API version (as produced by `vk::make_api_version`).
    version: u32,
    physical_features_10: vk::PhysicalDeviceFeatures,
    // The per-version feature blocks are optional so a set can express
    // "don't care" for an entire version's features.
    physical_features_11: Option<vk::PhysicalDeviceVulkan11Features<'static>>,
    physical_features_12: Option<vk::PhysicalDeviceVulkan12Features<'static>>,
    physical_features_13: Option<vk::PhysicalDeviceVulkan13Features<'static>>,
    /// Extension-specific feature structs chained onto `PhysicalDeviceFeatures2`.
    extra_features: Vec<Box<dyn ExtendsDeviceFeatures2Debug>>,
    /// Device extensions (name + spec version) requested or reported.
    device_extensions: Vec<vk::ExtensionProperties>,
}
|
|
|
|
impl PhysicalDeviceFeatures {
|
|
fn version(self, version: u32) -> Self {
|
|
Self { version, ..self }
|
|
}
|
|
fn all_default() -> Self {
|
|
Self::default()
|
|
.features11(Default::default())
|
|
.features12(Default::default())
|
|
.features13(Default::default())
|
|
}
|
|
|
|
fn query(instance: &ash::Instance, pdev: vk::PhysicalDevice) -> Result<Self> {
|
|
let mut this = Self::all_default();
|
|
let mut features2 = this.features2();
|
|
let features = unsafe {
|
|
instance.get_physical_device_features2(pdev, &mut features2);
|
|
// allocate and query again
|
|
features2.features
|
|
};
|
|
this = this.features10(features);
|
|
|
|
let extensions = unsafe { instance.enumerate_device_extension_properties(pdev)? };
|
|
this = this.device_extensions(extensions);
|
|
|
|
Ok(this)
|
|
}
|
|
|
|
fn features10(self, physical_features_10: vk::PhysicalDeviceFeatures) -> Self {
|
|
Self {
|
|
physical_features_10,
|
|
..self
|
|
}
|
|
}
|
|
fn features11(self, physical_features_11: vk::PhysicalDeviceVulkan11Features<'static>) -> Self {
|
|
Self {
|
|
physical_features_11: Some(physical_features_11),
|
|
..self
|
|
}
|
|
}
|
|
fn features12(self, physical_features_12: vk::PhysicalDeviceVulkan12Features<'static>) -> Self {
|
|
Self {
|
|
physical_features_12: Some(physical_features_12),
|
|
..self
|
|
}
|
|
}
|
|
fn features13(self, physical_features_13: vk::PhysicalDeviceVulkan13Features<'static>) -> Self {
|
|
Self {
|
|
physical_features_13: Some(physical_features_13),
|
|
..self
|
|
}
|
|
}
|
|
fn device_extensions(self, device_extensions: Vec<vk::ExtensionProperties>) -> Self {
|
|
Self {
|
|
device_extensions,
|
|
..self
|
|
}
|
|
}
|
|
|
|
fn with_extension2(mut self, ext: vk::ExtensionProperties) -> Self {
|
|
self.device_extensions.push(ext);
|
|
|
|
self
|
|
}
|
|
|
|
fn with_extensions2<I: IntoIterator<Item = vk::ExtensionProperties>>(
|
|
mut self,
|
|
exts: I,
|
|
) -> Self {
|
|
self.device_extensions.extend(exts);
|
|
|
|
self
|
|
}
|
|
|
|
fn with_extension<F>(mut self, ext: vk::ExtensionProperties, features: F) -> Self
|
|
where
|
|
F: ExtendsDeviceFeatures2Debug + 'static,
|
|
{
|
|
self.extra_features.push(Box::new(features));
|
|
self.device_extensions.push(ext);
|
|
|
|
self
|
|
}
|
|
|
|
fn features2(&mut self) -> vk::PhysicalDeviceFeatures2<'_> {
|
|
let mut features2 =
|
|
vk::PhysicalDeviceFeatures2::default().features(self.physical_features_10);
|
|
|
|
if let Some(ref mut features11) = self.physical_features_11 {
|
|
features2 = features2.push_next(features11);
|
|
}
|
|
if let Some(ref mut features12) = self.physical_features_12 {
|
|
features2 = features2.push_next(features12);
|
|
}
|
|
if let Some(ref mut features13) = self.physical_features_13 {
|
|
features2 = features2.push_next(features13);
|
|
}
|
|
|
|
for features in self.extra_features.iter_mut() {
|
|
features2 = features2.push_next(Box::as_mut(features));
|
|
}
|
|
|
|
features2
|
|
}
|
|
|
|
fn compatible_with(&self, device: &Self) -> bool {
|
|
let sort_exts = |a: &vk::ExtensionProperties, b: &vk::ExtensionProperties| {
|
|
(a.extension_name_as_c_str().unwrap(), a.spec_version)
|
|
.cmp(&(b.extension_name_as_c_str().unwrap(), b.spec_version))
|
|
};
|
|
let mut device_extensions = device.device_extensions.clone();
|
|
device_extensions.sort_by(sort_exts);
|
|
|
|
let unsupported_extensions = self
|
|
.device_extensions
|
|
.iter()
|
|
.filter(|ext| {
|
|
!device_extensions
|
|
.binary_search_by(|t| sort_exts(t, ext))
|
|
.is_ok()
|
|
})
|
|
.cloned()
|
|
.collect::<Vec<_>>();
|
|
|
|
let supports_extensions = unsupported_extensions.is_empty();
|
|
|
|
supports_extensions
|
|
&& utils::eq_device_features10(
|
|
&utils::bitand_device_features10(
|
|
&self.physical_features_10,
|
|
&device.physical_features_10,
|
|
),
|
|
&self.physical_features_10,
|
|
)
|
|
&& self
|
|
.physical_features_11
|
|
.zip(device.physical_features_11)
|
|
.map(|(a, b)| {
|
|
utils::eq_device_features11(&utils::bitand_device_features11(&a, &b), &a)
|
|
})
|
|
.unwrap_or(true)
|
|
&& self
|
|
.physical_features_12
|
|
.zip(device.physical_features_12)
|
|
.map(|(a, b)| {
|
|
utils::eq_device_features12(&utils::bitand_device_features12(&a, &b), &a)
|
|
})
|
|
.unwrap_or(true)
|
|
&& self
|
|
.physical_features_13
|
|
.zip(device.physical_features_13)
|
|
.map(|(a, b)| {
|
|
utils::eq_device_features13(&utils::bitand_device_features13(&a, &b), &a)
|
|
})
|
|
.unwrap_or(true)
|
|
}
|
|
}
|
|
|
|
/// Physical device properties: the base 1.0 properties plus the 1.1/1.2/1.3
/// property structs and any extension-specific property structs to query.
#[derive(Debug, Default)]
struct PhysicalDeviceProperties {
    base: vk::PhysicalDeviceProperties,
    vk11: vk::PhysicalDeviceVulkan11Properties<'static>,
    vk12: vk::PhysicalDeviceVulkan12Properties<'static>,
    vk13: vk::PhysicalDeviceVulkan13Properties<'static>,
    /// Extension property structs chained onto `PhysicalDeviceProperties2`.
    extra_properties: Vec<Box<dyn ExtendsDeviceProperties2Debug>>,
}
|
|
|
|
impl PhysicalDeviceProperties {
    /// Fills `base` and the chained 1.1/1.2/1.3 + extra property structs by
    /// querying `pdev` in place.
    fn query(&mut self, instance: &ash::Instance, pdev: vk::PhysicalDevice) {
        let mut props2 = self.properties2();
        // SAFETY: `pdev` must be a valid physical device from `instance`.
        unsafe {
            instance.get_physical_device_properties2(pdev, &mut props2);
            self.base = props2.properties;
        }
    }

    /// Replaces the list of extension-specific property structs to query.
    fn extra_properties(
        mut self,
        extra_properties: Vec<Box<dyn ExtendsDeviceProperties2Debug>>,
    ) -> Self {
        self.extra_properties = extra_properties;
        self
    }

    /// Adds a single extension-specific property struct to query.
    fn with_properties<F>(mut self, properties: F) -> Self
    where
        F: ExtendsDeviceProperties2Debug + 'static,
    {
        self.extra_properties.push(Box::new(properties));

        self
    }

    /// Builds the `PhysicalDeviceProperties2` pNext chain over the stored
    /// per-version structs and extra property structs.
    fn properties2(&mut self) -> vk::PhysicalDeviceProperties2<'_> {
        let mut props2 = vk::PhysicalDeviceProperties2::default()
            .properties(self.base)
            .push_next(&mut self.vk11)
            .push_next(&mut self.vk12)
            .push_next(&mut self.vk13);

        for props in &mut self.extra_properties {
            props2 = props2.push_next(Box::as_mut(props));
        }

        props2
    }
}
|
|
|
|
/// A selected physical device together with its chosen queue families and
/// queried properties.
#[derive(Debug)]
struct PhysicalDevice {
    pdev: vk::PhysicalDevice,
    queue_families: DeviceQueueFamilies,
    properties: PhysicalDeviceProperties,
}

/// Owns the Vulkan entry point, the instance, the debug-utils messenger and
/// the surface-extension loader.
struct Instance {
    entry: Entry,
    instance: ash::Instance,
    debug_utils: ash::ext::debug_utils::Instance,
    debug_utils_messenger: vk::DebugUtilsMessengerEXT,
    surface: ash::khr::surface::Instance,
}

impl Drop for Instance {
    fn drop(&mut self) {
        // SAFETY: the messenger was created from this instance and is
        // destroyed exactly once, here.
        // NOTE(review): no `destroy_instance` call is visible in this file —
        // confirm the raw instance is torn down elsewhere.
        unsafe {
            self.debug_utils
                .destroy_debug_utils_messenger(self.debug_utils_messenger, None);
        }
    }
}

// Borrow the raw `ash::Instance` via `AsRef`.
impl AsRef<ash::Instance> for Instance {
    fn as_ref(&self) -> &ash::Instance {
        &self.instance
    }
}
// Borrow the surface-extension loader via `AsRef`.
impl AsRef<ash::khr::surface::Instance> for Instance {
    fn as_ref(&self) -> &ash::khr::surface::Instance {
        &self.surface
    }
}
|
|
|
|
/// A bare swapchain handle.
// NOTE(review): no uses of this type are visible in this chunk.
struct RawSwapchain(vk::SwapchainKHR);

// Forbid sharing references across threads; swapchain handles require
// external synchronization (which `SwapchainHandle` provides via a mutex).
impl !Sync for RawSwapchain {}
|
|
|
|
#[derive(Debug)]
|
|
struct SwapchainHandle(Mutex<vk::SwapchainKHR>);
|
|
|
|
impl SwapchainHandle {
|
|
unsafe fn from_handle(swapchain: vk::SwapchainKHR) -> SwapchainHandle {
|
|
Self(Mutex::new(swapchain))
|
|
}
|
|
|
|
fn lock(&self) -> MutexGuard<'_, vk::SwapchainKHR> {
|
|
self.0.lock()
|
|
}
|
|
|
|
fn with_locked<T, F: FnOnce(vk::SwapchainKHR) -> T>(&self, f: F) -> T {
|
|
let lock = self.0.lock();
|
|
f(*lock)
|
|
}
|
|
}
|
|
|
|
/// A Vulkan swapchain together with its images, views and the per-frame
/// synchronization objects used to acquire and present them.
pub struct Swapchain {
    device: Device,
    instance: Arc<Instance>,
    // has a strong ref to the surface because the surface may not outlive the swapchain
    surface: Arc<Surface>,
    swapchain: SwapchainHandle,
    present_mode: vk::PresentModeKHR,
    color_space: vk::ColorSpaceKHR,
    format: vk::Format,
    images: Vec<vk::Image>,
    image_views: Vec<vk::ImageView>,
    extent: vk::Extent2D,
    // the surface's required minimum image count; used to derive how many
    // frames may be in flight (see `max_in_flight_images`)
    min_image_count: u32,

    // sync objects:
    // we need two semaphores per each image, one acquire-semaphore and one release-semaphore.
    // semaphores must be unique to each frame and cannot be reused per swapchain.
    acquire_semaphores: Vec<vk::Semaphore>,
    release_semaphores: Vec<vk::Semaphore>,

    // one fence per in-flight frame, to synchronize image acquisition
    fences: Vec<Arc<sync::Fence>>,

    // index of the next in-flight frame slot (see `acquire_image`)
    current_frame: AtomicU32,

    // for khr_present_id/khr_present_wait
    present_id: AtomicU64,
}
|
|
|
|
impl Drop for Swapchain {
    fn drop(&mut self) {
        unsafe {
            // drain the present queue first so none of the objects destroyed
            // below can still be referenced by in-flight work
            self.device.wait_queue_idle(self.device.present_queue());
            info!("dropping swapchain {:?}", self.swapchain);
            for view in &self.image_views {
                self.device.dev().destroy_image_view(*view, None);
            }

            // the images themselves are owned by the swapchain and are
            // destroyed along with it
            self.swapchain.with_locked(|swapchain| {
                self.device.swapchain().destroy_swapchain(swapchain, None)
            });

            for &semaphore in self
                .acquire_semaphores
                .iter()
                .chain(&self.release_semaphores)
            {
                // this inner `unsafe` is redundant (we're already inside an
                // unsafe block) but harmless
                unsafe {
                    self.device.dev().destroy_semaphore(semaphore, None);
                }
            }
        }
    }
}
|
|
|
|
/// Returns the surface's current extent, or — when the surface leaves the
/// extent up to the swapchain (signalled by a width of `u32::MAX`) —
/// `fallback` clamped to the surface's min/max image extents.
fn current_extent_or_clamped(
    caps: &vk::SurfaceCapabilitiesKHR,
    fallback: vk::Extent2D,
) -> vk::Extent2D {
    if caps.current_extent.width != u32::MAX {
        return caps.current_extent;
    }

    let min = caps.min_image_extent;
    let max = caps.max_image_extent;
    vk::Extent2D {
        width: fallback.width.clamp(min.width, max.width),
        height: fallback.height.clamp(min.height, max.height),
    }
}
|
|
|
|
/// Parameters selected for creating a swapchain on a particular surface.
struct SwapchainParams {
    present_mode: vk::PresentModeKHR,
    format: vk::Format,
    color_space: vk::ColorSpaceKHR,
    /// the number of images to request from the device
    image_count: u32,
    /// the minimum number of images the surface permits
    min_image_count: u32,
    extent: vk::Extent2D,
}

/// An image acquired from the swapchain, bundled with the semaphores of the
/// frame slot it was acquired into.
#[must_use = "This struct represents an acquired image from the swapchain and
must be presented in order to free resources on the device."]
pub struct SwapchainFrame {
    pub swapchain: Arc<Swapchain>,
    /// index of this image within the swapchain's image array
    pub index: u32,
    pub image: vk::Image,
    pub format: vk::Format,
    pub view: vk::ImageView,
    /// signalled by `acquire_next_image` (see `Swapchain::acquire_image`)
    pub acquire: vk::Semaphore,
    /// waited on by `queue_present` (see `Swapchain::present`)
    pub release: vk::Semaphore,
}
|
|
|
|
impl SwapchainFrame {
|
|
fn present(self) {
|
|
self.swapchain.clone().present(self);
|
|
}
|
|
}
|
|
|
|
impl Swapchain {
|
|
    /// Extra images to request beyond the surface's minimum, so this many
    /// frames can be in flight at once.
    const PREFERRED_IMAGES_IN_FLIGHT: u32 = 3;

    /// Chooses present mode, surface format, image count and extent for a
    /// swapchain on `surface`/`pdev`. `requested_extent` is only used when
    /// the surface does not dictate an extent itself, and is then clamped to
    /// the surface's limits.
    fn get_swapchain_params_from_surface(
        instance: &Arc<Instance>,
        surface: vk::SurfaceKHR,
        pdev: vk::PhysicalDevice,
        requested_extent: Option<vk::Extent2D>,
    ) -> Result<SwapchainParams> {
        let caps = unsafe {
            instance
                .surface
                .get_physical_device_surface_capabilities(pdev, surface)?
        };
        let formats = unsafe {
            instance
                .surface
                .get_physical_device_surface_formats(pdev, surface)?
        };
        let present_modes = unsafe {
            instance
                .surface
                .get_physical_device_surface_present_modes(pdev, surface)?
        };

        // prefer low-latency MAILBOX; fall back to FIFO, the only mode the
        // spec guarantees to be available
        let present_mode = present_modes
            .iter()
            .find(|&mode| mode == &vk::PresentModeKHR::MAILBOX)
            .cloned()
            .unwrap_or(vk::PresentModeKHR::FIFO);

        // score formats: 8-bit RGBA/BGRA UNORM is weighted (10) far above an
        // sRGB color space (1), so a UNORM format wins whenever available
        // and ties are broken in favor of SRGB_NONLINEAR
        let format = formats
            .iter()
            .max_by_key(|&&format| {
                let is_rgba_unorm = format.format == vk::Format::R8G8B8A8_UNORM
                    || format.format == vk::Format::B8G8R8A8_UNORM;
                let is_srgb = format.color_space == vk::ColorSpaceKHR::SRGB_NONLINEAR;
                is_rgba_unorm as u8 * 10 + is_srgb as u8
            })
            .or(formats.first())
            .cloned()
            .expect("no surface format available!");

        // 0 here means no limit
        let max_image_count = core::num::NonZero::new(caps.max_image_count)
            .map(|n| n.get())
            .unwrap_or(u32::MAX);

        // we want PREFERRED_IMAGES_IN_FLIGHT images acquired at the same time,
        let image_count =
            (caps.min_image_count + Self::PREFERRED_IMAGES_IN_FLIGHT).min(max_image_count);

        let extent = current_extent_or_clamped(
            &caps,
            requested_extent.unwrap_or(vk::Extent2D::default().width(1).height(1)),
        );

        Ok(SwapchainParams {
            present_mode,
            format: format.format,
            color_space: format.color_space,
            image_count,
            extent,
            min_image_count: caps.min_image_count,
        })
    }
|
|
|
|
    /// Creates a brand-new swapchain for `surface` with the given extent
    /// (no previous swapchain to recycle).
    pub fn new(
        instance: Arc<Instance>,
        device: Device,
        surface: Arc<Surface>,
        pdev: vk::PhysicalDevice,
        extent: vk::Extent2D,
    ) -> Result<Self> {
        Self::create(instance, device, surface, pdev, Some(extent), None)
    }
|
|
|
|
    /// Creates a swapchain plus its image views and per-frame sync objects,
    /// optionally recycling `old_swapchain`. When `extent` is `None`, the
    /// surface's current extent is used.
    fn create(
        instance: Arc<Instance>,
        device: Device,
        surface: Arc<Surface>,
        pdev: vk::PhysicalDevice,
        extent: Option<vk::Extent2D>,
        old_swapchain: Option<&SwapchainHandle>,
    ) -> Result<Self> {
        let SwapchainParams {
            present_mode,
            format,
            color_space,
            image_count,
            min_image_count,
            extent,
        } = Self::get_swapchain_params_from_surface(&instance, surface.surface, pdev, extent)?;

        let (swapchain, images) = {
            // keep the old swapchain locked while its raw handle is handed
            // to vkCreateSwapchainKHR
            let lock = old_swapchain.as_ref().map(|handle| handle.lock());

            Self::create_vkswapchainkhr(
                &device,
                surface.surface,
                &device.queue_families().swapchain_family_indices(),
                extent,
                lock.as_ref().map(|lock| **lock),
                present_mode,
                format,
                color_space,
                image_count,
            )
        }?;

        // one 2D color view per swapchain image
        let image_views = images
            .iter()
            .map(|&image| {
                let info = vk::ImageViewCreateInfo::default()
                    .image(image)
                    .view_type(vk::ImageViewType::TYPE_2D)
                    .format(format)
                    .components(vk::ComponentMapping::default())
                    .subresource_range(
                        vk::ImageSubresourceRange::default()
                            .aspect_mask(vk::ImageAspectFlags::COLOR)
                            .base_mip_level(0)
                            .level_count(1)
                            .base_array_layer(0)
                            .layer_count(1),
                    );

                unsafe { device.dev().create_image_view(&info, None) }
            })
            .collect::<core::result::Result<Vec<vk::ImageView>, _>>()?;

        // frames in flight = images granted beyond the surface's minimum
        let num_images = images.len() as u32;
        let inflight_frames = num_images - min_image_count;

        // one acquire semaphore per in-flight frame, debug-named after the
        // swapchain handle in debug builds
        let acquire_semaphores = {
            (0..inflight_frames)
                .map(|i| unsafe {
                    device
                        .dev()
                        .create_semaphore(&vk::SemaphoreCreateInfo::default(), None)
                        .inspect(|r| {
                            #[cfg(debug_assertions)]
                            {
                                let name = CString::new(format!(
                                    "semaphore-{:x}_{i}-acquire",
                                    swapchain.0.lock().as_raw()
                                ))
                                .unwrap();
                                unsafe {
                                    device.debug_utils().set_debug_utils_object_name(
                                        &vk::DebugUtilsObjectNameInfoEXT::default()
                                            .object_handle(*r)
                                            .object_name(&name),
                                    );
                                }
                            }
                        })
                })
                .collect::<VkResult<Vec<_>>>()?
        };

        // matching release semaphores, one per in-flight frame
        let release_semaphores = {
            (0..inflight_frames)
                .map(|i| unsafe {
                    device
                        .dev()
                        .create_semaphore(&vk::SemaphoreCreateInfo::default(), None)
                        .inspect(|r| {
                            #[cfg(debug_assertions)]
                            {
                                let name = CString::new(format!(
                                    "semaphore-{:x}_{i}-release",
                                    swapchain.0.lock().as_raw()
                                ))
                                .unwrap();
                                unsafe {
                                    device.debug_utils().set_debug_utils_object_name(
                                        &vk::DebugUtilsObjectNameInfoEXT::default()
                                            .object_handle(*r)
                                            .object_name(&name),
                                    );
                                }
                            }
                        })
                })
                .collect::<VkResult<Vec<_>>>()?
        };

        // one fence per in-flight frame, used to wait for image acquisition
        let fences = {
            (0..inflight_frames)
                .map(|i| {
                    Ok(Arc::new(sync::Fence::create(device.clone()).inspect(
                        |r| {
                            #[cfg(debug_assertions)]
                            {
                                let name = CString::new(format!(
                                    "fence-{:x}_{i}",
                                    swapchain.0.lock().as_raw()
                                ))
                                .unwrap();
                                unsafe {
                                    device.debug_utils().set_debug_utils_object_name(
                                        &vk::DebugUtilsObjectNameInfoEXT::default()
                                            .object_handle(r.fence())
                                            .object_name(&name),
                                    );
                                }
                            }
                        },
                    )?))
                })
                .collect::<VkResult<Vec<_>>>()?
        };

        tracing::info!("fences: {fences:?}");

        Ok(Self {
            instance,
            device,
            surface,
            swapchain,
            present_mode,
            color_space,
            format,
            images,
            image_views,
            min_image_count,
            extent,
            acquire_semaphores,
            release_semaphores,
            fences,
            current_frame: AtomicU32::new(0),
            // starts at 1 — presumably because present ids used with
            // khr_present_wait must be non-zero; confirm against the spec
            present_id: AtomicU64::new(1),
        })
    }
|
|
|
|
    /// Number of frames that may be in flight at once: the images granted
    /// beyond the surface's required minimum.
    pub fn max_in_flight_images(&self) -> u32 {
        self.num_images() - self.min_image_count
    }

    /// Total number of images owned by this swapchain.
    pub fn num_images(&self) -> u32 {
        self.images.len() as u32
    }
|
|
|
|
    /// Builds a replacement swapchain for the same surface and device,
    /// passing the current swapchain as `old_swapchain` so the driver can
    /// recycle its resources.
    fn recreate(&self, extent: Option<vk::Extent2D>) -> Result<Self> {
        Self::create(
            self.instance.clone(),
            self.device.clone(),
            self.surface.clone(),
            self.device.phy(),
            extent,
            Some(&self.swapchain),
        )
    }
|
|
|
|
    /// returns a future yielding the frame, and true if the swapchain is
    /// suboptimal and should be recreated.
    fn acquire_image(
        self: Arc<Self>,
    ) -> impl std::future::Future<Output = VkResult<(SwapchainFrame, bool)>> {
        // claim the next in-flight frame slot (wrapping); this happens
        // eagerly, before the returned future is first polled
        // NOTE(review): `Release` success ordering on a plain counter bump
        // is unusual — confirm the intended ordering semantics.
        let frame = self
            .current_frame
            .fetch_update(
                std::sync::atomic::Ordering::Release,
                std::sync::atomic::Ordering::Relaxed,
                |i| Some((i + 1) % self.max_in_flight_images()),
            )
            .unwrap() as usize;

        tracing::info!(frame, "acquiring image for frame {frame}");

        async move {
            let fence = self.fences[frame].clone();
            let acquire = self.acquire_semaphores[frame];
            let release = self.release_semaphores[frame];

            // spawn on threadpool because it might block.
            let (idx, suboptimal) = smol::unblock({
                let this = self.clone();
                let fence = fence.clone();
                move || unsafe {
                    this.swapchain.with_locked(|swapchain| {
                        this.device.swapchain().acquire_next_image(
                            swapchain,
                            u64::MAX,
                            acquire,
                            fence.fence(),
                        )
                    })
                }
            })
            .await?;

            // wait for image to become available.
            sync::FenceFuture::new(fence.clone()).await;

            let idx = idx as usize;
            let image = self.images[idx];
            let view = self.image_views[idx];

            Ok((
                SwapchainFrame {
                    index: idx as u32,
                    swapchain: self.clone(),
                    format: self.format,
                    image,
                    view,
                    acquire,
                    release,
                },
                suboptimal,
            ))
        }
    }
|
|
|
|
    /// Queues `frame` for presentation, waiting on its release semaphore,
    /// and attaches a monotonically increasing present id for use with
    /// khr_present_wait.
    fn present(&self, frame: SwapchainFrame) -> Result<()> {
        // lock order: swapchain handle first, then the present queue
        let swpchain = self.swapchain.lock();
        let queue = self.device.present_queue().lock();

        let wait_semaphores = [frame.release];

        // TODO: make this optional for devices with no support for present_wait/present_id
        let present_id = self
            .present_id
            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        let mut present_id =
            vk::PresentIdKHR::default().present_ids(core::slice::from_ref(&present_id));

        let present_info = vk::PresentInfoKHR::default()
            .image_indices(core::slice::from_ref(&frame.index))
            .swapchains(core::slice::from_ref(&swpchain))
            .wait_semaphores(&wait_semaphores)
            .push_next(&mut present_id);

        // call winits pre_present_notify here

        // SAFETY: queue and swapchain are locked; semaphore/handle validity
        // is maintained by this struct.
        unsafe {
            self.device
                .swapchain()
                .queue_present(*queue, &present_info)?;
        }
        Ok(())
    }
|
|
|
|
    /// Creates the raw `VkSwapchainKHR` and fetches its images. Sharing mode
    /// is EXCLUSIVE when a single queue family uses the swapchain, otherwise
    /// CONCURRENT across `queue_families`.
    fn create_vkswapchainkhr(
        device: &Device,
        surface: vk::SurfaceKHR,
        queue_families: &[u32],
        image_extent: vk::Extent2D,
        old_swapchain: Option<vk::SwapchainKHR>,
        present_mode: vk::PresentModeKHR,
        image_format: vk::Format,
        image_color_space: vk::ColorSpaceKHR,
        image_count: u32,
    ) -> Result<(SwapchainHandle, Vec<vk::Image>)> {
        let create_info = vk::SwapchainCreateInfoKHR::default()
            .surface(surface)
            .present_mode(present_mode)
            .image_color_space(image_color_space)
            .image_format(image_format)
            .min_image_count(image_count)
            .image_usage(vk::ImageUsageFlags::TRANSFER_DST | vk::ImageUsageFlags::COLOR_ATTACHMENT)
            .image_array_layers(1)
            .image_extent(image_extent)
            .image_sharing_mode(if queue_families.len() <= 1 {
                vk::SharingMode::EXCLUSIVE
            } else {
                vk::SharingMode::CONCURRENT
            })
            .queue_family_indices(queue_families)
            .pre_transform(vk::SurfaceTransformFlagsKHR::IDENTITY)
            .composite_alpha(vk::CompositeAlphaFlagsKHR::OPAQUE)
            .old_swapchain(old_swapchain.unwrap_or(vk::SwapchainKHR::null()))
            .clipped(true);

        let (swapchain, images) = unsafe {
            let swapchain = device.swapchain().create_swapchain(&create_info, None)?;

            // debug-name the swapchain with a process-unique counter so logs
            // can distinguish recreated swapchains on the same surface
            #[cfg(debug_assertions)]
            {
                let name = CString::new(format!(
                    "swapchain-{}_{}",
                    surface.as_raw(),
                    SWAPCHAIN_COUNT.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
                ))
                .unwrap();
                device.debug_utils().set_debug_utils_object_name(
                    &vk::DebugUtilsObjectNameInfoEXT::default()
                        .object_handle(swapchain)
                        .object_name(&name),
                );
            }

            let images = device.swapchain().get_swapchain_images(swapchain)?;

            (SwapchainHandle::from_handle(swapchain), images)
        };

        Ok((swapchain, images))
    }
|
|
}
|
|
// Process-wide counter used only to give swapchains unique debug names.
static SWAPCHAIN_COUNT: AtomicU64 = AtomicU64::new(0);

/// A window (or headless) surface. Holds a strong reference to the instance
/// and destroys the raw surface on drop.
struct Surface {
    instance: Arc<Instance>,
    surface: vk::SurfaceKHR,
}
|
|
|
|
impl Surface {
|
|
fn headless(instance: Arc<Instance>) -> Result<Self> {
|
|
unsafe {
|
|
let headless_instance =
|
|
ash::ext::headless_surface::Instance::new(&instance.entry, &instance.instance);
|
|
|
|
let surface = headless_instance
|
|
.create_headless_surface(&vk::HeadlessSurfaceCreateInfoEXT::default(), None)?;
|
|
|
|
Ok(Self { instance, surface })
|
|
}
|
|
}
|
|
|
|
fn create(
|
|
instance: Arc<Instance>,
|
|
display_handle: RawDisplayHandle,
|
|
window_handle: raw_window_handle::RawWindowHandle,
|
|
) -> Result<Self> {
|
|
let surface = unsafe {
|
|
ash_window::create_surface(
|
|
&instance.entry,
|
|
&instance.instance,
|
|
display_handle,
|
|
window_handle,
|
|
None,
|
|
)?
|
|
};
|
|
|
|
Ok(Self { instance, surface })
|
|
}
|
|
}
|
|
|
|
impl Drop for Surface {
    fn drop(&mut self) {
        // SAFETY: the surface was created from `self.instance` and is
        // destroyed exactly once, here; the Arc keeps the instance alive.
        unsafe {
            self.instance.surface.destroy_surface(self.surface, None);
        }
    }
}

/// Top-level renderer context: the Vulkan instance plus the logical device.
pub struct Vulkan {
    instance: Arc<Instance>,
    device: Device,
}
|
|
|
|
impl Drop for Vulkan {
|
|
fn drop(&mut self) {
|
|
unsafe {
|
|
self.device.dev().device_wait_idle();
|
|
}
|
|
}
|
|
}
|
|
|
|
impl Vulkan {
|
|
    /// The standard Khronos validation layer, enabled in `Vulkan::new`.
    const VALIDATION_LAYER_NAME: &'static core::ffi::CStr = c"VK_LAYER_KHRONOS_validation";
    /// RenderDoc's capture layer (declared but not enabled by default).
    #[allow(unused)]
    const RENDERDOC_LAYER_NAME: &'static core::ffi::CStr = c"VK_LAYER_RENDERDOC_Capture";
    /// NVIDIA Nsight GPU Trace layer (declared but not enabled by default).
    #[allow(unused)]
    const NSIGHT_TRACE_LAYER_NAME: &'static core::ffi::CStr =
        c"VK_LAYER_NV_GPU_Trace_release_public_2021_4_2";
    /// NVIDIA Nsight interception layer (declared but not enabled by default).
    #[allow(unused)]
    const NSIGHT_INTERCEPTION_LAYER_NAME: &'static core::ffi::CStr =
        c"VK_LAYER_NV_nomad_release_public_2021_4_2";
|
|
|
|
    /// Creates the Vulkan entry/instance with validation enabled, selects a
    /// physical device and builds the logical device.
    ///
    /// `display_handle` enables platform surface support during device
    /// selection; pass `None` for headless use.
    pub fn new(
        app_name: &str,
        instance_layers: &[&CStr],
        instance_extensions: &[&CStr],
        display_handle: Option<RawDisplayHandle>,
    ) -> Result<Self> {
        // NOTE(review): `instance_layers`/`instance_extensions` are not used
        // anywhere in this body — only the built-in lists below are applied;
        // confirm whether they should be merged in.
        let entry = unsafe { ash::Entry::load()? };

        let app_name = CString::new(app_name)?;
        let app_info = vk::ApplicationInfo::default()
            .api_version(vk::make_api_version(0, 1, 3, 0))
            .application_name(&app_name)
            .engine_name(c"PrimalGame")
            .application_version(0)
            .engine_version(0);

        // TODO: make this a flag somewhere to enable or disable validation layers
        // DEBUG LAYERS/VALIDATION
        // per-layer settings: best-practices (core + AMD) and sync validation
        let validation_settings = [
            vk::LayerSettingEXT::default()
                .layer_name(Self::VALIDATION_LAYER_NAME)
                .setting_name(c"VK_KHRONOS_VALIDATION_VALIDATE_BEST_PRACTICES")
                .ty(vk::LayerSettingTypeEXT::BOOL32)
                .values(&[1]),
            vk::LayerSettingEXT::default()
                .layer_name(Self::VALIDATION_LAYER_NAME)
                .setting_name(c"VK_KHRONOS_VALIDATION_VALIDATE_BEST_PRACTICES_AMD")
                .ty(vk::LayerSettingTypeEXT::BOOL32)
                .values(&[1]),
            vk::LayerSettingEXT::default()
                .layer_name(Self::VALIDATION_LAYER_NAME)
                .setting_name(c"VK_KHRONOS_VALIDATION_VALIDATE_SYNC")
                .ty(vk::LayerSettingTypeEXT::BOOL32)
                .values(&[1]),
        ];
        let mut validation_info =
            vk::LayerSettingsCreateInfoEXT::default().settings(&validation_settings);

        let layers = Self::get_layers(&entry, &[Self::VALIDATION_LAYER_NAME]).unwrap();

        // optional display handle
        let extensions = Self::get_extensions(
            &entry,
            &layers,
            &[ash::ext::debug_utils::NAME, ash::ext::layer_settings::NAME],
            display_handle,
        )
        .unwrap();

        let layers = VkNameList::from_strs(&layers);
        let extensions = VkNameList::from_strs(&extensions);

        let create_info = vk::InstanceCreateInfo::default()
            .application_info(&app_info)
            .enabled_extension_names(&extensions.names)
            .enabled_layer_names(&layers.names)
            .push_next(&mut validation_info);
        let instance = unsafe { entry.create_instance(&create_info, None)? };

        // route ERROR/WARNING/INFO messages of every type to `debug_callback`
        let debug_info = vk::DebugUtilsMessengerCreateInfoEXT::default()
            .message_severity(
                vk::DebugUtilsMessageSeverityFlagsEXT::ERROR
                    | vk::DebugUtilsMessageSeverityFlagsEXT::WARNING
                    | vk::DebugUtilsMessageSeverityFlagsEXT::INFO,
            )
            .message_type(
                vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
                    | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION
                    | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE,
            )
            .pfn_user_callback(Some(debug::debug_callback));

        let debug_utils_instance = ash::ext::debug_utils::Instance::new(&entry, &instance);
        let debug_utils_messenger =
            unsafe { debug_utils_instance.create_debug_utils_messenger(&debug_info, None)? };
        let surface_instance = ash::khr::surface::Instance::new(&entry, &instance);

        let instance = Arc::new(Instance {
            instance,
            debug_utils: debug_utils_instance,
            debug_utils_messenger,
            surface: surface_instance,
            entry,
        });

        // the feature set the logical device must provide: Vulkan 1.3 with
        // mesh shaders, present_id/present_wait, uint8 indices, swapchain
        // and SPIR-V 1.4
        let mut features = PhysicalDeviceFeatures::all_default()
            .version(vk::make_api_version(0, 1, 3, 0))
            .features10(
                vk::PhysicalDeviceFeatures::default()
                    .sampler_anisotropy(true)
                    .multi_draw_indirect(true),
            )
            .features11(vk::PhysicalDeviceVulkan11Features::default().shader_draw_parameters(true))
            .features12(
                vk::PhysicalDeviceVulkan12Features::default()
                    .shader_int8(true)
                    .storage_buffer8_bit_access(true),
            )
            .features13(
                vk::PhysicalDeviceVulkan13Features::default()
                    .dynamic_rendering(true)
                    .maintenance4(true)
                    .synchronization2(true),
            )
            .with_extension(
                make_extention_properties(
                    ash::ext::mesh_shader::NAME,
                    ash::ext::mesh_shader::SPEC_VERSION,
                ),
                vk::PhysicalDeviceMeshShaderFeaturesEXT::default()
                    .mesh_shader(true)
                    .task_shader(true),
            )
            .with_extension(
                make_extention_properties(
                    ash::khr::present_id::NAME,
                    ash::khr::present_id::SPEC_VERSION,
                ),
                vk::PhysicalDevicePresentIdFeaturesKHR::default().present_id(true),
            )
            .with_extension(
                make_extention_properties(
                    ash::khr::present_wait::NAME,
                    ash::khr::present_wait::SPEC_VERSION,
                ),
                vk::PhysicalDevicePresentWaitFeaturesKHR::default().present_wait(true),
            )
            .with_extension(
                make_extention_properties(
                    ash::ext::index_type_uint8::NAME,
                    ash::ext::index_type_uint8::SPEC_VERSION,
                ),
                vk::PhysicalDeviceIndexTypeUint8FeaturesEXT::default().index_type_uint8(true),
            )
            .with_extensions2([
                make_extention_properties(khr::swapchain::NAME, khr::swapchain::SPEC_VERSION),
                make_extention_properties(khr::spirv_1_4::NAME, khr::spirv_1_4::SPEC_VERSION),
            ]);

        // Consider this: switching physical device in game?
        // anything above this point is device agnostic, everything below would have to be recreated
        // additionally, pdev would have to be derived from a device and not a scoring function.

        let pdev = Self::choose_physical_device(
            &instance,
            display_handle,
            &features,
            vec![Box::new(
                vk::PhysicalDeviceMeshShaderPropertiesEXT::default(),
            )],
        )?;

        tracing::debug!("pdev: {pdev:?}");
        let device = Device::new(instance.clone(), pdev, features)?;

        Ok(Self { instance, device })
    }
|
|
|
|
    /// Returns whether queue family `queue_family` of `pdev` can present to
    /// the windowing system identified by `display_handle`.
    ///
    /// Dispatches on the raw display handle variant to the matching
    /// platform-specific surface extension. Xlib, Xcb and DRM are not yet
    /// implemented (`todo!()`); any other platform panics outright.
    fn queue_family_supports_presentation(
        instance: &Instance,
        pdev: vk::PhysicalDevice,
        queue_family: u32,
        display_handle: RawDisplayHandle,
    ) -> bool {
        // SAFETY: the raw display pointer inside the handle must still be
        // live; callers pass a handle owned by the running window system.
        unsafe {
            match display_handle {
                RawDisplayHandle::Xlib(_xlib_display_handle) => {
                    todo!()
                }
                RawDisplayHandle::Xcb(_xcb_display_handle) => todo!(),
                RawDisplayHandle::Wayland(wayland_display_handle) => {
                    ash::khr::wayland_surface::Instance::new(&instance.entry, &instance.instance)
                        .get_physical_device_wayland_presentation_support(
                            pdev,
                            queue_family,
                            // NOTE(review): casts the NonNull wl_display
                            // pointer to the type ash expects — assumes the
                            // handle really is a wl_display; confirm upstream.
                            wayland_display_handle.display.cast().as_mut(),
                        )
                }
                RawDisplayHandle::Drm(_) => {
                    todo!()
                }
                RawDisplayHandle::Windows(_) => {
                    ash::khr::win32_surface::Instance::new(&instance.entry, &instance.instance)
                        .get_physical_device_win32_presentation_support(pdev, queue_family)
                }
                _ => panic!("unsupported platform"),
            }
        }
    }
|
|
|
|
fn select_pdev_queue_families(
|
|
instance: &Instance,
|
|
display_handle: Option<RawDisplayHandle>,
|
|
pdev: vk::PhysicalDevice,
|
|
) -> DeviceQueueFamilies {
|
|
let queue_families = unsafe {
|
|
instance
|
|
.instance
|
|
.get_physical_device_queue_family_properties(pdev)
|
|
};
|
|
|
|
struct QueueFamily {
|
|
num_queues: u32,
|
|
is_present: bool,
|
|
is_compute: bool,
|
|
is_graphics: bool,
|
|
is_transfer: bool,
|
|
}
|
|
|
|
impl QueueFamily {
|
|
fn is_graphics_and_compute(&self) -> bool {
|
|
self.is_compute && self.is_graphics
|
|
}
|
|
}
|
|
|
|
struct QueueFamilies(Vec<QueueFamily>);
|
|
impl QueueFamilies {
|
|
fn find_first<F>(&mut self, mut pred: F) -> Option<u32>
|
|
where
|
|
F: FnMut(&QueueFamily) -> bool,
|
|
{
|
|
if let Some((q, family)) = self
|
|
.0
|
|
.iter_mut()
|
|
.enumerate()
|
|
.filter(|(_, family)| family.num_queues > 0)
|
|
.find(|(_, family)| pred(family))
|
|
{
|
|
family.num_queues -= 1;
|
|
Some(q as u32)
|
|
} else {
|
|
None
|
|
}
|
|
}
|
|
|
|
fn find_best<F>(&mut self, mut pred: F) -> Option<u32>
|
|
where
|
|
F: FnMut(&QueueFamily) -> Option<u32>,
|
|
{
|
|
let (_, q, family) = self
|
|
.0
|
|
.iter_mut()
|
|
.enumerate()
|
|
.filter_map(|(i, family)| {
|
|
if family.num_queues == 0 {
|
|
return None;
|
|
}
|
|
pred(family).map(|score| (score, i, family))
|
|
})
|
|
.max_by_key(|(score, _, _)| *score)?;
|
|
family.num_queues -= 1;
|
|
Some(q as u32)
|
|
}
|
|
}
|
|
|
|
let mut queue_families = QueueFamilies(
|
|
queue_families
|
|
.iter()
|
|
.enumerate()
|
|
.map(|(i, family)| {
|
|
let q = i as u32;
|
|
let is_graphics = family.queue_flags.contains(vk::QueueFlags::GRAPHICS);
|
|
let is_compute = family.queue_flags.contains(vk::QueueFlags::COMPUTE);
|
|
let is_transfer = family.queue_flags.contains(vk::QueueFlags::TRANSFER)
|
|
|| is_compute
|
|
|| is_graphics;
|
|
let is_present = display_handle
|
|
.map(|display_handle| {
|
|
Self::queue_family_supports_presentation(
|
|
instance,
|
|
pdev,
|
|
q,
|
|
display_handle,
|
|
)
|
|
})
|
|
.unwrap_or(false);
|
|
QueueFamily {
|
|
num_queues: family.queue_count,
|
|
is_compute,
|
|
is_graphics,
|
|
is_present,
|
|
is_transfer,
|
|
}
|
|
})
|
|
.collect::<Vec<_>>(),
|
|
);
|
|
|
|
let graphics = queue_families
|
|
.find_best(|family| {
|
|
if !family.is_graphics {
|
|
return None;
|
|
}
|
|
// a queue with Graphics+Compute is guaranteed to exist
|
|
Some(family.is_compute as u32 * 2 + family.is_present as u32)
|
|
})
|
|
.unwrap();
|
|
|
|
// find present queue first because it is rather more important than a secondary compute queue
|
|
let present =
|
|
if !queue_families.0.get(graphics as usize).unwrap().is_present {
|
|
queue_families.find_first(|family| family.is_present)
|
|
} else {
|
|
None
|
|
}.or({
|
|
if display_handle.is_none() {
|
|
// in this case the graphics queue will be used by default
|
|
tracing::info!("no present queue available, using graphics queue as fallback for headless_surface");
|
|
Some(graphics)
|
|
} else {
|
|
tracing::warn!("no present queue available, this is unexpected!");
|
|
None}
|
|
});
|
|
|
|
let async_compute = queue_families.find_first(|family| family.is_compute);
|
|
let transfer = queue_families.find_first(|family| family.is_transfer);
|
|
|
|
let mut unique_families = BTreeMap::<u32, u32>::new();
|
|
|
|
let mut helper = |family: u32| {
|
|
use std::collections::btree_map::Entry;
|
|
let index = match unique_families.entry(family) {
|
|
Entry::Vacant(vacant_entry) => {
|
|
vacant_entry.insert(1);
|
|
0
|
|
}
|
|
Entry::Occupied(mut occupied_entry) => {
|
|
let idx = occupied_entry.get_mut();
|
|
*idx += 1;
|
|
*idx - 1
|
|
}
|
|
};
|
|
|
|
(family, index)
|
|
};
|
|
|
|
let graphics = helper(graphics);
|
|
let async_compute = async_compute.map(|f| helper(f)).unwrap_or(graphics);
|
|
let transfer = transfer.map(|f| helper(f)).unwrap_or(async_compute);
|
|
let present = present.map(|f| helper(f)).unwrap_or(graphics);
|
|
|
|
let families = unique_families
|
|
.into_iter()
|
|
.filter(|&(_family, count)| count > 0)
|
|
.collect::<Vec<_>>();
|
|
|
|
// family of each queue, of which one is allocated for each queue, with
|
|
// graphics being the fallback queue for compute and transfer, and
|
|
// present possibly being `None`, in which case it is Graphics
|
|
let queues = DeviceQueueFamilies {
|
|
families,
|
|
graphics,
|
|
async_compute,
|
|
transfer,
|
|
present,
|
|
};
|
|
|
|
queues
|
|
}
|
|
|
|
    /// Picks the best physical device that satisfies `requirements`.
    ///
    /// Candidates are filtered by requested API version and feature/extension
    /// compatibility, then ranked by device type (discrete > integrated >
    /// virtual > CPU > other). `extra_properties` are cloned into each
    /// candidate's property query chain so callers can read extension
    /// properties (e.g. mesh shader limits) off the winner.
    ///
    /// Returns [`Error::NoPhysicalDevice`] when nothing qualifies.
    fn choose_physical_device(
        instance: &Instance,
        display_handle: Option<RawDisplayHandle>,
        requirements: &PhysicalDeviceFeatures,
        extra_properties: Vec<Box<dyn ExtendsDeviceProperties2Debug>>,
    ) -> Result<PhysicalDevice> {
        let pdevs = unsafe { instance.instance.enumerate_physical_devices()? };

        let (pdev, properties) = pdevs
            .into_iter()
            .map(|pdev| {
                // Each candidate gets its own clone of the extra property
                // structs, since querying writes into them.
                let mut props = PhysicalDeviceProperties::default().extra_properties(
                    extra_properties
                        .iter()
                        .map(|b| dyn_clone::clone_box(&**b))
                        .collect::<Vec<_>>(),
                );
                props.query(&instance.instance, pdev);

                (pdev, props)
            })
            // filter devices which dont support the version of Vulkan we are requesting
            .filter(|(_, props)| props.base.api_version >= requirements.version)
            // filter devices which don't support the device extensions we
            // are requesting
            // TODO: figure out a way to fall back to some
            // device which doesn't support all of the extensions.
            .filter(|(pdev, _)| {
                let query_features =
                    PhysicalDeviceFeatures::query(&instance.instance, *pdev).unwrap();

                requirements.compatible_with(&query_features)
            })
            .max_by_key(|(_, props)| {
                let score = match props.base.device_type {
                    vk::PhysicalDeviceType::DISCRETE_GPU => 5,
                    vk::PhysicalDeviceType::INTEGRATED_GPU => 4,
                    vk::PhysicalDeviceType::VIRTUAL_GPU => 3,
                    vk::PhysicalDeviceType::CPU => 2,
                    vk::PhysicalDeviceType::OTHER => 1,
                    // NOTE(review): DeviceType is an open vk enum; a future
                    // value would hit this arm and panic — confirm acceptable.
                    _ => unreachable!(),
                };

                // score based on limits or other properties

                score
            })
            .ok_or(Error::NoPhysicalDevice)?;

        Ok(PhysicalDevice {
            queue_families: Self::select_pdev_queue_families(instance, display_handle, pdev),
            pdev,
            properties,
        })
    }
|
|
|
|
fn get_available_extensions(
|
|
entry: &ash::Entry,
|
|
layers: &[&CStr],
|
|
) -> Result<Vec<ash::vk::ExtensionProperties>> {
|
|
unsafe {
|
|
let extensions = core::iter::once(entry.enumerate_instance_extension_properties(None))
|
|
.chain(
|
|
layers
|
|
.iter()
|
|
.map(|&layer| entry.enumerate_instance_extension_properties(Some(layer))),
|
|
)
|
|
.filter_map(|result| result.ok())
|
|
.flatten()
|
|
.collect::<Vec<ash::vk::ExtensionProperties>>();
|
|
|
|
Ok(extensions)
|
|
}
|
|
}
|
|
|
|
    /// Splits the requested instance `extensions` — plus whatever
    /// `ash_window` says is required to present to `display_handle` — into
    /// supported and unsupported sets.
    ///
    /// Returns `Ok(supported)` when everything is available, otherwise
    /// `Err((supported, unsupported))`. Any enumeration failure is treated
    /// as "all requested extensions unsupported".
    fn get_extensions<'a>(
        entry: &ash::Entry,
        layers: &[&'a CStr],
        extensions: &[&'a CStr],
        display_handle: Option<RawDisplayHandle>,
    ) -> core::result::Result<Vec<&'a CStr>, (Vec<&'a CStr>, Vec<&'a CStr>)> {
        unsafe {
            let available_extensions = Self::get_available_extensions(entry, layers)
                .map_err(|_| (Vec::<&'a CStr>::new(), extensions.to_vec()))?;

            let available_extension_names = available_extensions
                .iter()
                .map(|layer| layer.extension_name_as_c_str())
                .collect::<core::result::Result<BTreeSet<_>, _>>()
                .map_err(|_| (Vec::<&'a CStr>::new(), extensions.to_vec()))?;

            // Partition the caller's requested extensions.
            let mut out_extensions = Vec::with_capacity(extensions.len());
            let mut unsupported_extensions = Vec::with_capacity(extensions.len());
            for &extension in extensions {
                if available_extension_names.contains(&extension) {
                    out_extensions.push(extension);
                } else {
                    unsupported_extensions.push(extension);
                }
            }

            // Surface extensions the windowing system requires; headless
            // (no display handle) requires none.
            let Ok(required_extension_names) = display_handle
                .map(|display_handle| ash_window::enumerate_required_extensions(display_handle))
                .unwrap_or(Ok(&[]))
            else {
                return Err((out_extensions, unsupported_extensions));
            };

            for &extension in required_extension_names {
                // SAFETY: ash_window returns pointers to static,
                // NUL-terminated extension name strings.
                let extension = core::ffi::CStr::from_ptr(extension);
                if available_extension_names.contains(&extension) {
                    out_extensions.push(extension);
                } else {
                    unsupported_extensions.push(extension);
                }
            }

            if !unsupported_extensions.is_empty() {
                Err((out_extensions, unsupported_extensions))
            } else {
                Ok(out_extensions)
            }
        }
    }
|
|
|
|
fn get_layers<'a>(
|
|
entry: &ash::Entry,
|
|
wants_layers: &[&'a CStr],
|
|
) -> core::result::Result<Vec<&'a CStr>, (Vec<&'a CStr>, Vec<&'a CStr>)> {
|
|
unsafe {
|
|
let available_layers = entry
|
|
.enumerate_instance_layer_properties()
|
|
.map_err(|_| (Vec::<&'a CStr>::new(), wants_layers.to_vec()))?;
|
|
let available_layer_names = available_layers
|
|
.iter()
|
|
.map(|layer| layer.layer_name_as_c_str())
|
|
.collect::<core::result::Result<BTreeSet<_>, _>>()
|
|
.map_err(|_| (Vec::<&'a CStr>::new(), wants_layers.to_vec()))?;
|
|
|
|
let mut out_layers = Vec::with_capacity(wants_layers.len());
|
|
let mut unsupported_layers = Vec::with_capacity(wants_layers.len());
|
|
for &layer in wants_layers {
|
|
if available_layer_names.contains(&layer) {
|
|
out_layers.push(layer);
|
|
} else {
|
|
unsupported_layers.push(layer);
|
|
}
|
|
}
|
|
|
|
if !unsupported_layers.is_empty() {
|
|
Err((out_layers, unsupported_layers))
|
|
} else {
|
|
Ok(out_layers)
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
use raw_window_handle::RawWindowHandle;
|
|
|
|
/// Per-window rendering state: the Vulkan surface for one OS window and the
/// swapchain currently attached to it.
pub struct WindowContext {
    // Identity of the context; `Borrow`/`Hash`/`PartialEq` all delegate to it.
    window_handle: RawWindowHandle,
    surface: Arc<Surface>,
    // this lock guards the swapchain against being replaced
    // underneath WindowContext's functions
    current_swapchain: RwLock<Arc<Swapchain>>,
}
|
|
|
|
impl Drop for WindowContext {
|
|
fn drop(&mut self) {
|
|
unsafe {
|
|
self.current_swapchain
|
|
.read()
|
|
.device
|
|
.dev()
|
|
.device_wait_idle();
|
|
}
|
|
}
|
|
}
|
|
|
|
// SAFETY(review): WindowContext is only non-auto-Send/Sync because
// RawWindowHandle can contain raw platform pointers; this code never
// dereferences them — TODO confirm every supported platform's handle is
// actually safe to move and share across threads.
unsafe impl Send for WindowContext {}
unsafe impl Sync for WindowContext {}
|
|
|
|
// Lets hash-map lookups key a WindowContext by its raw window handle alone;
// consistent with the `PartialEq` and `Hash` impls below.
impl Borrow<RawWindowHandle> for WindowContext {
    fn borrow(&self) -> &RawWindowHandle {
        &self.window_handle
    }
}
|
|
// A window context's identity is its raw window handle; surface and
// swapchain are derived state and are not compared.
impl PartialEq for WindowContext {
    fn eq(&self, other: &Self) -> bool {
        self.window_handle == other.window_handle
    }
}
impl Eq for WindowContext {}
|
|
|
|
impl core::hash::Hash for WindowContext {
    // Must stay consistent with `PartialEq`: hash only the window handle.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.window_handle.hash(state);
    }
}
|
|
|
|
impl WindowContext {
|
|
fn new(
|
|
instance: Arc<Instance>,
|
|
device: Device,
|
|
extent: vk::Extent2D,
|
|
window_handle: raw_window_handle::RawWindowHandle,
|
|
display: RawDisplayHandle,
|
|
) -> Result<Self> {
|
|
let surface = Arc::new(Surface::create(instance.clone(), display, window_handle)?);
|
|
|
|
let swapchain = Arc::new(Swapchain::new(
|
|
instance,
|
|
device.clone(),
|
|
surface.clone(),
|
|
device.phy(),
|
|
extent,
|
|
)?);
|
|
|
|
Ok(Self {
|
|
window_handle,
|
|
surface,
|
|
current_swapchain: RwLock::new(swapchain),
|
|
})
|
|
}
|
|
|
|
/// spawns a task that continuously requests images from the current
|
|
/// swapchain, sending them to a channel. returns the receiver of the
|
|
/// channel, and a handle to the task, allowing for cancellation.
|
|
fn images(
|
|
self: Arc<Self>,
|
|
) -> (
|
|
smol::channel::Receiver<SwapchainFrame>,
|
|
smol::Task<std::result::Result<(), Error>>,
|
|
) {
|
|
let (tx, rx) = smol::channel::bounded(8);
|
|
let task = smol::spawn(async move {
|
|
loop {
|
|
let frame = self.acquire_image().await?;
|
|
tx.send(frame)
|
|
.await
|
|
.expect("channel closed on swapchain acquiring frame");
|
|
}
|
|
Result::Ok(())
|
|
});
|
|
|
|
(rx, task)
|
|
}
|
|
|
|
async fn acquire_image(&self) -> Result<SwapchainFrame> {
|
|
// clone swapchain to keep it alive
|
|
let swapchain = self.current_swapchain.read().clone();
|
|
let (frame, suboptimal) = swapchain.clone().acquire_image().await?;
|
|
if suboptimal {
|
|
let mut lock = self.current_swapchain.write();
|
|
// only recreate our swapchain if it is still same, or else it might have already been recreated.
|
|
if Arc::ptr_eq(&swapchain, &lock) {
|
|
*lock = Arc::new(lock.recreate(None)?);
|
|
}
|
|
}
|
|
|
|
Ok(frame)
|
|
}
|
|
|
|
pub fn recreate_with(&self, extent: Option<vk::Extent2D>) -> Result<()> {
|
|
let mut swapchain = self.current_swapchain.write();
|
|
*swapchain = Arc::new(swapchain.recreate(extent)?);
|
|
|
|
Ok(())
|
|
}
|
|
}
|
|
|
|
/// Renderer-side egui bookkeeping: maps egui's texture ids to this
/// renderer's own [`texture::TextureId`]s so egui-managed textures can be
/// resolved in the `TextureManager`.
#[derive(Debug, Default)]
pub struct EguiState {
    pub textures: HashMap<egui::TextureId, texture::TextureId>,
}
|
|
|
|
/// Top-level renderer: owns the Vulkan instance/device, texture and egui
/// state, and one [`WindowContext`] per live window. `W` is the caller's
/// window-identifier type.
pub struct Renderer<W> {
    pub texture_handler: texture::TextureManager,
    pub egui_state: EguiState,
    // NOTE(review): binding the renderer to one display handle precludes a
    // truly headless renderer — revisit whether this coupling is wanted.
    display: RawDisplayHandle,
    pub window_contexts: HashMap<W, WindowContext>,
    vulkan: Vulkan,
}
|
|
|
|
pub use vk::Extent2D;
|
|
|
|
impl<W> Renderer<W> {
|
|
pub fn new(display: RawDisplayHandle) -> Result<Self> {
|
|
let vulkan = Vulkan::new("Vidya", &[], &[], Some(display))?;
|
|
Ok(Self {
|
|
texture_handler: texture::TextureManager::new(vulkan.device.clone()),
|
|
vulkan,
|
|
egui_state: Default::default(),
|
|
display,
|
|
window_contexts: HashMap::new(),
|
|
})
|
|
}
|
|
|
|
    /// Uploads the texture deltas from an egui frame to GPU textures and
    /// frees textures egui no longer needs.
    ///
    /// Whole-image deltas become new textures registered in the texture
    /// manager; partial deltas are blitted into the already-registered
    /// texture at `delta.pos`. The actual mesh drawing is not implemented
    /// yet (the tessellated `draw_data` and the second command buffer are
    /// currently unused).
    pub fn draw_egui(&mut self, ctx: &egui::Context, output: egui::FullOutput) {
        let pool = commands::SingleUseCommandPool::new(
            self.vulkan.device.clone(),
            self.vulkan.device.graphics_queue().clone(),
        )
        .unwrap();
        let cmd = pool.alloc().unwrap();

        // One (staging buffer, texture) pair per delta; kept alive in
        // `cmd_objects` until the copy submission completes below.
        let cmd_objects = output
            .textures_delta
            .set
            .iter()
            .map(|(egui_id, delta)| {
                let size = delta.image.size();
                // 4 bytes per pixel: uploads are R8G8B8A8.
                let byte_size = size[0] * size[1] * 4;
                let mut staging = buffers::Buffer::new(
                    self.vulkan.device.clone(),
                    byte_size,
                    vk::BufferUsageFlags::TRANSFER_SRC,
                    &[],
                    vk_mem::MemoryUsage::AutoPreferHost,
                    vk_mem::AllocationCreateFlags::MAPPED
                        | vk_mem::AllocationCreateFlags::HOST_ACCESS_SEQUENTIAL_WRITE
                        | vk_mem::AllocationCreateFlags::STRATEGY_FIRST_FIT,
                )
                .expect("staging buffer");
                {
                    // Scope ends the mapping before the buffer is used as a
                    // copy source. NOTE(review): `Arc::get_mut` assumes this
                    // is the only handle to the freshly created buffer.
                    let mut mem = Arc::get_mut(&mut staging)
                        .unwrap()
                        .map()
                        .expect("mapping staging buffer");
                    match &delta.image {
                        egui::ImageData::Color(arc) => {
                            // SAFETY: Color32 is 4 bytes of RGBA; reinterpret
                            // the pixel slice as raw bytes for the copy.
                            let slice = unsafe {
                                core::slice::from_raw_parts(
                                    arc.pixels.as_ptr().cast::<u8>(),
                                    arc.pixels.len() * size_of::<Color32>(),
                                )
                            };
                            mem[..slice.len()].copy_from_slice(slice);
                        }
                        egui::ImageData::Font(font_image) => {
                            // Font images are coverage values; convert to
                            // sRGBA pixel-by-pixel.
                            for (i, c) in font_image.srgba_pixels(None).enumerate() {
                                let bytes = c.to_array();
                                mem[i * 4..(i + 1) * 4].copy_from_slice(&bytes);
                            }
                        }
                    }
                }

                // Whole delta: this image IS the texture and will be sampled.
                // Partial delta: this image is only a blit source into the
                // existing texture.
                let sampled = if delta.is_whole() {
                    vk::ImageUsageFlags::SAMPLED
                } else {
                    vk::ImageUsageFlags::TRANSFER_SRC
                };
                let extent = vk::Extent2D {
                    width: delta.image.width() as u32,
                    height: delta.image.height() as u32,
                };
                let texture = images::Image2D::new_exclusive(
                    &self.vulkan.device,
                    extent,
                    1,
                    1,
                    vk::Format::R8G8B8A8_UNORM,
                    vk::ImageTiling::OPTIMAL,
                    sampled | vk::ImageUsageFlags::TRANSFER_DST,
                    vk_mem::MemoryUsage::AutoPreferDevice,
                    vk_mem::AllocationCreateFlags::empty(),
                    Some(&format!("egui-texture-{egui_id:?}")),
                )
                .expect("image creation");

                // UNDEFINED -> TRANSFER_DST for the upload.
                cmd.image_barrier(
                    texture.image(),
                    vk::ImageAspectFlags::COLOR,
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::empty(),
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::TRANSFER_WRITE,
                    vk::ImageLayout::UNDEFINED,
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    None,
                );
                cmd.copy_buffer_to_image(
                    staging.buffer(),
                    texture.image(),
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    &[vk::BufferImageCopy {
                        buffer_offset: 0,
                        buffer_row_length: delta.image.width() as u32,
                        buffer_image_height: delta.image.height() as u32,
                        image_subresource: vk::ImageSubresourceLayers::default()
                            .aspect_mask(vk::ImageAspectFlags::COLOR)
                            .base_array_layer(0)
                            .mip_level(0)
                            .layer_count(1),
                        image_offset: vk::Offset3D { x: 0, y: 0, z: 0 },
                        image_extent: vk::Extent3D {
                            width: texture.size().width,
                            height: texture.size().height,
                            depth: 1,
                        },
                    }],
                );

                // Resolve (or mint) our id for egui's texture id.
                let id = self
                    .egui_state
                    .textures
                    .get(egui_id)
                    .cloned()
                    .unwrap_or_else(|| {
                        let id = texture::TextureId::new();
                        self.egui_state.textures.insert(*egui_id, id);

                        id
                    });

                if let Some(pos) = delta.pos {
                    // Partial update: blit the uploaded region into the
                    // existing texture at `pos`.
                    // SAFETY: must exist because image is not whole.
                    let existing_texture = self.texture_handler.textures.get(&id).cloned().unwrap();

                    // Uploaded image: TRANSFER_DST -> TRANSFER_SRC for the blit.
                    cmd.image_barrier(
                        texture.image(),
                        vk::ImageAspectFlags::COLOR,
                        vk::PipelineStageFlags2::TRANSFER,
                        vk::AccessFlags2::TRANSFER_WRITE,
                        vk::PipelineStageFlags2::TRANSFER,
                        vk::AccessFlags2::TRANSFER_READ,
                        vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                        vk::ImageLayout::TRANSFER_SRC_OPTIMAL,
                        None,
                    );
                    // NOTE(review): UNDEFINED discards the existing contents
                    // outside the blitted region — confirm this is intended
                    // for partial updates.
                    cmd.image_barrier(
                        existing_texture.image(),
                        vk::ImageAspectFlags::COLOR,
                        vk::PipelineStageFlags2::empty(),
                        vk::AccessFlags2::empty(),
                        vk::PipelineStageFlags2::TRANSFER,
                        vk::AccessFlags2::TRANSFER_WRITE,
                        vk::ImageLayout::UNDEFINED,
                        vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                        None,
                    );
                    cmd.blit_images(
                        &texture,
                        util::Rect2D::new(0, 0, texture.width() as i32, texture.height() as i32),
                        &existing_texture,
                        util::Rect2D::new_from_size(
                            glam::ivec2(pos[0] as i32, pos[1] as i32),
                            glam::ivec2(texture.width() as i32, texture.height() as i32),
                        ),
                    );
                    // Existing texture back to sampling layout.
                    cmd.image_barrier(
                        existing_texture.image(),
                        vk::ImageAspectFlags::COLOR,
                        vk::PipelineStageFlags2::TRANSFER,
                        vk::AccessFlags2::TRANSFER_WRITE,
                        vk::PipelineStageFlags2::FRAGMENT_SHADER,
                        vk::AccessFlags2::SHADER_SAMPLED_READ,
                        vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                        vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
                        None,
                    );
                } else {
                    // Whole update: transition the new texture for sampling
                    // and register it under our id.
                    cmd.image_barrier(
                        texture.image(),
                        vk::ImageAspectFlags::COLOR,
                        vk::PipelineStageFlags2::TRANSFER,
                        vk::AccessFlags2::TRANSFER_WRITE,
                        vk::PipelineStageFlags2::FRAGMENT_SHADER,
                        vk::AccessFlags2::SHADER_SAMPLED_READ,
                        vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                        vk::ImageLayout::SHADER_READ_ONLY_OPTIMAL,
                        None,
                    );
                    self.texture_handler.textures.insert(id, texture.clone());
                    info!("new texture for egui: {egui_id:?} -> {id:?}");
                }

                (staging, texture)
            })
            .collect::<Vec<_>>();

        // Submit all uploads and wait for them before the staging buffers in
        // `cmd_objects` go out of scope.
        let fence = Arc::new(sync::Fence::create(self.vulkan.device.clone()).unwrap());
        let future = cmd.submit_async(None, None, fence).unwrap();

        // NOTE(review): the block result is ignored here — a device loss
        // during upload would go unnoticed.
        future.block();

        let draw_data = ctx.tessellate(output.shapes, output.pixels_per_point);

        // do drawing stuff (not implemented yet; `draw_data` and this command
        // buffer are currently unused)
        let cmd = pool.alloc().unwrap();

        // free after drawing
        let texture_deltas = &output.textures_delta;
        texture_deltas
            .free
            .iter()
            .filter_map(|id| self.egui_state.textures.get(id).cloned())
            .for_each(|id| {
                self.texture_handler.textures.remove(&id);
            });
    }
|
|
|
|
    /// Debug helper: acquires a frame for `window`, clears it to a color
    /// derived deterministically from the surface handle, and presents it.
    ///
    /// `pre_present_cb` is invoked after submission but before presentation
    /// (intended for e.g. winit's `pre_present_notify`). Does nothing when
    /// `window` has no registered context.
    pub fn debug_draw<K, F: FnOnce()>(&mut self, window: &K, pre_present_cb: F) -> Result<()>
    where
        K: core::hash::Hash + Eq,
        W: core::hash::Hash + Eq + Borrow<K>,
    {
        let dev = self.vulkan.device.clone();

        // Crude synchronization: wait for all previous GPU work up front.
        unsafe { dev.dev().device_wait_idle()? };

        let pool = commands::SingleUseCommandPool::new(dev.clone(), dev.graphics_queue().clone())?;

        if let Some(ctx) = self.window_contexts.get(window) {
            let cmd = pool.alloc()?;

            let (frame, suboptimal) =
                smol::block_on(ctx.current_swapchain.read().clone().acquire_image())?;

            if suboptimal {
                tracing::warn!(
                    "swapchain ({:?}) is suboptimal!",
                    ctx.current_swapchain.read().swapchain
                );
            }

            // Seeding from the surface handle gives each window a stable,
            // distinct clear color across frames.
            let [r, g, b] = rand::prelude::StdRng::seed_from_u64(ctx.surface.surface.as_raw())
                .gen::<[f32; 3]>();
            let clear_color = Rgba([r, g, b, 1.0]);
            // NOTE(review): `clear_values` is unused below (`clear_color` is
            // passed instead) — dead local.
            let clear_values = vk::ClearColorValue {
                float32: [r, g, b, 1.0],
            };

            unsafe {
                // NOTE(review): `barriers`/`dependency_info` are never
                // submitted — the equivalent `cmd.image_barrier` call below
                // is what actually records the transition. Dead code.
                let barriers = [images::image_barrier(
                    frame.image,
                    vk::ImageAspectFlags::COLOR,
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::empty(),
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::TRANSFER_WRITE,
                    vk::ImageLayout::UNDEFINED,
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    None,
                )];

                let dependency_info = vk::DependencyInfo::default()
                    .dependency_flags(vk::DependencyFlags::BY_REGION)
                    .image_memory_barriers(&barriers);

                // UNDEFINED -> TRANSFER_DST for the clear.
                cmd.image_barrier(
                    frame.image,
                    vk::ImageAspectFlags::COLOR,
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::empty(),
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::TRANSFER_WRITE,
                    vk::ImageLayout::UNDEFINED,
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    None,
                );
                cmd.clear_color_image(
                    frame.image,
                    frame.format,
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    clear_color,
                    &[images::SUBRESOURCERANGE_COLOR_ALL],
                );
                // TRANSFER_DST -> PRESENT_SRC for presentation.
                cmd.image_barrier(
                    frame.image,
                    vk::ImageAspectFlags::COLOR,
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::TRANSFER_WRITE,
                    vk::PipelineStageFlags2::TRANSFER,
                    vk::AccessFlags2::empty(),
                    vk::ImageLayout::TRANSFER_DST_OPTIMAL,
                    vk::ImageLayout::PRESENT_SRC_KHR,
                    None,
                );

                // Wait on the frame's acquire semaphore, signal its release
                // semaphore for the presentation engine.
                let future = cmd.submit_async(
                    Some((frame.acquire, vk::PipelineStageFlags::TRANSFER)),
                    Some(frame.release),
                    Arc::new(sync::Fence::create(dev.clone())?),
                )?;

                // call pre_present_notify
                pre_present_cb();

                frame.present();
                future.block()?;

                // wait for idle here is unnecessary.
                // dev.dev().device_wait_idle();
            }
        }

        Ok(())
    }
|
|
|
|
pub fn new_window_context(
|
|
&mut self,
|
|
extent: vk::Extent2D,
|
|
window_id: W,
|
|
window: raw_window_handle::WindowHandle,
|
|
) -> Result<()>
|
|
where
|
|
W: core::hash::Hash + Eq,
|
|
{
|
|
use std::collections::hash_map::Entry;
|
|
match self.window_contexts.entry(window_id) {
|
|
Entry::Vacant(entry) => {
|
|
let ctx = WindowContext::new(
|
|
self.vulkan.instance.clone(),
|
|
self.vulkan.device.clone(),
|
|
extent,
|
|
window.as_raw(),
|
|
self.display,
|
|
)?;
|
|
|
|
entry.insert(ctx);
|
|
}
|
|
_ => {}
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
}
|
|
|
|
/// VK_EXT_debug_utils plumbing: forwards validation-layer messages to
/// `tracing`.
mod debug {
    use ash::vk;
    use tracing::{event, Level};

    /// Converts a possibly-null, NUL-terminated C string pointer into a
    /// `Cow<str>`; a null pointer yields an empty string.
    unsafe fn str_from_raw_parts<'a>(str: *const i8) -> std::borrow::Cow<'a, str> {
        use std::{borrow::Cow, ffi};
        if str.is_null() {
            Cow::from("")
        } else {
            ffi::CStr::from_ptr(str).to_string_lossy()
        }
    }

    /// Debug-utils messenger callback: logs each message at the `tracing`
    /// level matching its Vulkan severity.
    pub(super) unsafe extern "system" fn debug_callback(
        message_severity: vk::DebugUtilsMessageSeverityFlagsEXT,
        message_type: vk::DebugUtilsMessageTypeFlagsEXT,
        callback_data: *const vk::DebugUtilsMessengerCallbackDataEXT<'_>,
        user_data: *mut core::ffi::c_void,
    ) -> vk::Bool32 {
        _ = user_data;
        // SAFETY: the loader guarantees `callback_data` points to a valid
        // struct for the duration of this call.
        let callback_data = *callback_data;
        let message_id_number = callback_data.message_id_number;

        let message_id_name = str_from_raw_parts(callback_data.p_message_id_name);
        let message = str_from_raw_parts(callback_data.p_message);

        // NOTE(review): assumes exactly one severity bit is set per callback
        // (the spec delivers a single severity), so the catch-all should be
        // unreachable.
        match message_severity {
            vk::DebugUtilsMessageSeverityFlagsEXT::ERROR => {
                event!(target: "VK::DebugUtils", Level::ERROR, "{message_type:?} [{message_id_name}({message_id_number})]: {message}");
            }
            vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE => {
                event!(target: "VK::DebugUtils", Level::TRACE, "{message_type:?} [{message_id_name}({message_id_number})]: {message}");
            }
            vk::DebugUtilsMessageSeverityFlagsEXT::INFO => {
                event!(target: "VK::DebugUtils", Level::INFO, "{message_type:?} [{message_id_name}({message_id_number})]: {message}");
            }
            vk::DebugUtilsMessageSeverityFlagsEXT::WARNING => {
                event!(target: "VK::DebugUtils", Level::WARN, "{message_type:?} [{message_id_name}({message_id_number})]: {message}");
            }
            _ => unreachable!(),
        }

        // FALSE tells the loader not to abort the call that triggered the
        // message.
        vk::FALSE
    }
}
|
|
|
|
pub mod utils {
|
|
#![allow(dead_code)]
|
|
|
|
use ash::vk;
|
|
pub trait SplitCollect: Iterator {
|
|
fn collect2<F>(self, mut pred: F) -> (Vec<Self::Item>, Vec<Self::Item>)
|
|
where
|
|
Self: Sized,
|
|
F: FnMut(&Self::Item) -> bool,
|
|
{
|
|
let mut left = Vec::new();
|
|
let mut right = Vec::new();
|
|
|
|
for item in self {
|
|
if pred(&item) {
|
|
left.push(item);
|
|
} else {
|
|
right.push(item);
|
|
}
|
|
}
|
|
|
|
(left, right)
|
|
}
|
|
}
|
|
|
|
    /// Field-by-field equality for the Vulkan 1.0 core feature struct.
    ///
    /// Every feature flag is compared explicitly here (NOTE(review):
    /// presumably because ash's generated struct does not implement
    /// `PartialEq`). Keep this list in sync with ash's struct definition
    /// when bumping ash.
    pub fn eq_device_features10(
        lhs: &vk::PhysicalDeviceFeatures,
        rhs: &vk::PhysicalDeviceFeatures,
    ) -> bool {
        lhs.robust_buffer_access == rhs.robust_buffer_access
            && lhs.full_draw_index_uint32 == rhs.full_draw_index_uint32
            && lhs.image_cube_array == rhs.image_cube_array
            && lhs.independent_blend == rhs.independent_blend
            && lhs.geometry_shader == rhs.geometry_shader
            && lhs.tessellation_shader == rhs.tessellation_shader
            && lhs.sample_rate_shading == rhs.sample_rate_shading
            && lhs.dual_src_blend == rhs.dual_src_blend
            && lhs.logic_op == rhs.logic_op
            && lhs.multi_draw_indirect == rhs.multi_draw_indirect
            && lhs.draw_indirect_first_instance == rhs.draw_indirect_first_instance
            && lhs.depth_clamp == rhs.depth_clamp
            && lhs.depth_bias_clamp == rhs.depth_bias_clamp
            && lhs.fill_mode_non_solid == rhs.fill_mode_non_solid
            && lhs.depth_bounds == rhs.depth_bounds
            && lhs.wide_lines == rhs.wide_lines
            && lhs.large_points == rhs.large_points
            && lhs.alpha_to_one == rhs.alpha_to_one
            && lhs.multi_viewport == rhs.multi_viewport
            && lhs.sampler_anisotropy == rhs.sampler_anisotropy
            && lhs.texture_compression_etc2 == rhs.texture_compression_etc2
            && lhs.texture_compression_astc_ldr == rhs.texture_compression_astc_ldr
            && lhs.texture_compression_bc == rhs.texture_compression_bc
            && lhs.occlusion_query_precise == rhs.occlusion_query_precise
            && lhs.pipeline_statistics_query == rhs.pipeline_statistics_query
            && lhs.vertex_pipeline_stores_and_atomics == rhs.vertex_pipeline_stores_and_atomics
            && lhs.fragment_stores_and_atomics == rhs.fragment_stores_and_atomics
            && lhs.shader_tessellation_and_geometry_point_size
                == rhs.shader_tessellation_and_geometry_point_size
            && lhs.shader_image_gather_extended == rhs.shader_image_gather_extended
            && lhs.shader_storage_image_extended_formats
                == rhs.shader_storage_image_extended_formats
            && lhs.shader_storage_image_multisample == rhs.shader_storage_image_multisample
            && lhs.shader_storage_image_read_without_format
                == rhs.shader_storage_image_read_without_format
            && lhs.shader_storage_image_write_without_format
                == rhs.shader_storage_image_write_without_format
            && lhs.shader_uniform_buffer_array_dynamic_indexing
                == rhs.shader_uniform_buffer_array_dynamic_indexing
            && lhs.shader_sampled_image_array_dynamic_indexing
                == rhs.shader_sampled_image_array_dynamic_indexing
            && lhs.shader_storage_buffer_array_dynamic_indexing
                == rhs.shader_storage_buffer_array_dynamic_indexing
            && lhs.shader_storage_image_array_dynamic_indexing
                == rhs.shader_storage_image_array_dynamic_indexing
            && lhs.shader_clip_distance == rhs.shader_clip_distance
            && lhs.shader_cull_distance == rhs.shader_cull_distance
            && lhs.shader_float64 == rhs.shader_float64
            && lhs.shader_int64 == rhs.shader_int64
            && lhs.shader_int16 == rhs.shader_int16
            && lhs.shader_resource_residency == rhs.shader_resource_residency
            && lhs.shader_resource_min_lod == rhs.shader_resource_min_lod
            && lhs.sparse_binding == rhs.sparse_binding
            && lhs.sparse_residency_buffer == rhs.sparse_residency_buffer
            && lhs.sparse_residency_image2_d == rhs.sparse_residency_image2_d
            && lhs.sparse_residency_image3_d == rhs.sparse_residency_image3_d
            && lhs.sparse_residency2_samples == rhs.sparse_residency2_samples
            && lhs.sparse_residency4_samples == rhs.sparse_residency4_samples
            && lhs.sparse_residency8_samples == rhs.sparse_residency8_samples
            && lhs.sparse_residency16_samples == rhs.sparse_residency16_samples
            && lhs.sparse_residency_aliased == rhs.sparse_residency_aliased
            && lhs.variable_multisample_rate == rhs.variable_multisample_rate
            && lhs.inherited_queries == rhs.inherited_queries
    }
|
|
|
|
pub fn eq_device_features11(
|
|
lhs: &vk::PhysicalDeviceVulkan11Features,
|
|
rhs: &vk::PhysicalDeviceVulkan11Features,
|
|
) -> bool {
|
|
lhs.storage_buffer16_bit_access == rhs.storage_buffer16_bit_access
|
|
&& lhs.uniform_and_storage_buffer16_bit_access
|
|
== rhs.uniform_and_storage_buffer16_bit_access
|
|
&& lhs.storage_push_constant16 == rhs.storage_push_constant16
|
|
&& lhs.storage_input_output16 == rhs.storage_input_output16
|
|
&& lhs.multiview == rhs.multiview
|
|
&& lhs.multiview_geometry_shader == rhs.multiview_geometry_shader
|
|
&& lhs.multiview_tessellation_shader == rhs.multiview_tessellation_shader
|
|
&& lhs.variable_pointers_storage_buffer == rhs.variable_pointers_storage_buffer
|
|
&& lhs.variable_pointers == rhs.variable_pointers
|
|
&& lhs.protected_memory == rhs.protected_memory
|
|
&& lhs.sampler_ycbcr_conversion == rhs.sampler_ycbcr_conversion
|
|
&& lhs.shader_draw_parameters == rhs.shader_draw_parameters
|
|
}
|
|
|
|
pub fn eq_device_features12(
|
|
lhs: &vk::PhysicalDeviceVulkan12Features,
|
|
rhs: &vk::PhysicalDeviceVulkan12Features,
|
|
) -> bool {
|
|
lhs.sampler_mirror_clamp_to_edge == rhs.sampler_mirror_clamp_to_edge
|
|
&& lhs.draw_indirect_count == rhs.draw_indirect_count
|
|
&& lhs.storage_buffer8_bit_access == rhs.storage_buffer8_bit_access
|
|
&& lhs.uniform_and_storage_buffer8_bit_access
|
|
== rhs.uniform_and_storage_buffer8_bit_access
|
|
&& lhs.storage_push_constant8 == rhs.storage_push_constant8
|
|
&& lhs.shader_buffer_int64_atomics == rhs.shader_buffer_int64_atomics
|
|
&& lhs.shader_shared_int64_atomics == rhs.shader_shared_int64_atomics
|
|
&& lhs.shader_float16 == rhs.shader_float16
|
|
&& lhs.shader_int8 == rhs.shader_int8
|
|
&& lhs.descriptor_indexing == rhs.descriptor_indexing
|
|
&& lhs.shader_input_attachment_array_dynamic_indexing
|
|
== rhs.shader_input_attachment_array_dynamic_indexing
|
|
&& lhs.shader_uniform_texel_buffer_array_dynamic_indexing
|
|
== rhs.shader_uniform_texel_buffer_array_dynamic_indexing
|
|
&& lhs.shader_storage_texel_buffer_array_dynamic_indexing
|
|
== rhs.shader_storage_texel_buffer_array_dynamic_indexing
|
|
&& lhs.shader_uniform_buffer_array_non_uniform_indexing
|
|
== rhs.shader_uniform_buffer_array_non_uniform_indexing
|
|
&& lhs.shader_sampled_image_array_non_uniform_indexing
|
|
== rhs.shader_sampled_image_array_non_uniform_indexing
|
|
&& lhs.shader_storage_buffer_array_non_uniform_indexing
|
|
== rhs.shader_storage_buffer_array_non_uniform_indexing
|
|
&& lhs.shader_storage_image_array_non_uniform_indexing
|
|
== rhs.shader_storage_image_array_non_uniform_indexing
|
|
&& lhs.shader_input_attachment_array_non_uniform_indexing
|
|
== rhs.shader_input_attachment_array_non_uniform_indexing
|
|
&& lhs.shader_uniform_texel_buffer_array_non_uniform_indexing
|
|
== rhs.shader_uniform_texel_buffer_array_non_uniform_indexing
|
|
&& lhs.shader_storage_texel_buffer_array_non_uniform_indexing
|
|
== rhs.shader_storage_texel_buffer_array_non_uniform_indexing
|
|
&& lhs.descriptor_binding_uniform_buffer_update_after_bind
|
|
== rhs.descriptor_binding_uniform_buffer_update_after_bind
|
|
&& lhs.descriptor_binding_sampled_image_update_after_bind
|
|
== rhs.descriptor_binding_sampled_image_update_after_bind
|
|
&& lhs.descriptor_binding_storage_image_update_after_bind
|
|
== rhs.descriptor_binding_storage_image_update_after_bind
|
|
&& lhs.descriptor_binding_storage_buffer_update_after_bind
|
|
== rhs.descriptor_binding_storage_buffer_update_after_bind
|
|
&& lhs.descriptor_binding_uniform_texel_buffer_update_after_bind
|
|
== rhs.descriptor_binding_uniform_texel_buffer_update_after_bind
|
|
&& lhs.descriptor_binding_storage_texel_buffer_update_after_bind
|
|
== rhs.descriptor_binding_storage_texel_buffer_update_after_bind
|
|
&& lhs.descriptor_binding_update_unused_while_pending
|
|
== rhs.descriptor_binding_update_unused_while_pending
|
|
&& lhs.descriptor_binding_partially_bound == rhs.descriptor_binding_partially_bound
|
|
&& lhs.descriptor_binding_variable_descriptor_count
|
|
== rhs.descriptor_binding_variable_descriptor_count
|
|
&& lhs.runtime_descriptor_array == rhs.runtime_descriptor_array
|
|
&& lhs.sampler_filter_minmax == rhs.sampler_filter_minmax
|
|
&& lhs.scalar_block_layout == rhs.scalar_block_layout
|
|
&& lhs.imageless_framebuffer == rhs.imageless_framebuffer
|
|
&& lhs.uniform_buffer_standard_layout == rhs.uniform_buffer_standard_layout
|
|
&& lhs.shader_subgroup_extended_types == rhs.shader_subgroup_extended_types
|
|
&& lhs.separate_depth_stencil_layouts == rhs.separate_depth_stencil_layouts
|
|
&& lhs.host_query_reset == rhs.host_query_reset
|
|
&& lhs.timeline_semaphore == rhs.timeline_semaphore
|
|
&& lhs.buffer_device_address == rhs.buffer_device_address
|
|
&& lhs.buffer_device_address_capture_replay == rhs.buffer_device_address_capture_replay
|
|
&& lhs.buffer_device_address_multi_device == rhs.buffer_device_address_multi_device
|
|
&& lhs.vulkan_memory_model == rhs.vulkan_memory_model
|
|
&& lhs.vulkan_memory_model_device_scope == rhs.vulkan_memory_model_device_scope
|
|
&& lhs.vulkan_memory_model_availability_visibility_chains
|
|
== rhs.vulkan_memory_model_availability_visibility_chains
|
|
&& lhs.shader_output_viewport_index == rhs.shader_output_viewport_index
|
|
&& lhs.shader_output_layer == rhs.shader_output_layer
|
|
&& lhs.subgroup_broadcast_dynamic_id == rhs.subgroup_broadcast_dynamic_id
|
|
}
|
|
|
|
pub fn eq_device_features13(
|
|
lhs: &vk::PhysicalDeviceVulkan13Features,
|
|
rhs: &vk::PhysicalDeviceVulkan13Features,
|
|
) -> bool {
|
|
lhs.robust_image_access == rhs.robust_image_access
|
|
&& lhs.inline_uniform_block == rhs.inline_uniform_block
|
|
&& lhs.descriptor_binding_inline_uniform_block_update_after_bind
|
|
== rhs.descriptor_binding_inline_uniform_block_update_after_bind
|
|
&& lhs.pipeline_creation_cache_control == rhs.pipeline_creation_cache_control
|
|
&& lhs.private_data == rhs.private_data
|
|
&& lhs.shader_demote_to_helper_invocation == rhs.shader_demote_to_helper_invocation
|
|
&& lhs.shader_terminate_invocation == rhs.shader_terminate_invocation
|
|
&& lhs.subgroup_size_control == rhs.subgroup_size_control
|
|
&& lhs.compute_full_subgroups == rhs.compute_full_subgroups
|
|
&& lhs.synchronization2 == rhs.synchronization2
|
|
&& lhs.texture_compression_astc_hdr == rhs.texture_compression_astc_hdr
|
|
&& lhs.shader_zero_initialize_workgroup_memory
|
|
== rhs.shader_zero_initialize_workgroup_memory
|
|
&& lhs.dynamic_rendering == rhs.dynamic_rendering
|
|
&& lhs.shader_integer_dot_product == rhs.shader_integer_dot_product
|
|
&& lhs.maintenance4 == rhs.maintenance4
|
|
}
|
|
|
|
pub fn bitand_device_features10(
|
|
lhs: &vk::PhysicalDeviceFeatures,
|
|
rhs: &vk::PhysicalDeviceFeatures,
|
|
) -> vk::PhysicalDeviceFeatures {
|
|
use core::ops::BitAnd;
|
|
vk::PhysicalDeviceFeatures {
|
|
robust_buffer_access: lhs.robust_buffer_access.bitand(&rhs.robust_buffer_access),
|
|
full_draw_index_uint32: lhs
|
|
.full_draw_index_uint32
|
|
.bitand(&rhs.full_draw_index_uint32),
|
|
image_cube_array: lhs.image_cube_array.bitand(&rhs.image_cube_array),
|
|
independent_blend: lhs.independent_blend.bitand(&rhs.independent_blend),
|
|
geometry_shader: lhs.geometry_shader.bitand(&rhs.geometry_shader),
|
|
tessellation_shader: lhs.tessellation_shader.bitand(&rhs.tessellation_shader),
|
|
sample_rate_shading: lhs.sample_rate_shading.bitand(&rhs.sample_rate_shading),
|
|
dual_src_blend: lhs.dual_src_blend.bitand(&rhs.dual_src_blend),
|
|
logic_op: lhs.logic_op.bitand(&rhs.logic_op),
|
|
multi_draw_indirect: lhs.multi_draw_indirect.bitand(&rhs.multi_draw_indirect),
|
|
draw_indirect_first_instance: lhs
|
|
.draw_indirect_first_instance
|
|
.bitand(&rhs.draw_indirect_first_instance),
|
|
depth_clamp: lhs.depth_clamp.bitand(&rhs.depth_clamp),
|
|
depth_bias_clamp: lhs.depth_bias_clamp.bitand(&rhs.depth_bias_clamp),
|
|
fill_mode_non_solid: lhs.fill_mode_non_solid.bitand(&rhs.fill_mode_non_solid),
|
|
depth_bounds: lhs.depth_bounds.bitand(&rhs.depth_bounds),
|
|
wide_lines: lhs.wide_lines.bitand(&rhs.wide_lines),
|
|
large_points: lhs.large_points.bitand(&rhs.large_points),
|
|
alpha_to_one: lhs.alpha_to_one.bitand(&rhs.alpha_to_one),
|
|
multi_viewport: lhs.multi_viewport.bitand(&rhs.multi_viewport),
|
|
sampler_anisotropy: lhs.sampler_anisotropy.bitand(&rhs.sampler_anisotropy),
|
|
texture_compression_etc2: lhs
|
|
.texture_compression_etc2
|
|
.bitand(&rhs.texture_compression_etc2),
|
|
texture_compression_astc_ldr: lhs
|
|
.texture_compression_astc_ldr
|
|
.bitand(&rhs.texture_compression_astc_ldr),
|
|
texture_compression_bc: lhs
|
|
.texture_compression_bc
|
|
.bitand(&rhs.texture_compression_bc),
|
|
occlusion_query_precise: lhs
|
|
.occlusion_query_precise
|
|
.bitand(&rhs.occlusion_query_precise),
|
|
pipeline_statistics_query: lhs
|
|
.pipeline_statistics_query
|
|
.bitand(&rhs.pipeline_statistics_query),
|
|
vertex_pipeline_stores_and_atomics: lhs
|
|
.vertex_pipeline_stores_and_atomics
|
|
.bitand(&rhs.vertex_pipeline_stores_and_atomics),
|
|
fragment_stores_and_atomics: lhs
|
|
.fragment_stores_and_atomics
|
|
.bitand(&rhs.fragment_stores_and_atomics),
|
|
shader_tessellation_and_geometry_point_size: lhs
|
|
.shader_tessellation_and_geometry_point_size
|
|
.bitand(&rhs.shader_tessellation_and_geometry_point_size),
|
|
shader_image_gather_extended: lhs
|
|
.shader_image_gather_extended
|
|
.bitand(&rhs.shader_image_gather_extended),
|
|
shader_storage_image_extended_formats: lhs
|
|
.shader_storage_image_extended_formats
|
|
.bitand(&rhs.shader_storage_image_extended_formats),
|
|
shader_storage_image_multisample: lhs
|
|
.shader_storage_image_multisample
|
|
.bitand(&rhs.shader_storage_image_multisample),
|
|
shader_storage_image_read_without_format: lhs
|
|
.shader_storage_image_read_without_format
|
|
.bitand(&rhs.shader_storage_image_read_without_format),
|
|
shader_storage_image_write_without_format: lhs
|
|
.shader_storage_image_write_without_format
|
|
.bitand(&rhs.shader_storage_image_write_without_format),
|
|
shader_uniform_buffer_array_dynamic_indexing: lhs
|
|
.shader_uniform_buffer_array_dynamic_indexing
|
|
.bitand(&rhs.shader_uniform_buffer_array_dynamic_indexing),
|
|
shader_sampled_image_array_dynamic_indexing: lhs
|
|
.shader_sampled_image_array_dynamic_indexing
|
|
.bitand(&rhs.shader_sampled_image_array_dynamic_indexing),
|
|
shader_storage_buffer_array_dynamic_indexing: lhs
|
|
.shader_storage_buffer_array_dynamic_indexing
|
|
.bitand(&rhs.shader_storage_buffer_array_dynamic_indexing),
|
|
shader_storage_image_array_dynamic_indexing: lhs
|
|
.shader_storage_image_array_dynamic_indexing
|
|
.bitand(&rhs.shader_storage_image_array_dynamic_indexing),
|
|
shader_clip_distance: lhs.shader_clip_distance.bitand(&rhs.shader_clip_distance),
|
|
shader_cull_distance: lhs.shader_cull_distance.bitand(&rhs.shader_cull_distance),
|
|
shader_float64: lhs.shader_float64.bitand(&rhs.shader_float64),
|
|
shader_int64: lhs.shader_int64.bitand(&rhs.shader_int64),
|
|
shader_int16: lhs.shader_int16.bitand(&rhs.shader_int16),
|
|
shader_resource_residency: lhs
|
|
.shader_resource_residency
|
|
.bitand(&rhs.shader_resource_residency),
|
|
shader_resource_min_lod: lhs
|
|
.shader_resource_min_lod
|
|
.bitand(&rhs.shader_resource_min_lod),
|
|
sparse_binding: lhs.sparse_binding.bitand(&rhs.sparse_binding),
|
|
sparse_residency_buffer: lhs
|
|
.sparse_residency_buffer
|
|
.bitand(&rhs.sparse_residency_buffer),
|
|
sparse_residency_image2_d: lhs
|
|
.sparse_residency_image2_d
|
|
.bitand(&rhs.sparse_residency_image2_d),
|
|
sparse_residency_image3_d: lhs
|
|
.sparse_residency_image3_d
|
|
.bitand(&rhs.sparse_residency_image3_d),
|
|
sparse_residency2_samples: lhs
|
|
.sparse_residency2_samples
|
|
.bitand(&rhs.sparse_residency2_samples),
|
|
sparse_residency4_samples: lhs
|
|
.sparse_residency4_samples
|
|
.bitand(&rhs.sparse_residency4_samples),
|
|
sparse_residency8_samples: lhs
|
|
.sparse_residency8_samples
|
|
.bitand(&rhs.sparse_residency8_samples),
|
|
sparse_residency16_samples: lhs
|
|
.sparse_residency16_samples
|
|
.bitand(&rhs.sparse_residency16_samples),
|
|
sparse_residency_aliased: lhs
|
|
.sparse_residency_aliased
|
|
.bitand(&rhs.sparse_residency_aliased),
|
|
variable_multisample_rate: lhs
|
|
.variable_multisample_rate
|
|
.bitand(&rhs.variable_multisample_rate),
|
|
inherited_queries: lhs.inherited_queries.bitand(&rhs.inherited_queries),
|
|
}
|
|
}
|
|
|
|
pub fn bitand_device_features11(
|
|
lhs: &vk::PhysicalDeviceVulkan11Features,
|
|
rhs: &vk::PhysicalDeviceVulkan11Features,
|
|
) -> vk::PhysicalDeviceVulkan11Features<'static> {
|
|
use core::ops::BitAnd;
|
|
vk::PhysicalDeviceVulkan11Features {
|
|
storage_buffer16_bit_access: lhs
|
|
.storage_buffer16_bit_access
|
|
.bitand(&rhs.storage_buffer16_bit_access),
|
|
uniform_and_storage_buffer16_bit_access: lhs
|
|
.uniform_and_storage_buffer16_bit_access
|
|
.bitand(&rhs.uniform_and_storage_buffer16_bit_access),
|
|
storage_push_constant16: lhs
|
|
.storage_push_constant16
|
|
.bitand(&rhs.storage_push_constant16),
|
|
storage_input_output16: lhs
|
|
.storage_input_output16
|
|
.bitand(&rhs.storage_input_output16),
|
|
multiview: lhs.multiview.bitand(&rhs.multiview),
|
|
multiview_geometry_shader: lhs
|
|
.multiview_geometry_shader
|
|
.bitand(&rhs.multiview_geometry_shader),
|
|
multiview_tessellation_shader: lhs
|
|
.multiview_tessellation_shader
|
|
.bitand(&rhs.multiview_tessellation_shader),
|
|
variable_pointers_storage_buffer: lhs
|
|
.variable_pointers_storage_buffer
|
|
.bitand(&rhs.variable_pointers_storage_buffer),
|
|
variable_pointers: lhs.variable_pointers.bitand(&rhs.variable_pointers),
|
|
protected_memory: lhs.protected_memory.bitand(&rhs.protected_memory),
|
|
sampler_ycbcr_conversion: lhs
|
|
.sampler_ycbcr_conversion
|
|
.bitand(&rhs.sampler_ycbcr_conversion),
|
|
shader_draw_parameters: lhs
|
|
.shader_draw_parameters
|
|
.bitand(&rhs.shader_draw_parameters),
|
|
..Default::default()
|
|
}
|
|
}
|
|
|
|
pub fn bitand_device_features12(
|
|
lhs: &vk::PhysicalDeviceVulkan12Features,
|
|
rhs: &vk::PhysicalDeviceVulkan12Features,
|
|
) -> vk::PhysicalDeviceVulkan12Features<'static> {
|
|
use core::ops::BitAnd;
|
|
vk::PhysicalDeviceVulkan12Features {
|
|
sampler_mirror_clamp_to_edge: lhs
|
|
.sampler_mirror_clamp_to_edge
|
|
.bitand(&rhs.sampler_mirror_clamp_to_edge),
|
|
draw_indirect_count: lhs.draw_indirect_count.bitand(&rhs.draw_indirect_count),
|
|
storage_buffer8_bit_access: lhs
|
|
.storage_buffer8_bit_access
|
|
.bitand(&rhs.storage_buffer8_bit_access),
|
|
uniform_and_storage_buffer8_bit_access: lhs
|
|
.uniform_and_storage_buffer8_bit_access
|
|
.bitand(&rhs.uniform_and_storage_buffer8_bit_access),
|
|
storage_push_constant8: lhs
|
|
.storage_push_constant8
|
|
.bitand(&rhs.storage_push_constant8),
|
|
shader_buffer_int64_atomics: lhs
|
|
.shader_buffer_int64_atomics
|
|
.bitand(&rhs.shader_buffer_int64_atomics),
|
|
shader_shared_int64_atomics: lhs
|
|
.shader_shared_int64_atomics
|
|
.bitand(&rhs.shader_shared_int64_atomics),
|
|
shader_float16: lhs.shader_float16.bitand(&rhs.shader_float16),
|
|
shader_int8: lhs.shader_int8.bitand(&rhs.shader_int8),
|
|
descriptor_indexing: lhs.descriptor_indexing.bitand(&rhs.descriptor_indexing),
|
|
shader_input_attachment_array_dynamic_indexing: lhs
|
|
.shader_input_attachment_array_dynamic_indexing
|
|
.bitand(&rhs.shader_input_attachment_array_dynamic_indexing),
|
|
shader_uniform_texel_buffer_array_dynamic_indexing: lhs
|
|
.shader_uniform_texel_buffer_array_dynamic_indexing
|
|
.bitand(&rhs.shader_uniform_texel_buffer_array_dynamic_indexing),
|
|
shader_storage_texel_buffer_array_dynamic_indexing: lhs
|
|
.shader_storage_texel_buffer_array_dynamic_indexing
|
|
.bitand(&rhs.shader_storage_texel_buffer_array_dynamic_indexing),
|
|
shader_uniform_buffer_array_non_uniform_indexing: lhs
|
|
.shader_uniform_buffer_array_non_uniform_indexing
|
|
.bitand(&rhs.shader_uniform_buffer_array_non_uniform_indexing),
|
|
shader_sampled_image_array_non_uniform_indexing: lhs
|
|
.shader_sampled_image_array_non_uniform_indexing
|
|
.bitand(&rhs.shader_sampled_image_array_non_uniform_indexing),
|
|
shader_storage_buffer_array_non_uniform_indexing: lhs
|
|
.shader_storage_buffer_array_non_uniform_indexing
|
|
.bitand(&rhs.shader_storage_buffer_array_non_uniform_indexing),
|
|
shader_storage_image_array_non_uniform_indexing: lhs
|
|
.shader_storage_image_array_non_uniform_indexing
|
|
.bitand(&rhs.shader_storage_image_array_non_uniform_indexing),
|
|
shader_input_attachment_array_non_uniform_indexing: lhs
|
|
.shader_input_attachment_array_non_uniform_indexing
|
|
.bitand(&rhs.shader_input_attachment_array_non_uniform_indexing),
|
|
shader_uniform_texel_buffer_array_non_uniform_indexing: lhs
|
|
.shader_uniform_texel_buffer_array_non_uniform_indexing
|
|
.bitand(&rhs.shader_uniform_texel_buffer_array_non_uniform_indexing),
|
|
shader_storage_texel_buffer_array_non_uniform_indexing: lhs
|
|
.shader_storage_texel_buffer_array_non_uniform_indexing
|
|
.bitand(&rhs.shader_storage_texel_buffer_array_non_uniform_indexing),
|
|
descriptor_binding_uniform_buffer_update_after_bind: lhs
|
|
.descriptor_binding_uniform_buffer_update_after_bind
|
|
.bitand(&rhs.descriptor_binding_uniform_buffer_update_after_bind),
|
|
descriptor_binding_sampled_image_update_after_bind: lhs
|
|
.descriptor_binding_sampled_image_update_after_bind
|
|
.bitand(&rhs.descriptor_binding_sampled_image_update_after_bind),
|
|
descriptor_binding_storage_image_update_after_bind: lhs
|
|
.descriptor_binding_storage_image_update_after_bind
|
|
.bitand(&rhs.descriptor_binding_storage_image_update_after_bind),
|
|
descriptor_binding_storage_buffer_update_after_bind: lhs
|
|
.descriptor_binding_storage_buffer_update_after_bind
|
|
.bitand(&rhs.descriptor_binding_storage_buffer_update_after_bind),
|
|
descriptor_binding_uniform_texel_buffer_update_after_bind: lhs
|
|
.descriptor_binding_uniform_texel_buffer_update_after_bind
|
|
.bitand(&rhs.descriptor_binding_uniform_texel_buffer_update_after_bind),
|
|
descriptor_binding_storage_texel_buffer_update_after_bind: lhs
|
|
.descriptor_binding_storage_texel_buffer_update_after_bind
|
|
.bitand(&rhs.descriptor_binding_storage_texel_buffer_update_after_bind),
|
|
descriptor_binding_update_unused_while_pending: lhs
|
|
.descriptor_binding_update_unused_while_pending
|
|
.bitand(&rhs.descriptor_binding_update_unused_while_pending),
|
|
descriptor_binding_partially_bound: lhs
|
|
.descriptor_binding_partially_bound
|
|
.bitand(&rhs.descriptor_binding_partially_bound),
|
|
descriptor_binding_variable_descriptor_count: lhs
|
|
.descriptor_binding_variable_descriptor_count
|
|
.bitand(&rhs.descriptor_binding_variable_descriptor_count),
|
|
runtime_descriptor_array: lhs
|
|
.runtime_descriptor_array
|
|
.bitand(&rhs.runtime_descriptor_array),
|
|
sampler_filter_minmax: lhs.sampler_filter_minmax.bitand(&rhs.sampler_filter_minmax),
|
|
scalar_block_layout: lhs.scalar_block_layout.bitand(&rhs.scalar_block_layout),
|
|
imageless_framebuffer: lhs.imageless_framebuffer.bitand(&rhs.imageless_framebuffer),
|
|
uniform_buffer_standard_layout: lhs
|
|
.uniform_buffer_standard_layout
|
|
.bitand(&rhs.uniform_buffer_standard_layout),
|
|
shader_subgroup_extended_types: lhs
|
|
.shader_subgroup_extended_types
|
|
.bitand(&rhs.shader_subgroup_extended_types),
|
|
separate_depth_stencil_layouts: lhs
|
|
.separate_depth_stencil_layouts
|
|
.bitand(&rhs.separate_depth_stencil_layouts),
|
|
host_query_reset: lhs.host_query_reset.bitand(&rhs.host_query_reset),
|
|
timeline_semaphore: lhs.timeline_semaphore.bitand(&rhs.timeline_semaphore),
|
|
buffer_device_address: lhs.buffer_device_address.bitand(&rhs.buffer_device_address),
|
|
buffer_device_address_capture_replay: lhs
|
|
.buffer_device_address_capture_replay
|
|
.bitand(&rhs.buffer_device_address_capture_replay),
|
|
buffer_device_address_multi_device: lhs
|
|
.buffer_device_address_multi_device
|
|
.bitand(&rhs.buffer_device_address_multi_device),
|
|
vulkan_memory_model: lhs.vulkan_memory_model.bitand(&rhs.vulkan_memory_model),
|
|
vulkan_memory_model_device_scope: lhs
|
|
.vulkan_memory_model_device_scope
|
|
.bitand(&rhs.vulkan_memory_model_device_scope),
|
|
vulkan_memory_model_availability_visibility_chains: lhs
|
|
.vulkan_memory_model_availability_visibility_chains
|
|
.bitand(&rhs.vulkan_memory_model_availability_visibility_chains),
|
|
shader_output_viewport_index: lhs
|
|
.shader_output_viewport_index
|
|
.bitand(&rhs.shader_output_viewport_index),
|
|
shader_output_layer: lhs.shader_output_layer.bitand(&rhs.shader_output_layer),
|
|
subgroup_broadcast_dynamic_id: lhs
|
|
.subgroup_broadcast_dynamic_id
|
|
.bitand(&rhs.subgroup_broadcast_dynamic_id),
|
|
..Default::default()
|
|
}
|
|
}
|
|
pub fn bitand_device_features13(
|
|
lhs: &vk::PhysicalDeviceVulkan13Features,
|
|
rhs: &vk::PhysicalDeviceVulkan13Features,
|
|
) -> vk::PhysicalDeviceVulkan13Features<'static> {
|
|
use core::ops::BitAnd;
|
|
vk::PhysicalDeviceVulkan13Features {
|
|
robust_image_access: lhs.robust_image_access.bitand(&rhs.robust_image_access),
|
|
inline_uniform_block: lhs.inline_uniform_block.bitand(&rhs.inline_uniform_block),
|
|
descriptor_binding_inline_uniform_block_update_after_bind: lhs
|
|
.descriptor_binding_inline_uniform_block_update_after_bind
|
|
.bitand(&rhs.descriptor_binding_inline_uniform_block_update_after_bind),
|
|
pipeline_creation_cache_control: lhs
|
|
.pipeline_creation_cache_control
|
|
.bitand(&rhs.pipeline_creation_cache_control),
|
|
private_data: lhs.private_data.bitand(&rhs.private_data),
|
|
shader_demote_to_helper_invocation: lhs
|
|
.shader_demote_to_helper_invocation
|
|
.bitand(&rhs.shader_demote_to_helper_invocation),
|
|
shader_terminate_invocation: lhs
|
|
.shader_terminate_invocation
|
|
.bitand(&rhs.shader_terminate_invocation),
|
|
subgroup_size_control: lhs.subgroup_size_control.bitand(&rhs.subgroup_size_control),
|
|
compute_full_subgroups: lhs
|
|
.compute_full_subgroups
|
|
.bitand(&rhs.compute_full_subgroups),
|
|
synchronization2: lhs.synchronization2.bitand(&rhs.synchronization2),
|
|
texture_compression_astc_hdr: lhs
|
|
.texture_compression_astc_hdr
|
|
.bitand(&rhs.texture_compression_astc_hdr),
|
|
shader_zero_initialize_workgroup_memory: lhs
|
|
.shader_zero_initialize_workgroup_memory
|
|
.bitand(&rhs.shader_zero_initialize_workgroup_memory),
|
|
dynamic_rendering: lhs.dynamic_rendering.bitand(&rhs.dynamic_rendering),
|
|
shader_integer_dot_product: lhs
|
|
.shader_integer_dot_product
|
|
.bitand(&rhs.shader_integer_dot_product),
|
|
maintenance4: lhs.maintenance4.bitand(&rhs.maintenance4),
|
|
..Default::default()
|
|
}
|
|
}
|
|
}
|
|
|
|
#[cfg(test)]
mod test_swapchain {
    use super::*;

    /// Builds a `Vulkan` instance with a headless surface and a 1x1
    /// swapchain so swapchain plumbing can be exercised without a real
    /// window system.
    ///
    /// # Errors
    /// Propagates any [`Error`] from instance, surface, or swapchain
    /// creation.
    fn create_headless_vk() -> Result<(Vulkan, WindowContext)> {
        let vk = Vulkan::new(
            "testing",
            &[],
            &[ash::ext::headless_surface::NAME, khr::surface::NAME],
            None,
        )?;

        let surface = Arc::new(Surface::headless(vk.instance.clone())?);

        let swapchain = Arc::new(Swapchain::new(
            vk.instance.clone(),
            vk.device.clone(),
            surface.clone(),
            vk.device.phy(),
            vk::Extent2D::default().width(1).height(1),
        )?);

        // Dummy window handle: the headless surface never dereferences
        // it, it only fills the `WindowContext` field.
        let window_ctx = WindowContext {
            window_handle: RawWindowHandle::Web(raw_window_handle::WebWindowHandle::new(0)),
            surface,
            current_swapchain: RwLock::new(swapchain),
        };

        Ok((vk, window_ctx))
    }

    /// Acquires and presents ~1000 frames through the async swapchain
    /// channel, then cancels the acquisition task.
    #[tracing_test::traced_test]
    #[test]
    fn async_swapchain_acquiring() {
        // Keep `vk` alive for the duration of the test; dropping it
        // would tear down the device under the swapchain.
        let (vk, ctx) = create_headless_vk().expect("init");
        let ctx = Arc::new(ctx);
        let (rx, handle) = ctx.images();

        let mut count = 0;
        loop {
            let now = std::time::Instant::now();
            let frame = rx.recv_blocking().expect("recv");
            frame.present();
            // `as_secs_f64()` yields seconds, so multiply — not divide —
            // by 1000 to log milliseconds per frame. The original
            // division reported a value off by a factor of 1e6.
            tracing::info!("mspf: {}ms", now.elapsed().as_secs_f64() * 1000.0);
            count += 1;
            if count > 1000 {
                handle.cancel();
                break;
            }
        }
    }
}
|