vidya/crates/renderer/src/swapchain.rs

878 lines
30 KiB
Rust

use std::{
mem::ManuallyDrop,
num::NonZero,
ops::Deref,
sync::{
Arc,
atomic::{AtomicU32, AtomicU64, Ordering},
},
};
use ash::{
khr,
prelude::VkResult,
vk::{self, Handle},
};
use parking_lot::{Mutex, RwLock};
use raw_window_handle::{RawDisplayHandle, RawWindowHandle};
use crate::{
Instance, PhysicalDeviceInfo, Result, SurfaceCapabilities,
device::{Device, DeviceObject},
images::{self, ImageViewDesc},
sync::Fence,
util::DropGuard,
};
use derive_more::Debug;
/// A Vulkan presentation surface (`VkSurfaceKHR`) together with the swapchain
/// currently attached to it, if one has been configured.
///
/// Field order matters for drop order: the swapchain is dropped first, then
/// the drop guard destroys the raw surface, and only then is the instance
/// reference released.
#[derive(Debug)]
pub struct Surface {
    pub(crate) raw: vk::SurfaceKHR,
    // extension function table used to destroy/query the surface.
    #[debug(skip)]
    pub(crate) functor: khr::surface::Instance,
    // the swapchain currently attached to this surface, replaced on (re)configure.
    pub(crate) swapchain: RwLock<Option<Arc<Swapchain>>>,
    // destroy surface after any fields that depend on it
    _drop_guard: DropGuard,
    // drop reference to instance after destroying the surface
    pub(crate) _instance: Instance,
}
impl Surface {
/// (Re)creates the swapchain attached to this surface so it matches `config`.
///
/// If the currently attached swapchain already matches `config` and belongs
/// to `device`, this is a no-op. Otherwise a new swapchain is created with
/// the old one passed as `oldSwapchain` so the driver can recycle resources.
pub fn configure(&self, device: &Device, config: SwapchainConfiguration) -> Result<()> {
    // Hold the write lock across the whole check-then-recreate sequence.
    // The previous read-check / drop / write-replace pattern allowed two
    // threads to both pass the equality check and both hand the same (by
    // then retired) handle to `oldSwapchain`, which is invalid API usage.
    let mut guard = self.swapchain.write();
    if let Some(swapchain) = guard.as_ref()
        && swapchain.config.eq(&config)
        && swapchain.swapchain.device() == device
    {
        // the current swapchain already matches the requested configuration, so we can skip reconfiguration.
        return Ok(());
    }
    let new_swapchain = Swapchain::new(
        device.clone(),
        self,
        config,
        guard.as_ref().map(AsRef::as_ref),
    )?;
    guard.replace(Arc::new(new_swapchain));
    Ok(())
}
/// Creates a display-less surface via `VK_EXT_headless_surface`, useful for
/// tests and off-screen rendering.
#[allow(dead_code)]
pub fn headless(instance: &Instance) -> Result<Self> {
    let headless_instance =
        ash::ext::headless_surface::Instance::new(&instance.entry, &instance.raw);
    let functor = khr::surface::Instance::new(&instance.entry, &instance.raw);
    // SAFETY: the headless surface does not have any platform-specific requirements, and does not depend on any external handles, so it is safe to create without additional guarantees.
    // (note): ash marks this function as unsafe, likely because of
    // auto-generated bindings from vk.xml, but doesn't provide any safety
    // bounds.
    let raw = unsafe {
        headless_instance
            .create_headless_surface(&vk::HeadlessSurfaceCreateInfoEXT::default(), None)?
    };
    Ok(Self {
        raw,
        swapchain: RwLock::new(None),
        _instance: instance.clone(),
        // the surface must be destroyed after the swapchain
        _drop_guard: DropGuard::new({
            let functor = functor.clone();
            // SAFETY: `raw` was created above and the guard destroys it
            // exactly once; explicit `unsafe` here mirrors
            // `new_from_raw_window_handle` instead of relying on the
            // creation-site unsafe block leaking into the closure.
            move || unsafe {
                functor.destroy_surface(raw, None);
            }
        }),
        functor,
    })
}
/// Creates a surface for a native window described by raw display/window
/// handles (e.g. obtained from `winit`).
///
/// # Safety
///
/// The caller must ensure that the provided display and window handles are
/// valid and remain valid for the lifetime of the surface. Namely, the
/// window handle must refer to a valid window that is associated with the
/// display handle, and both must not be destroyed while the surface is
/// still in use. Additionally, the caller must ensure that the instance
/// was created with the appropriate platform-specific surface extensions
/// enabled.
pub unsafe fn new_from_raw_window_handle(
    instance: &Instance,
    display_handle: RawDisplayHandle,
    window_handle: RawWindowHandle,
) -> Result<Self> {
    let functor = khr::surface::Instance::new(&instance.entry, &instance.raw);
    // SAFETY: the caller guarantees the validity of the display and window handles, and that they remain valid for the lifetime of the surface.
    let raw = unsafe {
        ash_window::create_surface(
            &instance.entry,
            &instance.raw,
            display_handle,
            window_handle,
            None,
        )?
    };
    Ok(Self {
        raw,
        swapchain: RwLock::new(None),
        _instance: instance.clone(),
        // the surface must be destroyed after the swapchain
        _drop_guard: DropGuard::new({
            let functor = functor.clone();
            // SAFETY: `raw` was created above and is destroyed exactly once,
            // when the guard runs.
            move || unsafe {
                functor.destroy_surface(raw, None);
            }
        }),
        functor,
    })
}
/// Validates a swapchain configuration and possibly adjusts it to be
/// compatible with the surface capabilities by setting incompatible fields
/// to default fallbacks.
///
/// On success, returns the surface capabilities re-queried for the (possibly
/// adjusted) present mode, so the caller can size the swapchain with them.
///
/// # Errors
///
/// Fails if the requested extent is zero-sized or exceeds the adapter's
/// `maxImageDimension2D`, or if querying surface capabilities fails.
fn validate_swapchain_configuration(
    &self,
    instance: &Instance,
    adapter: &PhysicalDeviceInfo,
    config: &mut SwapchainConfiguration,
) -> Result<SurfaceCapabilities> {
    let surface_caps = instance.get_adapter_surface_capabilities(adapter.pdev, self, None)?;
    // reject extents the adapter cannot create 2D images for.
    let max_image_dim = adapter.properties.core.limits.max_image_dimension2_d;
    if config.extent.width > max_image_dim || config.extent.height > max_image_dim {
        return Err(crate::Error::ImageTooLarge {
            width: config.extent.width,
            height: config.extent.height,
            max_size: max_image_dim,
        });
    }
    if config.extent.width == 0 || config.extent.height == 0 {
        return Err(crate::Error::ImageZeroSized);
    }
    if !surface_caps.present_modes.contains(&config.present_mode) {
        // find the first of these modes that is supported by the surface, in order of preference.
        let fallback_modes = [
            vk::PresentModeKHR::MAILBOX,
            vk::PresentModeKHR::IMMEDIATE,
            vk::PresentModeKHR::FIFO,
        ];
        let fallback_mode = fallback_modes
            .iter()
            .find(|&&mode| surface_caps.present_modes.contains(&mode))
            .cloned().expect("FIFO is guaranteed to be supported as per Vulkan spec, so this should never happen");
        config.present_mode = fallback_mode;
    }
    // We do this calculation with the original surface capabilities, which
    // gives us the maximum min_image_count of all present modes, which
    // allows us to switch present modes without needing to recreate the
    // swapchain.
    // a max_image_count of 0 means "no upper limit".
    let max_image_count = NonZero::new(surface_caps.capabilities.max_image_count)
        .map(|n| n.get())
        .unwrap_or(u32::MAX);
    let min_image_count = surface_caps.min_image_count;
    // we want `config.image_count` images acquired at the same time, but we
    // also need to respect the surface's minimum and maximum image count
    // limits.
    let image_count = (min_image_count + config.image_count).min(max_image_count);
    // in case the surface's image count limits prevent us from having the
    // desired number of images in flight, reduce the requested image count
    // to fit within the limits.
    // this value corresponds to `S-M` in the Vulkan spec.
    config.image_count = image_count - min_image_count;
    // different present modes have different image counts/extents: now that
    // we know we have a supported present mode, re-query the surface
    // capabilities to get the correct image counts.
    let surface_caps = instance.get_adapter_surface_capabilities(
        adapter.pdev,
        self,
        Some(config.present_mode),
    )?;
    if !surface_caps.formats.iter().any(|&format| {
        format.format == config.format && format.color_space == config.color_space
    }) {
        // (note): wgpu just rejects the swapchain if the format is not supported. is that smarter?
        // find a fallback format
        let format = surface_caps
            .formats
            .iter()
            .max_by_key(|&&format| {
                // prefer UNORM RGBA formats, and then SRGB color space
                let is_rgba_unorm = format.format == vk::Format::R8G8B8A8_UNORM
                    || format.format == vk::Format::B8G8R8A8_UNORM;
                let is_srgb = format.color_space == vk::ColorSpaceKHR::SRGB_NONLINEAR;
                is_rgba_unorm as u8 * 10 + is_srgb as u8
            })
            // fall back to the first available format
            .or(surface_caps.formats.first())
            .cloned()
            .expect("no surface format available!");
        config.format = format.format;
        config.color_space = format.color_space;
    }
    Ok(surface_caps)
}
#[allow(dead_code)]
pub fn get_fallback_swapchain_configuration(
&self,
instance: &Instance,
adapter: &PhysicalDeviceInfo,
) -> Result<SwapchainConfiguration> {
let surface_caps = instance.get_adapter_surface_capabilities(adapter.pdev, self, None)?;
let present_mode = surface_caps
.present_modes
.iter()
.find(|&mode| mode == &vk::PresentModeKHR::MAILBOX)
.cloned()
.unwrap_or(vk::PresentModeKHR::FIFO);
let format = surface_caps
.formats
.iter()
.max_by_key(|&&format| {
let is_rgba_unorm = format.format == vk::Format::R8G8B8A8_UNORM
|| format.format == vk::Format::B8G8R8A8_UNORM;
let is_srgb = format.color_space == vk::ColorSpaceKHR::SRGB_NONLINEAR;
is_rgba_unorm as u8 * 10 + is_srgb as u8
})
.or(surface_caps.formats.first())
.cloned()
.expect("no surface format available!");
// 0 here means no limit
let max_image_count = core::num::NonZero::new(surface_caps.capabilities.max_image_count)
.map(|n| n.get())
.unwrap_or(u32::MAX);
// we want PREFERRED_IMAGES_IN_FLIGHT images acquired at the same time,
let image_count = (surface_caps.capabilities.min_image_count
+ Swapchain::PREFERRED_IMAGES_IN_FLIGHT)
.min(max_image_count);
let extent = current_extent_or_clamped(
&surface_caps.capabilities,
vk::Extent2D::default().width(1).height(1),
);
let composite_alpha_mode = if surface_caps
.capabilities
.supported_composite_alpha
.contains(vk::CompositeAlphaFlagsKHR::OPAQUE)
{
vk::CompositeAlphaFlagsKHR::OPAQUE
} else {
// if the surface doesn't support opaque alpha, we can still use inherit, which means the alpha will be determined by the window system. This is supported by all window systems.
vk::CompositeAlphaFlagsKHR::INHERIT
};
Ok(SwapchainConfiguration {
present_mode,
format: format.format,
color_space: format.color_space,
image_count,
extent,
usage: vk::ImageUsageFlags::TRANSFER_DST | vk::ImageUsageFlags::COLOR_ATTACHMENT,
composite_alpha_mode,
})
}
/// Starts acquiring the next image from the attached swapchain, if any.
///
/// Returns `None` when no swapchain has been configured yet; otherwise a
/// future resolving to the acquired image and a "suboptimal" flag.
pub fn acquire_image(
    &self,
) -> Option<impl std::future::Future<Output = crate::Result<(SwapchainImage, bool)>>> {
    // Clone the Arc inside a short-lived read lock so the lock is released
    // before the (potentially long-running) acquisition future is polled.
    let current = { self.swapchain.read().as_ref().cloned() };
    match current {
        Some(swapchain) => Some(swapchain.acquire_image()),
        None => None,
    }
}
/// Read-locked access to the currently configured swapchain, if any.
pub fn swapchain(&self) -> parking_lot::RwLockReadGuard<'_, Option<Arc<Swapchain>>> {
    self.swapchain.read()
}
/// The raw Vulkan surface handle.
pub fn raw(&self) -> vk::SurfaceKHR {
    self.raw
}
}
/// A configured `VkSwapchainKHR` plus the bookkeeping needed to acquire and
/// present its images.
#[derive(Debug)]
pub struct Swapchain {
    // swapchain images, managed by the swapchain and must not be destroyed manually.
    images: Vec<vk::Image>,
    pub(crate) swapchain: DeviceObject<vk::SwapchainKHR>,
    // this carries the device handle, however the `swapchain` field holds a ref count on the device, so it is safe to hold the pointer in the functor as well.
    #[debug(skip)]
    functor: khr::swapchain::Device,
    /// current configuration of the swapchain.
    pub(crate) config: SwapchainConfiguration,
    /// the minimum number of images the surface permits. This is used to calculate how many images we can have in-flight at the same time.
    min_image_count: u32,
    // sync objects:
    // we need two semaphores per each image, one acquire-semaphore and one release-semaphore.
    // semaphores must be unique to each frame and cannot be reused per swapchain.
    // acquire semaphores are indexed per in-flight frame; release semaphores
    // are indexed per swapchain image (see `Swapchain::new`).
    acquire_semaphores: Vec<vk::Semaphore>,
    release_semaphores: Vec<vk::Semaphore>,
    // index of the in-flight frame whose acquire semaphore is used next.
    current_frame: AtomicU32,
    // Some of the swapchain operations require external synchronisation; this mutex allows `Swapchain` to be `Sync`.
    #[debug(skip)]
    pub(crate) guard: parking_lot::Mutex<()>,
    // for khr_present_id/khr_present_wait
    #[allow(unused)]
    present_id: AtomicU64,
}
impl Swapchain {
    /// Frees the manually managed objects (the per-frame/per-image
    /// semaphores) owned by the swapchain, after waiting for the device to
    /// go idle.
    ///
    /// # Safety
    ///
    /// This function MUST be called once and only once before the swapchain
    /// is dropped. Note that `Drop` already calls it, so external callers
    /// must not invoke it as well — doing so would destroy the semaphores
    /// twice.
    pub unsafe fn release_resources(&self) {
        // best-effort: ignore wait errors, we are tearing down regardless.
        _ = self.swapchain.device().wait_idle();
        for &semaphore in self
            .acquire_semaphores
            .iter()
            .chain(&self.release_semaphores)
        {
            // SAFETY: these semaphores were created on this device and, after
            // the idle wait above, are no longer in use by any queue.
            unsafe {
                self.swapchain
                    .device()
                    .raw
                    .destroy_semaphore(semaphore, None);
            }
        }
    }
}
impl Drop for Swapchain {
    fn drop(&mut self) {
        // SAFETY: Drop runs exactly once, satisfying the call-once contract
        // of `release_resources`.
        unsafe {
            self.release_resources();
        }
        // the swapchain itself will be automatically destroyed by the
        // DeviceObject's Drop implementation.
    }
}
impl Swapchain {
/// The number of frames we would like to keep in flight simultaneously.
const PREFERRED_IMAGES_IN_FLIGHT: u32 = 3;
/// Creates a swapchain for `surface` on `device`.
///
/// `config` is validated (and possibly adjusted) against the surface's
/// capabilities first. If `old_swapchain` is given, it is passed as
/// `oldSwapchain` so the driver can recycle its resources while in-flight
/// presents complete.
fn new(
    device: Device,
    surface: &Surface,
    mut config: SwapchainConfiguration,
    old_swapchain: Option<&Self>,
) -> Result<Self> {
    let surface_caps = surface.validate_swapchain_configuration(
        &device.instance,
        &device.adapter,
        &mut config,
    )?;
    tracing::trace!("surface capabilities: {surface_caps:#?}");
    let functor = device
        .device_extensions
        .swapchain
        .clone()
        .expect("swapchain extension not loaded");
    let (swapchain, images) = {
        // retiring the old swapchain requires external synchronisation.
        let _lock = old_swapchain.as_ref().map(|old| old.guard.lock());
        let old_swapchain = old_swapchain
            .map(|swp| *swp.swapchain)
            .unwrap_or(vk::SwapchainKHR::null());
        let queue_families = device.queues.swapchain_family_indices();
        // NOTE(review): `surface_caps.min_image_count` (top-level) is used
        // here while `surface_caps.capabilities.min_image_count` is used
        // below for in-flight accounting — confirm the difference is
        // intentional.
        let create_info = vk::SwapchainCreateInfoKHR::default()
            .surface(surface.raw)
            .present_mode(config.present_mode)
            .image_color_space(config.color_space)
            .image_format(config.format)
            .min_image_count(surface_caps.min_image_count + config.image_count)
            .image_usage(config.usage)
            .image_array_layers(1)
            .image_extent(config.extent)
            // CONCURRENT sharing only when more than one family needs access.
            .image_sharing_mode(if queue_families.len() <= 1 {
                vk::SharingMode::EXCLUSIVE
            } else {
                vk::SharingMode::CONCURRENT
            })
            .queue_family_indices(queue_families)
            .pre_transform(vk::SurfaceTransformFlagsKHR::IDENTITY)
            .composite_alpha(config.composite_alpha_mode)
            .old_swapchain(old_swapchain)
            .clipped(true);
        // SAFETY: create_info only borrows data that outlives these calls;
        // the returned images are owned by the swapchain and must not be
        // destroyed manually.
        let (swapchain, images) = unsafe {
            let swapchain = functor.create_swapchain(&create_info, None)?;
            let images = functor.get_swapchain_images(swapchain)?;
            (swapchain, images)
        };
        (swapchain, images)
    };
    let num_images = images.len() as u32;
    // images beyond the surface's minimum may be in flight at once.
    let inflight_frames = num_images - surface_caps.capabilities.min_image_count;
    // one acquire semaphore per in-flight frame...
    let acquire_semaphores = {
        (0..inflight_frames)
            .map(|i| unsafe {
                device
                    .dev()
                    .create_semaphore(&vk::SemaphoreCreateInfo::default(), None)
                    .inspect(|r| {
                        #[cfg(debug_assertions)]
                        device.debug_name_object(
                            *r,
                            &format!("semaphore-{:x}_{i}-acquire", swapchain.as_raw()),
                        );
                    })
            })
            .collect::<VkResult<Vec<_>>>()?
    };
    // ...and one release semaphore per swapchain image.
    let release_semaphores = {
        (0..images.len())
            .map(|i| unsafe {
                device
                    .dev()
                    .create_semaphore(&vk::SemaphoreCreateInfo::default(), None)
                    .inspect(|r| {
                        #[cfg(debug_assertions)]
                        device.debug_name_object(
                            *r,
                            &format!("semaphore-{:x}_{i}-release", swapchain.as_raw()),
                        );
                    })
            })
            .collect::<VkResult<Vec<_>>>()?
    };
    tracing::trace!(
        image_count = images.len(),
        min_image_count = surface_caps.capabilities.min_image_count,
        config = ?config,
        "created swapchain:");
    Ok(Swapchain {
        functor: device
            .device_extensions
            .swapchain
            .clone()
            .expect("swapchain extension not loaded"),
        swapchain: DeviceObject::new_debug_named(
            device,
            swapchain,
            Some(format!("swapchain-{:x}", swapchain.as_raw())),
        ),
        images,
        config,
        guard: Mutex::new(()),
        min_image_count: surface_caps.capabilities.min_image_count,
        acquire_semaphores,
        release_semaphores,
        current_frame: AtomicU32::new(0),
        present_id: AtomicU64::new(1),
    })
}
/// The number of frames that may be in flight simultaneously: the total
/// image count minus the surface's minimum.
pub fn max_in_flight_images(&self) -> u32 {
    self.num_images() - self.min_image_count
}
/// Total number of images owned by this swapchain.
pub fn num_images(&self) -> u32 {
    self.images.len() as u32
}
/// The raw Vulkan swapchain handle.
pub fn raw(&self) -> vk::SwapchainKHR {
    *self.swapchain
}
/// returns a future yielding the frame, and true if the swapchain is
/// suboptimal and should be recreated.
///
/// The frame slot (and thus the acquire semaphore) is claimed synchronously
/// before the future is returned, so concurrent callers get distinct
/// semaphores; the actual `vkAcquireNextImageKHR` call runs on a blocking
/// thread inside the future.
fn acquire_image(
    self: Arc<Self>,
) -> impl std::future::Future<Output = crate::Result<(SwapchainImage, bool)>> {
    // advance the frame counter, wrapping at the in-flight frame count, to
    // pick this acquisition's semaphore slot.
    // (try_update never returns Err here: the closure always returns Some.)
    let frame = self
        .current_frame
        .try_update(Ordering::Release, Ordering::Relaxed, |i| {
            Some((i + 1) % self.max_in_flight_images())
        })
        .unwrap() as usize;
    tracing::trace!(frame, "acquiring image for frame {frame}");
    async move {
        let fence = Fence::from_pool(&self.swapchain.device().pools.fences, None)?;
        // acquire semaphores are per-frame; release semaphores per-image.
        let acquire = self.acquire_semaphores[frame];
        // spawn on threadpool because it might block.
        let (idx, suboptimal) = smol::unblock({
            let this = self.clone();
            let fence = fence.raw();
            move || unsafe {
                this.with_locked(|swapchain| {
                    this.functor
                        .acquire_next_image(swapchain.raw(), u64::MAX, acquire, fence)
                })
            }
        })
        .await?;
        let release = self.release_semaphores[idx as usize];
        let idx = idx as usize;
        let image = self.images[idx];
        let image = Arc::new(images::Image::from_swapchain_image(image, &self));
        let view = image.create_view(ImageViewDesc {
            name: Some(format!("swapchain-{:x}-image-view-{idx}", self.raw().as_raw()).into()),
            kind: vk::ImageViewType::TYPE_2D,
            format: self.config.format,
            aspect: vk::ImageAspectFlags::COLOR,
            ..Default::default()
        })?;
        // wait for image to become available.
        fence.into_future().await;
        Ok((
            SwapchainImage {
                index: idx as u32,
                swapchain: self,
                view: ManuallyDrop::into_inner(view),
                acquire,
                release,
            },
            suboptimal,
        ))
    }
}
/// Queues image `index` for presentation on the graphics queue, optionally
/// waiting on the `wait` semaphore first.
///
/// # Safety
/// The caller must ensure that the provided index corresponds to an image
/// that is currently acquired and not yet presented.
unsafe fn present(&self, index: u32, wait: Option<vk::Semaphore>) -> Result<()> {
    // queue_present requires external synchronisation of the swapchain.
    let _lock = self.guard.lock();
    // zero- or one-element slice, depending on whether `wait` is set.
    let wait_semaphores = wait.as_slice();
    let present_info = vk::PresentInfoKHR::default()
        .image_indices(core::slice::from_ref(&index))
        .swapchains(core::slice::from_ref(&*self.swapchain))
        .wait_semaphores(wait_semaphores);
    let queue = self.swapchain.device().queues.graphics();
    queue.with_locked(|queue| -> crate::Result<()> {
        // TODO: make this optional for devices with no support for present_wait/present_id
        // let present_id = self
        //     .present_id
        //     .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
        // let mut present_id =
        //     vk::PresentIdKHR::default().present_ids(core::slice::from_ref(&present_id));
        //.push_next(&mut present_id)
        // call winits pre_present_notify here
        unsafe {
            self.functor.queue_present(queue.raw(), &present_info)?;
        }
        Ok(())
    })
}
/// The configuration this swapchain was created with (after validation
/// against the surface capabilities).
pub fn config(&self) -> &SwapchainConfiguration {
    &self.config
}
}
/// An image acquired from a [`Swapchain`], bundled with its view and the
/// semaphores associated with this acquisition.
#[derive(Debug)]
#[must_use = "This struct represents an acquired image from the Swapchain and
must be presented in order to free resources on the device."]
pub struct SwapchainImage {
    view: images::ImageView,
    // The swapchain must be kept alive while the image is in use, because the
    // image is owned by the swapchain and will be freed when the swapchain is
    // dropped. Additionally, we need access to the swapchain in order to
    // present the image.
    swapchain: Arc<Swapchain>,
    index: u32,
    // signalled when the image is ready to be rendered to.
    pub acquire: vk::Semaphore,
    // to be signalled by the caller's rendering work before presenting.
    pub release: vk::Semaphore,
}
impl Deref for SwapchainImage {
    type Target = images::ImageView;

    /// A `SwapchainImage` can be used wherever its underlying image view is
    /// expected.
    fn deref(&self) -> &images::ImageView {
        &self.view
    }
}
impl SwapchainImage {
    /// The index of this image within its swapchain.
    pub fn index(&self) -> u32 {
        self.index
    }
    /// Presents this image, optionally waiting on the `wait` semaphore
    /// first. Consumes the image.
    pub fn present(self, wait: Option<vk::Semaphore>) -> crate::Result<()> {
        // SAFETY: we know the index is valid because we've acquired the image,
        // and we know it isn't presented yet because we still own the image.
        unsafe { self.swapchain.present(self.index, wait) }
    }
}
/// An acquired swapchain frame with its raw handles exposed directly.
///
/// NOTE(review): this type is only referenced from commented-out code in this
/// file — confirm whether it is still used elsewhere or can be removed.
#[derive(Debug)]
#[must_use = "This struct represents an acquired image from the swapchain and
must be presented in order to free resources on the device."]
pub struct SwapchainFrame {
    pub swapchain: Arc<Swapchain>,
    pub index: u32,
    pub image: Arc<images::Image>,
    pub format: vk::Format,
    pub view: vk::ImageView,
    pub acquire: vk::Semaphore,
    pub release: vk::Semaphore,
}
impl Eq for SwapchainFrame {}
impl PartialEq for SwapchainFrame {
    /// Frames compare equal when they refer to the same image slot; the
    /// remaining fields (view, semaphores, format) are not compared.
    fn eq(&self, other: &Self) -> bool {
        self.index == other.index && self.image == other.image
    }
}
impl SwapchainFrame {
    /// Presents this frame, optionally waiting on the `wait` semaphore
    /// first. Consumes the frame.
    pub fn present(self, wait: Option<vk::Semaphore>) -> crate::Result<()> {
        // SAFETY: we know the index is valid because we've acquired the image, and we know it isn't presented yet because we still own the image.
        unsafe { self.swapchain.present(self.index, wait) }
    }
}
/// Returns the surface's current extent, or — when the surface reports the
/// "undefined" sentinel (width == `u32::MAX`, meaning the extent is decided
/// by the swapchain) — `fallback` clamped into the surface's allowed range.
#[allow(dead_code)]
fn current_extent_or_clamped(
    caps: &vk::SurfaceCapabilitiesKHR,
    fallback: vk::Extent2D,
) -> vk::Extent2D {
    let extent_is_undefined = caps.current_extent.width == u32::MAX;
    if !extent_is_undefined {
        return caps.current_extent;
    }
    let min = caps.min_image_extent;
    let max = caps.max_image_extent;
    vk::Extent2D {
        width: fallback.width.clamp(min.width, max.width),
        height: fallback.height.clamp(min.height, max.height),
    }
}
/// User-facing parameters for creating or reconfiguring a [`Swapchain`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SwapchainConfiguration {
    /// Presentation (vsync) mode to request.
    pub present_mode: vk::PresentModeKHR,
    /// Pixel format of the swapchain images.
    pub format: vk::Format,
    /// Color space the images are interpreted in when presented.
    pub color_space: vk::ColorSpaceKHR,
    /// the number of images to request from the device
    pub image_count: u32,
    /// The dimensions of the swapchain images.
    pub extent: vk::Extent2D,
    /// Alpha compositing mode.
    pub composite_alpha_mode: vk::CompositeAlphaFlagsKHR,
    /// Usage flags for the swapchain images. This should be a combination of
    /// `vk::ImageUsageFlags::COLOR_ATTACHMENT` and
    /// `vk::ImageUsageFlags::TRANSFER_DST`, but can include additional usage
    /// flags if supported by the device and surface.
    pub usage: vk::ImageUsageFlags,
}
impl Default for SwapchainConfiguration {
    /// A 1x1, MAILBOX, RGBA8-UNORM/sRGB configuration with 3 images.
    ///
    /// NOTE(review): MAILBOX is not universally supported; this default
    /// relies on `Surface::validate_swapchain_configuration` falling back to
    /// a supported mode at configure time.
    fn default() -> Self {
        Self {
            present_mode: vk::PresentModeKHR::MAILBOX,
            format: vk::Format::R8G8B8A8_UNORM,
            color_space: vk::ColorSpaceKHR::SRGB_NONLINEAR,
            image_count: 3,
            extent: vk::Extent2D::default().width(1).height(1),
            composite_alpha_mode: vk::CompositeAlphaFlagsKHR::OPAQUE,
            usage: vk::ImageUsageFlags::TRANSFER_DST | vk::ImageUsageFlags::COLOR_ATTACHMENT,
        }
    }
}
impl Swapchain {
    /// Runs `f` with the swapchain's external-synchronisation mutex held,
    /// as required by operations like image acquisition and presentation.
    pub fn with_locked<T, F: FnOnce(&Self) -> T>(&self, f: F) -> T {
        let _lock = self.guard.lock();
        f(self)
    }
}
// impl WindowSurface {
// pub fn new(
// device: Device,
// requested_extent: vk::Extent2D,
// window: RawWindowHandle,
// display: RawDisplayHandle,
// ) -> Result<Self> {
// let surface = Arc::new(unsafe {
// Surface::new_from_raw_window_handle(device.instance(), display, window)?
// });
// let swapchain = RwLock::new(Arc::new(Swapchain::new(
// device.clone(),
// surface.clone(),
// device.phy(),
// requested_extent,
// )?));
// Ok(Self {
// surface,
// // window_handle: window,
// current_swapchain: swapchain,
// })
// }
// /// spawns a task that continuously requests images from the current
// /// swapchain, sending them to a channel. returns the receiver of the
// /// channel, and a handle to the task, allowing for cancellation.
// pub fn images(
// self: Arc<Self>,
// ) -> (
// smol::channel::Receiver<SwapchainFrame>,
// smol::Task<std::result::Result<(), crate::Error>>,
// ) {
// let (tx, rx) = smol::channel::bounded(8);
// let task = smol::spawn(async move {
// loop {
// let frame = self.acquire_image().await?;
// tx.send(frame)
// .await
// .expect("channel closed on swapchain acquiring frame");
// }
// });
// (rx, task)
// }
// pub async fn acquire_image(&self) -> Result<SwapchainFrame> {
// // clone swapchain to keep it alive
// let swapchain = self.current_swapchain.read().clone();
// let (frame, suboptimal) = swapchain.clone().acquire_image().await?;
// if suboptimal {
// let mut lock = self.current_swapchain.write();
// // only recreate our swapchain if it is still same, or else it might have already been recreated.
// if Arc::ptr_eq(&swapchain, &lock) {
// *lock = Arc::new(lock.recreate(None)?);
// }
// }
// Ok(frame)
// }
// pub fn acquire_image_blocking(&self) -> Result<SwapchainFrame> {
// smol::block_on(self.acquire_image())
// }
// pub fn recreate_with(&self, extent: Option<vk::Extent2D>) -> Result<()> {
// let mut swapchain = self.current_swapchain.write();
// *swapchain = Arc::new(swapchain.recreate(extent)?);
// Ok(())
// }
// }
#[cfg(test)]
mod tests {
use crate::{PhysicalDeviceFeatures, instance::InstanceDesc, make_extension};
use super::*;
/// Creates an instance, logical device, and headless surface with a
/// configured swapchain, for exercising the swapchain path without any
/// window system.
fn create_headless_vk() -> Result<(Device, Arc<Surface>)> {
    let instance = Instance::new(&InstanceDesc {
        instance_extensions: &[
            make_extension!(ash::ext::headless_surface),
            make_extension!(ash::khr::surface),
        ],
        ..Default::default()
    })?;
    let features = PhysicalDeviceFeatures {
        core13: vk::PhysicalDeviceVulkan13Features {
            synchronization2: vk::TRUE,
            dynamic_rendering: vk::TRUE,
            maintenance4: vk::TRUE,
            ..Default::default()
        },
        ..Default::default()
    };
    let surface = Arc::new(Surface::headless(&instance)?);
    let adapter = instance.choose_adapter_default(Some(&surface), &[], Some(&features))?;
    let device = adapter.create_logical_device(&instance, &[], features, None)?;
    // FIFO is used here because it is guaranteed to be available.
    surface.configure(
        &device,
        SwapchainConfiguration {
            present_mode: vk::PresentModeKHR::FIFO,
            format: vk::Format::R8G8B8A8_UNORM,
            color_space: vk::ColorSpaceKHR::SRGB_NONLINEAR,
            image_count: 2,
            extent: vk::Extent2D::default().width(1).height(1),
            composite_alpha_mode: vk::CompositeAlphaFlagsKHR::OPAQUE,
            usage: vk::ImageUsageFlags::TRANSFER_DST | vk::ImageUsageFlags::COLOR_ATTACHMENT,
        },
    )?;
    Ok((device, surface))
}
#[tracing_test::traced_test]
#[test]
// currently only checks that device/surface/swapchain creation succeeds;
// the acquisition loop below is disabled.
fn async_swapchain_acquiring() {
    let (_dev, surface) = create_headless_vk().expect("init");
    let _ctx = Arc::new(surface);
    // let (rx, handle) = ctx.clone().images();
    // eprintln!("hello world!");
    // let mut count = 0;
    // loop {
    //     let now = std::time::Instant::now();
    //     let frame = rx.recv_blocking().expect("recv");
    //     _ = frame.present(None);
    //     tracing::info!("mspf: {:.3}ms", now.elapsed().as_micros() as f32 / 1e3);
    //     count += 1;
    //     if count > 1000 {
    //         smol::block_on(handle.cancel());
    //         break;
    //     }
    // }
}
}