use core::{
    borrow::{Borrow, BorrowMut},
    cell::UnsafeCell,
    fmt::Display,
    marker::PhantomData,
    mem::{self, ManuallyDrop, MaybeUninit},
    ops::{Deref, DerefMut},
    ptr::NonNull,
    sync::atomic::{AtomicPtr, Ordering},
};

use alloc::boxed::Box;

/// A guard that runs a closure when it is dropped.
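///
/// A minimal usage sketch (illustrative only; assumes these items are in scope):
///
/// ```ignore
/// let guard = DropGuard::new(|| println!("cleanup runs exactly once"));
/// // ... do work that must be followed by the cleanup ...
/// drop(guard); // or let the guard fall out of scope
/// ```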
pub struct DropGuard<F: FnOnce()>(UnsafeCell<ManuallyDrop<F>>);

impl<F> DropGuard<F>
where
    F: FnOnce(),
{
    pub fn new(f: F) -> DropGuard<F> {
        Self(UnsafeCell::new(ManuallyDrop::new(f)))
    }
}

impl<F> Drop for DropGuard<F>
where
    F: FnOnce(),
{
    fn drop(&mut self) {
        // SAFETY: We are the only owner of `self.0`, and we ensure that the
        // closure is only called once.
        unsafe {
            ManuallyDrop::take(&mut *self.0.get())();
        }
    }
}

/// A [`NonNull<T>`] wrapper that unconditionally implements [`Send`].
///
/// The caller is responsible for ensuring that sending the pointee across
/// threads is actually sound.
#[repr(transparent)]
#[derive(Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub struct SendPtr<T>(NonNull<T>);

impl<T> Copy for SendPtr<T> {}

impl<T> Clone for SendPtr<T> {
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

impl<T> core::fmt::Pointer for SendPtr<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        <NonNull<T> as core::fmt::Pointer>::fmt(&self.0, f)
    }
}

unsafe impl<T> core::marker::Send for SendPtr<T> {}

impl<T> Deref for SendPtr<T> {
    type Target = NonNull<T>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> DerefMut for SendPtr<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl<T> SendPtr<T> {
    pub const fn new(ptr: *mut T) -> Option<Self> {
        match NonNull::new(ptr) {
            Some(ptr) => Some(Self(ptr)),
            None => None,
        }
    }

    /// # Safety
    /// `ptr` must be non-null.
    #[allow(dead_code)]
    pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
        unsafe { Self(NonNull::new_unchecked(ptr)) }
    }

    pub const fn new_const(ptr: *const T) -> Option<Self> {
        Self::new(ptr.cast_mut())
    }

    /// # Safety
    /// `ptr` must be non-null.
    #[allow(dead_code)]
    pub const unsafe fn new_const_unchecked(ptr: *const T) -> Self {
        unsafe { Self::new_unchecked(ptr.cast_mut()) }
    }

    /// # Safety
    /// The pointee must be valid for reads for the lifetime of the returned
    /// reference.
    pub(crate) unsafe fn as_ref(&self) -> &T {
        unsafe { self.0.as_ref() }
    }
}

/// A tagged atomic pointer that packs a pointer and a `BITS`-bit tag into the
/// same space as a single pointer.
///
/// The low `BITS` bits of the address hold the tag, so the pointee type must
/// be sufficiently aligned: `align_of::<T>() >= 2^BITS`.
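///
/// A minimal usage sketch (illustrative only; assumes these items are in scope):
///
/// ```ignore
/// let boxed = Box::into_raw(Box::new(0u32)); // align_of::<u32>() == 4, so BITS <= 2
/// let tagged = TaggedAtomicPtr::<u32, 2>::new(boxed, 0b01);
/// assert_eq!(tagged.tag(Ordering::Relaxed), 0b01);
/// assert_eq!(tagged.ptr(Ordering::Relaxed).as_ptr(), boxed);
/// unsafe { drop(Box::from_raw(boxed)) };
/// ```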
#[repr(transparent)]
#[derive(Debug)]
pub struct TaggedAtomicPtr<T, const BITS: u8> {
    ptr: AtomicPtr<()>,
    _pd: PhantomData<T>,
}

impl<T, const BITS: u8> TaggedAtomicPtr<T, BITS> {
    /// Mask covering the low `BITS` tag bits.
    const fn mask() -> usize {
        !(!0usize << BITS)
    }

    pub fn new(ptr: *mut T, tag: usize) -> TaggedAtomicPtr<T, BITS> {
        debug_assert!(core::mem::align_of::<T>().ilog2() as u8 >= BITS);
        let mask = Self::mask();
        Self {
            ptr: AtomicPtr::new(ptr.with_addr((ptr.addr() & !mask) | (tag & mask)).cast()),
            _pd: PhantomData,
        }
    }

    /// Returns the pointer with the tag bits cleared.
    pub fn ptr(&self, order: Ordering) -> NonNull<T> {
        unsafe {
            NonNull::new_unchecked(
                self.ptr
                    .load(order)
                    .map_addr(|addr| addr & !Self::mask())
                    .cast(),
            )
        }
    }

    /// Returns the current tag.
    pub fn tag(&self, order: Ordering) -> usize {
        self.ptr.load(order).addr() & Self::mask()
    }

    /// ORs `tag` into the tag bits and returns the previous tag.
    pub fn fetch_or_tag(&self, tag: usize, order: Ordering) -> usize {
        let mask = Self::mask();
        let old_ptr = self.ptr.fetch_or(tag & mask, order);
        old_ptr.addr() & mask
    }

    /// Returns the tag and clears it (see the `tagged_ptr_tag_ops` test sketch
    /// below).
    pub fn take_tag(&self, order: Ordering) -> usize {
        let mask = Self::mask();
        let old_ptr = self.ptr.fetch_and(!mask, order);
        old_ptr.addr() & mask
    }

    /// Returns the previous tag on success, or the current tag on failure.
    #[inline(always)]
    fn compare_exchange_tag_inner(
        &self,
        old: usize,
        new: usize,
        success: Ordering,
        failure: Ordering,
        cmpxchg: fn(
            &AtomicPtr<()>,
            *mut (),
            *mut (),
            Ordering,
            Ordering,
        ) -> Result<*mut (), *mut ()>,
    ) -> Result<usize, usize> {
        let mask = Self::mask();
        let old_ptr = self.ptr.load(failure);

        let old = old_ptr.map_addr(|addr| (addr & !mask) | (old & mask));
        let new = old_ptr.map_addr(|addr| (addr & !mask) | (new & mask));

        let result = cmpxchg(&self.ptr, old, new, success, failure);

        result
            .map(|ptr| ptr.addr() & mask)
            .map_err(|ptr| ptr.addr() & mask)
    }

    /// Returns the previous tag on success, or the current tag on failure.
    #[allow(dead_code)]
    pub fn compare_exchange_tag(
        &self,
        old: usize,
        new: usize,
        success: Ordering,
        failure: Ordering,
    ) -> Result<usize, usize> {
        self.compare_exchange_tag_inner(
            old,
            new,
            success,
            failure,
            AtomicPtr::<()>::compare_exchange,
        )
    }

    /// Returns the previous tag on success, or the current tag on failure.
    pub fn compare_exchange_weak_tag(
        &self,
        old: usize,
        new: usize,
        success: Ordering,
        failure: Ordering,
    ) -> Result<usize, usize> {
        self.compare_exchange_tag_inner(
            old,
            new,
            success,
            failure,
            AtomicPtr::<()>::compare_exchange_weak,
        )
    }

    /// Stores `ptr`, preserving the current tag bits.
    #[allow(dead_code)]
    pub fn set_ptr(&self, ptr: *mut T, success: Ordering, failure: Ordering) {
        let mask = Self::mask();
        let ptr = ptr.cast::<()>();
        loop {
            let old = self.ptr.load(failure);
            let new = ptr.map_addr(|addr| (addr & !mask) | (old.addr() & mask));
            if self
                .ptr
                .compare_exchange_weak(old, new, success, failure)
                .is_ok()
            {
                break;
            }
        }
    }

    /// Stores `tag`, preserving the current pointer bits.
    pub fn set_tag(&self, tag: usize, success: Ordering, failure: Ordering) {
        let mask = Self::mask();
        loop {
            let ptr = self.ptr.load(failure);
            let new = ptr.map_addr(|addr| (addr & !mask) | (tag & mask));

            if self
                .ptr
                .compare_exchange_weak(ptr, new, success, failure)
                .is_ok()
            {
                break;
            }
        }
    }

    /// Returns the pointer and the tag in a single load.
    pub fn ptr_and_tag(&self, order: Ordering) -> (NonNull<T>, usize) {
        let mask = Self::mask();
        let ptr = self.ptr.load(order);
        let tag = ptr.addr() & mask;
        let ptr = ptr.map_addr(|addr| addr & !mask);
        let ptr = unsafe { NonNull::new_unchecked(ptr.cast()) };
        (ptr, tag)
    }
}

/// A small box that stores its value inline when the value's size and
/// alignment fit within those of a boxed pointer. The inline slot is typically
/// `size_of::<usize>()` bytes, but may be larger when `size_of::<Box<T>>()` is
/// larger, as it is for dynamically sized types such as `[T]` or `dyn Trait`.
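///
/// A minimal usage sketch (illustrative only; assumes these items are in scope):
///
/// ```ignore
/// let small = SmallBox::new(7u32); // stored inline, no heap allocation
/// assert!(SmallBox::<u32>::is_inline());
/// assert_eq!(*small, 7);
/// assert_eq!(small.into_inner(), 7);
/// ```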
#[derive(Debug)]
#[repr(transparent)]
// We use a box here because a box can be unboxed, while a pointer cannot.
pub struct SmallBox<T>(pub MaybeUninit<Box<T>>);

impl<T: Display> Display for SmallBox<T> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        (**self).fmt(f)
    }
}

impl<T: Ord> Ord for SmallBox<T> {
    fn cmp(&self, other: &Self) -> core::cmp::Ordering {
        self.as_ref().cmp(other.as_ref())
    }
}

impl<T: PartialOrd> PartialOrd for SmallBox<T> {
    fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
        self.as_ref().partial_cmp(other.as_ref())
    }
}

impl<T: Eq> Eq for SmallBox<T> {}

impl<T: PartialEq> PartialEq for SmallBox<T> {
    fn eq(&self, other: &Self) -> bool {
        self.as_ref().eq(other.as_ref())
    }
}

impl<T: Default> Default for SmallBox<T> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T: Clone> Clone for SmallBox<T> {
    fn clone(&self) -> Self {
        Self::new(self.as_ref().clone())
    }
}

impl<T> Deref for SmallBox<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.as_ref()
    }
}

impl<T> DerefMut for SmallBox<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.as_mut()
    }
}

impl<T> AsRef<T> for SmallBox<T> {
    fn as_ref(&self) -> &T {
        Self::as_ref(self)
    }
}

impl<T> AsMut<T> for SmallBox<T> {
    fn as_mut(&mut self) -> &mut T {
        Self::as_mut(self)
    }
}

impl<T> Borrow<T> for SmallBox<T> {
    fn borrow(&self) -> &T {
        &**self
    }
}

impl<T> BorrowMut<T> for SmallBox<T> {
    fn borrow_mut(&mut self) -> &mut T {
        &mut **self
    }
}

impl<T> SmallBox<T> {
    /// # Safety
    /// Must only be called once; it moves the value out. Takes `&self` so it
    /// can also be called from `drop()`.
    unsafe fn get_unchecked(&self, inline: bool) -> T {
        if inline {
            unsafe { mem::transmute_copy::<MaybeUninit<Box<T>>, T>(&self.0) }
        } else {
            unsafe { *self.0.assume_init_read() }
        }
    }

    pub fn as_ref(&self) -> &T {
        unsafe {
            if Self::is_inline() {
                mem::transmute::<&MaybeUninit<Box<T>>, &T>(&self.0)
            } else {
                self.0.assume_init_ref()
            }
        }
    }

    pub fn as_mut(&mut self) -> &mut T {
        unsafe {
            if Self::is_inline() {
                mem::transmute::<&mut MaybeUninit<Box<T>>, &mut T>(&mut self.0)
            } else {
                self.0.assume_init_mut()
            }
        }
    }

    pub fn into_inner(self) -> T {
        let this = ManuallyDrop::new(self);
        let inline = Self::is_inline();

        // SAFETY: `inline` is correctly calculated and this function consumes
        // `self`, so the value is moved out exactly once.
        unsafe { this.get_unchecked(inline) }
    }

    #[inline(always)]
    pub const fn is_inline() -> bool {
        // The value can be stored inline iff the size of T is equal to or
        // smaller than the size of the boxed type and the alignment of the
        // boxed type is an integer multiple of the alignment of T.
        mem::size_of::<T>() <= mem::size_of::<Box<MaybeUninit<T>>>()
            && mem::align_of::<Box<MaybeUninit<T>>>() % mem::align_of::<T>() == 0
    }

    pub fn new(value: T) -> Self {
        let inline = Self::is_inline();

        if inline {
            let mut this = MaybeUninit::new(Self(MaybeUninit::uninit()));
            unsafe {
                this.as_mut_ptr().cast::<T>().write(value);
                this.assume_init()
            }
        } else {
            Self(MaybeUninit::new(Box::new(value)))
        }
    }
}

impl<T> Drop for SmallBox<T> {
    fn drop(&mut self) {
        // Drop the contained value, whether stored inline or boxed.
        drop(unsafe { self.get_unchecked(Self::is_inline()) });
    }
}

/// returns the number of available hardware threads, or 1 if it cannot be determined.
pub fn available_parallelism() -> usize {
    std::thread::available_parallelism()
        .map(|n| n.get())
        .unwrap_or(1)
}

/// A wrapper that unconditionally implements [`Send`] for its contents.
#[repr(transparent)]
pub struct Send<T>(pub(self) T);

unsafe impl<T> core::marker::Send for Send<T> {}

impl<T> Deref for Send<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> DerefMut for Send<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl<T> Send<T> {
    /// # Safety
    /// The caller must ensure that sending `value` to another thread is sound.
    pub unsafe fn new(value: T) -> Self {
        Self(value)
    }
}

/// Unwraps a thread result, resuming the panic on the current thread if the
/// thread panicked.
pub fn unwrap_or_panic<T>(result: std::thread::Result<T>) -> T {
    match result {
        Ok(value) => value,
        Err(payload) => std::panic::resume_unwind(payload),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn tagged_ptr_zero_tag() {
        let ptr = Box::into_raw(Box::new(42u32));
        let tagged_ptr = TaggedAtomicPtr::<u32, 2>::new(ptr, 0);
        assert_eq!(tagged_ptr.tag(Ordering::Relaxed), 0);
        assert_eq!(tagged_ptr.ptr(Ordering::Relaxed).as_ptr(), ptr);

        unsafe {
            _ = Box::from_raw(ptr);
        }
    }

    #[test]
    fn tagged_ptr_exchange() {
        let ptr = Box::into_raw(Box::new(42u32));
        let tagged_ptr = TaggedAtomicPtr::<u32, 2>::new(ptr, 0b11);
        assert_eq!(tagged_ptr.tag(Ordering::Relaxed), 0b11);
        assert_eq!(tagged_ptr.ptr(Ordering::Relaxed).as_ptr(), ptr);

        assert_eq!(
            tagged_ptr
                .compare_exchange_tag(0b11, 0b10, Ordering::Relaxed, Ordering::Relaxed)
                .unwrap(),
            0b11
        );

        assert_eq!(tagged_ptr.tag(Ordering::Relaxed), 0b10);
        assert_eq!(tagged_ptr.ptr(Ordering::Relaxed).as_ptr(), ptr);

        unsafe {
            _ = Box::from_raw(ptr);
        }
    }
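
    // Added sketch (not part of the original test suite): exercises
    // `fetch_or_tag`, `take_tag`, and `set_tag` as documented above, checking
    // that the pointer bits are preserved while the tag changes.
    #[test]
    fn tagged_ptr_tag_ops() {
        let ptr = Box::into_raw(Box::new(42u32));
        let tagged_ptr = TaggedAtomicPtr::<u32, 2>::new(ptr, 0b01);

        // OR in another bit; the previous tag is returned.
        assert_eq!(tagged_ptr.fetch_or_tag(0b10, Ordering::Relaxed), 0b01);
        assert_eq!(tagged_ptr.tag(Ordering::Relaxed), 0b11);

        // Take the tag: the old tag is returned and the tag is cleared.
        assert_eq!(tagged_ptr.take_tag(Ordering::Relaxed), 0b11);
        assert_eq!(tagged_ptr.tag(Ordering::Relaxed), 0);

        // Set a new tag; the pointer is unchanged throughout.
        tagged_ptr.set_tag(0b10, Ordering::Relaxed, Ordering::Relaxed);
        assert_eq!(tagged_ptr.tag(Ordering::Relaxed), 0b10);
        assert_eq!(tagged_ptr.ptr(Ordering::Relaxed).as_ptr(), ptr);

        unsafe {
            _ = Box::from_raw(ptr);
        }
    }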

    #[test]
    fn value_inline() {
        assert!(SmallBox::<u32>::is_inline(), "u32 should be inline");
        assert!(SmallBox::<u8>::is_inline(), "u8 should be inline");
        assert!(
            SmallBox::<Box<u32>>::is_inline(),
            "Box<u32> should be inline"
        );
        assert!(
            SmallBox::<[u32; 2]>::is_inline(),
            "[u32; 2] should be inline"
        );
        assert!(
            !SmallBox::<[u32; 3]>::is_inline(),
            "[u32; 3] should not be inline"
        );
        assert!(SmallBox::<usize>::is_inline(), "usize should be inline");

        #[repr(C, align(16))]
        struct LargeType(u8);
        assert!(
            !SmallBox::<LargeType>::is_inline(),
            "LargeType should not be inline"
        );

        #[repr(C, align(4))]
        struct SmallType(u8);
        assert!(
            SmallBox::<SmallType>::is_inline(),
            "SmallType should be inline"
        );
    }
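
    // Added sketches (not part of the original test suite): a DropGuard runs
    // its closure exactly once on drop, and SmallBox round-trips a value
    // through `new`/`as_ref`/`into_inner` for both the inline and the boxed
    // (over-aligned) layouts.
    #[test]
    fn drop_guard_runs_once() {
        use core::sync::atomic::AtomicUsize;

        let counter = AtomicUsize::new(0);
        {
            let _guard = DropGuard::new(|| {
                counter.fetch_add(1, Ordering::Relaxed);
            });
        }
        assert_eq!(counter.load(Ordering::Relaxed), 1);
    }

    #[test]
    fn small_box_roundtrip() {
        // Inline case.
        let small = SmallBox::new(7u32);
        assert_eq!(*small.as_ref(), 7);
        assert_eq!(small.into_inner(), 7);

        // Boxed case: the alignment forces the value out of line.
        #[repr(C, align(64))]
        #[derive(Debug, PartialEq)]
        struct OverAligned(u64);
        assert!(!SmallBox::<OverAligned>::is_inline());

        let big = SmallBox::new(OverAligned(9));
        assert_eq!(big.as_ref().0, 9);
        assert_eq!(big.into_inner(), OverAligned(9));
    }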
}