Compare commits

No commits in common. "df6ab625ccd48e9f7d454baca2af366f1c946379" and "568d14aa9c526f76f0707e4a28ac004a304fb967" have entirely different histories.

df6ab625cc ... 568d14aa9c
Cargo.lock (generated), 37 changed lines

@@ -2,12 +2,6 @@
 # It is not intended for manual editing.
 version = 4
 
-[[package]]
-name = "allocator-api2"
-version = "0.2.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
-
 [[package]]
 name = "atomic-wait"
 version = "1.1.0"
@@ -18,35 +12,6 @@ dependencies = [
  "windows-sys",
 ]
 
-[[package]]
-name = "either"
-version = "1.15.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
-
-[[package]]
-name = "equivalent"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
-
-[[package]]
-name = "foldhash"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
-
-[[package]]
-name = "hashbrown"
-version = "0.15.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5"
-dependencies = [
- "allocator-api2",
- "equivalent",
- "foldhash",
-]
-
 [[package]]
 name = "libc"
 version = "0.2.174"
@@ -58,8 +23,6 @@ name = "werkzeug"
 version = "0.1.0"
 dependencies = [
  "atomic-wait",
- "either",
- "hashbrown",
 ]
 
 [[package]]
Cargo.toml

@@ -4,10 +4,9 @@ version = "0.1.0"
 edition = "2024"
 
 [features]
-default = ["nightly", "alloc"]
-alloc = ["dep:hashbrown"]
+default = ["alloc"]
+alloc = []
 std = ["alloc"]
-transposed-option = ["nightly"]
 nightly = []
 
 [dependencies]
@@ -16,5 +15,3 @@ nightly = []
 # While I could use libc / windows for this, why not just use this tiny crate
 # which does exactly and only a futex
 atomic-wait = "1.1.0"
-hashbrown = {version = "0.15", optional = true}
-either = "1.15.0"
src/bytes.rs, 56 changed lines

@@ -1,56 +0,0 @@
-//! Collection of utilities for working with primitive integral types in Rust, and converting between them.
-
-/// interprets an array of two `u32`s as a `u64`.
-/// Importantly, this does not account for endianness.
-/// This is the inverse of `u32s_from_u64`.
-pub fn u64_from_u32s(array: [u32; 2]) -> u64 {
-    // SAFETY: `out` and `array` are guaranteed not to overlap, we assert that
-    // we can transmute between the two types which guarantees that they have
-    // the same size. Both are well aligned and valid values for values for
-    // `u32` and `u64`.
-    unsafe {
-        let mut out: u64 = 0;
-
-        assert!(crate::mem::is_same_size::<u64, [u32; 2]>());
-
-        core::ptr::copy_nonoverlapping(array.as_ptr(), &raw mut out as *mut u32, 2);
-        out
-    }
-}
-
-/// interprets a `u64` as an array of two `u32`s.
-/// Importantly, this does not account for endianness.
-/// This is the inverse of `u64_from_u32s`.
-pub fn u32s_from_u64(value: u64) -> [u32; 2] {
-    // SAFETY: `value` is guaranteed to be a valid `u64`, and we are creating a
-    // slice of two `u32`s which is also 8 bytes.
-    assert!(crate::mem::can_transmute::<u64, [u32; 2]>());
-    unsafe { core::ptr::read(&raw const value as *const [u32; 2]) }
-}
-
-/// interprets an array of two `u16`s as a `u32`.
-/// Importantly, this does not account for endianness.
-/// This is the inverse of `u16s_from_u32`.
-pub fn u32_from_u16s(array: [u16; 2]) -> u32 {
-    // SAFETY: `out` and `array` are guaranteed not to overlap, we assert that
-    // we can transmute between the two types which guarantees that they have
-    // the same size. Both are well aligned and valid values for values for
-    // `u32` and `u16`.
-    unsafe {
-        let mut out = 0u32;
-
-        // we can't use read here because [u16; 2] is not sufficiently aligned for u32
-        core::ptr::copy_nonoverlapping(array.as_ptr(), &raw mut out as *mut u16, 2);
-        out
-    }
-}
-
-/// interprets a `u32` as an array of two `u16`s.
-/// Importantly, this does not account for endianness.
-/// This is the inverse of `u32_from_u16s`.
-pub fn u16s_from_u32(value: u32) -> [u16; 2] {
-    // SAFETY: `value` is guaranteed to be a valid `u32`, and we are creating a
-    // slice of two `u16`s which is also 4 bytes.
-    assert!(crate::mem::can_transmute::<u32, [u16; 2]>());
-    unsafe { core::ptr::read(&raw const value as *const [u16; 2]) }
-}
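For context on what the removed src/bytes.rs helpers did, here is a minimal usage sketch. It assumes the old revision (df6ab625cc) is still available as the `werkzeug` dependency with its nightly feature requirements satisfied; the function names and signatures come straight from the diff above, everything else is illustrative.

```rust
// Sketch only: depends on the old revision of `werkzeug`, where `pub mod bytes` still exists.
use werkzeug::bytes::{u32s_from_u64, u64_from_u32s};

fn main() {
    let value: u64 = 0x1122_3344_5566_7788;

    // Split the u64 into two u32 halves; which half holds the high bits is
    // target-endianness dependent, as the doc comments in the diff point out.
    let halves: [u32; 2] = u32s_from_u64(value);

    // The two functions are documented as inverses, so the round trip is exact.
    assert_eq!(u64_from_u32s(halves), value);
}
```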
src/iter.rs, 100 changed lines

@@ -1,100 +0,0 @@
-/// Trait for only yielding the next item in the Iterator if it tests true for some predicate
-pub trait NextIf<I>: Iterator<Item = I> + Clone {
-    /// Yield next item if `pred` returns `true`.
-    /// If `pred` returns `false` the Iterator is not advanced.
-    #[must_use]
-    fn next_if<F>(&mut self, pred: F) -> Option<I>
-    where
-        F: FnOnce(&Self::Item) -> bool,
-    {
-        let old = self.clone();
-        match self.next() {
-            Some(item) => {
-                if pred(&item) {
-                    Some(item)
-                } else {
-                    *self = old;
-                    None
-                }
-            }
-            None => None,
-        }
-    }
-    /// Yield next item if `pred` returns `Some(T)`.
-    /// If `pred` returns `None` the Iterator is not advanced.
-    #[must_use]
-    fn next_if_map<F, T>(&mut self, pred: F) -> Option<T>
-    where
-        F: FnOnce(Self::Item) -> Option<T>,
-    {
-        let old = self.clone();
-        match self.next() {
-            Some(item) => match pred(item) {
-                None => {
-                    *self = old;
-                    None
-                }
-                some => some,
-            },
-            None => None,
-        }
-    }
-}
-
-impl<I, T> NextIf<I> for T where T: Iterator<Item = I> + Clone {}
-
-pub trait AdvanceWhile<I>: Iterator<Item = I> + Clone {
-    /// Advance the iterator while `pred` returns true.
-    fn advance_while<F>(&mut self, mut pred: F)
-    where
-        F: FnMut(&Self::Item) -> bool,
-    {
-        loop {
-            match self.next_if(&mut pred) {
-                Some(_) => {}
-                None => break,
-            }
-        }
-    }
-}
-
-impl<I, T> AdvanceWhile<I> for T where T: Iterator<Item = I> + Clone {}
-
-pub trait FallibleMapIter<I>: Iterator<Item = I> + Clone {
-    /// consumes items from `self` if and only if `map` yields `Some`.
-    #[must_use]
-    fn map_iter_if<F, U>(&mut self, map: F) -> Option<U>
-    where
-        F: FnOnce(&mut Self) -> Option<U>,
-    {
-        // clone iterator and keep around
-        let old = self.clone();
-        match map(self) {
-            Some(result) => Some(result),
-            None => {
-                // the map function failed, restore iterator and yield None.
-                *self = old;
-                None
-            }
-        }
-    }
-
-    #[must_use]
-    fn try_map_iter_if<F, U, E>(&mut self, map: F) -> Result<U, E>
-    where
-        F: FnOnce(&mut Self) -> Result<U, E>,
-    {
-        // clone iterator and keep around
-        let old = self.clone();
-        match map(self) {
-            Ok(result) => Ok(result),
-            Err(e) => {
-                // the map function failed, restore iterator and yield None.
-                *self = old;
-                Err(e)
-            }
-        }
-    }
-}
-
-impl<I, T> FallibleMapIter<I> for T where T: Iterator<Item = I> + Clone {}
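The removed NextIf trait (blanket-implemented for any Iterator + Clone, per the diff) only advances the iterator when the predicate accepts the next item, otherwise it restores a clone taken before calling `next`. A hedged sketch of how it would be used, again assuming the old df6ab625cc revision of the crate:

```rust
// Sketch only: `werkzeug::iter::NextIf` exists at the old revision, per the diff above.
use werkzeug::iter::NextIf;

fn main() {
    let mut it = [1, 2, 3].iter().copied();

    // Predicate matches: the item is yielded and the iterator advances.
    assert_eq!(it.next_if(|&x| x == 1), Some(1));

    // Predicate fails: `None` is returned and the iterator is left where it was.
    assert_eq!(it.next_if(|&x| x == 99), None);
    assert_eq!(it.next(), Some(2));
}
```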
src/lib.rs, 21 changed lines

@@ -1,14 +1,5 @@
 #![cfg_attr(not(feature = "std"), no_std)]
-#![cfg_attr(
-    feature = "nightly",
-    feature(
-        box_vec_non_null,
-        maybe_uninit_slice,
-        debug_closure_helpers,
-        slice_ptr_get,
-    )
-)]
-#![cfg_attr(feature = "transposed-option", feature(try_trait_v2))]
+#![cfg_attr(feature = "nightly", feature(strict_provenance_atomic_ptr))]
 
 #[cfg(any(test, feature = "std", feature = "alloc"))]
 extern crate alloc;
@@ -17,13 +8,8 @@ extern crate alloc;
 extern crate std;
 
 pub mod atomic;
-pub mod bytes;
 pub mod cachepadded;
 pub mod drop_guard;
-pub mod iter;
-pub mod mem;
-#[cfg(feature = "transposed-option")]
-pub mod option;
 pub mod ptr;
 pub mod rand;
 #[cfg(feature = "alloc")]
@@ -31,8 +17,5 @@ pub mod smallbox;
 pub mod sync;
 pub mod util;
 
-#[cfg(feature = "alloc")]
-pub mod tree;
-
 pub use cachepadded::CachePadded;
-pub use mem::can_transmute;
+pub use util::can_transmute;
src/mem.rs, 22 changed lines

@@ -1,22 +0,0 @@
-pub const fn can_transmute<A, B>() -> bool {
-    use core::mem::{align_of, size_of};
-    // We can transmute `A` to `B` iff `A` and `B` have the same size and the
-    // alignment of `A` is greater than or equal to the alignment of `B`.
-    (size_of::<A>() == size_of::<B>()) & (align_of::<A>() >= align_of::<B>())
-}
-
-pub const fn is_same_size<A, B>() -> bool {
-    use core::mem::size_of;
-
-    size_of::<A>() == size_of::<B>()
-}
-
-/// Checks if `A` is aligned at least as well as `B`. e.g. `assert_aligned<u64,
-/// u32>()` returns `true`, but `assert_aligned<u32, u64>()` returns
-/// `false`. This is useful for ensuring that a type `A` can be safely cast to a
-/// type `B` without violating alignment requirements.
-pub const fn is_aligned<A, B>() -> bool {
-    use core::mem::align_of;
-
-    align_of::<A>() >= align_of::<B>()
-}
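The removed src/mem.rs functions are small const helpers for size and alignment checks; the lib.rs and util.rs hunks show that only `can_transmute` survives, moved back into src/util.rs. A minimal sketch of the checks they encode, assuming the old revision where `werkzeug::mem` still exists:

```rust
// Sketch only: `werkzeug::mem` exists at the old revision, per the diff above.
use werkzeug::mem::{can_transmute, is_aligned, is_same_size};

fn main() {
    // u64 and [u32; 2] have the same size, and u64 is at least as aligned.
    assert!(can_transmute::<u64, [u32; 2]>());

    // Size-only and alignment-only variants of the same check.
    assert!(is_same_size::<u32, [u16; 2]>());
    assert!(is_aligned::<u64, u32>());
}
```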
src/option.rs, 98 changed lines

@@ -1,98 +0,0 @@
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Default)]
-pub enum TransposedOption<T> {
-    #[default]
-    None,
-    Some(T),
-}
-
-impl<T> TransposedOption<T> {
-    pub fn new(value: T) -> Self {
-        TransposedOption::Some(value)
-    }
-
-    pub fn is_none(&self) -> bool {
-        matches!(self, TransposedOption::None)
-    }
-
-    pub fn map<U, F>(self, f: F) -> TransposedOption<U>
-    where
-        F: FnOnce(T) -> U,
-    {
-        use TransposedOption::*;
-        match self {
-            Some(value) => Some(f(value)),
-            None => None,
-        }
-    }
-
-    pub fn and_then<U, F>(self, f: F) -> TransposedOption<U>
-    where
-        F: FnOnce(T) -> TransposedOption<U>,
-    {
-        use TransposedOption::*;
-        match self {
-            Some(value) => f(value),
-            None => None,
-        }
-    }
-}
-
-impl<T> From<Option<T>> for TransposedOption<T> {
-    fn from(option: Option<T>) -> Self {
-        match option {
-            Some(value) => TransposedOption::Some(value),
-            None => TransposedOption::None,
-        }
-    }
-}
-
-impl<T> From<TransposedOption<T>> for Option<T> {
-    fn from(transposed: TransposedOption<T>) -> Self {
-        match transposed {
-            TransposedOption::Some(value) => Some(value),
-            TransposedOption::None => None,
-        }
-    }
-}
-
-impl<T> core::ops::Try for TransposedOption<T> {
-    type Output = TransposedOption<T>;
-    type Residual = T;
-
-    fn from_output(_: Self::Output) -> Self {
-        use TransposedOption::*;
-        None
-    }
-
-    fn branch(self) -> std::ops::ControlFlow<Self::Residual, Self::Output> {
-        use TransposedOption::*;
-        match self {
-            Some(value) => std::ops::ControlFlow::Break(value),
-            None => std::ops::ControlFlow::Continue(None),
-        }
-    }
-}
-
-impl<T: From<U>, U> core::ops::FromResidual<U> for TransposedOption<T> {
-    fn from_residual(residual: U) -> Self {
-        Self::new(residual.into())
-    }
-}
-
-// #[cfg(all(test, feature = "transposed-option"))]
-// mod tests {
-//     use super::*;
-//     use TransposedOption::*;
-
-//     #[test]
-//     fn transposed_option_try() {
-//         let a: TransposedOption<i32> = try {
-//             TransposedOption::Some(42)?;
-//             None::<i32>?;
-
-//             Some(3)
-//         };
-
-//         assert_eq!(a, TransposedOption::Some(42));
-//     }
-// }
src/ptr.rs, 107 changed lines

@@ -2,17 +2,13 @@ use core::{
     cmp::Ordering,
     fmt, hash,
     marker::{PhantomData, Send},
-    mem::{self, ManuallyDrop},
+    mem,
     num::NonZero,
     ops::{Deref, DerefMut},
-    pin::Pin,
     ptr::NonNull,
     sync::atomic::{self, AtomicPtr},
 };
 
-/// This is a wrapper around `NonNull<T>` that is `Send` even if `T` is not
-/// `Send`. This is useful for types that use `NonNull<T>` internally but are
-/// safe to send to other threads.
 #[repr(transparent)]
 pub struct SendNonNull<T>(NonNull<T>);
 
@@ -103,7 +99,6 @@ impl<T> DerefMut for SendNonNull<T> {
 }
 
 impl<T> SendNonNull<T> {
-    /// Creates a new `SendNonNull<T>` if `ptr` is non-null, otherwise returns `None`.
     pub const fn new(ptr: *mut T) -> Option<Self> {
         match NonNull::new(ptr) {
             Some(ptr) => Some(Self(ptr)),
@@ -111,17 +106,14 @@ impl<T> SendNonNull<T> {
         }
     }
 
-    /// Creates a new `SendNonNull<T>` that is dangling.
     pub const fn dangling() -> Self {
         Self(NonNull::dangling())
    }
 
-    /// Casts the pointer to a different type
     pub const fn cast<U>(self) -> SendNonNull<U> {
         SendNonNull(self.0.cast())
     }
 
-    /// Creates a new `SendNonNull<T>` with the given address, keeping the provenance of `self`.
     pub fn with_addr(self, addr: NonZero<usize>) -> Self {
         // SAFETY: addr is non-zero, so the pointer is valid.
         unsafe {
@@ -131,17 +123,11 @@ impl<T> SendNonNull<T> {
         }
     }
 
-    /// Maps the address of the pointer using the given function, keeping the provenance of `self`.
    pub fn map_addr(self, f: impl FnOnce(NonZero<usize>) -> NonZero<usize>) -> Self {
         // SAFETY: addr is non-zero, so the pointer is valid.
         self.with_addr(f(self.addr()))
     }
 
-    /// Returns a new pointer, offset from `self` by `offset` elements.
-    ///
-    /// # Safety
-    ///
-    /// The caller must ensure that the resulting pointer points at the same allocation as `self`.
     pub unsafe fn offset(self, offset: isize) -> Self {
         // SAFETY: self is a valid pointer, offset is guaranteed to point to a valid memory location by the contract of `offset`
         unsafe { Self(NonNull::new_unchecked(self.as_ptr().offset(offset))) }
@@ -453,97 +439,6 @@ impl<T, const BITS: u8> TaggedAtomicPtr<T, BITS> {
         let ptr = unsafe { NonNull::new_unchecked(ptr.cast()) };
         (ptr, tag)
     }
-
-    pub fn copy_from(
-        &self,
-        other: &Self,
-        load: atomic::Ordering,
-        store: atomic::Ordering,
-    ) -> (*mut T, usize) {
-        let old = self.ptr.swap(other.ptr.load(load), store);
-
-        let mask = Self::mask();
-        (old.map_addr(|addr| addr & !mask).cast(), old.addr() & mask)
-    }
-}
-
-#[repr(transparent)]
-pub struct UniquePtr<'a, T> {
-    ptr: NonNull<T>,
-    _marker: PhantomData<&'a mut T>,
-}
-
-impl<'a, T> UniquePtr<'a, T> {
-    #[inline]
-    pub fn map<U, F>(value: T, f: F) -> U
-    where
-        F: FnOnce(UniquePtr<'_, T>) -> U,
-    {
-        let mut inner = ManuallyDrop::new(value);
-        let this = UniquePtr::new(&mut inner);
-        f(this)
-    }
-
-    pub fn new_pinned(inner: Pin<&'a mut ManuallyDrop<T>>) -> Pin<Self> {
-        // SAFETY: `inner` is pinned, so it must remain pinned for the lifetime of `Self`.
-        unsafe {
-            Pin::new_unchecked(Self {
-                ptr: NonNull::new_unchecked(core::mem::transmute::<_, _>(inner)),
-                _marker: PhantomData,
-            })
-        }
-    }
-
-    pub fn new(inner: &'a mut ManuallyDrop<T>) -> Self {
-        Self {
-            ptr: NonNull::from(&mut **inner),
-            _marker: PhantomData,
-        }
-    }
-
-    pub unsafe fn new_unchecked(ptr: *mut T) -> Self {
-        Self {
-            ptr: unsafe { NonNull::new_unchecked(ptr) },
-            _marker: PhantomData,
-        }
-    }
-
-    pub fn as_ptr(&self) -> *mut T {
-        self.ptr.as_ptr()
-    }
-
-    pub fn as_non_null(&self) -> NonNull<T> {
-        self.ptr
-    }
-
-    pub unsafe fn cast<U>(self) -> UniquePtr<'a, U> {
-        UniquePtr {
-            ptr: self.ptr.cast(),
-            _marker: PhantomData,
-        }
-    }
-}
-
-impl<'a, T> Deref for UniquePtr<'a, T> {
-    type Target = T;
-
-    fn deref(&self) -> &Self::Target {
-        unsafe { self.ptr.as_ref() }
-    }
-}
-
-impl<'a, T> DerefMut for UniquePtr<'a, T> {
-    fn deref_mut(&mut self) -> &mut Self::Target {
-        unsafe { self.ptr.as_mut() }
-    }
-}
-
-impl<'a, T> Drop for UniquePtr<'a, T> {
-    fn drop(&mut self) {
-        unsafe {
-            core::ptr::drop_in_place(&raw mut **self);
-        }
-    }
 }
 
 #[cfg(test)]
src/sync.rs, 680 changed lines

@@ -357,8 +357,7 @@ pub mod channel {
         }
 
         /// Takes the value from the channel, if it is present.
-        /// this function must only ever return `Some` once.
-        pub unsafe fn take(&mut self) -> Option<T> {
+        fn take(&mut self) -> Option<T> {
             // unset the OCCUPIED_BIT to indicate that we are taking the value, if any is present.
             if self
                 .0
@@ -370,19 +369,13 @@ pub mod channel {
                 // The channel was empty, so we return None.
                 None
             } else {
-                // SAFETY: we only ever access this field by pointer
-                // the OCCUPIED_BIT was set, so we can safely read the value.
-                // this function is only called once, within `recv`,
-                // guaranteeing that the value will only be dropped once.
                 unsafe { Some(self.0.val.get().read().assume_init_read()) }
             }
         }
 
         pub fn recv(mut self) -> T {
             loop {
-                // SAFETY: recv can only be called once, since it takes ownership of `self`.
-                // if `take` returns a value, it will never be called again.
-                if let Some(t) = unsafe { self.take() } {
+                if let Some(t) = self.take() {
                     return t;
                 }
 
@@ -415,672 +408,3 @@ pub mod channel {
         }
     }
 }
-
-#[cfg(feature = "alloc")]
-pub mod queue {
-    //! A Queue with multiple receivers and multiple producers, where a producer can send a message to one of any of the receivers (any-cast), or one of the receivers (uni-cast).
-    //! After being woken up from waiting on a message, the receiver will look up the index of the message in the queue and return it.
-
-    use alloc::{boxed::Box, sync::Arc, vec::Vec};
-    use core::{
-        cell::UnsafeCell,
-        marker::{PhantomData, PhantomPinned},
-        mem::{self, MaybeUninit},
-        pin::Pin,
-        ptr::{self, NonNull},
-        sync::atomic::{AtomicU32, Ordering},
-    };
-
-    use hashbrown::HashMap;
-
-    use crate::{CachePadded, ptr::TaggedAtomicPtr};
-
-    use super::Parker;
-
-    struct QueueInner<T> {
-        receivers: HashMap<ReceiverToken, CachePadded<(Slot<T>, bool)>>,
-        messages: Vec<T>,
-        _phantom: core::marker::PhantomData<T>,
-    }
-
-    pub struct Queue<T> {
-        inner: UnsafeCell<QueueInner<T>>,
-        lock: AtomicU32,
-    }
-
-    unsafe impl<T> Send for Queue<T> {}
-    unsafe impl<T> Sync for Queue<T> where T: Send {}
-
-    pub struct Receiver<T> {
-        queue: Arc<Queue<T>>,
-        lock: Pin<Box<(Parker, PhantomPinned)>>,
-    }
-
-    #[repr(transparent)]
-    pub struct Sender<T> {
-        queue: Arc<Queue<T>>,
-    }
-
-    // TODO: make this a linked list of slots so we can queue multiple messages for
-    // a single receiver
-    const SLOT_ALIGN: u8 = core::mem::align_of::<usize>().ilog2() as u8;
-    struct Slot<T> {
-        value: UnsafeCell<MaybeUninit<T>>,
-        next_and_state: TaggedAtomicPtr<Self, SLOT_ALIGN>,
-        _phantom: PhantomData<Self>,
-    }
-
-    impl<T> Slot<T> {
-        fn new() -> Self {
-            Self {
-                value: UnsafeCell::new(MaybeUninit::uninit()),
-                next_and_state: TaggedAtomicPtr::new(ptr::null_mut(), 0), // 0 means empty
-                _phantom: PhantomData,
-            }
-        }
-
-        fn is_set(&self) -> bool {
-            self.next_and_state.tag(Ordering::Acquire) == 1
-        }
-
-        unsafe fn pop(&self) -> Option<T> {
-            NonNull::new(self.next_and_state.ptr(Ordering::Acquire))
-                .and_then(|next| {
-                    // SAFETY: The next slot is a valid pointer to a Slot<T> that was allocated by us.
-                    unsafe { next.as_ref().pop() }
-                })
-                .or_else(|| {
-                    if self
-                        .next_and_state
-                        .swap_tag(0, Ordering::AcqRel, Ordering::Relaxed)
-                        == 1
-                    {
-                        // SAFETY: The value is only initialized when the state is set to 1.
-                        Some(unsafe { (&mut *self.value.get()).assume_init_read() })
-                    } else {
-                        None
-                    }
-                })
-        }
-
-        /// this operation isn't atomic.
-        #[allow(dead_code)]
-        unsafe fn pop_front(&self) -> Option<T> {
-            // swap the slot at `next` with self, and return the value of self.
-
-            // get next ptr, if it is non-null.
-            if let Some(next) = NonNull::new(self.next_and_state.ptr(Ordering::Acquire)) {
-                unsafe {
-                    // copy the next slot's next_and_state into self's next_and_state
-                    let (_, old) = self.next_and_state.copy_from(
-                        &next.as_ref().next_and_state,
-                        Ordering::Acquire,
-                        Ordering::Release,
-                    );
-
-                    // copy the next slot's value into self's value
-                    mem::swap(&mut *self.value.get(), &mut *next.as_ref().value.get());
-
-                    if old == 1 {
-                        // SAFETY: The value is only initialized when the state is set to 1.
-                        Some(next.as_ref().value.get().read().assume_init())
-                    } else {
-                        // next was empty, so we return None.
-                        None
-                    }
-                }
-            } else {
-                // next is null, so popping from the back or front is the same.
-                unsafe { self.pop() }
-            }
-        }
-
-        /// the caller must ensure that they have exclusive access to the slot
-        unsafe fn push(&self, value: T) {
-            if self.is_set() {
-                let next = self.next_ptr();
-                unsafe {
-                    (next.as_ref()).push(value);
-                }
-            } else {
-                // SAFETY: The value is only initialized when the state is set to 1.
-                unsafe { (&mut *self.value.get()).write(value) };
-                self.next_and_state
-                    .set_tag(1, Ordering::Release, Ordering::Relaxed);
-            }
-        }
-
-        fn next_ptr(&self) -> NonNull<Slot<T>> {
-            if let Some(next) = NonNull::new(self.next_and_state.ptr(Ordering::Acquire)) {
-                next.cast()
-            } else {
-                self.alloc_next()
-            }
-        }
-
-        fn alloc_next(&self) -> NonNull<Slot<T>> {
-            let next = Box::into_raw(Box::new(Slot::new()));
-
-            let next = loop {
-                match self.next_and_state.compare_exchange_weak_ptr(
-                    ptr::null_mut(),
-                    next,
-                    Ordering::Release,
-                    Ordering::Acquire,
-                ) {
-                    Ok(_) => break next,
-                    Err(other) => {
-                        if other.is_null() {
-                            continue;
-                        }
-                        // next was allocated under us, so we need to drop the slot we just allocated again.
-                        _ = unsafe { Box::from_raw(next) };
-                        break other;
-                    }
-                }
-            };
-
-            unsafe {
-                // SAFETY: The next slot is a valid pointer to a Slot<T> that was allocated by us.
-                NonNull::new_unchecked(next)
-            }
-        }
-    }
-
-    impl<T> Drop for Slot<T> {
-        fn drop(&mut self) {
-            // drop next chain
-            if let Some(next) = NonNull::new(self.next_and_state.swap_ptr(
-                ptr::null_mut(),
-                Ordering::Release,
-                Ordering::Relaxed,
-            )) {
-                // SAFETY: The next slot is a valid pointer to a Slot<T> that was allocated by us.
-                // We drop this in place because idk..
-                unsafe {
-                    next.drop_in_place();
-                    _ = Box::<mem::ManuallyDrop<Self>>::from_raw(next.cast().as_ptr());
-                }
-            }
-
-            // SAFETY: The value is only initialized when the state is set to 1.
-            if mem::needs_drop::<T>() && self.next_and_state.tag(Ordering::Acquire) == 1 {
-                unsafe { (&mut *self.value.get()).assume_init_drop() };
-            }
-        }
-    }
-
-    // const BLOCK_SIZE: usize = 8;
-    // struct Block<T> {
-    //     next: AtomicPtr<Block<T>>,
-    //     slots: [CachePadded<Slot<T>>; BLOCK_SIZE],
-    // }
-
-    /// A token that can be used to identify a specific receiver in a queue.
-    #[repr(transparent)]
-    #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)]
-    pub struct ReceiverToken(crate::util::Send<NonNull<u32>>);
-
-    impl ReceiverToken {
-        pub fn as_ptr(&self) -> *mut u32 {
-            self.0.into_inner().as_ptr()
-        }
-
-        pub unsafe fn as_parker(&self) -> &Parker {
-            // SAFETY: The pointer is guaranteed to be valid and aligned, as it comes from a pinned Parker.
-            unsafe { Parker::from_ptr(self.as_ptr()) }
-        }
-
-        pub unsafe fn from_parker(parker: &Parker) -> Self {
-            // SAFETY: The pointer is guaranteed to be valid and aligned, as it comes from a pinned Parker.
-            let ptr = NonNull::from(parker).cast::<u32>();
-            ReceiverToken(crate::util::Send(ptr))
-        }
-    }
-
-    impl<T> Queue<T> {
-        pub fn new() -> Arc<Self> {
-            Arc::new(Self {
-                inner: UnsafeCell::new(QueueInner {
-                    messages: Vec::new(),
-                    receivers: HashMap::new(),
-                    _phantom: PhantomData,
-                }),
-                lock: AtomicU32::new(0),
-            })
-        }
-
-        pub fn new_sender(self: &Arc<Self>) -> Sender<T> {
-            Sender {
-                queue: self.clone(),
-            }
-        }
-
-        pub fn num_receivers(self: &Arc<Self>) -> usize {
-            let _guard = self.lock();
-            self.inner().receivers.len()
-        }
-
-        pub fn as_sender(self: &Arc<Self>) -> &Sender<T> {
-            unsafe { mem::transmute::<&Arc<Self>, &Sender<T>>(self) }
-        }
-
-        pub fn new_receiver(self: &Arc<Self>) -> Receiver<T> {
-            let recv = Receiver {
-                queue: self.clone(),
-                lock: Box::pin((Parker::new(), PhantomPinned)),
-            };
-
-            // allocate slot for the receiver
-            let token = recv.get_token();
-            let _guard = recv.queue.lock();
-            recv.queue
-                .inner()
-                .receivers
-                .insert(token, CachePadded::new((Slot::<T>::new(), false)));
-
-            drop(_guard);
-            recv
-        }
-
-        fn lock(&self) -> impl Drop {
-            unsafe {
-                let lock = crate::sync::Lock::from_ptr(&self.lock as *const _ as _);
-                lock.lock();
-                crate::drop_guard::DropGuard::new(|| lock.unlock())
-            }
-        }
-
-        fn inner(&self) -> &mut QueueInner<T> {
-            // SAFETY: The inner is only accessed while the queue is locked.
-            unsafe { &mut *self.inner.get() }
-        }
-    }
-
-    impl<T> QueueInner<T> {
-        fn poll(&mut self, token: ReceiverToken) -> Option<T> {
-            // check if someone has sent a message to this receiver
-            let CachePadded((slot, _)) = self.receivers.get(&token)?;
-
-            unsafe { slot.pop() }.or_else(|| {
-                // if the slot is empty, we can check the indexed messages
-
-                self.messages.pop()
-            })
-        }
-    }
-
-    impl<T> Receiver<T> {
-        pub fn get_token(&self) -> ReceiverToken {
-            // the token is just the pointer to the lock of this receiver.
-            // the lock is pinned, so it's address is stable across calls to `receive`.
-
-            ReceiverToken(crate::util::Send(NonNull::from(&self.lock.0).cast()))
-        }
-    }
-
-    impl<T> Drop for Receiver<T> {
-        fn drop(&mut self) {
-            if mem::needs_drop::<T>() {
-                // lock the queue
-                let _guard = self.queue.lock();
-                let queue = self.queue.inner();
-
-                // remove the receiver from the queue
-                _ = queue.receivers.remove(&self.get_token());
-            }
-        }
-    }
-
-    impl<T: Send> Receiver<T> {
-        pub fn recv(&self) -> T {
-            let token = self.get_token();
-
-            loop {
-                // lock the queue
-                let _guard = self.queue.lock();
-                let queue = self.queue.inner();
-
-                // check if someone has sent a message to this receiver
-                if let Some(t) = queue.poll(token) {
-                    queue.receivers.get_mut(&token).unwrap().1 = false; // mark the slot as not parked
-                    return t;
-                }
-
-                // there was no message for this receiver, so we need to park it
-                queue.receivers.get_mut(&token).unwrap().1 = true; // mark the slot as parked
-
-                self.lock.0.park_with_callback(move || {
-                    // drop the lock guard after having set the lock state to waiting.
-                    // this avoids a deadlock if the sender tries to send a message
-                    // while the receiver is in the process of parking (I think..)
-                    drop(_guard);
-                });
-            }
-        }
-
-        pub fn try_recv(&self) -> Option<T> {
-            let token = self.get_token();
-
-            // lock the queue
-            let _guard = self.queue.lock();
-            let queue = self.queue.inner();
-
-            // check if someone has sent a message to this receiver
-            queue.poll(token)
-        }
-    }
-
-    impl<T: Send> Sender<T> {
-        /// Sends a message to one of the receivers in the queue, or makes it
-        /// available to any receiver that will park in the future.
-        pub fn anycast(&self, value: T) {
-            let _guard = self.queue.lock();
-
-            // SAFETY: The queue is locked, so we can safely access the inner queue.
-            match unsafe { self.try_anycast_inner(value) } {
-                Ok(_) => {}
-                Err(value) => {
-                    // no parked receiver found, so we want to add the message to the indexed slots
-                    let queue = self.queue.inner();
-                    queue.messages.push(value);
-
-                    // waking up a parked receiver is not necessary here, as any
-                    // receivers that don't have a free slot are currently waking up.
-                }
-            }
-        }
-
-        pub fn try_anycast(&self, value: T) -> Result<(), T> {
-            // lock the queue
-            let _guard = self.queue.lock();
-
-            // SAFETY: The queue is locked, so we can safely access the inner queue.
-            unsafe { self.try_anycast_inner(value) }
-        }
-
-        /// The caller must hold the lock on the queue for the duration of this function.
-        unsafe fn try_anycast_inner(&self, value: T) -> Result<(), T> {
-            // look for a receiver that is parked
-            let queue = self.queue.inner();
-            if let Some((token, slot)) =
-                queue
-                    .receivers
-                    .iter()
-                    .find_map(|(token, CachePadded((slot, is_parked)))| {
-                        // ensure the slot is available
-                        if *is_parked && !slot.is_set() {
-                            Some((*token, slot))
-                        } else {
-                            None
-                        }
-                    })
-            {
-                // we found a receiver that is parked, so we can send the message to it
-                unsafe {
-                    (&mut *slot.value.get()).write(value);
-                    slot.next_and_state
-                        .set_tag(1, Ordering::Release, Ordering::Relaxed);
-                    Parker::from_ptr(token.0.into_inner().as_ptr()).unpark();
-                }
-
-                return Ok(());
-            } else {
-                return Err(value);
-            }
-        }
-
-        /// Sends a message to a specific receiver, waking it if it is parked.
-        pub fn unicast(&self, value: T, receiver: ReceiverToken) -> Result<(), T> {
-            // lock the queue
-            let _guard = self.queue.lock();
-            let queue = self.queue.inner();
-
-            let Some(CachePadded((slot, _))) = queue.receivers.get_mut(&receiver) else {
-                return Err(value);
-            };
-
-            unsafe {
-                slot.push(value);
-            }
-
-            // wake the receiver
-            unsafe {
-                Parker::from_ptr(receiver.0.into_inner().as_ptr()).unpark();
-            }
-
-            Ok(())
-        }
-
-        pub fn broadcast(&self, value: T)
-        where
-            T: Clone,
-        {
-            // lock the queue
-            let _guard = self.queue.lock();
-            let queue = self.queue.inner();
-
-            // send the message to all receivers
-            for (token, CachePadded((slot, _))) in queue.receivers.iter() {
-                // SAFETY: The slot is owned by this receiver.
-
-                unsafe { slot.push(value.clone()) };
-
-                // wake the receiver
-                unsafe {
-                    Parker::from_ptr(token.0.into_inner().as_ptr()).unpark();
-                }
-            }
-        }
-
-        pub fn broadcast_with<F>(&self, mut f: F)
-        where
-            F: FnMut() -> T,
-        {
-            // lock the queue
-            let _guard = self.queue.lock();
-            let queue = self.queue.inner();
-
-            // send the message to all receivers
-            for (token, CachePadded((slot, _))) in queue.receivers.iter() {
-                // SAFETY: The slot is owned by this receiver.
-
-                unsafe { slot.push(f()) };
-
-                // check if the receiver is parked
-                // wake the receiver
-                unsafe {
-                    Parker::from_ptr(token.0.into_inner().as_ptr()).unpark();
-                }
-            }
-        }
-    }
-
-    #[cfg(test)]
-    mod tests {
-        use std::println;
-
-        use super::*;
-
-        #[test]
-        fn test_queue() {
-            let queue = Queue::<i32>::new();
-
-            let sender = queue.new_sender();
-            let receiver1 = queue.new_receiver();
-            let receiver2 = queue.new_receiver();
-
-            let token2 = receiver2.get_token();
-
-            sender.anycast(42);
-
-            assert_eq!(receiver1.recv(), 42);
-
-            sender.unicast(100, token2).unwrap();
-            assert_eq!(receiver1.try_recv(), None);
-            assert_eq!(receiver2.recv(), 100);
-        }
-
-        #[test]
-        fn queue_broadcast() {
-            let queue = Queue::<i32>::new();
-
-            let sender = queue.new_sender();
-            let receiver1 = queue.new_receiver();
-            let receiver2 = queue.new_receiver();
-
-            sender.broadcast(42);
-
-            assert_eq!(receiver1.recv(), 42);
-            assert_eq!(receiver2.recv(), 42);
-        }
-
-        #[test]
-        fn queue_multiple_messages() {
-            let queue = Queue::<i32>::new();
-
-            let sender = queue.new_sender();
-            let receiver = queue.new_receiver();
-
-            sender.anycast(1);
-            sender.unicast(2, receiver.get_token()).unwrap();
-
-            assert_eq!(receiver.recv(), 2);
-            assert_eq!(receiver.recv(), 1);
-        }
-
-        #[test]
-        fn queue_threaded() {
-            #[derive(Debug, Clone, Copy)]
-            enum Message {
-                Send(i32),
-                Exit,
-            }
-
-            let queue = Queue::<Message>::new();
-
-            let sender = queue.new_sender();
-
-            let threads = (0..5)
-                .map(|_| {
-                    let queue_clone = queue.clone();
-                    let receiver = queue_clone.new_receiver();
-
-                    std::thread::spawn(move || {
-                        loop {
-                            match receiver.recv() {
-                                Message::Send(value) => {
-                                    println!(
-                                        "Receiver {:?} Received: {}",
-                                        receiver.get_token(),
-                                        value
-                                    );
-                                }
-                                Message::Exit => {
-                                    println!("Exiting thread");
-                                    break;
-                                }
-                            }
-                        }
-                    })
-                })
-                .collect::<Vec<_>>();
-
-            // Send messages to the receivers
-            for i in 0..10 {
-                sender.anycast(Message::Send(i));
-            }
-
-            // Send exit messages to all receivers
-            sender.broadcast(Message::Exit);
-            for thread in threads {
-                thread.join().unwrap();
-            }
-            println!("All threads have exited.");
-        }
-
-        #[test]
-        fn drop_slot() {
-            // Test that dropping a slot does not cause a double free or panic
-            let slot = Slot::<i32>::new();
-            unsafe {
-                slot.push(42);
-                drop(slot);
-            }
-        }
-
-        #[test]
-        fn drop_slot_chain() {
-            struct DropCheck<'a>(&'a AtomicU32);
-            impl Drop for DropCheck<'_> {
-                fn drop(&mut self) {
-                    self.0.fetch_sub(1, Ordering::SeqCst);
-                }
-            }
-
-            impl<'a> DropCheck<'a> {
-                fn new(counter: &'a AtomicU32) -> Self {
-                    counter.fetch_add(1, Ordering::SeqCst);
-                    Self(counter)
-                }
-            }
-            let counter = AtomicU32::new(0);
-            let slot = Slot::<DropCheck>::new();
-            for _ in 0..10 {
-                unsafe {
-                    slot.push(DropCheck::new(&counter));
-                }
-            }
-            assert_eq!(counter.load(Ordering::SeqCst), 10);
-            drop(slot);
-            assert_eq!(
-                counter.load(Ordering::SeqCst),
-                0,
-                "All DropCheck instances should have been dropped"
-            );
-        }
-
-        #[test]
-        fn send_self() {
-            // Test that sending a message to self works
-            let queue = Queue::<i32>::new();
-            let sender = queue.new_sender();
-            let receiver = queue.new_receiver();
-
-            sender.unicast(42, receiver.get_token()).unwrap();
-            assert_eq!(receiver.recv(), 42);
-        }
-
-        #[test]
-        fn send_self_many() {
-            // Test that sending multiple messages to self works
-            let queue = Queue::<i32>::new();
-            let sender = queue.new_sender();
-            let receiver = queue.new_receiver();
-
-            for i in 0..10 {
-                sender.unicast(i, receiver.get_token()).unwrap();
-            }
-
-            for i in (0..10).rev() {
-                assert_eq!(receiver.recv(), i);
-            }
-        }
-
-        #[test]
-        fn slot_pop_front() {
-            // Test that popping from the front of a slot works correctly
-            let slot = Slot::<i32>::new();
-            unsafe {
-                slot.push(1);
-                slot.push(2);
-                slot.push(3);
-            }
-
-            assert_eq!(unsafe { slot.pop_front() }, Some(1));
-            assert_eq!(unsafe { slot.pop_front() }, Some(2));
-            assert_eq!(unsafe { slot.pop_front() }, Some(3));
-            assert_eq!(unsafe { slot.pop_front() }, None);
-        }
-    }
-}
src/tree.rs, 1919 changed lines (file diff suppressed because it is too large)
src/util.rs, 63 changed lines

@@ -44,62 +44,9 @@ pub fn unwrap_or_panic<T>(result: std::thread::Result<T>) -> T {
     }
 }
 
-#[deprecated(
-    since = "0.1.0",
-    note = "use `can_transmute` from `mem` module instead"
-)]
-pub use super::mem::can_transmute;
-
-/// True if `c` is considered a whitespace according to Rust language definition.
-/// See [Rust language reference](https://doc.rust-lang.org/reference/whitespace.html)
-/// for definitions of these classes.
-pub fn is_whitespace(c: char) -> bool {
-    // This is Pattern_White_Space.
-    //
-    // Note that this set is stable (ie, it doesn't change with different
-    // Unicode versions), so it's ok to just hard-code the values.
-
-    matches!(
-        c,
-        // Usual ASCII suspects
-        '\u{0009}' // \t
-        | '\u{000A}' // \n
-        | '\u{000B}' // vertical tab
-        | '\u{000C}' // form feed
-        | '\u{000D}' // \r
-        | '\u{0020}' // space
-
-        // NEXT LINE from latin1
-        | '\u{0085}'
-
-        // Bidi markers
-        | '\u{200E}' // LEFT-TO-RIGHT MARK
-        | '\u{200F}' // RIGHT-TO-LEFT MARK
-
-        // Dedicated whitespace characters from Unicode
-        | '\u{2028}' // LINE SEPARATOR
-        | '\u{2029}' // PARAGRAPH SEPARATOR
-    )
-}
-
-pub fn hash_f32<H: core::hash::Hasher>(state: &mut H, value: &f32) {
-    use core::hash::Hash;
-    if value.is_nan() {
-        f32::NAN.to_bits().hash(state);
-    } else if *value == 0.0 {
-        0u32.hash(state);
-    } else {
-        value.to_bits().hash(state);
-    }
-}
-
-pub fn hash_f64<H: core::hash::Hasher>(state: &mut H, value: &f64) {
-    use core::hash::Hash;
-    if value.is_nan() {
-        f64::NAN.to_bits().hash(state);
-    } else if *value == 0.0 {
-        0u64.hash(state);
-    } else {
-        value.to_bits().hash(state);
-    }
-}
+pub const fn can_transmute<A, B>() -> bool {
+    use core::mem::{align_of, size_of};
+    // We can transmute `A` to `B` iff `A` and `B` have the same size and the
+    // alignment of `A` is greater than or equal to the alignment of `B`.
+    (size_of::<A>() == size_of::<B>()) & (align_of::<A>() >= align_of::<B>())
+}
 }