use core::{
    mem,
    sync::atomic::{AtomicU32, Ordering},
};

/// Bit set in the atomic state while the lock is held.
const LOCKED_BIT: u32 = 0b001;
/// State of an unlocked, uncontended lock.
const EMPTY: u32 = 0;

/// A simple lock implementation using an atomic u32.
#[repr(transparent)]
pub struct Lock {
    inner: AtomicU32,
}

impl Lock {
    /// Creates a new lock in the unlocked state.
    pub const fn new() -> Self {
        Self {
            inner: AtomicU32::new(EMPTY),
        }
    }

    /// Returns a raw pointer to the underlying atomic state.
    pub fn as_ptr(&self) -> *mut u32 {
        self.inner.as_ptr()
    }

    /// Reconstructs a `Lock` reference from a raw pointer to its state.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null, aligned for a `u32`, valid for reads and
    /// writes for the whole lifetime `'a`, and the pointee must only ever
    /// be accessed atomically.
    pub unsafe fn from_ptr<'a>(ptr: *mut u32) -> &'a Self {
        // SAFETY: upheld by the caller, per the conditions above. `Lock` is
        // `#[repr(transparent)]` over `AtomicU32`, so transmuting the
        // reference is sound.
        unsafe { mem::transmute::<&'a AtomicU32, &'a Self>(AtomicU32::from_ptr(ptr)) }
    }
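
    // A hedged sketch (not in the original file) of the intended `as_ptr` /
    // `from_ptr` round trip, e.g. for a lock whose state lives in shared
    // memory:
    //
    //     let lock = Lock::new();
    //     let ptr = lock.as_ptr();
    //     // SAFETY: `ptr` points into `lock`, which outlives this borrow,
    //     // and the state is only ever accessed atomically.
    //     let same_lock: &Lock = unsafe { Lock::from_ptr(ptr) };
    //     same_lock.lock();
    //     same_lock.unlock();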

    /// Acquires the lock, blocking until it is available.
    pub fn lock(&self) {
        // Fast path: attempt to acquire the lock assuming no contention.
        if self
            .inner
            .compare_exchange_weak(EMPTY, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
            .is_err()
        {
            // The lock is contended (or the weak CAS failed spuriously);
            // fall back to the slow path.
            self.lock_slow();
        }
    }

    /// Releases the lock and wakes one parked waiter.
    pub fn unlock(&self) {
        self.inner.fetch_and(!LOCKED_BIT, Ordering::Release);
        // Without a wake-up here, threads parked in `lock_slow` via
        // `atomic_wait::wait` would sleep forever.
        atomic_wait::wake_one(&self.inner);
    }
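
    // A hedged convenience sketch (not in the original file): an RAII guard
    // so a caller cannot forget to `unlock`. `LockGuard` and `lock_guarded`
    // are hypothetical names.
    //
    //     pub struct LockGuard<'a>(&'a Lock);
    //
    //     impl Lock {
    //         pub fn lock_guarded(&self) -> LockGuard<'_> {
    //             self.lock();
    //             LockGuard(self)
    //         }
    //     }
    //
    //     impl Drop for LockGuard<'_> {
    //         fn drop(&mut self) {
    //             self.0.unlock();
    //         }
    //     }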

    fn lock_slow(&self) {
        // The lock is either held or another thread is waiting for it.
        let mut spin_wait = SpinWait::new();
        let mut state = self.inner.load(Ordering::Acquire);
        loop {
            // If the lock isn't held, try to acquire it.
            if state & LOCKED_BIT == 0 {
                match self.inner.compare_exchange_weak(
                    state,
                    state | LOCKED_BIT,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => {
                        // We successfully acquired the lock.
                        return;
                    }
                    Err(new_state) => {
                        // The state changed under us; check again.
                        state = new_state;
                        continue;
                    }
                }
            }

            // Spin (yielding to the OS when `std` is available) for a little
            // while and see if the lock becomes free.
            let spun: bool;
            #[cfg(feature = "std")]
            {
                spun = spin_wait.spin_yield();
            }
            #[cfg(not(feature = "std"))]
            {
                spun = spin_wait.spin();
            }
            if spun {
                state = self.inner.load(Ordering::Relaxed);
                continue;
            }

            // The backoff budget is exhausted; park the thread. `wait`
            // returns immediately if the state is no longer `LOCKED_BIT`.
            atomic_wait::wait(&self.inner, LOCKED_BIT);

            // Retry from the unlocked state rather than the stale pre-park
            // `state`: a CAS from a stale locked snapshot could "succeed"
            // without actually taking the lock.
            if self
                .inner
                .compare_exchange_weak(EMPTY, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                // We successfully acquired the lock after being woken up.
                return;
            }

            spin_wait.reset();
            state = self.inner.load(Ordering::Relaxed);
        }
    }
}
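
// A hedged walk-through (not part of the original file) of one contended
// acquisition, assuming thread A holds the lock and thread B calls `lock`:
//
//     B: compare_exchange_weak(EMPTY, LOCKED_BIT) fails  -> lock_slow()
//     B: spins / yields until SpinWait reports its budget is spent
//     B: atomic_wait::wait(&inner, LOCKED_BIT)  // parks while state == LOCKED_BIT
//     A: unlock(): fetch_and(!LOCKED_BIT), then atomic_wait::wake_one(&inner)
//     B: wakes, compare_exchange_weak(EMPTY, LOCKED_BIT) succeeds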

/// Bounded exponential-backoff helper used by `Lock::lock_slow`.
pub struct SpinWait {
    counter: u32,
}

impl SpinWait {
    /// Creates a new `SpinWait` with its counter at zero.
    pub const fn new() -> Self {
        Self { counter: 0 }
    }

    /// Resets the counter to zero.
    pub fn reset(&mut self) {
        self.counter = 0;
    }

    /// Spins for an exponentially growing number of iterations.
    ///
    /// Returns `false` once the backoff budget is exhausted, signalling the
    /// caller to park instead.
    pub fn spin(&mut self) -> bool {
        if self.counter >= 10 {
            // The counter is too high; signal the caller to potentially park.
            return false;
        }
        self.counter += 1;

        // Spin for a small number of iterations based on the counter value.
        for _ in 0..(1 << self.counter) {
            core::hint::spin_loop();
        }

        true
    }
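
    // A hedged usage sketch (not in the original file): bounded backoff
    // around a retry loop, parking once `spin` gives up. `try_acquire` and
    // `park_current_thread` are hypothetical helpers.
    //
    //     let mut spin_wait = SpinWait::new();
    //     while !try_acquire() {
    //         if !spin_wait.spin() {
    //             park_current_thread();
    //             spin_wait.reset();
    //         }
    //     }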
#[cfg(feature = "std")]
|
|
pub fn spin_yield(&mut self) -> bool {
|
|
if self.counter >= 10 {
|
|
// If the counter is too high, we signal the caller to potentially park.
|
|
return false;
|
|
}
|
|
self.counter += 1;
|
|
|
|
if self.counter >= 3 {
|
|
// spin for a small number of iterations based on the counter value.
|
|
for _ in 0..(1 << self.counter) {
|
|
core::hint::spin_loop();
|
|
}
|
|
} else {
|
|
// yield the thread and wait for the OS to reschedule us.
|
|
std::thread::yield_now();
|
|
}
|
|
|
|
true
|
|
}
|
|
}
|
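
// A minimal mutual-exclusion check (not part of the original file); a sketch
// assuming the `std` feature is enabled and the `atomic-wait` crate backs
// the `atomic_wait` calls above.
#[cfg(all(test, feature = "std"))]
mod tests {
    use super::Lock;
    use core::cell::UnsafeCell;

    // Wrapper so the non-`Sync` `UnsafeCell` can live in a `static` for this
    // test; the `Lock` is what actually serializes access to it.
    struct Shared(UnsafeCell<u32>);
    unsafe impl Sync for Shared {}

    #[test]
    fn lock_provides_mutual_exclusion() {
        static LOCK: Lock = Lock::new();
        static VALUE: Shared = Shared(UnsafeCell::new(0));

        let handles: std::vec::Vec<_> = (0..4)
            .map(|_| {
                std::thread::spawn(|| {
                    for _ in 0..1_000 {
                        LOCK.lock();
                        // SAFETY: `VALUE` is only touched while `LOCK` is held.
                        unsafe { *VALUE.0.get() += 1 };
                        LOCK.unlock();
                    }
                })
            })
            .collect();
        for handle in handles {
            handle.join().unwrap();
        }

        LOCK.lock();
        // SAFETY: `LOCK` is held.
        assert_eq!(unsafe { *VALUE.0.get() }, 4_000);
        LOCK.unlock();
    }
}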