Commit
Implemented various backoffs and per-CPU rng
1. Modified crates/spinlock to support multiple backoff strategies when
encountering lock contention, including exponential backoff and random
exponential backoff (a usage sketch follows this list).

2. Modified `BaseSpinLock` in crates/spinlock to support locking with a
different guard/backoff type, providing more flexibility.

3. Added a new module `ruxrand` that provides support for using RNGs inside
the kernel. Currently it implements a per-CPU RNG and the random exponential
backoff used by the spinlock.

4. Changed the lock type used by `RunQueue`.
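A minimal sketch of selecting a backoff strategy through the new type parameter. The alias name `ContendedLock` is hypothetical, and the sketch assumes `ExpRand` is re-exported at the crate root and satisfies the `Strategy` bound (neither is visible in the diffs below):

use kernel_guard::NoPreemptIrqSave;
use spinlock::{BaseSpinLock, ExpRand};

// Hypothetical alias: on contention, spin a random number of iterations
// within an exponentially growing limit, capped at 64.
type ContendedLock<T> = BaseSpinLock<NoPreemptIrqSave, T, ExpRand<64>>;

static COUNTER: ContendedLock<u64> = ContendedLock::new(0);

fn bump() {
    // `lock()` uses the guard and strategy declared in the lock's type.
    *COUNTER.lock() += 1;
}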
Sssssaltyfish committed Jun 26, 2024
1 parent 24fa3dd commit 21cc7ac
Showing 16 changed files with 614 additions and 29 deletions.
23 changes: 23 additions & 0 deletions Cargo.lock

(Generated file; diff not rendered.)

1 change: 1 addition & 0 deletions Cargo.toml
@@ -50,6 +50,7 @@ members = [
"modules/ruxruntime",
"modules/ruxtask",
"modules/ruxfutex",
"modules/ruxrand",

"api/ruxfeat",
"api/arceos_api",
2 changes: 2 additions & 0 deletions crates/spinlock/Cargo.toml
@@ -12,8 +12,10 @@ documentation = "https://rcore-os.github.io/arceos/spinlock/index.html"
[features]
# To use in the multi-core environment
smp = []
+rand = ["dep:crate_interface"]
default = []

[dependencies]
cfg-if = "1.0"
kernel_guard = "0.1.0"
+crate_interface = { version = "0.1.1", optional = true }
51 changes: 37 additions & 14 deletions crates/spinlock/src/base.rs
@@ -24,17 +24,23 @@ use core::sync::atomic::{AtomicBool, Ordering};

use kernel_guard::BaseGuard;

+use crate::{strategy, Strategy};
+
+/// The default strategy used in spinlocks.
+pub type DefaultStrategy = strategy::Once;

/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually
/// exclusive access to data.
///
/// This is a base struct, the specific behavior depends on the generic
/// parameter `G` that implements [`BaseGuard`], such as whether to disable
-/// local IRQs or kernel preemption before acquiring the lock.
+/// local IRQs or kernel preemption before acquiring the lock. The parameter `S`
+/// that implements [`Strategy`] defines the behavior when encountering contention.
///
/// In a single-core environment (without the "smp" feature), the lock state is
/// removed; the CPU can always acquire the lock if the proper guard is used.
-pub struct BaseSpinLock<G: BaseGuard, T: ?Sized> {
-_phantom: PhantomData<G>,
+pub struct BaseSpinLock<DG: BaseGuard, T: ?Sized, S: Strategy = DefaultStrategy> {
+_phantom: PhantomData<(DG, S)>,
#[cfg(feature = "smp")]
lock: AtomicBool,
data: UnsafeCell<T>,
@@ -52,10 +58,10 @@ pub struct BaseSpinLockGuard<'a, G: BaseGuard, T: ?Sized + 'a> {
}

// Same unsafe impls as `std::sync::Mutex`
-unsafe impl<G: BaseGuard, T: ?Sized + Send> Sync for BaseSpinLock<G, T> {}
-unsafe impl<G: BaseGuard, T: ?Sized + Send> Send for BaseSpinLock<G, T> {}
+unsafe impl<G: BaseGuard, T: ?Sized + Send, B: Strategy> Sync for BaseSpinLock<G, T, B> {}
+unsafe impl<G: BaseGuard, T: ?Sized + Send, B: Strategy> Send for BaseSpinLock<G, T, B> {}

-impl<G: BaseGuard, T> BaseSpinLock<G, T> {
+impl<G: BaseGuard, T, S: Strategy> BaseSpinLock<G, T, S> {
/// Creates a new [`BaseSpinLock`] wrapping the supplied data.
#[inline(always)]
pub const fn new(data: T) -> Self {
@@ -77,26 +83,33 @@ impl<G: BaseGuard, T> BaseSpinLock<G, T> {
}
}

-impl<G: BaseGuard, T: ?Sized> BaseSpinLock<G, T> {
-/// Locks the [`BaseSpinLock`] and returns a guard that permits access to the inner data.
+impl<G: BaseGuard, T: ?Sized, S: Strategy> BaseSpinLock<G, T, S> {
+/// Locks the [`BaseSpinLock`] using the given guard type and backoff strategy,
+/// and returns a guard that permits access to the inner data.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
#[inline(always)]
-pub fn lock(&self) -> BaseSpinLockGuard<G, T> {
-let irq_state = G::acquire();
+pub fn lock_as<GT: BaseGuard, ST: Strategy>(&self) -> BaseSpinLockGuard<GT, T> {
+let irq_state = GT::acquire();

#[cfg(feature = "smp")]
{
+use crate::strategy::{Backoff, Relax};
+
+let mut backoff = <ST as Strategy>::new_backoff();
// Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
// when called in a loop.
while self
.lock
.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
+backoff.backoff();
+let mut relax = <ST as Strategy>::new_relax();
+
// Wait until the lock looks unlocked before retrying
while self.is_locked() {
-core::hint::spin_loop();
+relax.relax();
}
}
}
@@ -109,6 +122,16 @@ impl<G: BaseGuard, T: ?Sized> BaseSpinLock<G, T> {
}
}

/// Locks the [`BaseSpinLock`] using the "default" strategy specified by lock type,
/// and returns a guard that permits access to the inner data.
///
/// The returned value may be dereferenced for data access
/// and the lock will be dropped when the guard falls out of scope.
#[inline(always)]
pub fn lock(&self) -> BaseSpinLockGuard<G, T> {
self.lock_as::<G, S>()
}

/// Returns `true` if the lock is currently held.
///
/// # Safety
@@ -183,14 +206,14 @@
}
}

-impl<G: BaseGuard, T: ?Sized + Default> Default for BaseSpinLock<G, T> {
+impl<G: BaseGuard, T: ?Sized + Default, S: Strategy> Default for BaseSpinLock<G, T, S> {
#[inline(always)]
fn default() -> Self {
Self::new(Default::default())
}
}

-impl<G: BaseGuard, T: ?Sized + fmt::Debug> fmt::Debug for BaseSpinLock<G, T> {
+impl<G: BaseGuard, T: ?Sized + fmt::Debug, S: Strategy> fmt::Debug for BaseSpinLock<G, T, S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Some(guard) => write!(f, "SpinLock {{ data: ")
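A sketch of what `lock_as` enables, built from items visible in this diff (`strategy::Once`, `kernel_guard::NoOp`); the scenario itself is illustrative:

use kernel_guard::{NoOp, NoPreemptIrqSave};
use spinlock::{strategy, BaseSpinLock};

static DATA: BaseSpinLock<NoPreemptIrqSave, u32> = BaseSpinLock::new(0);

fn irqs_already_disabled() {
    // On a path where IRQs and preemption are already off, the same lock can
    // be taken with a no-op guard and a plain spin strategy, without changing
    // the lock's declared type.
    let guard = DATA.lock_as::<NoOp, strategy::Once>();
    assert_eq!(*guard, 0);
}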
15 changes: 12 additions & 3 deletions crates/spinlock/src/lib.rs
@@ -16,15 +16,24 @@
//! environment (without this feature), the lock state is unnecessary and
//! optimized out. The CPU can always acquire the lock if the proper guard is
//! used. By default, this feature is disabled.
+//! - `rand`: Provide an extra contention-alleviating strategy that backs off
+//!   for a random duration within an exponentially growing limit. Requires a
+//!   user-supplied RNG (see `SpinRandIf`). By default, this feature is disabled.
#![cfg_attr(not(test), no_std)]

mod base;

-use kernel_guard::{NoOp, NoPreempt, NoPreemptIrqSave};
+/// Defines the strategies used when encountering lock contention.
+pub mod strategy;
+
+#[cfg(feature = "rand")]
+mod rand_strategy;
+
+use kernel_guard::{NoPreempt, NoPreemptIrqSave};

pub use self::base::{BaseSpinLock, BaseSpinLockGuard};

+pub use self::strategy::*;

/// A spin lock that disables kernel preemption while trying to lock, and
/// re-enables it after unlocking.
///
@@ -48,7 +57,7 @@ pub type SpinNoIrqGuard<'a, T> = BaseSpinLockGuard<'a, NoPreemptIrqSave, T>;
///
/// It must be used in the preemption-disabled and local IRQ-disabled context,
/// or never be used in interrupt handlers.
-pub type SpinRaw<T> = BaseSpinLock<NoOp, T>;
+pub type SpinRaw<T> = BaseSpinLock<kernel_guard::NoOp, T>;

/// A guard that provides mutable data access for [`SpinRaw`].
-pub type SpinRawGuard<'a, T> = BaseSpinLockGuard<'a, NoOp, T>;
+pub type SpinRawGuard<'a, T> = BaseSpinLockGuard<'a, kernel_guard::NoOp, T>;
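The new `strategy` module itself is not shown on this page. Judging from its call sites in base.rs (`new_backoff`/`new_relax`, `backoff()`, `relax()`, and the `Once` default), its core interface plausibly reduces to the sketch below; the exact associated-type shape is an assumption:

/// Called while spinning on a lock that is still observed as held.
pub trait Relax {
    fn relax(&mut self);
}

/// Called after a failed attempt to acquire the lock.
pub trait Backoff {
    fn backoff(&mut self);
}

/// A lock-contention strategy: a backoff policy plus a relax policy.
pub trait Strategy {
    type Backoff: Backoff;
    type Relax: Relax;

    fn new_backoff() -> Self::Backoff;
    fn new_relax() -> Self::Relax;
}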
55 changes: 55 additions & 0 deletions crates/spinlock/src/rand_strategy.rs
@@ -0,0 +1,55 @@
use core::ops::RangeInclusive;

use crate::{Backoff, Relax};

/// Defines the interface for generating a random number within a given range,
/// used by the random exponential backoff algorithm.
#[crate_interface::def_interface]
pub trait SpinRandIf {
/// Generates a random number within the given range.
///
/// Note that this method may be called simultaneously on multiple CPUs,
/// so the implementation should be thread-safe.
fn percpu_rand(r: RangeInclusive<u32>) -> u32;
}

#[inline(always)]
fn exp_rand_backoff(current_limit: &mut u32, max: u32) {
use crate_interface::call_interface;

let limit = *current_limit;
// Grow the limit exponentially on each call, capped at `max`.
*current_limit = max.min(limit.saturating_mul(2));
let delay = call_interface!(SpinRandIf::percpu_rand, 0..=limit);
for _ in 0..delay {
core::hint::spin_loop();
}
}

/// Calls [`core::hint::spin_loop`] a random number of times, bounded by an
/// exponentially growing limit, whenever backoff/relax is required. The random
/// number is generated using [`SpinRandIf::percpu_rand`], which must be
/// implemented by the user.
///
/// This generally improves performance when the lock is highly contended.
#[derive(Debug)]
pub struct ExpRand<const MAX: u32>(u32);

impl<const N: u32> Relax for ExpRand<N> {
#[inline(always)]
fn relax(&mut self) {
exp_rand_backoff(&mut self.0, N);
}
}

impl<const N: u32> Backoff for ExpRand<N> {
#[inline(always)]
fn backoff(&mut self) {
exp_rand_backoff(&mut self.0, N);
}
}

impl<const N: u32> Default for ExpRand<N> {
#[inline(always)]
fn default() -> Self {
Self(1)
}
}
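For the `rand` feature to link and run, exactly one crate must implement `SpinRandIf` via `crate_interface`; in this commit the real implementation lives in the new `ruxrand` module, whose diff is not shown on this page. A minimal sketch with a single global xorshift32 state (the per-CPU aspect, the impl type name, and the `SpinRandIf` re-export path are simplifying assumptions):

use core::ops::RangeInclusive;
use core::sync::atomic::{AtomicU32, Ordering};

use spinlock::SpinRandIf;

struct SpinRandIfImpl;

// One shared RNG state; a real kernel keeps one per CPU so that backoff
// randomness does not itself become a point of cross-core contention.
static STATE: AtomicU32 = AtomicU32::new(0x9E37_79B9);

#[crate_interface::impl_interface]
impl SpinRandIf for SpinRandIfImpl {
    fn percpu_rand(r: RangeInclusive<u32>) -> u32 {
        // xorshift32 step; racy load/store is acceptable for backoff jitter.
        let mut x = STATE.load(Ordering::Relaxed);
        x ^= x << 13;
        x ^= x >> 17;
        x ^= x << 5;
        STATE.store(x, Ordering::Relaxed);
        // Map into the requested range (assumes the range does not span all of u32).
        let (lo, hi) = r.into_inner();
        lo + x % (hi - lo + 1)
    }
}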
(The remaining changed files are not shown.)
