From a864da7f6713a37b7a28c93dbf86f1d08eb8ae42 Mon Sep 17 00:00:00 2001
From: Taiki Endo
Date: Sun, 24 Jul 2022 01:23:49 +0900
Subject: [PATCH] Use asm-based atomic load/store on thumbv6m

---
 README.md                     |   2 +-
 no_atomic.rs                  |   1 +
 specs/thumbv6m-none-eabi.json |  18 +++
 src/imp/arm.rs                | 288 ++++++++++++++++++++++++++++++++++
 src/imp/interrupt/mod.rs      |   4 +-
 src/imp/mod.rs                |  21 +++
 src/lib.rs                    |   2 +-
 tools/no_atomic.sh            |  15 +-
 8 files changed, 346 insertions(+), 5 deletions(-)
 create mode 100644 specs/thumbv6m-none-eabi.json
 create mode 100644 src/imp/arm.rs

diff --git a/README.md b/README.md
index c8a07372..39f454a3 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ Portable atomic types including support for 128-bit atomics, atomic float, etc.
 
 - Provide `AtomicI128` and `AtomicU128`.
 - Provide `AtomicF32` and `AtomicF64`. (optional)
-- Provide atomic load/store for targets where atomic is not available at all in the standard library. (riscv without A-extension, msp430, avr)
+- Provide atomic load/store for targets where atomic is not available at all in the standard library. (thumbv6m, riscv without A-extension, msp430, avr)
 - Provide atomic CAS for targets where atomic CAS is not available in the standard library. (thumbv6m, riscv without A-extension, msp430, avr) (optional, [single-core only](#optional-cfg))
 
 ## 128-bit atomics support
diff --git a/no_atomic.rs b/no_atomic.rs
index 3cdfa00f..1d7a8b2e 100644
--- a/no_atomic.rs
+++ b/no_atomic.rs
@@ -69,4 +69,5 @@ const NO_ATOMIC: &[&str] = &[
     "riscv32i-unknown-none-elf",
     "riscv32im-unknown-none-elf",
     "riscv32imc-unknown-none-elf",
+    "thumbv6m-none-eabi",
 ];
diff --git a/specs/thumbv6m-none-eabi.json b/specs/thumbv6m-none-eabi.json
new file mode 100644
index 00000000..c24d9049
--- /dev/null
+++ b/specs/thumbv6m-none-eabi.json
@@ -0,0 +1,18 @@
+{
+    "abi": "eabi",
+    "arch": "arm",
+    "atomic-cas": false,
+    "max-atomic-width": 0,
+    "c-enum-min-bits": 8,
+    "data-layout": "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64",
+    "emit-debug-gdb-scripts": false,
+    "features": "+strict-align",
+    "frame-pointer": "always",
+    "is-builtin": false,
+    "linker": "rust-lld",
+    "linker-flavor": "ld.lld",
+    "llvm-target": "thumbv6m-none-eabi",
+    "panic-strategy": "abort",
+    "relocation-model": "static",
+    "target-pointer-width": "32"
+}
diff --git a/src/imp/arm.rs b/src/imp/arm.rs
new file mode 100644
index 00000000..47ece84e
--- /dev/null
+++ b/src/imp/arm.rs
@@ -0,0 +1,288 @@
+// Atomic load/store implementation on ARMv6-M.
+//
+// Refs:
+// - atomic-maybe-uninit https://github.com/taiki-e/atomic-maybe-uninit
+//
+// Generated asm: https://godbolt.org/z/hx3a6j9vv
+
+#[cfg(not(portable_atomic_no_asm))]
+use core::arch::asm;
+use core::{cell::UnsafeCell, sync::atomic::Ordering};
+
+use crate::utils::{assert_load_ordering, assert_store_ordering};
+
+// Only a full system barrier exists in the M-class architectures.
+macro_rules! dmb {
+    () => {
+        "dmb sy"
+    };
+}
+
+#[repr(transparent)]
+pub(crate) struct AtomicBool {
+    v: UnsafeCell<u8>,
+}
+
+// Send is implicitly implemented.
+// SAFETY: any data races are prevented by atomic operations.
+unsafe impl Sync for AtomicBool {}
+
+impl AtomicBool {
+    #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+    #[inline]
+    pub(crate) const fn new(v: bool) -> Self {
+        Self { v: UnsafeCell::new(v as u8) }
+    }
+
+    #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+    #[inline]
+    pub(crate) fn is_lock_free() -> bool {
+        Self::is_always_lock_free()
+    }
+    #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+    #[inline]
+    pub(crate) const fn is_always_lock_free() -> bool {
+        true
+    }
+
+    #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+    #[inline]
+    pub(crate) fn get_mut(&mut self) -> &mut bool {
+        // SAFETY: the mutable reference guarantees unique ownership.
+        unsafe { &mut *(self.v.get() as *mut bool) }
+    }
+
+    #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+    #[inline]
+    pub(crate) fn into_inner(self) -> bool {
+        self.v.into_inner() != 0
+    }
+
+    #[inline]
+    pub(crate) fn load(&self, order: Ordering) -> bool {
+        assert_load_ordering(order);
+        // SAFETY: any data races are prevented by atomic intrinsics and the raw
+        // pointer passed in is valid because we got it from a reference.
+        unsafe { u8::atomic_load(self.v.get(), order) != 0 }
+    }
+
+    #[inline]
+    pub(crate) fn store(&self, val: bool, order: Ordering) {
+        assert_store_ordering(order);
+        // SAFETY: any data races are prevented by atomic intrinsics and the raw
+        // pointer passed in is valid because we got it from a reference.
+        unsafe {
+            u8::atomic_store(self.v.get(), val as u8, order);
+        }
+    }
+}
+
+#[repr(transparent)]
+pub(crate) struct AtomicPtr<T> {
+    p: UnsafeCell<*mut T>,
+}
+
+// SAFETY: any data races are prevented by atomic operations.
+unsafe impl<T> Send for AtomicPtr<T> {}
+// SAFETY: any data races are prevented by atomic operations.
+unsafe impl<T> Sync for AtomicPtr<T> {}
+
+impl<T> AtomicPtr<T> {
+    #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+    #[inline]
+    pub(crate) const fn new(p: *mut T) -> Self {
+        Self { p: UnsafeCell::new(p) }
+    }
+
+    #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+    #[inline]
+    pub(crate) fn is_lock_free() -> bool {
+        Self::is_always_lock_free()
+    }
+    #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+    #[inline]
+    pub(crate) const fn is_always_lock_free() -> bool {
+        true
+    }
+
+    #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+    #[inline]
+    pub(crate) fn get_mut(&mut self) -> &mut *mut T {
+        self.p.get_mut()
+    }
+
+    #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+    #[inline]
+    pub(crate) fn into_inner(self) -> *mut T {
+        self.p.into_inner()
+    }
+
+    #[inline]
+    pub(crate) fn load(&self, order: Ordering) -> *mut T {
+        assert_load_ordering(order);
+        // SAFETY: any data races are prevented by atomic intrinsics and the raw
+        // pointer passed in is valid because we got it from a reference.
+        // TODO: remove int to ptr cast
+        unsafe { usize::atomic_load(self.p.get() as *mut usize, order) as *mut T }
+    }
+
+    #[inline]
+    pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
+        assert_store_ordering(order);
+        // SAFETY: any data races are prevented by atomic intrinsics and the raw
+        // pointer passed in is valid because we got it from a reference.
+        // TODO: remove int to ptr cast
+        unsafe {
+            usize::atomic_store(self.p.get() as *mut usize, ptr as usize, order);
+        }
+    }
+}
+
+macro_rules! atomic_int {
+    ($int_type:ident, $atomic_type:ident, $asm_suffix:expr) => {
+        #[repr(transparent)]
+        pub(crate) struct $atomic_type {
+            v: UnsafeCell<$int_type>,
+        }
+
+        // Send is implicitly implemented.
+        // SAFETY: any data races are prevented by atomic operations.
+        unsafe impl Sync for $atomic_type {}
+
+        impl $atomic_type {
+            #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+            #[inline]
+            pub(crate) const fn new(v: $int_type) -> Self {
+                Self { v: UnsafeCell::new(v) }
+            }
+
+            #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+            #[inline]
+            pub(crate) fn is_lock_free() -> bool {
+                Self::is_always_lock_free()
+            }
+            #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+            #[inline]
+            pub(crate) const fn is_always_lock_free() -> bool {
+                true
+            }
+
+            #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+            #[inline]
+            pub(crate) fn get_mut(&mut self) -> &mut $int_type {
+                self.v.get_mut()
+            }
+
+            #[cfg(any(test, not(portable_atomic_unsafe_assume_single_core)))]
+            #[inline]
+            pub(crate) fn into_inner(self) -> $int_type {
+                self.v.into_inner()
+            }
+
+            #[inline]
+            pub(crate) fn load(&self, order: Ordering) -> $int_type {
+                assert_load_ordering(order);
+                // SAFETY: any data races are prevented by atomic intrinsics and the raw
+                // pointer passed in is valid because we got it from a reference.
+                unsafe { $int_type::atomic_load(self.v.get(), order) }
+            }
+
+            #[inline]
+            pub(crate) fn store(&self, val: $int_type, order: Ordering) {
+                assert_store_ordering(order);
+                // SAFETY: any data races are prevented by atomic intrinsics and the raw
+                // pointer passed in is valid because we got it from a reference.
+                unsafe {
+                    $int_type::atomic_store(self.v.get(), val, order);
+                }
+            }
+        }
+
+        impl AtomicLoadStore for $int_type {
+            #[inline]
+            unsafe fn atomic_load(src: *const Self, order: Ordering) -> Self {
+                // SAFETY: the caller must uphold the safety contract for `atomic_load`.
+                unsafe {
+                    let out;
+                    match order {
+                        Ordering::Relaxed => {
+                            asm!(
+                                concat!("ldr", $asm_suffix, " {out}, [{src}]"),
+                                src = in(reg) src,
+                                out = lateout(reg) out,
+                                options(nostack, readonly),
+                            );
+                        }
+                        // Acquire and SeqCst loads are equivalent.
+                        Ordering::Acquire | Ordering::SeqCst => {
+                            asm!(
+                                concat!("ldr", $asm_suffix, " {out}, [{src}]"),
+                                dmb!(),
+                                src = in(reg) src,
+                                out = lateout(reg) out,
+                                options(nostack),
+                            );
+                        }
+                        _ => unreachable!("{:?}", order),
+                    }
+                    out
+                }
+            }
+
+            #[inline]
+            unsafe fn atomic_store(dst: *mut Self, val: Self, order: Ordering) {
+                // SAFETY: the caller must uphold the safety contract for `atomic_store`.
+                unsafe {
+                    macro_rules! atomic_store {
+                        ($acquire:expr, $release:expr) => {
+                            asm!(
+                                $release,
+                                concat!("str", $asm_suffix, " {val}, [{dst}]"),
+                                $acquire,
+                                dst = in(reg) dst,
+                                val = in(reg) val,
+                                options(nostack),
+                            )
+                        };
+                    }
+                    match order {
+                        Ordering::Relaxed => atomic_store!("", ""),
+                        Ordering::Release => atomic_store!("", dmb!()),
+                        Ordering::SeqCst => atomic_store!(dmb!(), dmb!()),
+                        _ => unreachable!("{:?}", order),
+                    }
+                }
+            }
+        }
+    };
+}
+
+atomic_int!(i8, AtomicI8, "b");
+atomic_int!(u8, AtomicU8, "b");
+atomic_int!(i16, AtomicI16, "h");
+atomic_int!(u16, AtomicU16, "h");
+atomic_int!(i32, AtomicI32, "");
+atomic_int!(u32, AtomicU32, "");
+atomic_int!(isize, AtomicIsize, "");
+atomic_int!(usize, AtomicUsize, "");
+
+trait AtomicLoadStore: Sized {
+    unsafe fn atomic_load(src: *const Self, order: Ordering) -> Self;
+    unsafe fn atomic_store(dst: *mut Self, val: Self, order: Ordering);
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    test_atomic_bool_load_store!();
+    test_atomic_ptr_load_store!();
+    test_atomic_int_load_store!(i8);
+    test_atomic_int_load_store!(u8);
+    test_atomic_int_load_store!(i16);
+    test_atomic_int_load_store!(u16);
+    test_atomic_int_load_store!(i32);
+    test_atomic_int_load_store!(u32);
+    test_atomic_int_load_store!(isize);
+    test_atomic_int_load_store!(usize);
+}
diff --git a/src/imp/interrupt/mod.rs b/src/imp/interrupt/mod.rs
index 0edc62f6..0daebaad 100644
--- a/src/imp/interrupt/mod.rs
+++ b/src/imp/interrupt/mod.rs
@@ -16,12 +16,12 @@
 // CAS together with atomic load/store. The load/store will not be
 // called while interrupts are disabled, and since the load/store is
 // atomic, it is not affected by interrupts even if interrupts are enabled.
+#[cfg(portable_atomic_armv6m)]
+use super::arm as atomic;
 #[cfg(target_arch = "msp430")]
 use super::msp430 as atomic;
 #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))]
 use super::riscv as atomic;
-#[cfg(target_arch = "arm")]
-use core::sync::atomic;
 
 #[cfg_attr(portable_atomic_armv6m, path = "armv6m.rs")]
 #[cfg_attr(target_arch = "avr", path = "avr.rs")]
diff --git a/src/imp/mod.rs b/src/imp/mod.rs
index 284337ec..850e4b66 100644
--- a/src/imp/mod.rs
+++ b/src/imp/mod.rs
@@ -39,6 +39,14 @@ mod s390x;
 #[cfg(target_arch = "msp430")]
 mod msp430;
 
+#[cfg(any(not(portable_atomic_no_asm), portable_atomic_nightly))]
+#[cfg(portable_atomic_armv6m)]
+mod arm;
+#[cfg(not(any(not(portable_atomic_no_asm), portable_atomic_nightly)))]
+#[cfg(portable_atomic_armv6m)]
+#[path = "core_atomic.rs"]
+mod arm;
+
 #[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(any(test, portable_atomic_no_atomic_cas)))]
 #[cfg_attr(
     not(portable_atomic_no_cfg_target_has_atomic),
@@ -124,11 +132,19 @@ mod interrupt;
 pub(crate) use self::core_atomic::{
     AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
 };
+// armv6m
+#[cfg(not(portable_atomic_unsafe_assume_single_core))]
+#[cfg(portable_atomic_armv6m)]
+pub(crate) use self::arm::{
+    AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
+};
+// msp430
 #[cfg(not(portable_atomic_unsafe_assume_single_core))]
 #[cfg(target_arch = "msp430")]
 pub(crate) use self::msp430::{
     AtomicBool, AtomicI16, AtomicI8, AtomicIsize, AtomicPtr, AtomicU16, AtomicU8, AtomicUsize,
 };
+// riscv32 without A-extension
 #[cfg(not(portable_atomic_unsafe_assume_single_core))]
 #[cfg(target_arch = "riscv32")]
 #[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(portable_atomic_no_atomic_cas))]
@@ -173,6 +189,11 @@ pub(crate) use self::interrupt::{
 ))
 )]
 pub(crate) use self::core_atomic::{AtomicI32, AtomicU32};
+// armv6m
+#[cfg(not(portable_atomic_unsafe_assume_single_core))]
+#[cfg(portable_atomic_armv6m)]
+pub(crate) use self::arm::{AtomicI32, AtomicU32};
+// riscv32 without A-extension
 #[cfg(not(portable_atomic_unsafe_assume_single_core))]
 #[cfg(target_arch = "riscv32")]
 #[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(portable_atomic_no_atomic_cas))]
diff --git a/src/lib.rs b/src/lib.rs
index b0fb8c26..23074c01 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -5,7 +5,7 @@ Portable atomic types including support for 128-bit atomics, atomic float, etc.
 
 - Provide `AtomicI128` and `AtomicU128`.
 - Provide `AtomicF32` and `AtomicF64`. (optional)
-- Provide atomic load/store for targets where atomic is not available at all in the standard library. (riscv without A-extension, msp430, avr)
+- Provide atomic load/store for targets where atomic is not available at all in the standard library. (thumbv6m, riscv without A-extension, msp430, avr)
 - Provide atomic CAS for targets where atomic CAS is not available in the standard library. (thumbv6m, riscv without A-extension, msp430, avr) (optional, [single-core only](#optional-cfg))
 
 ## 128-bit atomics support
diff --git a/tools/no_atomic.sh b/tools/no_atomic.sh
index bd7b894b..a9a5d168 100755
--- a/tools/no_atomic.sh
+++ b/tools/no_atomic.sh
@@ -1,4 +1,5 @@
 #!/bin/bash
+# shellcheck disable=SC2207
 set -euo pipefail
 IFS=$'\n\t'
 cd "$(dirname "$0")"/..
@@ -12,7 +13,9 @@ file="no_atomic.rs"
 
 no_atomic_cas=()
 no_atomic_64=()
-no_atomic=()
+no_atomic=(
+    "thumbv6m-none-eabi" # https://github.com/rust-lang/rust/pull/99595
+)
 for target in $(rustc --print target-list); do
     target_spec=$(rustc --print target-spec-json -Z unstable-options --target "${target}")
     res=$(jq <<<"${target_spec}" -r 'select(."atomic-cas" == false)')
@@ -37,6 +40,16 @@ for target in $(rustc --print target-list); do
     esac
 done
 
+# sort and dedup
+IFS=$'\n'
+no_atomic_cas=($(LC_ALL=C sort <<<"${no_atomic_cas[*]}"))
+no_atomic_cas=($(uniq <<<"${no_atomic_cas[*]}"))
+no_atomic_64=($(LC_ALL=C sort <<<"${no_atomic_64[*]}"))
+no_atomic_64=($(uniq <<<"${no_atomic_64[*]}"))
+no_atomic=($(LC_ALL=C sort <<<"${no_atomic[*]}"))
+no_atomic=($(uniq <<<"${no_atomic[*]}"))
+IFS=$'\n\t'
+
 cat >"${file}" <<EOF
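A minimal usage sketch (not part of the patch; it assumes the crate is consumed as the published `portable-atomic`). The ordering-to-instruction mapping follows the asm in src/imp/arm.rs above: relaxed accesses are a plain `ldr`/`str`, acquire and SeqCst loads append `dmb sy`, release stores prepend it, and SeqCst stores fence on both sides:

    use portable_atomic::{AtomicU32, Ordering};

    static FLAG: AtomicU32 = AtomicU32::new(0);

    fn publish(v: u32) {
        // Release store: `dmb sy`, then a plain `str`.
        FLAG.store(v, Ordering::Release);
    }

    fn consume() -> u32 {
        // Acquire and SeqCst loads are equivalent here: `ldr`, then `dmb sy`.
        FLAG.load(Ordering::Acquire)
    }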
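The comment kept in src/imp/interrupt/mod.rs is the design reason for this change: the interrupt-based fallback only emulates CAS, and that emulation is sound only if plain loads and stores of the same locations are genuinely atomic, since they may run while interrupts are still enabled. A schematic sketch of that layering, assuming a hypothetical `with_interrupts_disabled` helper (the crate's real implementation in src/imp/interrupt differs in detail):

    use portable_atomic::{AtomicU32, Ordering};

    // Hypothetical helper: mask IRQs (`cpsid i` on ARMv6-M), run `f`,
    // then restore the previous interrupt state.
    fn with_interrupts_disabled<T>(f: impl FnOnce() -> T) -> T {
        f() // actual masking elided in this sketch
    }

    // Schematic single-core CAS built from atomic load + atomic store.
    // A concurrent interrupt handler that loads this value observes either
    // the old or the new value, never a torn one, because the load/store
    // themselves are atomic.
    fn compare_exchange(v: &AtomicU32, current: u32, new: u32) -> Result<u32, u32> {
        with_interrupts_disabled(|| {
            let old = v.load(Ordering::Relaxed);
            if old == current {
                v.store(new, Ordering::Relaxed);
                Ok(old)
            } else {
                Err(old)
            }
        })
    }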