From ddda491913cf819f6fb23fbe4016fd15438e6234 Mon Sep 17 00:00:00 2001 From: Jon Lange Date: Fri, 10 Jan 2025 15:50:24 -0800 Subject: [PATCH 1/5] utils: remove `ImmutAfterInitCell` reinitialization support The reinitialization behavior of `ImmutAfterInitCell` is not safe because it permits safe code to reinitialize a cell while another thread may be borrowing the contents of the cell. The most effective way to avoid this conflict is to remove the support for reinitialization. This also means that there is no need to support construction of an `ImmutAfterInitCell` with initialized contents. Signed-off-by: Jon Lange --- kernel/src/console.rs | 13 ++++--- kernel/src/mm/pagetable.rs | 33 +++++++---------- kernel/src/stage2.rs | 11 +++--- kernel/src/svsm.rs | 2 +- kernel/src/utils/immut_after_init.rs | 53 ++-------------------------- 5 files changed, 31 insertions(+), 81 deletions(-) diff --git a/kernel/src/console.rs b/kernel/src/console.rs index 701cdbe36..5c6ef3f22 100644 --- a/kernel/src/console.rs +++ b/kernel/src/console.rs @@ -10,6 +10,7 @@ use crate::locking::SpinLock; use crate::serial::{SerialPort, Terminal, DEFAULT_SERIAL_PORT}; use crate::utils::immut_after_init::{ImmutAfterInitCell, ImmutAfterInitResult}; use core::fmt; +use core::sync::atomic::{AtomicBool, Ordering}; use release::COCONUT_VERSION; #[derive(Clone, Copy, Debug)] @@ -35,12 +36,16 @@ impl Console { static WRITER: SpinLock = SpinLock::new(Console { writer: &DEFAULT_SERIAL_PORT, }); -static CONSOLE_INITIALIZED: ImmutAfterInitCell = ImmutAfterInitCell::new(false); + +// CONSOLE_INITIALIZED is only ever written during the single-proc phase of +// boot, so it can safely be written with relaxed ordering. For the same +// reason, it can always safely be read with relaxed ordering. 
+static CONSOLE_INITIALIZED: AtomicBool = AtomicBool::new(false); static CONSOLE_SERIAL: ImmutAfterInitCell> = ImmutAfterInitCell::uninit(); fn init_console(writer: &'static dyn Terminal) -> ImmutAfterInitResult<()> { WRITER.lock().writer = writer; - CONSOLE_INITIALIZED.reinit(&true)?; + CONSOLE_INITIALIZED.store(true, Ordering::Relaxed); log::info!( "COCONUT Secure Virtual Machine Service Module Version {}", COCONUT_VERSION @@ -59,7 +64,7 @@ pub fn init_svsm_console(writer: &'static dyn IOPort, port: u16) -> Result<(), S #[doc(hidden)] pub fn _print(args: fmt::Arguments<'_>) { use core::fmt::Write; - if !*CONSOLE_INITIALIZED { + if !CONSOLE_INITIALIZED.load(Ordering::Relaxed) { return; } WRITER.lock().write_fmt(args).unwrap(); @@ -71,7 +76,7 @@ pub fn _print(args: fmt::Arguments<'_>) { /// /// * `buffer`: u8 slice with bytes to write. pub fn console_write(buffer: &[u8]) { - if !*CONSOLE_INITIALIZED { + if !CONSOLE_INITIALIZED.load(Ordering::Relaxed) { return; } WRITER.lock().write_bytes(buffer); diff --git a/kernel/src/mm/pagetable.rs b/kernel/src/mm/pagetable.rs index d9d01f755..0563c5e29 100644 --- a/kernel/src/mm/pagetable.rs +++ b/kernel/src/mm/pagetable.rs @@ -32,10 +32,10 @@ use alloc::boxed::Box; const ENTRY_COUNT: usize = 512; /// Mask for private page table entry. -static PRIVATE_PTE_MASK: ImmutAfterInitCell = ImmutAfterInitCell::new(0); +static PRIVATE_PTE_MASK: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); /// Mask for shared page table entry. -static SHARED_PTE_MASK: ImmutAfterInitCell = ImmutAfterInitCell::new(0); +static SHARED_PTE_MASK: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); /// Maximum physical address supported by the system. static MAX_PHYS_ADDR: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); @@ -47,32 +47,25 @@ static PHYS_ADDR_SIZE: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); pub const LAUNCH_VMSA_ADDR: PhysAddr = PhysAddr::new(0xFFFFFFFFF000); /// Feature mask for page table entry flags. 
-static FEATURE_MASK: ImmutAfterInitCell = - ImmutAfterInitCell::new(PTEntryFlags::empty()); - -/// Re-initializes early paging settings. -pub fn paging_init_early(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()> { - init_encrypt_mask(platform)?; - - let mut feature_mask = PTEntryFlags::all(); - feature_mask.remove(PTEntryFlags::GLOBAL); - FEATURE_MASK.reinit(&feature_mask) -} +static FEATURE_MASK: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); /// Initializes paging settings. -pub fn paging_init(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()> { +pub fn paging_init(platform: &dyn SvsmPlatform, suppress_global: bool) -> ImmutAfterInitResult<()> { init_encrypt_mask(platform)?; - let feature_mask = PTEntryFlags::all(); - FEATURE_MASK.reinit(&feature_mask) + let mut feature_mask = PTEntryFlags::all(); + if suppress_global { + feature_mask.remove(PTEntryFlags::GLOBAL); + } + FEATURE_MASK.init(&feature_mask) } /// Initializes the encrypt mask. fn init_encrypt_mask(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()> { let masks = platform.get_page_encryption_masks(); - PRIVATE_PTE_MASK.reinit(&masks.private_pte_mask)?; - SHARED_PTE_MASK.reinit(&masks.shared_pte_mask)?; + PRIVATE_PTE_MASK.init(&masks.private_pte_mask)?; + SHARED_PTE_MASK.init(&masks.shared_pte_mask)?; let guest_phys_addr_size = (masks.phys_addr_sizes >> 16) & 0xff; let host_phys_addr_size = masks.phys_addr_sizes & 0xff; @@ -85,7 +78,7 @@ fn init_encrypt_mask(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()> { guest_phys_addr_size }; - PHYS_ADDR_SIZE.reinit(&phys_addr_size)?; + PHYS_ADDR_SIZE.init(&phys_addr_size)?; // If the C-bit is a physical address bit however, the guest physical // address space is effectively reduced by 1 bit. 
@@ -93,7 +86,7 @@ fn init_encrypt_mask(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()> { let effective_phys_addr_size = cmp::min(masks.addr_mask_width, phys_addr_size); let max_addr = 1 << effective_phys_addr_size; - MAX_PHYS_ADDR.reinit(&max_addr) + MAX_PHYS_ADDR.init(&max_addr) } /// Returns the private encrypt mask value. diff --git a/kernel/src/stage2.rs b/kernel/src/stage2.rs index ded546971..13cca733b 100755 --- a/kernel/src/stage2.rs +++ b/kernel/src/stage2.rs @@ -28,7 +28,7 @@ use svsm::error::SvsmError; use svsm::fw_cfg::FwCfg; use svsm::igvm_params::IgvmParams; use svsm::mm::alloc::{memory_info, print_memory_info, root_mem_init}; -use svsm::mm::pagetable::{paging_init_early, PTEntryFlags, PageTable}; +use svsm::mm::pagetable::{paging_init, PTEntryFlags, PageTable}; use svsm::mm::validate::{ init_valid_bitmap_alloc, valid_bitmap_addr, valid_bitmap_set_valid_range, }; @@ -101,6 +101,10 @@ fn setup_env( PhysAddr::from(u64::from(STAGE2_START)), ); + let cpuid_page = unsafe { &*(launch_info.cpuid_page as *const SnpCpuidTable) }; + register_cpuid_table(cpuid_page); + paging_init(platform, true).expect("Failed to initialize early paging"); + // Use the low 640 KB of memory as the heap. let lowmem_region = MemoryRegion::new(VirtAddr::from(0u64), 640 * 1024); let heap_mapping = FixedAddressMappingRange::new( @@ -116,11 +120,6 @@ fn setup_env( .validate_virtual_page_range(lowmem_region, PageValidateOp::Validate) .expect("failed to validate low 640 KB"); - let cpuid_page = unsafe { &*(launch_info.cpuid_page as *const SnpCpuidTable) }; - - register_cpuid_table(cpuid_page); - paging_init_early(platform).expect("Failed to initialize early paging"); - // Configure the heap to exist from 64 KB to 640 KB. 
setup_stage2_allocator(0x10000, 0xA0000); diff --git a/kernel/src/svsm.rs b/kernel/src/svsm.rs index ced1b9d05..e74e7d98d 100755 --- a/kernel/src/svsm.rs +++ b/kernel/src/svsm.rs @@ -212,7 +212,7 @@ pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: usize) { Err(e) => panic!("error reading kernel ELF: {}", e), }; - paging_init(&*platform).expect("Failed to initialize paging"); + paging_init(&*platform, false).expect("Failed to initialize paging"); let init_pgtable = init_page_table(&launch_info, &kernel_elf).expect("Could not initialize the page table"); // SAFETY: we are initializing the state, including stack and registers diff --git a/kernel/src/utils/immut_after_init.rs b/kernel/src/utils/immut_after_init.rs index 19ab2d734..27dced8b7 100644 --- a/kernel/src/utils/immut_after_init.rs +++ b/kernel/src/utils/immut_after_init.rs @@ -56,19 +56,6 @@ pub enum ImmutAfterInitError { /// assert_eq!(*X, 123); /// } /// ``` -/// -/// Also, to support early/late initialization scenarios, a -/// `ImmutAfterInitCell`'s value may get reset after having been initialized -/// already: -/// ``` -/// # use svsm::utils::immut_after_init::ImmutAfterInitCell; -/// static X: ImmutAfterInitCell = ImmutAfterInitCell::new(0); -/// pub fn main() { -/// assert_eq!(*X, 0); -/// unsafe { X.reinit(&123) }; -/// assert_eq!(*X, 123); -/// } -/// ``` #[derive(Debug)] pub struct ImmutAfterInitCell { #[doc(hidden)] @@ -150,29 +137,6 @@ impl ImmutAfterInitCell { unsafe { self.set_inner(v) }; Ok(()) } - - /// Reinitialize an initialized `ImmutAfterInitCell` instance from a value. - /// - /// Must **not** get called while any borrow via [`Self::deref()`] or - /// [`ImmutAfterInitRef::deref()`] is alive! - /// - /// * `v` - Initialization value. - pub fn reinit(&self, v: &T) -> ImmutAfterInitResult<()> { - self.check_single_threaded()?; - unsafe { self.set_inner(v) } - Ok(()) - } - - /// Create an initialized `ImmutAfterInitCell` instance from a value. 
- /// - /// * `v` - Initialization value. - pub const fn new(v: T) -> Self { - Self { - data: UnsafeCell::new(MaybeUninit::new(v)), - #[cfg(debug_assertions)] - init: AtomicBool::new(true), - } - } } impl Deref for ImmutAfterInitCell { @@ -232,7 +196,9 @@ unsafe impl Sync for ImmutAfterInitCell {} /// static X : i32 = 123; /// /// fn main() { -/// init_rx(ImmutAfterInitRef::new_from_ref(&X)); +/// let local = ImmutAfterInitRef::::uninit(); +/// local.init_from_ref(&X); +/// init_rx(local); /// assert_eq!(*RX, 123); /// } /// ``` @@ -272,19 +238,6 @@ impl<'a, T: Copy> ImmutAfterInitRef<'a, T> { self.ptr.init(&(r as *const T)) } - /// Create an initialized `ImmutAfterInitRef` instance pointing to a value - /// specified by a regular reference. - /// - /// * `r` - Reference to the value to make the `ImmutAfterInitRef` to refer - /// to. By convention, the referenced value must have been - /// initialized already. - pub const fn new_from_ref(r: &'a T) -> Self { - Self { - ptr: ImmutAfterInitCell::new(r as *const T), - _phantom: PhantomData, - } - } - /// Dereference the referenced value with lifetime propagation. Must **only /// ever** get called on an initialized `ImmutAfterInitRef` instance! Moreover, /// the value referenced must have been initialized as well. From 826b79d6f0c96bdd7f9c5eca0534318104f3d249 Mon Sep 17 00:00:00 2001 From: Jon Lange Date: Fri, 10 Jan 2025 21:53:27 -0800 Subject: [PATCH 2/5] utils: always enforce `ImmutAfterInitCell` initialization Safety requires enforcement of soundness rules in all configurations, not just those that enable debug assertions. The initialization guards of `ImmutAfterInitCell` should therefore not rely on debug assertions. In addition, they should be enforced using atomic initialization guards to ensure that initialization is safe with respect to possible preemption by interrupt or exception handlers. 
By making initialization fully `Sync` compliant, there is no need to require initialization to complete prior to multi-processor startup. Signed-off-by: Jon Lange --- kernel/src/cpu/smp.rs | 2 - kernel/src/utils/immut_after_init.rs | 118 ++++++++++----------------- 2 files changed, 44 insertions(+), 76 deletions(-) diff --git a/kernel/src/cpu/smp.rs b/kernel/src/cpu/smp.rs index 89f47c8da..c1d8e4281 100644 --- a/kernel/src/cpu/smp.rs +++ b/kernel/src/cpu/smp.rs @@ -18,7 +18,6 @@ use crate::hyperv; use crate::platform::{SvsmPlatform, SVSM_PLATFORM}; use crate::requests::{request_loop, request_processing_main}; use crate::task::{schedule_init, start_kernel_task}; -use crate::utils::immut_after_init::immut_after_init_set_multithreaded; use alloc::string::String; use bootlib::kernel_launch::ApStartContext; @@ -40,7 +39,6 @@ fn start_cpu(platform: &dyn SvsmPlatform, apic_id: u32) -> Result<(), SvsmError> } pub fn start_secondary_cpus(platform: &dyn SvsmPlatform, cpus: &[ACPICPUInfo]) { - immut_after_init_set_multithreaded(); let mut count: usize = 0; for c in cpus.iter().filter(|c| c.apic_id != 0 && c.enabled) { log::info!("Launching AP with APIC-ID {}", c.apic_id); diff --git a/kernel/src/utils/immut_after_init.rs b/kernel/src/utils/immut_after_init.rs index 27dced8b7..b69f277f2 100644 --- a/kernel/src/utils/immut_after_init.rs +++ b/kernel/src/utils/immut_after_init.rs @@ -8,24 +8,14 @@ use core::cell::UnsafeCell; use core::marker::PhantomData; use core::mem::MaybeUninit; use core::ops::Deref; -#[cfg(debug_assertions)] -use core::sync::atomic::{AtomicBool, Ordering}; +use core::sync::atomic::{AtomicU8, Ordering}; -#[cfg(not(debug_assertions))] -pub type ImmutAfterInitResult = Result; - -#[cfg(debug_assertions)] pub type ImmutAfterInitResult = Result; -#[cfg(debug_assertions)] -static MULTI_THREADED: AtomicBool = AtomicBool::new(false); - -#[cfg(debug_assertions)] #[derive(Clone, Copy, Debug)] pub enum ImmutAfterInitError { AlreadyInit, Uninitialized, - 
NotSingleThreaded, } /// A memory location which is effectively immutable after initalization code @@ -60,81 +50,71 @@ pub enum ImmutAfterInitError { pub struct ImmutAfterInitCell { #[doc(hidden)] data: UnsafeCell>, - // Used to keep track of the initialization state. Even though this - // is atomic, the data structure does not guarantee thread safety. - #[cfg(debug_assertions)] - init: AtomicBool, + #[doc(hidden)] + init: AtomicU8, } +const IMMUT_UNINIT: u8 = 0; +const IMMUT_INIT_IN_PROGRESS: u8 = 1; +const IMMUT_INITIALIZED: u8 = 2; + impl ImmutAfterInitCell { /// Create an unitialized `ImmutAfterInitCell` instance. The value must get /// initialized by means of [`Self::init()`] before first usage. pub const fn uninit() -> Self { Self { data: UnsafeCell::new(MaybeUninit::uninit()), - #[cfg(debug_assertions)] - init: AtomicBool::new(false), + init: AtomicU8::new(IMMUT_UNINIT), } } - fn set_init(&self) { - #[cfg(debug_assertions)] - self.init.store(true, Ordering::Relaxed); - } - fn check_init(&self) -> ImmutAfterInitResult<()> { - #[cfg(debug_assertions)] - if !self.init.load(Ordering::Relaxed) { - return Err(ImmutAfterInitError::Uninitialized); - } - Ok(()) - } - - fn check_uninit(&self) -> ImmutAfterInitResult<()> { - #[cfg(debug_assertions)] - if self.init.load(Ordering::Relaxed) { - return Err(ImmutAfterInitError::AlreadyInit); + if self.init.load(Ordering::Acquire) == IMMUT_INITIALIZED { + Ok(()) + } else { + Err(ImmutAfterInitError::Uninitialized) } - Ok(()) } - fn check_single_threaded(&self) -> ImmutAfterInitResult<()> { - #[cfg(debug_assertions)] - if MULTI_THREADED.load(Ordering::Relaxed) { - return Err(ImmutAfterInitError::NotSingleThreaded); - } + fn try_init(&self) -> ImmutAfterInitResult<()> { + self.init + .compare_exchange( + IMMUT_UNINIT, + IMMUT_INIT_IN_PROGRESS, + Ordering::Acquire, + Ordering::Relaxed, + ) + .map_err(|_| ImmutAfterInitError::AlreadyInit)?; Ok(()) } - // The caller must check the initialization status to avoid double init bugs - 
unsafe fn set_inner(&self, v: &T) { - self.set_init(); - unsafe { - (*self.data.get()) - .as_mut_ptr() - .copy_from_nonoverlapping(v, 1) - } - } - - // The caller must ensure that the cell is initialized - unsafe fn get_inner(&self) -> &T { - unsafe { (*self.data.get()).assume_init_ref() } + /// # Safety + /// The caller must ensure that the cell is in the init-in-progress phase + /// and that the contents of the cell have been populated. + unsafe fn complete_init(&self) { + self.init.store(IMMUT_INITIALIZED, Ordering::Release); } fn try_get_inner(&self) -> ImmutAfterInitResult<&T> { self.check_init()?; - unsafe { Ok(self.get_inner()) } + let r = unsafe { (*self.data.get()).assume_init_ref() }; + Ok(r) } /// Initialize an uninitialized `ImmutAfterInitCell` instance from a value. - /// - /// Must **not** get called on an already initialized instance! + /// Will fail if called on an initialized instance. /// /// * `v` - Initialization value. pub fn init(&self, v: &T) -> ImmutAfterInitResult<()> { - self.check_uninit()?; - self.check_single_threaded()?; - unsafe { self.set_inner(v) }; + self.try_init()?; + // SAFETY: Successful completion of `try_init` conveys the exclusive + // right to populate the contents of the cell. + unsafe { + (*self.data.get()) + .as_mut_ptr() + .copy_from_nonoverlapping(v, 1); + self.complete_init(); + } Ok(()) } } @@ -142,8 +122,8 @@ impl ImmutAfterInitCell { impl Deref for ImmutAfterInitCell { type Target = T; - /// Dereference the wrapped value. Must **only ever** get called on an - /// initialized instance! + /// Dereference the wrapped value. Will panic if called on an + /// uninitialized instance. fn deref(&self) -> &T { self.try_get_inner().unwrap() } @@ -223,10 +203,8 @@ impl<'a, T: Copy> ImmutAfterInitRef<'a, T> { } /// Initialize an uninitialized `ImmutAfterInitRef` instance to point to value - /// specified by a regular reference. - /// - /// Must **not** get called on an already initialized `ImmutAfterInitRef` - /// instance! 
+ /// specified by a regular reference. Will fail if called on an + /// initialized instance. /// /// * `r` - Reference to the value to make the `ImmutAfterInitRef` to refer /// to. By convention, the referenced value must have been @@ -238,15 +216,12 @@ impl<'a, T: Copy> ImmutAfterInitRef<'a, T> { self.ptr.init(&(r as *const T)) } - /// Dereference the referenced value with lifetime propagation. Must **only - /// ever** get called on an initialized `ImmutAfterInitRef` instance! Moreover, - /// the value referenced must have been initialized as well. + /// Dereference the referenced value with lifetime propagation. Will panic + /// if called on an uninitialized instance. pub fn get(&self) -> &'a T { unsafe { &**self.ptr } } -} -impl<'a, T: Copy> ImmutAfterInitRef<'a, T> { /// Initialize an uninitialized `ImmutAfterInitRef` instance to point to /// value wrapped in a [`ImmutAfterInitCell`]. /// @@ -277,8 +252,3 @@ impl Deref for ImmutAfterInitRef<'_, T> { unsafe impl Send for ImmutAfterInitRef<'_, T> {} unsafe impl Sync for ImmutAfterInitRef<'_, T> {} - -pub fn immut_after_init_set_multithreaded() { - #[cfg(debug_assertions)] - MULTI_THREADED.store(true, Ordering::Relaxed); -} From 3eca977546b1bd2a88dde73ee63386263b0086bf Mon Sep 17 00:00:00 2001 From: Jon Lange Date: Tue, 21 Jan 2025 17:00:28 -0800 Subject: [PATCH 3/5] utils: expose `ImmutAfterInitCell::try_get_inner()` The `try_get_inner()` function is used to obtain the contents of an `ImmutAfterInitCell` (or to return an error). It should be possible for a caller to obtain a `Result` instead of being forced to dereference and panic if the cell is not initialized. 
Signed-off-by: Jon Lange --- kernel/src/utils/immut_after_init.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/src/utils/immut_after_init.rs b/kernel/src/utils/immut_after_init.rs index b69f277f2..75e4d95b8 100644 --- a/kernel/src/utils/immut_after_init.rs +++ b/kernel/src/utils/immut_after_init.rs @@ -95,7 +95,9 @@ impl ImmutAfterInitCell { self.init.store(IMMUT_INITIALIZED, Ordering::Release); } - fn try_get_inner(&self) -> ImmutAfterInitResult<&T> { + /// Obtains the inner value of the cell, returning `Ok(T)` if the cell is + /// initialized or `Err(ImmutAfterInitError)` if not. + pub fn try_get_inner(&self) -> ImmutAfterInitResult<&T> { self.check_init()?; let r = unsafe { (*self.data.get()).assume_init_ref() }; Ok(r) From 84b7925652cf7b139d783b4d9507678a1914f927 Mon Sep 17 00:00:00 2001 From: Jon Lange Date: Fri, 10 Jan 2025 22:01:50 -0800 Subject: [PATCH 4/5] utils: permit move initialization of `ImmutAfterInitCell` For maximum usefulness, it should be possible to support `ImmutAfterInitCell` for types that do not implement `Copy`. For example, some structures that require global initialization may contain atomics, which do not support `Copy`. However, it is also important not to require all initialization to be performed using moves, since a move into a cell requires a temporary copy on the stack, and some global types are especially large. Therefore `ImmutAfterInitCell` can support move-based initialization for all types, and by-reference initialization for types that support `Copy`. 
Signed-off-by: Jon Lange --- kernel/src/console.rs | 4 +- kernel/src/hyperv/hv.rs | 4 +- kernel/src/mm/address_space.rs | 4 +- kernel/src/mm/pagetable.rs | 10 ++--- kernel/src/platform/mod.rs | 2 +- kernel/src/platform/snp.rs | 2 +- kernel/src/platform/tdp.rs | 2 +- kernel/src/sev/msr_protocol.rs | 2 +- kernel/src/sev/status.rs | 2 +- kernel/src/svsm.rs | 6 +-- kernel/src/utils/immut_after_init.rs | 61 +++++++++++++++++++++++----- 11 files changed, 69 insertions(+), 30 deletions(-) diff --git a/kernel/src/console.rs b/kernel/src/console.rs index 5c6ef3f22..16165caea 100644 --- a/kernel/src/console.rs +++ b/kernel/src/console.rs @@ -55,7 +55,7 @@ fn init_console(writer: &'static dyn Terminal) -> ImmutAfterInitResult<()> { pub fn init_svsm_console(writer: &'static dyn IOPort, port: u16) -> Result<(), SvsmError> { CONSOLE_SERIAL - .init(&SerialPort::new(writer, port)) + .init_from_ref(&SerialPort::new(writer, port)) .map_err(|_| SvsmError::Console)?; (*CONSOLE_SERIAL).init(); init_console(&*CONSOLE_SERIAL).map_err(|_| SvsmError::Console) @@ -138,7 +138,7 @@ impl log::Log for ConsoleLogger { static CONSOLE_LOGGER: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); pub fn install_console_logger(component: &'static str) -> ImmutAfterInitResult<()> { - CONSOLE_LOGGER.init(&ConsoleLogger::new(component))?; + CONSOLE_LOGGER.init_from_ref(&ConsoleLogger::new(component))?; if let Err(e) = log::set_logger(&*CONSOLE_LOGGER) { // Failed to install the ConsoleLogger, presumably because something had diff --git a/kernel/src/hyperv/hv.rs b/kernel/src/hyperv/hv.rs index 095b390ca..114e6c367 100644 --- a/kernel/src/hyperv/hv.rs +++ b/kernel/src/hyperv/hv.rs @@ -186,7 +186,7 @@ pub fn hyperv_setup_hypercalls() -> Result<(), SvsmError> { .map_4k(hypercall_va, virt_to_phys(page), PTEntryFlags::exec())?; HYPERV_HYPERCALL_CODE_PAGE - .init(&hypercall_va) + .init(hypercall_va) .expect("Hypercall code page already allocated"); // Set the guest OS ID. The value is arbitrary. 
@@ -202,7 +202,7 @@ pub fn hyperv_setup_hypercalls() -> Result<(), SvsmError> { let vsm_status = hyperv::HvRegisterVsmVpStatus::from(vsm_status_value); let current_vtl = vsm_status.active_vtl(); CURRENT_VTL - .init(¤t_vtl) + .init(current_vtl) .expect("Current VTL already initialized"); Ok(()) diff --git a/kernel/src/mm/address_space.rs b/kernel/src/mm/address_space.rs index 499b46dbb..037de5950 100644 --- a/kernel/src/mm/address_space.rs +++ b/kernel/src/mm/address_space.rs @@ -59,7 +59,7 @@ pub fn init_kernel_mapping_info( heap_mapping, }; FIXED_MAPPING - .init(&mapping) + .init_from_ref(&mapping) .expect("Already initialized fixed mapping info"); } @@ -273,7 +273,7 @@ mod tests { kernel_mapping, heap_mapping: None, }; - KERNEL_MAPPING_TEST.init(&mapping).unwrap(); + KERNEL_MAPPING_TEST.init_from_ref(&mapping).unwrap(); *initialized = true; } diff --git a/kernel/src/mm/pagetable.rs b/kernel/src/mm/pagetable.rs index 0563c5e29..d95cbd380 100644 --- a/kernel/src/mm/pagetable.rs +++ b/kernel/src/mm/pagetable.rs @@ -57,15 +57,15 @@ pub fn paging_init(platform: &dyn SvsmPlatform, suppress_global: bool) -> ImmutA if suppress_global { feature_mask.remove(PTEntryFlags::GLOBAL); } - FEATURE_MASK.init(&feature_mask) + FEATURE_MASK.init(feature_mask) } /// Initializes the encrypt mask. 
fn init_encrypt_mask(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()> { let masks = platform.get_page_encryption_masks(); - PRIVATE_PTE_MASK.init(&masks.private_pte_mask)?; - SHARED_PTE_MASK.init(&masks.shared_pte_mask)?; + PRIVATE_PTE_MASK.init(masks.private_pte_mask)?; + SHARED_PTE_MASK.init(masks.shared_pte_mask)?; let guest_phys_addr_size = (masks.phys_addr_sizes >> 16) & 0xff; let host_phys_addr_size = masks.phys_addr_sizes & 0xff; @@ -78,7 +78,7 @@ fn init_encrypt_mask(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()> { guest_phys_addr_size }; - PHYS_ADDR_SIZE.init(&phys_addr_size)?; + PHYS_ADDR_SIZE.init(phys_addr_size)?; // If the C-bit is a physical address bit however, the guest physical // address space is effectively reduced by 1 bit. @@ -86,7 +86,7 @@ fn init_encrypt_mask(platform: &dyn SvsmPlatform) -> ImmutAfterInitResult<()> { let effective_phys_addr_size = cmp::min(masks.addr_mask_width, phys_addr_size); let max_addr = 1 << effective_phys_addr_size; - MAX_PHYS_ADDR.init(&max_addr) + MAX_PHYS_ADDR.init(max_addr) } /// Returns the private encrypt mask value. 
diff --git a/kernel/src/platform/mod.rs b/kernel/src/platform/mod.rs index 5e589a7b9..56bc3d946 100644 --- a/kernel/src/platform/mod.rs +++ b/kernel/src/platform/mod.rs @@ -225,7 +225,7 @@ impl DerefMut for SvsmPlatformCell { } pub fn init_platform_type(platform_type: SvsmPlatformType) { - SVSM_PLATFORM_TYPE.init(&platform_type).unwrap(); + SVSM_PLATFORM_TYPE.init(platform_type).unwrap(); } pub fn halt() { diff --git a/kernel/src/platform/snp.rs b/kernel/src/platform/snp.rs index ce3e64f94..9f8eec914 100644 --- a/kernel/src/platform/snp.rs +++ b/kernel/src/platform/snp.rs @@ -99,7 +99,7 @@ impl SvsmPlatform for SnpPlatform { fn env_setup(&mut self, _debug_serial_port: u16, vtom: usize) -> Result<(), SvsmError> { sev_status_init(); - VTOM.init(&vtom).map_err(|_| SvsmError::PlatformInit)?; + VTOM.init(vtom).map_err(|_| SvsmError::PlatformInit)?; Ok(()) } diff --git a/kernel/src/platform/tdp.rs b/kernel/src/platform/tdp.rs index c14710762..945f4d73e 100644 --- a/kernel/src/platform/tdp.rs +++ b/kernel/src/platform/tdp.rs @@ -48,7 +48,7 @@ impl SvsmPlatform for TdpPlatform { fn env_setup(&mut self, debug_serial_port: u16, vtom: usize) -> Result<(), SvsmError> { assert_ne!(vtom, 0); - VTOM.init(&vtom).map_err(|_| SvsmError::PlatformInit)?; + VTOM.init(vtom).map_err(|_| SvsmError::PlatformInit)?; // Serial console device can be initialized immediately init_svsm_console(&GHCI_IO_DRIVER, debug_serial_port) } diff --git a/kernel/src/sev/msr_protocol.rs b/kernel/src/sev/msr_protocol.rs index daa8fbc2f..3205a2c06 100644 --- a/kernel/src/sev/msr_protocol.rs +++ b/kernel/src/sev/msr_protocol.rs @@ -130,7 +130,7 @@ pub fn init_hypervisor_ghcb_features() -> Result<(), GhcbMsrError> { } GHCB_HV_FEATURES - .init(&features) + .init(features) .expect("Already initialized GHCB HV features"); Ok(()) } else { diff --git a/kernel/src/sev/status.rs b/kernel/src/sev/status.rs index 1af139a8c..7980708c3 100644 --- a/kernel/src/sev/status.rs +++ b/kernel/src/sev/status.rs @@ -142,7 +142,7 @@ 
pub fn sev_flags() -> SEVStatusFlags { pub fn sev_status_init() { let status: SEVStatusFlags = read_sev_status(); SEV_FLAGS - .init(&status) + .init(status) .expect("Already initialized SEV flags"); } diff --git a/kernel/src/svsm.rs b/kernel/src/svsm.rs index e74e7d98d..fe5464fb2 100755 --- a/kernel/src/svsm.rs +++ b/kernel/src/svsm.rs @@ -145,7 +145,7 @@ fn init_cpuid_table(addr: VirtAddr) { } CPUID_PAGE - .init(table) + .init_from_ref(table) .expect("Already initialized CPUID page"); register_cpuid_table(&CPUID_PAGE); } @@ -171,7 +171,7 @@ pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: usize) { let debug_serial_port = li.debug_serial_port; LAUNCH_INFO - .init(li) + .init_from_ref(li) .expect("Already initialized launch info"); let mut platform = SvsmPlatformCell::new(li.platform_type, li.suppress_svsm_interrupts); @@ -258,7 +258,7 @@ pub extern "C" fn svsm_start(li: &KernelLaunchInfo, vb_addr: usize) { .expect("Alternate injection required but not available"); SVSM_PLATFORM - .init(&platform) + .init(platform) .expect("Failed to initialize SVSM platform object"); sse_init(); diff --git a/kernel/src/utils/immut_after_init.rs b/kernel/src/utils/immut_after_init.rs index 75e4d95b8..1936d82b7 100644 --- a/kernel/src/utils/immut_after_init.rs +++ b/kernel/src/utils/immut_after_init.rs @@ -42,12 +42,12 @@ pub enum ImmutAfterInitError { /// # use svsm::utils::immut_after_init::ImmutAfterInitCell; /// static X: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); /// pub fn main() { -/// unsafe { X.init(&123) }; +/// unsafe { X.init_from_ref(&123) }; /// assert_eq!(*X, 123); /// } /// ``` #[derive(Debug)] -pub struct ImmutAfterInitCell { +pub struct ImmutAfterInitCell { #[doc(hidden)] data: UnsafeCell>, #[doc(hidden)] @@ -58,7 +58,7 @@ const IMMUT_UNINIT: u8 = 0; const IMMUT_INIT_IN_PROGRESS: u8 = 1; const IMMUT_INITIALIZED: u8 = 2; -impl ImmutAfterInitCell { +impl ImmutAfterInitCell { /// Create an unitialized `ImmutAfterInitCell` instance. 
The value must get /// initialized by means of [`Self::init()`] before first usage. pub const fn uninit() -> Self { @@ -107,21 +107,41 @@ impl ImmutAfterInitCell { /// Will fail if called on an initialized instance. /// /// * `v` - Initialization value. - pub fn init(&self, v: &T) -> ImmutAfterInitResult<()> { + pub fn init(&self, v: T) -> ImmutAfterInitResult<()> { + self.try_init()?; + // SAFETY: Successful completion of `try_init` conveys the exclusive + // right to populate the contents of the cell. + unsafe { + let data = &mut *self.data.get(); + data.write(v); + self.complete_init(); + } + Ok(()) + } + + /// Initialize an uninitialized `ImmutAfterInitCell` instance from a + /// reference. + /// Will fail if called on an initialized instance. + /// + /// * `v` - Initialization reference. + pub fn init_from_ref(&self, r: &T) -> ImmutAfterInitResult<()> + where + T: Copy, + { self.try_init()?; // SAFETY: Successful completion of `try_init` conveys the exclusive // right to populate the contents of the cell. unsafe { (*self.data.get()) .as_mut_ptr() - .copy_from_nonoverlapping(v, 1); + .copy_from_nonoverlapping(r, 1); self.complete_init(); } Ok(()) } } -impl Deref for ImmutAfterInitCell { +impl Deref for ImmutAfterInitCell { type Target = T; /// Dereference the wrapped value. Will panic if called on an @@ -131,8 +151,27 @@ impl Deref for ImmutAfterInitCell { } } -unsafe impl Send for ImmutAfterInitCell {} -unsafe impl Sync for ImmutAfterInitCell {} +impl Drop for ImmutAfterInitCell { + fn drop(&mut self) { + // Dropping is only required if the cell has been initialized. + if self.init.load(Ordering::Relaxed) == IMMUT_INITIALIZED { + // SAFETY: the initialization check ensures that the cell holds + // initialized data. 
+ unsafe { + let cell = &mut *self.data.get(); + // This drop will never occur for a cell that was initialized + // from a reference, because initialization from a reference + // requires `Copy` and types that implement `Copy` do not + // implement `Drop`, and thus this will have no effect for such + // types. + cell.assume_init_drop(); + } + } + } +} + +unsafe impl Send for ImmutAfterInitCell {} +unsafe impl Sync for ImmutAfterInitCell {} /// A reference to a memory location which is effectively immutable after /// initalization code has run. @@ -149,7 +188,7 @@ unsafe impl Sync for ImmutAfterInitCell {} /// static X: ImmutAfterInitCell = ImmutAfterInitCell::uninit(); /// static RX: ImmutAfterInitRef<'_, i32> = ImmutAfterInitRef::uninit(); /// fn main() { -/// unsafe { X.init(&123) }; +/// unsafe { X.init_from_ref(&123) }; /// unsafe { RX.init_from_cell(&X) }; /// assert_eq!(*RX, 123); /// } @@ -215,7 +254,7 @@ impl<'a, T: Copy> ImmutAfterInitRef<'a, T> { where 'b: 'a, { - self.ptr.init(&(r as *const T)) + self.ptr.init(r as *const T) } /// Dereference the referenced value with lifetime propagation. Will panic @@ -236,7 +275,7 @@ impl<'a, T: Copy> ImmutAfterInitRef<'a, T> { where 'b: 'a, { - self.ptr.init(&(cell.try_get_inner()? as *const T)) + self.ptr.init(cell.try_get_inner()? as *const T) } } From 339f388aa5ebed1b105b74652bface05ba24d393 Mon Sep 17 00:00:00 2001 From: Jon Lange Date: Tue, 21 Jan 2025 13:35:01 -0800 Subject: [PATCH 5/5] utils: add unit tests for `ImmutAfterInitCell` Add unit tests to validate the behavior of `ImmutAfterInitCell`. 
Signed-off-by: Jon Lange --- kernel/src/utils/immut_after_init.rs | 59 +++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/kernel/src/utils/immut_after_init.rs b/kernel/src/utils/immut_after_init.rs index 1936d82b7..c7c6c3fb6 100644 --- a/kernel/src/utils/immut_after_init.rs +++ b/kernel/src/utils/immut_after_init.rs @@ -12,7 +12,7 @@ use core::sync::atomic::{AtomicU8, Ordering}; pub type ImmutAfterInitResult = Result; -#[derive(Clone, Copy, Debug)] +#[derive(Clone, Copy, Debug, PartialEq)] pub enum ImmutAfterInitError { AlreadyInit, Uninitialized, @@ -293,3 +293,60 @@ impl Deref for ImmutAfterInitRef<'_, T> { unsafe impl Send for ImmutAfterInitRef<'_, T> {} unsafe impl Sync for ImmutAfterInitRef<'_, T> {} + +#[cfg(test)] +mod tests { + + use crate::utils::immut_after_init::*; + use core::sync::atomic::{AtomicU32, Ordering}; + + #[test] + fn test_with_move() { + let v = AtomicU32::new(5); + let immut = ImmutAfterInitCell::::uninit(); + match immut.try_get_inner() { + Ok(_) => panic!("uninitialized cell returned Ok()"), + Err(e) => assert_eq!(e, ImmutAfterInitError::Uninitialized), + } + let init = immut.init(v); + if init.is_err() { + panic!("initializing uninitialized cell returned {:?}", init); + } + match immut.init(AtomicU32::new(0)) { + Ok(_) => panic!("reinitializing initialized cell returned Ok()"), + Err(e) => assert_eq!(e, ImmutAfterInitError::AlreadyInit), + } + + assert_eq!(immut.load(Ordering::Relaxed), 5); + } + + #[test] + fn test_with_copy() { + let v: u32 = 5; + let immut = ImmutAfterInitCell::::uninit(); + immut.init_from_ref(&v).expect("init failed"); + assert_eq!(*immut, 5); + } + + struct ItemWithDrop<'a> { + pub drop_count: &'a mut u32, + } + + impl Drop for ItemWithDrop<'_> { + fn drop(&mut self) { + *self.drop_count += 1; + } + } + + #[test] + fn test_with_drop() { + let mut local_drop_count: u32 = 0; + let item = ItemWithDrop { + drop_count: &mut local_drop_count, + }; + let immut = 
ImmutAfterInitCell::>::uninit(); + immut.init(item).expect("init failed"); + drop(immut); + assert_eq!(local_drop_count, 1); + } +}