From c6ae2c42d63b5ca5a78ce3e4e89b07073d671f93 Mon Sep 17 00:00:00 2001
From: Jon Lange
Date: Mon, 8 Jan 2024 12:02:39 -0800
Subject: [PATCH] Move shared CPU state into a separate structure

To preserve Rust's guarantee that mutable and immutable references are
mutually exclusive, it must never be possible for a PerCpu object to be
reachable from another thread, even to access fields that are designed
to be thread-safe.  This change moves the globally shared per-CPU
fields into a separate structure that is only ever referenced
immutably; the fields it contains are synchronized with locking
primitives that permit safe interior mutability.

Signed-off-by: Jon Lange
---
 src/cpu/percpu.rs     | 105 ++++++++++++++++++++++++++----------------
 src/protocols/core.rs |   2 +-
 src/svsm.rs           |   2 +-
 3 files changed, 67 insertions(+), 42 deletions(-)

diff --git a/src/cpu/percpu.rs b/src/cpu/percpu.rs
index 25cfef1ea..2d3e86e39 100644
--- a/src/cpu/percpu.rs
+++ b/src/cpu/percpu.rs
@@ -31,6 +31,7 @@ use crate::types::{PAGE_SHIFT, PAGE_SHIFT_2M, PAGE_SIZE, PAGE_SIZE_2M, SVSM_TR_F
 use alloc::sync::Arc;
 use alloc::vec::Vec;
 use core::cell::UnsafeCell;
+use core::mem::size_of;
 use core::ptr;
 use core::sync::atomic::{AtomicBool, Ordering};
 use cpuarch::vmsa::{VMSASegment, VMSA};
@@ -75,14 +76,14 @@ impl PerCpuAreas {
     }
 
     // Fails if no such area exists or its address is NULL
-    pub fn get(&self, apic_id: u32) -> Option<&'static PerCpu> {
+    pub fn get(&self, apic_id: u32) -> Option<&'static PerCpuShared> {
         // For this to not produce UB the only invariant we must
         // uphold is that there are no mutations or mutable aliases
         // going on when casting via as_ref(). This only happens via
         // Self::push(), which is intentionally unsafe and private.
         let ptr = unsafe { self.areas.get().as_ref().unwrap() };
         ptr.iter().find(|info| info.apic_id == apic_id).map(|info| {
-            let ptr = info.addr.as_ptr::<PerCpu>();
+            let ptr = info.addr.as_ptr::<PerCpuShared>();
             unsafe { ptr.as_ref().unwrap() }
         })
     }
@@ -174,8 +175,49 @@ impl GuestVmsaRef {
     }
 }
 
+#[derive(Debug)]
+pub struct PerCpuShared {
+    guest_vmsa: SpinLock<GuestVmsaRef>,
+}
+
+impl PerCpuShared {
+    fn new() -> Self {
+        PerCpuShared {
+            guest_vmsa: SpinLock::new(GuestVmsaRef::new()),
+        }
+    }
+
+    pub fn update_guest_vmsa_caa(&self, vmsa: PhysAddr, caa: PhysAddr) {
+        let mut locked = self.guest_vmsa.lock();
+        locked.update_vmsa_caa(Some(vmsa), Some(caa));
+    }
+
+    pub fn update_guest_vmsa(&self, vmsa: PhysAddr) {
+        let mut locked = self.guest_vmsa.lock();
+        locked.update_vmsa(Some(vmsa));
+    }
+
+    pub fn update_guest_caa(&self, caa: PhysAddr) {
+        let mut locked = self.guest_vmsa.lock();
+        locked.update_caa(Some(caa));
+    }
+
+    pub fn clear_guest_vmsa_if_match(&self, paddr: PhysAddr) {
+        let mut locked = self.guest_vmsa.lock();
+        if locked.vmsa.is_none() {
+            return;
+        }
+
+        let vmsa_phys = locked.vmsa_phys();
+        if vmsa_phys.unwrap() == paddr {
+            locked.update_vmsa(None);
+        }
+    }
+}
+
 #[derive(Debug)]
 pub struct PerCpu {
+    pub shared: &'static PerCpuShared,
     online: AtomicBool,
     apic_id: u32,
     pgtbl: SpinLock<PageTableRef>,
@@ -184,7 +226,6 @@ pub struct PerCpu {
     ist: IstStacks,
     tss: X86Tss,
     svsm_vmsa: Option<VmsaRef>,
-    guest_vmsa: SpinLock<GuestVmsaRef>,
     reset_ip: u64,
 
     /// PerCpu Virtual Memory Range
@@ -200,8 +241,9 @@ pub struct PerCpu {
 }
 
 impl PerCpu {
-    fn new(apic_id: u32) -> Self {
+    fn new(apic_id: u32, shared: &'static PerCpuShared) -> Self {
         PerCpu {
+            shared,
             online: AtomicBool::new(false),
             apic_id,
             pgtbl: SpinLock::<PageTableRef>::new(PageTableRef::unset()),
@@ -210,7 +252,6 @@ impl PerCpu {
             ist: IstStacks::new(),
             tss: X86Tss::new(),
             svsm_vmsa: None,
-            guest_vmsa: SpinLock::new(GuestVmsaRef::new()),
             reset_ip: 0xffff_fff0u64,
             vm_range: VMR::new(SVSM_PERCPU_BASE, SVSM_PERCPU_END, PTEntryFlags::GLOBAL),
             vrange_4k: VirtualRange::new(),
@@ -222,9 +263,22 @@ impl PerCpu {
     pub fn alloc(apic_id: u32) -> Result<*mut PerCpu, SvsmError> {
         let vaddr = allocate_zeroed_page()?;
         unsafe {
+            // Within each CPU state page, the first portion is the private
+            // mutable state and the remainder is the shared state.
+            let private_size = size_of::<PerCpu>();
+            let shared_size = size_of::<PerCpuShared>();
+            if private_size + shared_size > PAGE_SIZE {
+                panic!("Per-CPU data is larger than one page!");
+            }
+
+            let shared_vaddr = vaddr + private_size;
+            let percpu_shared = shared_vaddr.as_mut_ptr::<PerCpuShared>();
+            (*percpu_shared) = PerCpuShared::new();
+
             let percpu = vaddr.as_mut_ptr::<PerCpu>();
-            (*percpu) = PerCpu::new(apic_id);
-            PERCPU_AREAS.push(PerCpuInfo::new(apic_id, vaddr));
+            (*percpu) = PerCpu::new(apic_id, &*percpu_shared);
+
+            PERCPU_AREAS.push(PerCpuInfo::new(apic_id, shared_vaddr));
             Ok(percpu)
         }
     }
@@ -442,39 +496,12 @@ impl PerCpu {
         Ok(())
     }
 
-    pub fn clear_guest_vmsa_if_match(&self, paddr: PhysAddr) {
-        let mut locked = self.guest_vmsa.lock();
-        if locked.vmsa.is_none() {
-            return;
-        }
-
-        let vmsa_phys = locked.vmsa_phys();
-        if vmsa_phys.unwrap() == paddr {
-            locked.update_vmsa(None);
-        }
-    }
-
-    pub fn update_guest_vmsa_caa(&self, vmsa: PhysAddr, caa: PhysAddr) {
-        let mut locked = self.guest_vmsa.lock();
-        locked.update_vmsa_caa(Some(vmsa), Some(caa));
-    }
-
-    pub fn update_guest_vmsa(&self, vmsa: PhysAddr) {
-        let mut locked = self.guest_vmsa.lock();
-        locked.update_vmsa(Some(vmsa));
-    }
-
-    pub fn update_guest_caa(&self, caa: PhysAddr) {
-        let mut locked = self.guest_vmsa.lock();
-        locked.update_caa(Some(caa));
-    }
-
     pub fn guest_vmsa_ref(&self) -> LockGuard<'_, GuestVmsaRef> {
-        self.guest_vmsa.lock()
+        self.shared.guest_vmsa.lock()
     }
 
     pub fn guest_vmsa(&mut self) -> &mut VMSA {
-        let locked = self.guest_vmsa.lock();
+        let locked = self.shared.guest_vmsa.lock();
 
         assert!(locked.vmsa_phys().is_some());
 
@@ -488,7 +515,7 @@ impl PerCpu {
         let vmsa = vmsa_mut_ref_from_vaddr(vaddr);
         init_guest_vmsa(vmsa, self.reset_ip);
 
-        self.update_guest_vmsa(paddr);
+        self.shared.update_guest_vmsa(paddr);
 
         Ok(())
     }
@@ -508,7 +535,7 @@ impl PerCpu {
     }
 
     pub fn caa_addr(&self) -> Option<VirtAddr> {
-        let locked = self.guest_vmsa.lock();
+        let locked = self.shared.guest_vmsa.lock();
         let caa_phys = locked.caa_phys()?;
         let offset = caa_phys.page_offset();
 
@@ -579,8 +606,6 @@ impl PerCpu {
     }
 }
 
-unsafe impl Sync for PerCpu {}
-
 pub fn this_cpu() -> &'static PerCpu {
     unsafe { SVSM_PERCPU_BASE.as_ptr::<PerCpu>().as_ref().unwrap() }
 }
diff --git a/src/protocols/core.rs b/src/protocols/core.rs
index 9179043f3..ad76020ba 100644
--- a/src/protocols/core.rs
+++ b/src/protocols/core.rs
@@ -343,7 +343,7 @@ fn core_remap_ca(params: &RequestParams) -> Result<(), SvsmReqError> {
     let pending = GuestPtr::<u64>::new(vaddr);
     pending.write(0)?;
 
-    this_cpu_mut().update_guest_caa(gpa);
+    this_cpu_mut().shared.update_guest_caa(gpa);
 
     Ok(())
 }
diff --git a/src/svsm.rs b/src/svsm.rs
index 8dc076fe8..82c630629 100755
--- a/src/svsm.rs
+++ b/src/svsm.rs
@@ -195,7 +195,7 @@ fn prepare_fw_launch(fw_meta: &SevFWMetaData) -> Result<(), SvsmError> {
     let cpu = this_cpu_mut();
 
     if let Some(caa) = fw_meta.caa_page {
-        cpu.update_guest_caa(caa);
+        cpu.shared.update_guest_caa(caa);
     }
 
     cpu.alloc_guest_vmsa()?;
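
The ownership split this patch introduces can be demonstrated standalone.
The sketch below is illustrative only: it uses std::sync::Mutex in place of
the SVSM SpinLock, and the names CpuShared and Cpu are invented. It shows
the shared half being driven from a second thread through a &'static
reference while the private half keeps ordinary &mut access, which is the
property that lets the 'unsafe impl Sync for PerCpu' above be deleted.

// Standalone sketch of the split-state pattern, using std::sync::Mutex in
// place of the SVSM SpinLock. All names here are illustrative.
use std::sync::Mutex;

// The shared half: reachable from any thread, but only via &self methods,
// with every field behind a lock (interior mutability).
#[derive(Debug, Default)]
struct CpuShared {
    guest_vmsa: Mutex<Option<u64>>, // stand-in for SpinLock<GuestVmsaRef>
}

impl CpuShared {
    fn update_guest_vmsa(&self, paddr: u64) {
        *self.guest_vmsa.lock().unwrap() = Some(paddr);
    }

    fn clear_guest_vmsa_if_match(&self, paddr: u64) {
        let mut locked = self.guest_vmsa.lock().unwrap();
        if *locked == Some(paddr) {
            *locked = None;
        }
    }
}

// The private half: owned by exactly one CPU, so handing out &mut is fine.
// It reaches its shared half through an immutable 'static reference.
#[derive(Debug)]
struct Cpu {
    shared: &'static CpuShared,
    reset_ip: u64,
}

fn main() {
    // Leaking stands in for the per-CPU page, which lives forever.
    let shared: &'static CpuShared = Box::leak(Box::default());
    let mut cpu = Cpu { shared, reset_ip: 0xffff_fff0 };

    // Another thread may use the shared half concurrently...
    let t = std::thread::spawn(move || shared.update_guest_vmsa(0x8000));
    // ...while this thread mutates the private half without any lock.
    cpu.reset_ip = 0;
    t.join().unwrap();

    cpu.shared.clear_guest_vmsa_if_match(0x8000);
    assert!(cpu.shared.guest_vmsa.lock().unwrap().is_none());
}

Because every CpuShared method takes &self and goes through a lock, the
compiler derives the Sync bound from Mutex on its own; no unsafe assertion
is needed.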
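
The page-layout arithmetic in PerCpu::alloc() can be checked in isolation as
well. Below is a minimal sketch with invented stub types and sizes; the real
code measures size_of::<PerCpu>() and size_of::<PerCpuShared>() against the
4 KiB PAGE_SIZE. The placement is alignment-safe as long as PerCpu is at
least as strictly aligned as PerCpuShared, because Rust guarantees that
size_of::<T>() is a multiple of T's alignment.

// Model of the PerCpu::alloc() layout: one zeroed page holds the private
// state at the start and the shared state immediately after it. The stub
// types, sizes, and page base address are invented for illustration.
use std::mem::size_of;

const PAGE_SIZE: usize = 4096;

#[allow(dead_code)]
#[repr(align(8))]
struct PrivateStub([u8; 1024]); // hypothetical stand-in for PerCpu

#[allow(dead_code)]
#[repr(align(8))]
struct SharedStub([u8; 64]); // hypothetical stand-in for PerCpuShared

fn main() {
    let private_size = size_of::<PrivateStub>();
    let shared_size = size_of::<SharedStub>();

    // Same guard as the patch: both halves must fit in a single page.
    if private_size + shared_size > PAGE_SIZE {
        panic!("Per-CPU data is larger than one page!");
    }

    let page_base: usize = 0x8000_0000; // pretend page virtual address
    let shared_vaddr = page_base + private_size;

    // PERCPU_AREAS records shared_vaddr, so cross-CPU lookups through
    // PerCpuAreas::get() can only ever reach the shared half.
    println!("private at {page_base:#x}, shared at {shared_vaddr:#x}");
}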