Skip to content

Commit

Permalink
mm/vm/range: Permit VM ranges to start on other than PML4E boundaries
Browse files Browse the repository at this point in the history
The address limits of VM ranges may need to be aligned to boundaries
other than PML4E boundaries.  Lack of alignment is acceptable as long as
a single address space is guaranteed not to try to pack multiple VM
ranges into a single top-level entry.

Signed-off-by: Jon Lange <jlange@microsoft.com>
  • Loading branch information
msft-jlange committed Jan 2, 2025
1 parent 9f9813f commit 489d572
Show file tree
Hide file tree
Showing 4 changed files with 53 additions and 13 deletions.
6 changes: 5 additions & 1 deletion kernel/src/cpu/percpu.rs
Original file line number Diff line number Diff line change
Expand Up @@ -560,7 +560,11 @@ impl PerCpu {
}

pub fn init_page_table(&self, pgtable: PageBox<PageTable>) -> Result<(), SvsmError> {
self.vm_range.initialize()?;
// SAFETY: The per-CPU address range is fully aligned to top-level
// paging boundaries.
unsafe {
self.vm_range.initialize()?;
}
self.set_pgtable(PageBox::leak(pgtable));

Ok(())
Expand Down
2 changes: 1 addition & 1 deletion kernel/src/mm/address_space.rs
Original file line number Diff line number Diff line change
Expand Up @@ -133,7 +133,7 @@ pub const STACK_SIZE: usize = PAGE_SIZE * STACK_PAGES;
pub const STACK_GUARD_SIZE: usize = STACK_SIZE;
pub const STACK_TOTAL_SIZE: usize = STACK_SIZE + STACK_GUARD_SIZE;

const fn virt_from_idx(idx: usize) -> VirtAddr {
pub const fn virt_from_idx(idx: usize) -> VirtAddr {
VirtAddr::new(idx << ((3 * 9) + 12))
}

Expand Down
46 changes: 37 additions & 9 deletions kernel/src/mm/vm/range.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,7 @@ use crate::cpu::{flush_tlb_global_percpu, flush_tlb_global_sync};
use crate::error::SvsmError;
use crate::locking::RWLock;
use crate::mm::pagetable::{PTEntryFlags, PageTable, PageTablePart};
use crate::mm::virt_from_idx;
use crate::types::{PageSize, PAGE_SHIFT, PAGE_SIZE};
use crate::utils::{align_down, align_up};

Expand Down Expand Up @@ -102,9 +103,12 @@ impl VMR {
///
/// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
fn alloc_page_tables(&self, lazy: bool) -> Result<(), SvsmError> {
let start = VirtAddr::from(self.start_pfn << PAGE_SHIFT);
let end = VirtAddr::from(self.end_pfn << PAGE_SHIFT);
let count = end.to_pgtbl_idx::<3>() - start.to_pgtbl_idx::<3>();
let first = VirtAddr::from(self.start_pfn << PAGE_SHIFT);
let first_idx = first.to_pgtbl_idx::<3>();
let start = virt_from_idx(first_idx);
let last = VirtAddr::from(self.end_pfn << PAGE_SHIFT) - 1;
let last_idx = last.to_pgtbl_idx::<3>();
let count = last_idx + 1 - first_idx;
let mut vec = self.pgtbl_parts.lock_write();

for idx in 0..count {
Expand Down Expand Up @@ -144,37 +148,61 @@ impl VMR {
/// Initialize this [`VMR`] by checking the `start` and `end` values and
/// allocating the [`PageTablePart`]s required for the mappings.
///
/// # Safety
/// Callers must ensure that the bounds of the address range are
/// appropriately aligned to prevent the possibility that adjacent address
/// ranges may attempt to share top-level paging entries. If any overlap
/// is attempted, page tables may be corrupted.
///
/// # Arguments
///
/// * `lazy` - When `true`, use lazy allocation of [`PageTablePart`] pages.
///
/// # Returns
///
/// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
fn initialize_common(&self, lazy: bool) -> Result<(), SvsmError> {
unsafe fn initialize_common(&self, lazy: bool) -> Result<(), SvsmError> {
let start = VirtAddr::from(self.start_pfn << PAGE_SHIFT);
let end = VirtAddr::from(self.end_pfn << PAGE_SHIFT);
assert!(start < end && start.is_aligned(VMR_GRANULE) && end.is_aligned(VMR_GRANULE));
assert!(start < end);

self.alloc_page_tables(lazy)
}

/// Initialize this [`VMR`] by calling `VMR::initialize_common` with `lazy = false`
///
/// # Safety
/// Callers must ensure that the bounds of the address range are
/// appropriately aligned to prevent the possibility that adjacent address
/// ranges may attempt to share top-level paging entries. If any overlap
/// is attempted, page tables may be corrupted.
///
/// # Returns
///
/// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
pub fn initialize(&self) -> Result<(), SvsmError> {
self.initialize_common(false)
pub unsafe fn initialize(&self) -> Result<(), SvsmError> {
// SAFETY: The caller takes responsibility for ensuring that the address
// bounds of the range have appropriate alignment with respect to
// the page table alignment boundaries.
unsafe { self.initialize_common(false) }
}

/// Initialize this [`VMR`] by calling `VMR::initialize_common` with `lazy = true`
///
/// # Safety
/// Callers must ensure that the bounds of the address range are
/// appropriately aligned to prevent the possibilty that adjacent address
/// ranges may attempt to share top-level paging entries. If any overlap
/// is attempted, page tables may be corrupted.
///
/// # Returns
///
/// `Ok(())` on success, Err(SvsmError::Mem) on allocation error
pub fn initialize_lazy(&self) -> Result<(), SvsmError> {
self.initialize_common(true)
pub unsafe fn initialize_lazy(&self) -> Result<(), SvsmError> {
// SAFETY: The caller takes responsibility for ensuring that the address
// bounds of the range have appropriate alignment with respect to
// the page table alignment boundaries.
unsafe { self.initialize_common(true) }
}

/// Returns the virtual start and end addresses for this region
Expand Down
12 changes: 10 additions & 2 deletions kernel/src/task/tasks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,11 @@ impl Task {
cpu.populate_page_table(&mut pgtable);

let vm_kernel_range = VMR::new(SVSM_PERTASK_BASE, SVSM_PERTASK_END, PTEntryFlags::empty());
vm_kernel_range.initialize()?;
// SAFETY: The kernel mode task address range is fully aligned to
// top-level paging boundaries.
unsafe {
vm_kernel_range.initialize()?;
}

let xsa = Self::allocate_xsave_area();
let xsa_addr = u64::from(xsa.vaddr()) as usize;
Expand Down Expand Up @@ -319,7 +323,11 @@ impl Task {
name: String,
) -> Result<TaskPointer, SvsmError> {
let vm_user_range = VMR::new(USER_MEM_START, USER_MEM_END, PTEntryFlags::USER);
vm_user_range.initialize_lazy()?;
// SAFETY: the user address range is fully aligned to top-level paging
// boundaries.
unsafe {
vm_user_range.initialize_lazy()?;
}
let create_args = CreateTaskArguments {
entry: user_entry,
name,
Expand Down

0 comments on commit 489d572

Please sign in to comment.