Skip to content

Commit

Permalink
Merge pull request #183 from 00xc/mm/vm
Browse the repository at this point in the history
mm/vm: minor cleanups
  • Loading branch information
joergroedel authored Dec 19, 2023
2 parents 22339f5 + 64ec3af commit 7a85454
Show file tree
Hide file tree
Showing 3 changed files with 74 additions and 74 deletions.
6 changes: 3 additions & 3 deletions src/mm/vm/mapping/api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ use crate::error::SvsmError;
use crate::locking::{RWLock, ReadLockGuard, WriteLockGuard};
use crate::mm::pagetable::PTEntryFlags;
use crate::mm::vm::VMR;
use crate::types::{PAGE_SHIFT, PAGE_SIZE};
use crate::types::{PageSize, PAGE_SHIFT};

use intrusive_collections::rbtree::Link;
use intrusive_collections::{intrusive_adapter, KeyAdapter};
Expand Down Expand Up @@ -95,9 +95,9 @@ pub trait VirtualMapping: core::fmt::Debug {
/// # Returns
///
/// Either PAGE_SIZE or PAGE_SIZE_2M
fn page_size(&self) -> usize {
fn page_size(&self) -> PageSize {
// Default to system page-size
PAGE_SIZE
PageSize::Regular
}

/// Request whether the mapping is shared or private. Defaults to private
Expand Down
63 changes: 33 additions & 30 deletions src/mm/vm/mapping/file_mapping.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,15 @@ use alloc::vec::Vec;
use super::{Mapping, VMPhysMem};

use super::{RawAllocMapping, VMPageFaultResolution, VirtualMapping};
use crate::address::{Address, PhysAddr};
#[cfg(test)]
use crate::address::Address;
use crate::address::PhysAddr;
use crate::error::SvsmError;
use crate::fs::FileHandle;
use crate::mm::vm::VMR;
use crate::mm::PageRef;
use crate::mm::{pagetable::PTEntryFlags, PAGE_SIZE};
use crate::types::PAGE_SHIFT;
use crate::types::{PageSize, PAGE_SHIFT};
use crate::utils::align_up;

#[derive(Debug)]
Expand Down Expand Up @@ -147,11 +149,12 @@ fn copy_page(
file: &FileHandle,
offset: usize,
paddr_dst: PhysAddr,
page_size: usize,
page_size: PageSize,
) -> Result<(), SvsmError> {
let page_size = usize::from(page_size);
let temp_map = VMPhysMem::new(paddr_dst, page_size, true);
let vaddr_new_page = vmr.insert(Arc::new(Mapping::new(temp_map)))?;
let slice = unsafe { from_raw_parts_mut(vaddr_new_page.bits() as *mut u8, page_size) };
let slice = unsafe { from_raw_parts_mut(vaddr_new_page.as_mut_ptr::<u8>(), page_size) };
file.seek(offset);
file.read(slice)?;
vmr.remove(vaddr_new_page)?;
Expand All @@ -164,8 +167,9 @@ fn copy_page(
file: &FileHandle,
offset: usize,
paddr_dst: PhysAddr,
page_size: usize,
page_size: PageSize,
) -> Result<(), SvsmError> {
let page_size = usize::from(page_size);
// In the test environment the physical address is actually the virtual
// address. We can take advantage of this to copy the file contents into the
// mock physical address without worrying about VMRs and page tables.
Expand All @@ -186,10 +190,9 @@ impl VirtualMapping for VMFileMapping {
return None;
}
if let Some(write_copy) = &self.write_copy {
let write_addr = write_copy.map(offset);
if write_addr.is_some() {
return write_addr;
}
if let Some(write_addr) = write_copy.map(offset) {
return Some(write_addr);
};
}
self.pages[page_index].as_ref().map(|p| p.phys_addr())
}
Expand Down Expand Up @@ -219,28 +222,28 @@ impl VirtualMapping for VMFileMapping {
write: bool,
) -> Result<VMPageFaultResolution, SvsmError> {
let page_size = self.page_size();
if write {
if let Some(write_copy) = self.write_copy.as_mut() {
// This is a writeable region with copy-on-write access. The
// page fault will have occurred because the page has not yet
// been allocated. Allocate a page and copy the readonly source
// page into the new writeable page.
let offset_aligned = offset & !(page_size - 1);
if write_copy
.get_alloc_mut()
.alloc_page(offset_aligned)
.is_ok()
{
let paddr_new_page = write_copy.map(offset_aligned).ok_or(SvsmError::Mem)?;
copy_page(vmr, &self.file, offset_aligned, paddr_new_page, page_size)?;
return Ok(VMPageFaultResolution {
paddr: paddr_new_page,
flags: PTEntryFlags::task_data(),
});
}
}
let page_size_bytes = usize::from(page_size);

if !write {
return Err(SvsmError::Mem);
}
Err(SvsmError::Mem)

let Some(write_copy) = self.write_copy.as_mut() else {
return Err(SvsmError::Mem);
};

// This is a writeable region with copy-on-write access. The
// page fault will have occurred because the page has not yet
// been allocated. Allocate a page and copy the readonly source
// page into the new writeable page.
let offset_aligned = offset & !(page_size_bytes - 1);
write_copy.get_alloc_mut().alloc_page(offset_aligned)?;
let paddr_new_page = write_copy.map(offset_aligned).ok_or(SvsmError::Mem)?;
copy_page(vmr, &self.file, offset_aligned, paddr_new_page, page_size)?;
Ok(VMPageFaultResolution {
paddr: paddr_new_page,
flags: PTEntryFlags::task_data(),
})
}
}

Expand Down
79 changes: 38 additions & 41 deletions src/mm/vm/range.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ use crate::cpu::flush_tlb_global_sync;
use crate::error::SvsmError;
use crate::locking::RWLock;
use crate::mm::pagetable::{PTEntryFlags, PageTable, PageTablePart, PageTableRef};
use crate::types::{PAGE_SHIFT, PAGE_SIZE, PAGE_SIZE_2M};
use crate::types::{PageSize, PAGE_SHIFT, PAGE_SIZE};
use crate::utils::{align_down, align_up};

use core::cmp::max;
Expand Down Expand Up @@ -168,13 +168,16 @@ impl VMR {
let idx = PageTable::index::<3>(VirtAddr::from(vmm_start - rstart));
if let Some(paddr) = mapping.map(offset) {
let pt_flags = self.pt_flags | mapping.pt_flags(offset) | PTEntryFlags::PRESENT;
if page_size == PAGE_SIZE {
pgtbl_parts[idx].map_4k(vmm_start + offset, paddr, pt_flags, shared)?;
} else if page_size == PAGE_SIZE_2M {
pgtbl_parts[idx].map_2m(vmm_start + offset, paddr, pt_flags, shared)?;
match page_size {
PageSize::Regular => {
pgtbl_parts[idx].map_4k(vmm_start + offset, paddr, pt_flags, shared)?
}
PageSize::Huge => {
pgtbl_parts[idx].map_2m(vmm_start + offset, paddr, pt_flags, shared)?
}
}
}
offset += page_size;
offset += usize::from(page_size);
}

Ok(())
Expand All @@ -195,17 +198,16 @@ impl VMR {

while vmm_start + offset < vmm_end {
let idx = PageTable::index::<3>(VirtAddr::from(vmm_start - rstart));
let result = if page_size == PAGE_SIZE {
pgtbl_parts[idx].unmap_4k(vmm_start + offset)
} else {
pgtbl_parts[idx].unmap_2m(vmm_start + offset)
let result = match page_size {
PageSize::Regular => pgtbl_parts[idx].unmap_4k(vmm_start + offset),
PageSize::Huge => pgtbl_parts[idx].unmap_2m(vmm_start + offset),
};

if result.is_some() {
mapping.unmap(offset);
}

offset += page_size;
offset += usize::from(page_size);
}
}

Expand Down Expand Up @@ -401,44 +403,39 @@ impl VMR {
// be done as a separate step, returning a reference to the mapping to
// avoid issues with the mapping page fault handler needing mutable access
// to `self.tree` via `insert()`.
let pf_mapping = {
let (pf_mapping, start) = {
let tree = self.tree.lock_read();
let addr = vaddr.pfn();
let cursor = tree.find(&addr);
if let Some(node) = cursor.get() {
let (start, end) = node.range();
if vaddr >= start && vaddr < end {
Some((node.get_mapping_clone(), start))
} else {
None
}
} else {
None
let node = cursor.get().ok_or(SvsmError::Mem)?;
let (start, end) = node.range();
if vaddr < start || vaddr >= end {
return Err(SvsmError::Mem);
}
(node.get_mapping_clone(), start)
};

if let Some((pf_mapping, start)) = pf_mapping {
let resolution = pf_mapping
.get_mut()
.handle_page_fault(self, vaddr - start, write)?;
// The handler has resolved the page fault by allocating a new page.
// Update the page table accordingly.
let vaddr = vaddr.page_align();
let page_size = pf_mapping.get().page_size();
let shared = pf_mapping.get().shared();
let mut pgtbl_parts = self.pgtbl_parts.lock_write();

let (rstart, _) = self.virt_range();
let idx = PageTable::index::<3>(VirtAddr::from(vaddr - rstart));
if page_size == PAGE_SIZE {
pgtbl_parts[idx].map_4k(vaddr, resolution.paddr, resolution.flags, shared)?;
} else if page_size == PAGE_SIZE_2M {
pgtbl_parts[idx].map_2m(vaddr, resolution.paddr, resolution.flags, shared)?;
let resolution = pf_mapping
.get_mut()
.handle_page_fault(self, vaddr - start, write)?;
// The handler has resolved the page fault by allocating a new page.
// Update the page table accordingly.
let vaddr = vaddr.page_align();
let page_size = pf_mapping.get().page_size();
let shared = pf_mapping.get().shared();
let mut pgtbl_parts = self.pgtbl_parts.lock_write();

let (rstart, _) = self.virt_range();
let idx = PageTable::index::<3>(VirtAddr::from(vaddr - rstart));
match page_size {
PageSize::Regular => {
pgtbl_parts[idx].map_4k(vaddr, resolution.paddr, resolution.flags, shared)?
}
PageSize::Huge => {
pgtbl_parts[idx].map_2m(vaddr, resolution.paddr, resolution.flags, shared)?
}
Ok(())
} else {
Err(SvsmError::Mem)
}
Ok(())
}
}

Expand Down

0 comments on commit 7a85454

Please sign in to comment.