diff --git a/src/mm/vm/mapping/mod.rs b/src/mm/vm/mapping/mod.rs
index e6541e956..768c2b320 100644
--- a/src/mm/vm/mapping/mod.rs
+++ b/src/mm/vm/mapping/mod.rs
@@ -10,6 +10,7 @@ pub mod kernel_stack;
 pub mod phys_mem;
 pub mod rawalloc;
 pub mod reserved;
+pub mod user_stack;
 pub mod vmalloc;
 
 pub use api::{Mapping, VMMAdapter, VMPageFaultResolution, VirtualMapping, VMM};
@@ -18,4 +19,5 @@ pub use kernel_stack::VMKernelStack;
 pub use phys_mem::VMPhysMem;
 pub use rawalloc::RawAllocMapping;
 pub use reserved::VMReserved;
+pub use user_stack::VMUserStack;
 pub use vmalloc::VMalloc;
diff --git a/src/mm/vm/mapping/user_stack.rs b/src/mm/vm/mapping/user_stack.rs
new file mode 100644
index 000000000..d390732cf
--- /dev/null
+++ b/src/mm/vm/mapping/user_stack.rs
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT OR Apache-2.0
+//
+// Copyright (c) 2022-2023 SUSE LLC
+//
+// Author: Joerg Roedel <jroedel@suse.de>
+// Author: Roy Hopkins <rhopkins@suse.de>
+
+use super::VirtualMapping;
+use crate::address::{PhysAddr, VirtAddr};
+use crate::error::SvsmError;
+use crate::mm::address_space::STACK_SIZE;
+use crate::mm::pagetable::PTEntryFlags;
+use crate::types::{PAGE_SHIFT, PAGE_SIZE};
+use crate::utils::page_align_up;
+
+use super::rawalloc::RawAllocMapping;
+use super::Mapping;
+
+/// Mapping to be used as a user mode stack. This maps a stack including guard
+/// pages at the top and bottom.
+#[derive(Default, Debug)]
+pub struct VMUserStack {
+    /// Allocation for stack pages
+    alloc: RawAllocMapping,
+    /// Number of guard pages to reserve address space for
+    guard_pages: usize,
+}
+
+impl VMUserStack {
+    /// Returns the virtual address for the top of this user mode stack
+    ///
+    /// # Arguments
+    ///
+    /// * `base` - Virtual base address this stack is mapped at (including
+    ///   guard pages).
+    ///
+    /// # Returns
+    ///
+    /// Virtual address to program into the hardware stack register
+    pub fn top_of_stack(&self, base: VirtAddr) -> VirtAddr {
+        let guard_size = self.guard_pages * PAGE_SIZE;
+        base + guard_size + self.alloc.mapping_size()
+    }
+
+    /// Create a new [`VMUserStack`] with a given size. This function will
+    /// already allocate the backing pages for the stack.
+    ///
+    /// # Arguments
+    ///
+    /// * `size` - Size of the user stack, without guard pages
+    ///
+    /// # Returns
+    ///
+    /// Initialized stack on success, Err(SvsmError::Mem) on error
+    pub fn new_size(size: usize) -> Result<Self, SvsmError> {
+        // Make sure size is page-aligned
+        let size = page_align_up(size);
+        // At least two guard-pages needed
+        let total_size = (size + 2 * PAGE_SIZE).next_power_of_two();
+        let guard_pages = ((total_size - size) >> PAGE_SHIFT) / 2;
+        let mut stack = VMUserStack {
+            alloc: RawAllocMapping::new(size),
+            guard_pages,
+        };
+        stack.alloc_pages()?;
+
+        Ok(stack)
+    }
+
+    /// Create a new [`VMUserStack`] with the default size. This function
+    /// will already allocate the backing pages for the stack.
+    ///
+    /// # Returns
+    ///
+    /// Initialized stack on success, Err(SvsmError::Mem) on error
+    pub fn new() -> Result<Self, SvsmError> {
+        VMUserStack::new_size(STACK_SIZE)
+    }
+
+    /// Create a new [`VMUserStack`] with the default size, packed into a
+    /// [`Mapping`]. This function will already allocate the backing pages
+    /// for the stack.
+    ///
+    /// # Returns
+    ///
+    /// Initialized Mapping to stack on success, Err(SvsmError::Mem) on error
+    pub fn new_mapping() -> Result<Mapping, SvsmError> {
+        Ok(Mapping::new(Self::new()?))
+    }
+
+    fn alloc_pages(&mut self) -> Result<(), SvsmError> {
+        self.alloc.alloc_pages()
+    }
+}
+
+impl VirtualMapping for VMUserStack {
+    fn mapping_size(&self) -> usize {
+        self.alloc.mapping_size() + ((self.guard_pages * 2) << PAGE_SHIFT)
+    }
+
+    fn map(&self, offset: usize) -> Option<PhysAddr> {
+        let pfn = offset >> PAGE_SHIFT;
+        let guard_offset = self.guard_pages << PAGE_SHIFT;
+
+        if pfn >= self.guard_pages {
+            self.alloc.map(offset - guard_offset)
+        } else {
+            None
+        }
+    }
+
+    fn unmap(&self, offset: usize) {
+        let pfn = offset >> PAGE_SHIFT;
+
+        if pfn >= self.guard_pages {
+            self.alloc.unmap(pfn - self.guard_pages);
+        }
+    }
+
+    fn pt_flags(&self, _offset: usize) -> PTEntryFlags {
+        PTEntryFlags::WRITABLE
+            | PTEntryFlags::NX
+            | PTEntryFlags::ACCESSED
+            | PTEntryFlags::DIRTY
+            | PTEntryFlags::USER
+    }
+}
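The sizing logic in `new_size` rounds the stack plus at least two guard pages up to the next power of two and splits the slack evenly between the bottom and top guards. A host-runnable sketch of the same arithmetic, assuming the usual 4 KiB pages and a 64 KiB default `STACK_SIZE` (`page_align_up` is restated locally rather than imported):

```rust
const PAGE_SHIFT: usize = 12;
const PAGE_SIZE: usize = 1 << PAGE_SHIFT;

// Local stand-in for crate::utils::page_align_up.
fn page_align_up(x: usize) -> usize {
    (x + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
}

fn main() {
    // Mirror VMUserStack::new_size() for a 64 KiB stack.
    let size = page_align_up(64 * 1024);
    let total_size = (size + 2 * PAGE_SIZE).next_power_of_two();
    let guard_pages = ((total_size - size) >> PAGE_SHIFT) / 2;

    // 64 KiB + 8 KiB rounds up to 128 KiB, leaving 8 guard pages on
    // each side of the 16 stack pages.
    assert_eq!(total_size, 128 * 1024);
    assert_eq!(guard_pages, 8);

    // top_of_stack(base) for base = 0: the bottom guard, then the stack.
    let top = guard_pages * PAGE_SIZE + size;
    assert_eq!(top, 96 * 1024);

    // mapping_size() covers the stack plus both guard regions, i.e.
    // exactly the power-of-two region computed above.
    assert_eq!(size + guard_pages * 2 * PAGE_SIZE, total_size);
}
```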
diff --git a/src/mm/vm/mod.rs b/src/mm/vm/mod.rs
index 1c0d8baf2..404788009 100644
--- a/src/mm/vm/mod.rs
+++ b/src/mm/vm/mod.rs
@@ -9,6 +9,6 @@ mod range;
 
 pub use mapping::{
     Mapping, RawAllocMapping, VMFileMapping, VMFileMappingPermission, VMKernelStack, VMMAdapter,
-    VMPhysMem, VMReserved, VMalloc, VirtualMapping, VMM,
+    VMPhysMem, VMReserved, VMUserStack, VMalloc, VirtualMapping, VMM,
 };
 pub use range::{VMRMapping, VMR, VMR_GRANULE};
diff --git a/src/task/tasks.rs b/src/task/tasks.rs
index f6f195569..19e702979 100644
--- a/src/task/tasks.rs
+++ b/src/task/tasks.rs
@@ -20,10 +20,10 @@ use crate::cpu::X86GeneralRegs;
 use crate::error::SvsmError;
 use crate::locking::SpinLock;
 use crate::mm::pagetable::{get_init_pgtable_locked, PTEntryFlags, PageTableRef};
-use crate::mm::vm::{Mapping, VMKernelStack, VMR};
+use crate::mm::vm::{Mapping, VMKernelStack, VMUserStack, VMR};
 use crate::mm::{
     PAGE_SIZE, SVSM_PERTASK_BASE, SVSM_PERTASK_BASE_CPL3, SVSM_PERTASK_END, SVSM_PERTASK_END_CPL3,
-    SVSM_PERTASK_STACK_BASE,
+    SVSM_PERTASK_STACK_BASE, SVSM_PERTASK_STACK_BASE_CPL3,
 };
 use crate::types::PAGE_SHIFT;
@@ -60,6 +60,12 @@ impl From<TaskError> for SvsmError {
     }
 }
 
+#[derive(Clone, Copy, Debug)]
+struct UserParams {
+    entry_point: usize,
+    param: u64,
+}
+
 pub const TASK_FLAG_SHARE_PT: u16 = 0x01;
 
 #[derive(Debug, Default)]
@@ -257,6 +263,7 @@ impl Task {
             PTEntryFlags::USER,
         );
         vm_user_range.initialize()?;
+        vm_user_range.populate(&mut pgtable);
 
         let task: Box<Task> = Box::new(Task {
             rsp: (SVSM_PERTASK_STACK_BASE.bits() + rsp_offset.bits()) as u64,
@@ -274,6 +281,18 @@ impl Task {
         Ok(task)
     }
 
+    pub fn user_create(entry: usize, param: u64, flags: u16) -> Result<Box<Task>, SvsmError> {
+        // Launch via the user-mode entry point
+        let entry_param = Box::new(UserParams {
+            entry_point: entry,
+            param,
+        });
+
+        let mut task = Self::create(launch_user_entry, Box::into_raw(entry_param) as u64, flags)?;
+        task.init_user_mode()?;
+        Ok(task)
+    }
+
     pub fn set_current(&mut self, previous_task: *mut Task) {
         // This function is called by one task but returns in the context of
         // another task. The context of the current task is saved and execution
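Because `Task::create` forwards only a single `u64` to the task entry point, `user_create` boxes the entry point and parameter together and leaks the box with `Box::into_raw`; the `launch_user_entry` trampoline in the next hunk reclaims it with `Box::from_raw`. A stripped-down sketch of that ownership round trip (the `hand_off`/`take_back` helper names are illustrative, not part of the patch):

```rust
#[derive(Clone, Copy, Debug)]
struct UserParams {
    entry_point: usize,
    param: u64,
}

// Producer side: leak the box so the allocation outlives this stack
// frame; the pointer travels to the new task as a plain u64.
fn hand_off(entry_point: usize, param: u64) -> u64 {
    Box::into_raw(Box::new(UserParams { entry_point, param })) as u64
}

// Consumer side: rebuild the box exactly once. Moving the value out
// drops the box and frees the allocation, so the round trip is
// leak-free as long as the task entry actually runs.
fn take_back(raw: u64) -> UserParams {
    unsafe { *Box::from_raw(raw as *mut UserParams) }
}

fn main() {
    let raw = hand_off(0x8000_0000, 42);
    let params = take_back(raw);
    assert_eq!(params.entry_point, 0x8000_0000);
    assert_eq!(params.param, 42);
}
```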
@@ -361,6 +380,20 @@ impl Task {
         ))
     }
 
+    fn init_user_mode(&mut self) -> Result<(), SvsmError> {
+        let stack = VMUserStack::new()?;
+        let offset = stack.top_of_stack(VirtAddr::from(0u64));
+        let mapping = Arc::new(Mapping::new(stack));
+        self.vm_user_range
+            .insert_at(SVSM_PERTASK_STACK_BASE_CPL3, mapping)?;
+
+        self.user = Some(UserTask {
+            user_rsp: offset.bits() as u64,
+            kernel_rsp: 0,
+        });
+        Ok(())
+    }
+
     fn allocate_page_table() -> Result<PageTableRef, SvsmError> {
         // Base the new task page table on the initial SVSM kernel page table.
         // When the pagetable is scheduled to a CPU, the per CPU entry will also
@@ -369,6 +402,60 @@ impl Task {
     }
 }
 
+extern "C" fn launch_user_entry(entry: u64) {
+    unsafe {
+        let params = *Box::from_raw(entry as *mut UserParams);
+        let task_node = this_cpu()
+            .runqueue()
+            .lock_read()
+            .current_task()
+            .expect("Task entry point called when not the current task.");
+        let (user_rsp, kernel_rsp) = {
+            let task = task_node.task.lock_write();
+            let user = task
+                .user
+                .as_ref()
+                .expect("User entry point called from kernel task");
+            let kernel_rsp = &user.kernel_rsp as *const u64;
+            (user.user_rsp, kernel_rsp)
+        };
+
+        asm!(
+            r#"
+            // user mode might change non-volatile registers
+            push %rbx
+            push %rbp
+            push %r12
+            push %r13
+            push %r14
+            push %r15
+
+            // Save the address after the sysretq so when the task
+            // exits it can jump there.
+            leaq 1f(%rip), %r8
+            pushq %r8
+
+            movq %rsp, (%rsi)
+            movq %rax, %rsp
+            movq $0x202, %r11
+            sysretq
+
+            1:
+            pop %r15
+            pop %r14
+            pop %r13
+            pop %r12
+            pop %rbp
+            pop %rbx
+            "#,
+            in("rcx") params.entry_point,
+            in("rdi") params.param,
+            in("rax") user_rsp,
+            in("rsi") kernel_rsp,
+            options(att_syntax));
+    }
+}
+
 extern "C" fn task_exit() {
     unsafe {
         current_task_terminated();
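`sysretq` takes the user-mode return context from registers rather than from the stack: the new RIP comes from `%rcx` and RFLAGS from `%r11`, which is why the trampoline binds `params.entry_point` to `rcx` and loads `$0x202` into `r11` before dropping to CPL3. A small sketch spelling out that RFLAGS constant (the bit names are architectural, not identifiers from this codebase):

```rust
// RFLAGS bit 1 is architecturally reserved and always reads as 1;
// bit 9 is IF, so interrupts are enabled as user mode starts.
const RFLAGS_RESERVED1: u64 = 1 << 1;
const RFLAGS_IF: u64 = 1 << 9;
const USER_RFLAGS: u64 = RFLAGS_RESERVED1 | RFLAGS_IF;

fn main() {
    // Matches the `movq $0x202, %r11` in launch_user_entry.
    assert_eq!(USER_RFLAGS, 0x202);
}
```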