Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Enable paging for 64-bit vexpress #1575

Merged
merged 13 commits into from
Jun 13, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,7 @@ script:

# QEMU-ARMv8A
- $make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y
- $make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y CFG_WITH_PAGER=y
- $make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y CFG_RPMB_FS=y
- $make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y CFG_TA_GPROF_SUPPORT=y CFG_ULIBS_GPROF=y

Expand Down
4 changes: 2 additions & 2 deletions core/arch/arm/arm.mk
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
CFG_LTC_OPTEE_THREAD ?= y
# Size of emulated TrustZone protected SRAM, 360 kB.
# Size of emulated TrustZone protected SRAM, 448 kB.
# Only applicable when paging is enabled.
CFG_CORE_TZSRAM_EMUL_SIZE ?= 368640
CFG_CORE_TZSRAM_EMUL_SIZE ?= 458752
CFG_LPAE_ADDR_SPACE_SIZE ?= (1ull << 32)

ifeq ($(CFG_ARM64_core),y)
Expand Down
22 changes: 12 additions & 10 deletions core/arch/arm/kernel/generic_entry_a64.S
Original file line number Diff line number Diff line change
Expand Up @@ -87,26 +87,28 @@ FUNC _start , :

#ifdef CFG_WITH_PAGER
/*
* Move init code into correct location
* Move init code into correct location and move hashes to a
* temporary safe location until the heap is initialized.
*
* The binary is built as:
* [Pager code, rodata and data] : In correct location
* [Init code and rodata] : Should be copied to __init_start
* [Hashes] : Should be saved before clearing bss
* [Hashes] : Should be saved before initializing pager
*
* When we copy init code and rodata into correct location we don't
* need to worry about hashes being overwritten as size of .bss,
* .heap, .nozi and .heap3 is much larger than the size of init
* code and rodata and hashes.
*/
adr x0, __init_start /* dst */
adr x1, __data_end /* src */
adr x2, __init_end /* dst limit */
adr x2, __tmp_hashes_end /* dst limit */
/* Copy backwards (as memmove) in case we're overlapping */
sub x2, x2, x0 /* len */
add x0, x0, x2 /* __init_start + len = __init_end */
add x1, x1, x2 /* __data_end + len */
adr x2, __init_start
copy_init:
ldp x3, x4, [x1], #16
stp x3, x4, [x0], #16
ldp x3, x4, [x1, #-16]!
stp x3, x4, [x0, #-16]!
cmp x0, x2
b.lt copy_init
b.gt copy_init
#endif

/*
Expand Down
4 changes: 2 additions & 2 deletions core/arch/arm/kernel/kern.ld.S
Original file line number Diff line number Diff line change
Expand Up @@ -216,9 +216,9 @@ SECTIONS
.got : { *(.got.plt) *(.got) }
.dynamic : { *(.dynamic) }

__data_end = .;
/* unintialized data */
.bss : ALIGN(8) {
.bss : {
__data_end = .;
__bss_start = .;
*(.bss .bss.*)
*(.gnu.linkonce.b.*)
Expand Down
1 change: 1 addition & 0 deletions core/arch/arm/kernel/mutex.c
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
if (old_value == MUTEX_VALUE_LOCKED) {
wq_wait_init(&m->wq, &wqe);
owner = m->owner_id;
assert(owner != thread_get_id_may_fail());
} else {
m->value = MUTEX_VALUE_LOCKED;
thread_add_mutex(m);
Expand Down
4 changes: 4 additions & 0 deletions core/arch/arm/kernel/ssvce_a32.S
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@
FUNC secure_mmu_unifiedtlbinvall , :
UNWIND( .fnstart)

dsb /* Ensure visibility of the update to translation table walks */
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can use dsb ishst here and dsb ish below after tlbi invalidation. ARMv7 supports these.

This can apply to modified secure_mmu_unifiedtlbinvXXX() below.
(Yet this could come in a later patch, I am preparing something to invalidate by mva when necessary)

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, I'd rather take that in a later patch.

write_tlbiallis

DSB
Expand All @@ -73,6 +74,7 @@ FUNC secure_mmu_unifiedtlbinvbymva , :
UNWIND( .fnstart)

b . @ Wrong code to force fix/check the routine before using it
dsb /* Ensure visibility of the update to translation table walks */

MRC p15, 0, R1, c13, c0, 1 /* Read CP15 Context ID Register (CONTEXTIDR) */
ANDS R1, R1, #0xFF /* Get current ASID */
Expand All @@ -96,6 +98,7 @@ FUNC secure_mmu_unifiedtlbinv_curasid , :
UNWIND( .fnstart)
read_contextidr r0
and r0, r0, #0xff /* Get current ASID */
dsb /* Ensure visibility of the update to translation table walks */
/* Invalidate unified TLB by ASID Inner Sharable */
write_tlbiasidis r0
dsb
Expand All @@ -112,6 +115,7 @@ END_FUNC secure_mmu_unifiedtlbinv_curasid
FUNC secure_mmu_unifiedtlbinv_byasid , :
UNWIND( .fnstart)
and r0, r0, #0xff /* Get ASID */
dsb /* Ensure visibility of the update to translation table walks */
/* Invalidate unified TLB by ASID Inner Sharable */
write_tlbiasidis r0
dsb
Expand Down
20 changes: 18 additions & 2 deletions core/arch/arm/kernel/ssvce_a64.S
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,12 @@

/* void secure_mmu_unifiedtlbinvall(void); */
FUNC secure_mmu_unifiedtlbinvall , :
tlbi vmalle1
/* Ensure visibility of the update to translation table walks */
dsb ishst

tlbi vmalle1is

dsb ish /* Ensure completion of TLB invalidation */
isb
ret
END_FUNC secure_mmu_unifiedtlbinvall
Expand All @@ -45,7 +50,13 @@ END_FUNC secure_mmu_unifiedtlbinv_curasid
/* void secure_mmu_unifiedtlbinv_byasid(unsigned int asid); */
FUNC secure_mmu_unifiedtlbinv_byasid , :
and x0, x0, #TTBR_ASID_MASK
tlbi aside1, x0

/* Ensure visibility of the update to translation table walks */
dsb ishst

tlbi aside1is, x0

dsb ish /* Ensure completion of TLB invalidation */
isb
ret
END_FUNC secure_mmu_unifiedtlbinv_byasid
Expand Down Expand Up @@ -101,6 +112,11 @@ END_FUNC arm_cl1_d_cleaninvbyva
/* void arm_cl1_i_inv_all( void ); */
FUNC arm_cl1_i_inv_all , :
ic ialluis
/*
* ensure completion of the ICache and branch predictor
* invalidation on all processors.
*/
dsb ish
isb
ret
END_FUNC arm_cl1_i_inv_all
Expand Down
18 changes: 15 additions & 3 deletions core/arch/arm/kernel/thread.c
Original file line number Diff line number Diff line change
Expand Up @@ -658,16 +658,28 @@ void thread_state_free(void)
}

#ifdef CFG_WITH_PAGER
static void release_unused_kernel_stack(struct thread_ctx *thr)
static void release_unused_kernel_stack(struct thread_ctx *thr,
uint32_t cpsr __maybe_unused)
{
#ifdef ARM64
/*
* If we're from user mode then thr->regs.sp is the saved user
* stack pointer and thr->kern_sp holds the last kernel stack
* pointer. But if we're from kernel mode then thr->kern_sp isn't
* up to date so we need to read from thr->regs.sp instead.
*/
vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
#else
vaddr_t sp = thr->regs.svc_sp;
#endif
vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
size_t len = sp - base;

tee_pager_release_phys((void *)base, len);
}
#else
static void release_unused_kernel_stack(struct thread_ctx *thr __unused)
static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
uint32_t cpsr __unused)
{
}
#endif
Expand All @@ -681,7 +693,7 @@ int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)

thread_check_canaries();

release_unused_kernel_stack(threads + ct);
release_unused_kernel_stack(threads + ct, cpsr);

if (is_from_user(cpsr)) {
thread_user_save_vfp();
Expand Down
6 changes: 0 additions & 6 deletions core/arch/arm/mm/core_mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -877,12 +877,6 @@ enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)

int core_tlb_maintenance(int op, unsigned int a)
{
/*
* We're doing TLB invalidation because we've changed mapping.
* The dsb() makes sure that written data is visible.
*/
dsb();

switch (op) {
case TLBINV_UNIFIEDTLB:
secure_mmu_unifiedtlbinvall();
Expand Down
7 changes: 6 additions & 1 deletion core/arch/arm/mm/tee_pager.c
Original file line number Diff line number Diff line change
Expand Up @@ -820,6 +820,9 @@ static void rem_area(struct tee_pager_area_head *area_head,
}
}

/* TODO only invalidate entries touched above */
core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

pager_unlock(exceptions);
free_area(area);
}
Expand Down Expand Up @@ -1403,6 +1406,8 @@ static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)

area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
area_set_entry(pmem->area, pmem->pgidx, 0, 0);
/* TODO only invalidate entry touched above */
core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
Copy link
Contributor

@etienne-lms etienne-lms Jun 13, 2017

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

in tee_pager_release_one_phys(), there may also need a tlb invalidation.
edited: discard my comment, TLB maintenance is already implemented from tee_pager_release_phys() once all target pages were released.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, I noticed that too. :-)

tee_pager_save_page(pmem, attr);
assert(pmem->area->pgt->num_used_entries);
pmem->area->pgt->num_used_entries--;
Expand Down Expand Up @@ -1449,7 +1454,7 @@ void tee_pager_release_phys(void *addr, size_t size)
struct tee_pager_area *area;
uint32_t exceptions;

if (!size)
if (end <= begin)
return;

area = find_area(&tee_pager_area_head, begin);
Expand Down
25 changes: 15 additions & 10 deletions core/arch/arm/plat-vexpress/platform_config.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,12 +33,6 @@
/* Make stacks aligned to data cache line length */
#define STACK_ALIGNMENT 64

#ifdef ARM64
#ifdef CFG_WITH_PAGER
#error "Pager not supported for ARM64"
#endif
#endif /*ARM64*/

/* SDP enable but no pool defined: reserve 4MB for SDP tests */
#if defined(CFG_SECURE_DATA_PATH) && !defined(CFG_TEE_SDP_MEM_BASE)
#define CFG_TEE_SDP_MEM_TEST_SIZE 0x00400000
Expand Down Expand Up @@ -225,10 +219,6 @@

#elif defined(PLATFORM_FLAVOR_qemu_armv8a)

#ifdef CFG_WITH_PAGER
#error "Pager not supported for platform vexpress-qemu_armv8a"
#endif

#define DRAM0_BASE UINTPTR_C(0x40000000)
#define DRAM0_SIZE (UINTPTR_C(0x40000000) - CFG_SHMEM_SIZE)

Expand All @@ -238,10 +228,25 @@
#define SECRAM_BASE 0x0e000000
#define SECRAM_SIZE 0x01000000


#ifdef CFG_WITH_PAGER

/* Emulated SRAM */
/* First 1MByte of the secure RAM is reserved to ARM-TF runtime services */
#define TZSRAM_BASE (SECRAM_BASE + 0x00100000)
#define TZSRAM_SIZE CFG_CORE_TZSRAM_EMUL_SIZE

#define TZDRAM_BASE (TZSRAM_BASE + TZSRAM_SIZE)
#define TZDRAM_SIZE (SECRAM_SIZE - TZSRAM_SIZE - 0x00100000)

#else /* CFG_WITH_PAGER */

/* First 1MByte of the secure RAM is reserved to ARM-TF runtime services */
#define TZDRAM_BASE (SECRAM_BASE + 0x00100000)
#define TZDRAM_SIZE (SECRAM_SIZE - 0x00100000)

#endif /* CFG_WITH_PAGER */

#define CFG_TEE_CORE_NB_CORE 2

#define CFG_SHMEM_START (DRAM0_TEERES_BASE + \
Expand Down
2 changes: 1 addition & 1 deletion core/lib/libtomcrypt/src/tee_ltc_provider.c
Original file line number Diff line number Diff line change
Expand Up @@ -505,7 +505,7 @@ static mpa_scratch_mem get_mpa_scratch_memory_pool(size_t *size_pool)
/* release unused pageable_zi vmem */
static void release_unused_mpa_scratch_memory(void)
{
mpa_scratch_mem pool = (mpa_scratch_mem)_ltc_mempool_u32;
mpa_scratch_mem pool = (void *)_ltc_mempool_u32;
struct mpa_scratch_item *item;
vaddr_t start;
vaddr_t end;
Expand Down
3 changes: 2 additions & 1 deletion core/tee/tee_ree_fs.c
Original file line number Diff line number Diff line change
Expand Up @@ -643,7 +643,8 @@ static TEE_Result ree_fs_create(struct tee_pobj *po, bool overwrite,
if (res) {
put_dirh(dirh);
if (*fh) {
ree_fs_close(fh);
ree_fs_close_primitive(*fh);
*fh = NULL;
tee_fs_rpc_remove_dfh(OPTEE_MSG_RPC_CMD_FS, &dfh);
}
}
Expand Down