From 439afaeb3f31608d1d5eec72df99a57d2c089318 Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:17:15 +0200
Subject: [PATCH 01/13] core: assert against recursive mutex locking

Adds an assert to catch the case where the thread holding a mutex
tries to lock it again.
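
For illustration, a standalone model of the new check (not the kernel
code: the thread ID and the owner field are reduced to plain ints, and
the assert stands in for going to sleep on the wait queue):

  #include <assert.h>

  struct mutex_model {
  	int value;	/* 0 = unlocked, 1 = locked */
  	int owner_id;	/* valid while locked */
  };

  static int current_thread_id = 7;	/* stand-in for thread_get_id_may_fail() */

  static void model_lock(struct mutex_model *m)
  {
  	if (m->value) {
  		/* recursive locking would sleep forever; assert instead */
  		assert(m->owner_id != current_thread_id);
  	} else {
  		m->value = 1;
  		m->owner_id = current_thread_id;
  	}
  }

  int main(void)
  {
  	struct mutex_model m = { 0, -1 };

  	model_lock(&m);
  	model_lock(&m);	/* same thread, same mutex: trips the assert */
  	return 0;
  }
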
Reviewed-by: Etienne Carriere
Signed-off-by: Jens Wiklander
---
 core/arch/arm/kernel/mutex.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/core/arch/arm/kernel/mutex.c b/core/arch/arm/kernel/mutex.c
index a25ca123470..6d3cd0ceca2 100644
--- a/core/arch/arm/kernel/mutex.c
+++ b/core/arch/arm/kernel/mutex.c
@@ -63,6 +63,7 @@ static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
 	if (old_value == MUTEX_VALUE_LOCKED) {
 		wq_wait_init(&m->wq, &wqe);
 		owner = m->owner_id;
+		assert(owner != thread_get_id_may_fail());
 	} else {
 		m->value = MUTEX_VALUE_LOCKED;
 		thread_add_mutex(m);

From f0bb736cee1a2fbcc077ee7343389d6717f34d59 Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:17:24 +0200
Subject: [PATCH 02/13] core: REE_FS: avoid deadlock in ree_fs_create()

ree_fs_close() can't be called from the cleanup path of
ree_fs_create() as it tries to acquire the mutex already held by
ree_fs_create(). Copy the relevant content from ree_fs_close()
instead.
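
For illustration, a standalone model of the deadlock, assuming a
single non-recursive mutex serializing both functions (as in
tee_ree_fs.c); in the single-threaded model the assert stands in for
blocking forever:

  #include <assert.h>
  #include <stdbool.h>

  static bool fs_locked;	/* stand-in for the REE FS mutex */

  static void fs_lock(void)
  {
  	assert(!fs_locked);	/* a real mutex would block forever here */
  	fs_locked = true;
  }

  static void fs_unlock(void)
  {
  	fs_locked = false;
  }

  static void model_close(void)		/* models ree_fs_close() */
  {
  	fs_lock();
  	fs_unlock();
  }

  static void model_create_cleanup(void)	/* models ree_fs_create() */
  {
  	fs_lock();
  	model_close();	/* second lock from the same thread: deadlock */
  	fs_unlock();
  }

  int main(void)
  {
  	model_create_cleanup();
  	return 0;
  }
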
Reviewed-by: Etienne Carriere
Signed-off-by: Jens Wiklander
---
 core/tee/tee_ree_fs.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/core/tee/tee_ree_fs.c b/core/tee/tee_ree_fs.c
index dcae78812bc..275ba107a0d 100644
--- a/core/tee/tee_ree_fs.c
+++ b/core/tee/tee_ree_fs.c
@@ -643,7 +643,8 @@ static TEE_Result ree_fs_create(struct tee_pobj *po, bool overwrite,
 	if (res) {
 		put_dirh(dirh);
 		if (*fh) {
-			ree_fs_close(fh);
+			ree_fs_close_primitive(*fh);
+			*fh = NULL;
 			tee_fs_rpc_remove_dfh(OPTEE_MSG_RPC_CMD_FS, &dfh);
 		}
 	}

From 8df34748a3adf3373ff7db5fcecc7a344bb12b6f Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:17:31 +0200
Subject: [PATCH 03/13] ltc: fix 64-bit warning

Reviewed-by: Etienne Carriere
Signed-off-by: Jens Wiklander
---
 core/lib/libtomcrypt/src/tee_ltc_provider.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/lib/libtomcrypt/src/tee_ltc_provider.c b/core/lib/libtomcrypt/src/tee_ltc_provider.c
index a89640b8766..3bdec4f4f8e 100644
--- a/core/lib/libtomcrypt/src/tee_ltc_provider.c
+++ b/core/lib/libtomcrypt/src/tee_ltc_provider.c
@@ -505,7 +505,7 @@ static mpa_scratch_mem get_mpa_scratch_memory_pool(size_t *size_pool)
 /* release unused pageable_zi vmem */
 static void release_unused_mpa_scratch_memory(void)
 {
-	mpa_scratch_mem pool = (mpa_scratch_mem)_ltc_mempool_u32;
+	mpa_scratch_mem pool = (void *)_ltc_mempool_u32;
 	struct mpa_scratch_item *item;
 	vaddr_t start;
 	vaddr_t end;

From 7c74bb33b1246429b14e1a84486b7ded4b39115d Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:17:58 +0200
Subject: [PATCH 04/13] core: make 64-bit tlb invalidation inner shareable

Reviewed-by: Etienne Carriere
Signed-off-by: Jens Wiklander
---
 core/arch/arm/kernel/ssvce_a64.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/arch/arm/kernel/ssvce_a64.S b/core/arch/arm/kernel/ssvce_a64.S
index 6c9bbac84da..827bde11b33 100644
--- a/core/arch/arm/kernel/ssvce_a64.S
+++ b/core/arch/arm/kernel/ssvce_a64.S
@@ -30,7 +30,7 @@
 /* void secure_mmu_unifiedtlbinvall(void); */
 FUNC secure_mmu_unifiedtlbinvall , :
-	tlbi	vmalle1
+	tlbi	vmalle1is
 	isb
 	ret
 END_FUNC secure_mmu_unifiedtlbinvall
@@ -45,7 +45,7 @@ END_FUNC secure_mmu_unifiedtlbinv_curasid
 /* void secure_mmu_unifiedtlbinv_byasid(unsigned int asid); */
 FUNC secure_mmu_unifiedtlbinv_byasid , :
 	and	x0, x0, #TTBR_ASID_MASK
-	tlbi	aside1, x0
+	tlbi	aside1is, x0
 	isb
 	ret
 END_FUNC secure_mmu_unifiedtlbinv_byasid

From 159efbc19399bbd46aeec034b515d14edaeab8a6 Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:18:07 +0200
Subject: [PATCH 05/13] core: add dsb instructions for tlb invalidation

Adds DSB instructions needed to make page table updates visible to
translation table walks before TLB invalidation, and to ensure
completion of the invalidation itself.

Reviewed-by: Etienne Carriere
Signed-off-by: Jens Wiklander
---
 core/arch/arm/kernel/ssvce_a32.S |  4 ++++
 core/arch/arm/kernel/ssvce_a64.S | 16 ++++++++++++++++
 core/arch/arm/mm/core_mmu.c      |  6 ------
 3 files changed, 20 insertions(+), 6 deletions(-)

diff --git a/core/arch/arm/kernel/ssvce_a32.S b/core/arch/arm/kernel/ssvce_a32.S
index e2850f1eb08..00ed138ffd7 100644
--- a/core/arch/arm/kernel/ssvce_a32.S
+++ b/core/arch/arm/kernel/ssvce_a32.S
@@ -55,6 +55,7 @@
 FUNC secure_mmu_unifiedtlbinvall , :
 UNWIND(	.fnstart)
+	dsb	/* Ensure visibility of the update to translation table walks */
 	write_tlbiallis
 	DSB
@@ -73,6 +74,7 @@
 FUNC secure_mmu_unifiedtlbinvbymva , :
 UNWIND(	.fnstart)
 	b	.	@ Wrong code to force fix/check the routine before using it
+	dsb	/* Ensure visibility of the update to translation table walks */
 	MRC	p15, 0, R1, c13, c0, 1	/* Read CP15 Context ID Register (CONTEXTIDR) */
 	ANDS	R1, R1, #0xFF		/* Get current ASID */
@@ -96,6 +98,7 @@
 FUNC secure_mmu_unifiedtlbinv_curasid , :
 UNWIND(	.fnstart)
 	read_contextidr r0
 	and	r0, r0, #0xff		/* Get current ASID */
+	dsb	/* Ensure visibility of the update to translation table walks */
 	/* Invalidate unified TLB by ASID Inner Sharable */
 	write_tlbiasidis r0
 	dsb
@@ -112,6 +115,7 @@ END_FUNC secure_mmu_unifiedtlbinv_curasid
 FUNC secure_mmu_unifiedtlbinv_byasid , :
 UNWIND(	.fnstart)
 	and	r0, r0, #0xff		/* Get ASID */
+	dsb	/* Ensure visibility of the update to translation table walks */
 	/* Invalidate unified TLB by ASID Inner Sharable */
 	write_tlbiasidis r0
 	dsb
diff --git a/core/arch/arm/kernel/ssvce_a64.S b/core/arch/arm/kernel/ssvce_a64.S
index 827bde11b33..48de7876d37 100644
--- a/core/arch/arm/kernel/ssvce_a64.S
+++ b/core/arch/arm/kernel/ssvce_a64.S
@@ -30,7 +30,12 @@
 /* void secure_mmu_unifiedtlbinvall(void); */
 FUNC secure_mmu_unifiedtlbinvall , :
+	/* Ensure visibility of the update to translation table walks */
+	dsb	ishst
+
 	tlbi	vmalle1is
+
+	dsb	ish	/* Ensure completion of TLB invalidation */
 	isb
 	ret
 END_FUNC secure_mmu_unifiedtlbinvall
@@ -45,7 +50,13 @@ END_FUNC secure_mmu_unifiedtlbinv_curasid
 /* void secure_mmu_unifiedtlbinv_byasid(unsigned int asid); */
 FUNC secure_mmu_unifiedtlbinv_byasid , :
 	and	x0, x0, #TTBR_ASID_MASK
+
+	/* Ensure visibility of the update to translation table walks */
+	dsb	ishst
+
 	tlbi	aside1is, x0
+
+	dsb	ish	/* Ensure completion of TLB invalidation */
 	isb
 	ret
 END_FUNC secure_mmu_unifiedtlbinv_byasid
@@ -101,6 +112,11 @@ END_FUNC arm_cl1_d_cleaninvbyva
 /* void arm_cl1_i_inv_all( void ); */
 FUNC arm_cl1_i_inv_all , :
 	ic	ialluis
+	/*
+	 * ensure completion of the ICache and branch predictor
+	 * invalidation on all processors.
+	 */
+	dsb	ish
 	isb
 	ret
 END_FUNC arm_cl1_i_inv_all
diff --git a/core/arch/arm/mm/core_mmu.c b/core/arch/arm/mm/core_mmu.c
index c40849f3abf..d0936d1d19a 100644
--- a/core/arch/arm/mm/core_mmu.c
+++ b/core/arch/arm/mm/core_mmu.c
@@ -877,12 +877,6 @@ enum teecore_memtypes core_mmu_get_type_by_pa(paddr_t pa)
 int core_tlb_maintenance(int op, unsigned int a)
 {
-	/*
-	 * We're doing TLB invalidation because we've changed mapping.
-	 * The dsb() makes sure that written data is visible.
-	 */
-	dsb();
-
 	switch (op) {
 	case TLBINV_UNIFIEDTLB:
 		secure_mmu_unifiedtlbinvall();

From 4387c26b17ff23d96a233c8d8674102fdbac9b05 Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Mon, 12 Jun 2017 14:59:23 +0200
Subject: [PATCH 06/13] core: pager: invalidate tlb when clearing entry

When clearing an entry in a translation table, the corresponding TLB
entry must always be invalidated. This patch addresses two places
where that was missing.

This fixes a problem seen in xtest regression suite case 1016.
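
For illustration, a standalone model of the hazard: a one-entry
software "TLB" caching a translation (all names are invented for the
model):

  #include <assert.h>
  #include <stdbool.h>

  static unsigned table_entry = 0xa000;	/* the "translation table" */
  static unsigned tlb_entry;		/* cached translation */
  static bool tlb_valid;

  static unsigned lookup(void)
  {
  	if (!tlb_valid) {		/* walk the table on a TLB miss */
  		tlb_entry = table_entry;
  		tlb_valid = true;
  	}
  	return tlb_entry;
  }

  static void clear_entry(void)
  {
  	table_entry = 0;
  	tlb_valid = false;	/* the invalidation this patch adds */
  }

  int main(void)
  {
  	assert(lookup() == 0xa000);	/* populate the TLB */
  	clear_entry();
  	assert(lookup() == 0);		/* without invalidation: stale hit */
  	return 0;
  }
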
Signed-off-by: Jens Wiklander
---
 core/arch/arm/mm/tee_pager.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/core/arch/arm/mm/tee_pager.c b/core/arch/arm/mm/tee_pager.c
index 1dec539d2c0..cf45cdebe52 100644
--- a/core/arch/arm/mm/tee_pager.c
+++ b/core/arch/arm/mm/tee_pager.c
@@ -820,6 +820,9 @@ static void rem_area(struct tee_pager_area_head *area_head,
 		}
 	}
 
+	/* TODO only invalidate entries touched above */
+	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
+
 	pager_unlock(exceptions);
 	free_area(area);
 }
@@ -1403,6 +1406,8 @@ static void pager_save_and_release_entry(struct tee_pager_pmem *pmem)
 	area_get_entry(pmem->area, pmem->pgidx, NULL, &attr);
 	area_set_entry(pmem->area, pmem->pgidx, 0, 0);
+	/* TODO only invalidate entry touched above */
+	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
 	tee_pager_save_page(pmem, attr);
 	assert(pmem->area->pgt->num_used_entries);
 	pmem->area->pgt->num_used_entries--;

From cc358cca88270f7a052d3a2e80d4f893d0244b26 Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:18:36 +0200
Subject: [PATCH 07/13] core: link script: .bss alignment

.bss may need a larger alignment than 8. Instead of trying to guess,
let the linker choose it, and to avoid having an unaccounted hole
before .bss, set __data_end first thing inside the .bss section.

This makes sure that the data section is properly padded out when
assembling a paged tee.bin.

Acked-by: Etienne Carriere
Signed-off-by: Jens Wiklander
---
 core/arch/arm/kernel/kern.ld.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/arch/arm/kernel/kern.ld.S b/core/arch/arm/kernel/kern.ld.S
index 552e1ddd87d..409d1e13bb8 100644
--- a/core/arch/arm/kernel/kern.ld.S
+++ b/core/arch/arm/kernel/kern.ld.S
@@ -216,9 +216,9 @@
 	.got : { *(.got.plt) *(.got) }
 	.dynamic : { *(.dynamic) }
 
-	__data_end = .;
 	/* unintialized data */
-	.bss : ALIGN(8) {
+	.bss : {
+		__data_end = .;
 		__bss_start = .;
 		*(.bss .bss.*)
 		*(.gnu.linkonce.b.*)

From 7fd107a98b17e5dda162c8545dc0738bcd612860 Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 20:27:30 +0200
Subject: [PATCH 08/13] core: bugfix tee_pager_release_phys()

Fixes the case where less than a page is to be released by ignoring
the request.
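
begin and end are computed from addr and size earlier in the function
(not visible in the hunk); assuming the usual page rounding, a
standalone model shows why checking size alone was not enough:

  #include <assert.h>
  #include <stdint.h>
  #include <stddef.h>

  #define SMALL_PAGE_SIZE	4096
  #define ROUNDUP(v, a)		(((v) + (a) - 1) & ~((uintptr_t)(a) - 1))
  #define ROUNDDOWN(v, a)	((v) & ~((uintptr_t)(a) - 1))

  int main(void)
  {
  	uintptr_t addr = 0x1100;	/* not page aligned */
  	size_t size = 0x200;		/* nonzero, less than a page */
  	uintptr_t begin = ROUNDUP(addr, SMALL_PAGE_SIZE);
  	uintptr_t end = ROUNDDOWN(addr + size, SMALL_PAGE_SIZE);

  	/* size != 0, yet no whole page is covered */
  	assert(size);
  	assert(end <= begin);	/* the new check ignores the request */
  	return 0;
  }
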
Reviewed-by: Etienne Carriere
Signed-off-by: Jens Wiklander
---
 core/arch/arm/mm/tee_pager.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/arch/arm/mm/tee_pager.c b/core/arch/arm/mm/tee_pager.c
index cf45cdebe52..ba93fd054fb 100644
--- a/core/arch/arm/mm/tee_pager.c
+++ b/core/arch/arm/mm/tee_pager.c
@@ -1454,7 +1454,7 @@ void tee_pager_release_phys(void *addr, size_t size)
 	struct tee_pager_area *area;
 	uint32_t exceptions;
 
-	if (!size)
+	if (end <= begin)
 		return;
 
 	area = find_area(&tee_pager_area_head, begin);

From a1c791e73fd820401174c10856dd8d765c6cf982 Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:18:43 +0200
Subject: [PATCH 09/13] core: 64-bit update release_unused_kernel_stack()

When the pager is enabled, release_unused_kernel_stack() is called as
the state of a thread is saved, in order to release unused stack
pages. Update release_unused_kernel_stack() to also work in 64-bit
mode.

Acked-by: Etienne Carriere
Signed-off-by: Jens Wiklander
---
 core/arch/arm/kernel/thread.c | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c
index 1bd035efede..59516762b7b 100644
--- a/core/arch/arm/kernel/thread.c
+++ b/core/arch/arm/kernel/thread.c
@@ -658,16 +658,28 @@ void thread_state_free(void)
 }
 
 #ifdef CFG_WITH_PAGER
-static void release_unused_kernel_stack(struct thread_ctx *thr)
+static void release_unused_kernel_stack(struct thread_ctx *thr,
+					uint32_t cpsr __maybe_unused)
 {
+#ifdef ARM64
+	/*
+	 * If we're from user mode then thr->regs.sp is the saved user
+	 * stack pointer and thr->kern_sp holds the last kernel stack
+	 * pointer. But if we're from kernel mode then thr->kern_sp isn't
+	 * up to date so we need to read from thr->regs.sp instead.
+	 */
+	vaddr_t sp = is_from_user(cpsr) ? thr->kern_sp : thr->regs.sp;
+#else
 	vaddr_t sp = thr->regs.svc_sp;
+#endif
 	vaddr_t base = thr->stack_va_end - STACK_THREAD_SIZE;
 	size_t len = sp - base;
 
 	tee_pager_release_phys((void *)base, len);
 }
 #else
-static void release_unused_kernel_stack(struct thread_ctx *thr __unused)
+static void release_unused_kernel_stack(struct thread_ctx *thr __unused,
+					uint32_t cpsr __unused)
 {
 }
 #endif
@@ -681,7 +693,7 @@ int thread_state_suspend(uint32_t flags, uint32_t cpsr, vaddr_t pc)
 
 	thread_check_canaries();
 
-	release_unused_kernel_stack(threads + ct);
+	release_unused_kernel_stack(threads + ct, cpsr);
 
 	if (is_from_user(cpsr)) {
 		thread_user_save_vfp();

From f2bdb6dbda421ab5bd965195021356858abbae10 Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:18:53 +0200
Subject: [PATCH 10/13] core: update 64-bit copy_init from 32-bit version

Updates the copy_init part in generic_entry_a64.S from
generic_entry_a32.S.
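
The switch to copying backwards gives memmove() semantics for the
overlapping source and destination; a standalone model (single bytes
instead of the 16-byte ldp/stp pairs) demonstrates the idea:

  #include <assert.h>
  #include <string.h>

  /* Copy highest byte first so an overlapping dst > src is safe */
  static void copy_backwards(char *dst, const char *src, size_t len)
  {
  	while (len--)
  		dst[len] = src[len];
  }

  int main(void)
  {
  	char buf[8] = "abcdef";

  	/* overlapping move: shift "abcdef" up by two bytes */
  	copy_backwards(buf + 2, buf, 6);
  	assert(!memcmp(buf + 2, "abcdef", 6));
  	return 0;
  }
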
Reviewed-by: Etienne Carriere
Signed-off-by: Jens Wiklander
---
 core/arch/arm/kernel/generic_entry_a64.S | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/core/arch/arm/kernel/generic_entry_a64.S b/core/arch/arm/kernel/generic_entry_a64.S
index b7290e6cac3..9f6c0ecde8c 100644
--- a/core/arch/arm/kernel/generic_entry_a64.S
+++ b/core/arch/arm/kernel/generic_entry_a64.S
@@ -87,26 +87,28 @@ FUNC _start , :
 
 #ifdef CFG_WITH_PAGER
 	/*
-	 * Move init code into correct location
+	 * Move init code into correct location and move hashes to a
+	 * temporary safe location until the heap is initialized.
 	 *
 	 * The binary is built as:
 	 * [Pager code, rodata and data] : In correct location
 	 * [Init code and rodata] : Should be copied to __init_start
-	 * [Hashes] : Should be saved before clearing bss
+	 * [Hashes] : Should be saved before initializing pager
 	 *
-	 * When we copy init code and rodata into correct location we don't
-	 * need to worry about hashes being overwritten as size of .bss,
-	 * .heap, .nozi and .heap3 is much larger than the size of init
-	 * code and rodata and hashes.
 	 */
 	adr	x0, __init_start	/* dst */
 	adr	x1, __data_end		/* src */
-	adr	x2, __init_end		/* dst limit */
+	adr	x2, __tmp_hashes_end	/* dst limit */
+	/* Copy backwards (as memmove) in case we're overlapping */
+	sub	x2, x2, x0		/* len */
+	add	x0, x0, x2		/* __init_start + len = __tmp_hashes_end */
+	add	x1, x1, x2		/* __data_end + len */
+	adr	x2, __init_start
 copy_init:
-	ldp	x3, x4, [x1], #16
-	stp	x3, x4, [x0], #16
+	ldp	x3, x4, [x1, #-16]!
+	stp	x3, x4, [x0, #-16]!
 	cmp	x0, x2
-	b.lt	copy_init
+	b.gt	copy_init
 #endif

From 7c930bef7832565126f4fca2cd1fc764cb31559c Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:18:58 +0200
Subject: [PATCH 11/13] core: arm: increase emulated SRAM

Increases emulated TrustZone protected SRAM to 448 kB (458752 bytes)
to improve pager performance, especially in 64-bit mode.

Reviewed-by: Etienne Carriere
Signed-off-by: Jens Wiklander
---
 core/arch/arm/arm.mk | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/arch/arm/arm.mk b/core/arch/arm/arm.mk
index 40253d06111..a39d90667eb 100644
--- a/core/arch/arm/arm.mk
+++ b/core/arch/arm/arm.mk
@@ -1,7 +1,7 @@
 CFG_LTC_OPTEE_THREAD ?= y
-# Size of emulated TrustZone protected SRAM, 360 kB.
+# Size of emulated TrustZone protected SRAM, 448 kB.
 # Only applicable when paging is enabled.
-CFG_CORE_TZSRAM_EMUL_SIZE ?= 368640
+CFG_CORE_TZSRAM_EMUL_SIZE ?= 458752
 CFG_LPAE_ADDR_SPACE_SIZE ?= (1ull << 32)
 
 ifeq ($(CFG_ARM64_core),y)

From 1559b4436c0c4cc225b47e8c0f2165aa5c1cae56 Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:19:05 +0200
Subject: [PATCH 12/13] plat-vexpress: enable 64-bit paging

Tested-by: Jerome Forissier (Hikey GP)
Tested-by: Jens Wiklander (Juno AArch64)
Tested-by: Jens Wiklander (FVP AArch64)
Tested-by: Jens Wiklander (QEMU AArch64)
Signed-off-by: Jens Wiklander
---
 core/arch/arm/plat-vexpress/platform_config.h | 25 +++++++++++--------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/core/arch/arm/plat-vexpress/platform_config.h b/core/arch/arm/plat-vexpress/platform_config.h
index 45d99933235..a98cbca5467 100644
--- a/core/arch/arm/plat-vexpress/platform_config.h
+++ b/core/arch/arm/plat-vexpress/platform_config.h
@@ -33,12 +33,6 @@
 /* Make stacks aligned to data cache line length */
 #define STACK_ALIGNMENT		64
 
-#ifdef ARM64
-#ifdef CFG_WITH_PAGER
-#error "Pager not supported for ARM64"
-#endif
-#endif /*ARM64*/
-
 /* SDP enable but no pool defined: reserve 4MB for SDP tests */
 #if defined(CFG_SECURE_DATA_PATH) && !defined(CFG_TEE_SDP_MEM_BASE)
 #define CFG_TEE_SDP_MEM_TEST_SIZE	0x00400000
@@ -225,10 +219,6 @@
 #elif defined(PLATFORM_FLAVOR_qemu_armv8a)
 
-#ifdef CFG_WITH_PAGER
-#error "Pager not supported for platform vexpress-qemu_armv8a"
-#endif
-
 #define DRAM0_BASE		UINTPTR_C(0x40000000)
 #define DRAM0_SIZE		(UINTPTR_C(0x40000000) - CFG_SHMEM_SIZE)
@@ -238,10 +228,25 @@
 #define SECRAM_BASE		0x0e000000
 #define SECRAM_SIZE		0x01000000
+
+#ifdef CFG_WITH_PAGER
+
+/* Emulated SRAM */
+/* First 1MByte of the secure RAM is reserved to ARM-TF runtime services */
+#define TZSRAM_BASE		(SECRAM_BASE + 0x00100000)
+#define TZSRAM_SIZE		CFG_CORE_TZSRAM_EMUL_SIZE
+
+#define TZDRAM_BASE		(TZSRAM_BASE + TZSRAM_SIZE)
+#define TZDRAM_SIZE		(SECRAM_SIZE - TZSRAM_SIZE - 0x00100000)
+
+#else /* CFG_WITH_PAGER */
+
 /* First 1MByte of the secure RAM is reserved to ARM-TF runtime services */
 #define TZDRAM_BASE		(SECRAM_BASE + 0x00100000)
 #define TZDRAM_SIZE		(SECRAM_SIZE - 0x00100000)
 
+#endif /* CFG_WITH_PAGER */
+
 #define CFG_TEE_CORE_NB_CORE	2
 
 #define CFG_SHMEM_START		(DRAM0_TEERES_BASE + \

From ff3799dce78a7cb6937623db8ee899e40495fbf8 Mon Sep 17 00:00:00 2001
From: Jens Wiklander
Date: Thu, 1 Jun 2017 16:19:53 +0200
Subject: [PATCH 13/13] travis: compile QEMU v8 with CFG_WITH_PAGER=y

Signed-off-by: Jens Wiklander
---
 .travis.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.travis.yml b/.travis.yml
index 6866dfe6487..ad9da7fa40c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -168,6 +168,7 @@ script:
 
   # QEMU-ARMv8A
   - $make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y
+  - $make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y CFG_WITH_PAGER=y
  - $make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y CFG_RPMB_FS=y
  - $make PLATFORM=vexpress-qemu_armv8a CFG_ARM64_core=y CFG_TA_GPROF_SUPPORT=y CFG_ULIBS_GPROF=y