arm64: kernel: Add support for Privileged Access Never
'Privileged Access Never' is a new ARMv8.1 feature which prevents
privileged code from accessing any virtual address where read or write
access is also permitted at EL0.

This patch enables the PAN feature on all CPUs, and modifies the
{get,put}_user helpers to temporarily permit access.

This will catch kernel bugs where user memory is accessed directly.
'Unprivileged loads and stores' using LDTRB et al. are unaffected by PAN.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[will: use ALTERNATIVE in asm and tidy up pan_enable check]
Signed-off-by: Will Deacon <will.deacon@arm.com>
James Morse authored and wildea01 committed Jul 27, 2015
1 parent 9ded63a commit 338d4f4
Showing 14 changed files with 121 additions and 2 deletions.
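
The pattern repeated throughout the diffs below: each user-access path clears
PSTATE.PAN before touching user memory and sets it again afterwards, using
instructions that are patched in at boot only on CPUs that implement the
feature. A minimal C sketch of that shape (kernel context assumed, i.e.
asm/alternative.h, asm/cpufeature.h and asm/sysreg.h; the function and the
bare load are hypothetical, and real helpers additionally register the access
in the exception table so a faulting user pointer is handled gracefully):

	/* Hypothetical sketch: bracket one privileged load from user memory
	 * with PAN toggles. On pre-ARMv8.1 CPUs both ALTERNATIVE sites stay
	 * as NOPs, so this degrades to a plain load. */
	static inline unsigned long example_read_user_byte(const unsigned char __user *uptr)
	{
		unsigned long val;

		/* PSTATE.PAN = 0: privileged access to EL0 memory allowed */
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
				CONFIG_ARM64_PAN));
		asm volatile("ldrb %w0, [%1]" : "=r" (val) : "r" (uptr));
		/* PSTATE.PAN = 1: such accesses fault again */
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
				CONFIG_ARM64_PAN));
		return val;
	}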
14 changes: 14 additions & 0 deletions arch/arm64/Kconfig
@@ -596,6 +596,20 @@ config FORCE_MAX_ZONEORDER
default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
default "11"

config ARM64_PAN
bool "Enable support for Privileged Access Never (PAN)"
default y
help
Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
prevents the kernel or hypervisor from accessing user-space (EL0)
memory directly.

Choosing this option will cause any unprotected (not using
copy_to_user et al) memory access to fail with a permission fault.

The feature is detected at runtime, and will remain as a 'nop'
instruction if the CPU does not implement the feature.

menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
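
How the help text's "remain as a 'nop'" promise is kept: ALTERNATIVE() emits
the default instruction inline together with a record in a dedicated section,
and at boot apply_alternatives() overwrites the default with the replacement
on CPUs that report the capability. Roughly, as a simplified sketch of the
asm/alternative.h machinery (the exact entry layout differs):

	661:	nop				// default: runs on CPUs without PAN
	662:
		.pushsection .altinstructions, "a"
		// entry: { site (661b), replacement (663f), ARM64_HAS_PAN,
		//          default length (662b-661b), repl. length (664f-663f) }
		.popsection
		.pushsection .altinstr_replacement, "a"
	663:	.inst	0xd500409f		// msr pan, #0, patched in at boot
	664:
		.popsection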
3 changes: 2 additions & 1 deletion arch/arm64/include/asm/cpufeature.h
@@ -25,8 +25,9 @@
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
#define ARM64_WORKAROUND_845719 2
#define ARM64_HAS_SYSREG_GIC_CPUIF 3
#define ARM64_HAS_PAN 4

#define ARM64_NCAPS 4
#define ARM64_NCAPS 5

#ifndef __ASSEMBLY__

8 changes: 8 additions & 0 deletions arch/arm64/include/asm/futex.h
@@ -20,10 +20,16 @@

#include <linux/futex.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/sysreg.h>

#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
asm volatile( \
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w3, %w0, %2\n" \
@@ -39,6 +45,8 @@
" .align 3\n" \
" .quad 1b, 4b, 2b, 4b\n" \
" .popsection\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
: "memory")
2 changes: 2 additions & 0 deletions arch/arm64/include/asm/processor.h
@@ -186,4 +186,6 @@ static inline void spin_lock_prefetch(const void *x)

#endif

void cpu_enable_pan(void);

#endif /* __ASM_PROCESSOR_H */
8 changes: 8 additions & 0 deletions arch/arm64/include/asm/sysreg.h
@@ -20,6 +20,8 @@
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H

#include <asm/opcodes.h>

#define SCTLR_EL1_CP15BEN (0x1 << 5)
#define SCTLR_EL1_SED (0x1 << 8)

@@ -36,6 +38,12 @@
#define sys_reg(op0, op1, crn, crm, op2) \
((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))

#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
#define SCTLR_EL1_SPAN (1 << 23)

#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
(!!x)<<8 | 0x1f)

#ifdef __ASSEMBLY__

.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
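
Working through the SET_PSTATE_PAN() encoding above: it assembles an
MSR (immediate) write to the PAN PSTATE field as a raw instruction word, so
no assembler support for the new register is needed (hence __inst_arm):

	sys_reg(0, 0, 4, 0, 4) = (4 << 12) | (4 << 5)              = 0x4080
	SET_PSTATE_PAN(0) = 0xd5000000 | 0x4080 | (0 << 8) | 0x1f  = 0xd500409f  /* msr pan, #0 */
	SET_PSTATE_PAN(1) = 0xd5000000 | 0x4080 | (1 << 8) | 0x1f  = 0xd500419f  /* msr pan, #1 */

The (!!x) << 8 term places the one-bit immediate in the CRm slot, and 0x1f is
the fixed Rt field (0b11111) required by the MSR (immediate) encoding.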
11 changes: 11 additions & 0 deletions arch/arm64/include/asm/uaccess.h
@@ -24,7 +24,10 @@
#include <linux/string.h>
#include <linux/thread_info.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
@@ -131,6 +134,8 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err)); \
@@ -148,6 +153,8 @@ do { \
BUILD_BUG(); \
} \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
} while (0)

#define __get_user(x, ptr) \
@@ -194,6 +201,8 @@ do { \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
switch (sizeof(*(ptr))) { \
case 1: \
__put_user_asm("strb", "%w", __pu_val, (ptr), (err)); \
@@ -210,6 +219,8 @@ do { \
default: \
BUILD_BUG(); \
} \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)); \
} while (0)

#define __put_user(x, ptr) \
1 change: 1 addition & 0 deletions arch/arm64/include/uapi/asm/ptrace.h
@@ -44,6 +44,7 @@
#define PSR_I_BIT 0x00000080
#define PSR_A_BIT 0x00000100
#define PSR_D_BIT 0x00000200
#define PSR_PAN_BIT 0x00400000
#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
8 changes: 7 additions & 1 deletion arch/arm64/kernel/armv8_deprecated.c
@@ -14,6 +14,8 @@
#include <linux/slab.h>
#include <linux/sysctl.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/opcodes.h>
#include <asm/sysreg.h>
@@ -280,6 +282,8 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
" mov %w2, %w1\n" \
"0: ldxr"B" %w1, [%3]\n" \
"1: stxr"B" %w0, %w2, [%3]\n" \
@@ -295,7 +299,9 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
" .align 3\n" \
" .quad 0b, 3b\n" \
" .quad 1b, 3b\n" \
" .popsection" \
" .popsection\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
: "=&r" (res), "+r" (data), "=&r" (temp) \
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
: "memory")
20 changes: 20 additions & 0 deletions arch/arm64/kernel/cpufeature.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>

static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
@@ -39,6 +40,15 @@ has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
return feature_matches(val, entry);
}

static bool __maybe_unused
has_id_aa64mmfr1_feature(const struct arm64_cpu_capabilities *entry)
{
u64 val;

val = read_cpuid(id_aa64mmfr1_el1);
return feature_matches(val, entry);
}

static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -47,6 +57,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.field_pos = 24,
.min_field_value = 1,
},
#ifdef CONFIG_ARM64_PAN
{
.desc = "Privileged Access Never",
.capability = ARM64_HAS_PAN,
.matches = has_id_aa64mmfr1_feature,
.field_pos = 20,
.min_field_value = 1,
.enable = cpu_enable_pan,
},
#endif /* CONFIG_ARM64_PAN */
{},
};

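
The new capability entry above matches when the 4-bit PAN field of
ID_AA64MMFR1_EL1, at bits [23:20] (field_pos = 20), is at least 1. A
standalone sketch of the equivalent test (the helper name is hypothetical;
read_cpuid() is the same accessor used in the diff):

	/* Hypothetical equivalent of the table entry's match:
	 * ID_AA64MMFR1_EL1[23:20] >= 1 means PAN is implemented. */
	static bool cpu_supports_pan(void)
	{
		u64 mmfr1 = read_cpuid(id_aa64mmfr1_el1);

		return ((mmfr1 >> 20) & 0xf) >= 1;
	}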
8 changes: 8 additions & 0 deletions arch/arm64/lib/clear_user.S
@@ -16,7 +16,11 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

.text

@@ -29,6 +33,8 @@
* Alignment fixed up by hardware.
*/
ENTRY(__clear_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
@@ -48,6 +54,8 @@ USER(9f, strh wzr, [x0], #2 )
b.mi 5f
USER(9f, strb wzr, [x0] )
5: mov x0, #0
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
ret
ENDPROC(__clear_user)

8 changes: 8 additions & 0 deletions arch/arm64/lib/copy_from_user.S
@@ -15,7 +15,11 @@
*/

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/*
* Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -28,6 +32,8 @@
* x0 - bytes not copied
*/
ENTRY(__copy_from_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add x5, x1, x2 // upper user buffer boundary
subs x2, x2, #16
b.mi 1f
@@ -56,6 +62,8 @@ USER(9f, ldrh w3, [x1], #2 )
USER(9f, ldrb w3, [x1] )
strb w3, [x0]
5: mov x0, #0
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
ret
ENDPROC(__copy_from_user)

8 changes: 8 additions & 0 deletions arch/arm64/lib/copy_in_user.S
@@ -17,7 +17,11 @@
*/

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/*
* Copy from user space to user space (alignment handled by the hardware)
@@ -30,6 +34,8 @@
* x0 - bytes not copied
*/
ENTRY(__copy_in_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add x5, x0, x2 // upper user buffer boundary
subs x2, x2, #16
b.mi 1f
@@ -58,6 +64,8 @@ USER(9f, strh w3, [x0], #2 )
USER(9f, ldrb w3, [x1] )
USER(9f, strb w3, [x0] )
5: mov x0, #0
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
ret
ENDPROC(__copy_in_user)

8 changes: 8 additions & 0 deletions arch/arm64/lib/copy_to_user.S
@@ -15,7 +15,11 @@
*/

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/*
* Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -28,6 +32,8 @@
* x0 - bytes not copied
*/
ENTRY(__copy_to_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add x5, x0, x2 // upper user buffer boundary
subs x2, x2, #16
b.mi 1f
@@ -56,6 +62,8 @@ USER(9f, strh w3, [x0], #2 )
ldrb w3, [x1]
USER(9f, strb w3, [x0] )
5: mov x0, #0
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
ret
ENDPROC(__copy_to_user)

16 changes: 16 additions & 0 deletions arch/arm64/mm/fault.c
@@ -30,9 +30,11 @@
#include <linux/highmem.h>
#include <linux/perf_event.h>

#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -223,6 +225,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}

/*
* PAN bit set implies the fault happened in kernel space, but not
* in the arch's user access functions.
*/
if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
goto no_context;

/*
* As per x86, we may deadlock here. However, since the kernel only
* validly references user space from well defined areas of the code,
@@ -536,3 +545,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,

return 0;
}

#ifdef CONFIG_ARM64_PAN
void cpu_enable_pan(void)
{
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
}
#endif /* CONFIG_ARM64_PAN */
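
cpu_enable_pan() clears SCTLR_EL1.SPAN ('Set PAN'), so the hardware sets
PSTATE.PAN on every exception taken to EL1 and the kernel therefore runs with
PAN enabled by default. A hedged sketch of what the config_sctlr_el1(clear,
set) helper from asm/sysreg.h amounts to (the function name here is
hypothetical, the operation a plain read-modify-write of SCTLR_EL1):

	/* Sketch only: clear then set the requested SCTLR_EL1 bits. */
	static inline void example_config_sctlr_el1(u32 clear, u32 set)
	{
		u32 val;

		asm volatile("mrs %0, sctlr_el1" : "=r" (val));
		val &= ~clear;		/* e.g. SCTLR_EL1_SPAN */
		val |= set;
		asm volatile("msr sctlr_el1, %0" : : "r" (val));
	}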
