Merge tag 'v4.9.32' into linux-4.9.x-unofficial_grsec
This is the 4.9.32 stable release
minipli committed Jun 14, 2017
2 parents c497a76 + 05afd4c commit 0b72e47
Showing 152 changed files with 1,026 additions and 589 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 31
+SUBLEVEL = 32
 EXTRAVERSION =
 NAME = Roaring Lionus

10 changes: 8 additions & 2 deletions arch/arm/kernel/setup.c
@@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup);
 extern void init_default_cache_policy(unsigned long);
 extern void paging_init(const struct machine_desc *desc);
 extern void early_paging_init(const struct machine_desc *);
-extern void sanity_check_meminfo(void);
+extern void adjust_lowmem_bounds(void);
 extern enum reboot_mode reboot_mode;
 extern void setup_dma_zone(const struct machine_desc *desc);

@@ -1099,8 +1099,14 @@ void __init setup_arch(char **cmdline_p)
 	setup_dma_zone(mdesc);
 	xen_early_init();
 	efi_init();
-	sanity_check_meminfo();
+	/*
+	 * Make sure the calculation for lowmem/highmem is set appropriately
+	 * before reserving/allocating any memory
+	 */
+	adjust_lowmem_bounds();
 	arm_memblock_init(mdesc);
+	/* Memory may have been removed so recalculate the bounds. */
+	adjust_lowmem_bounds();
 
 	early_ioremap_reset();

5 changes: 2 additions & 3 deletions arch/arm/kvm/init.S
@@ -95,16 +95,15 @@ __do_hyp_init:
 	@  - Write permission implies XN: disabled
 	@  - Instruction cache: enabled
 	@  - Data/Unified cache: enabled
-	@  - Memory alignment checks: enabled
 	@  - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
 	bic	r0, r0, r2
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
- ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
- THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
+ ARM(	ldr	r2, =(HSCTLR_M)					)
+ THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
3 changes: 3 additions & 0 deletions arch/arm/kvm/mmu.c
@@ -872,6 +872,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pmd_t *pmd;
 
 	pud = stage2_get_pud(kvm, cache, addr);
+	if (!pud)
+		return NULL;
+
 	if (stage2_pud_none(*pud)) {
 		if (!cache)
 			return NULL;
69 changes: 27 additions & 42 deletions arch/arm/mm/mmu.c
@@ -1193,13 +1193,12 @@ early_param("vmalloc", early_vmalloc);
 
 phys_addr_t arm_lowmem_limit __initdata = 0;
 
-void __init sanity_check_meminfo(void)
+void __init adjust_lowmem_bounds(void)
 {
 	phys_addr_t memblock_limit = 0;
-	int highmem = 0;
 	u64 vmalloc_limit;
 	struct memblock_region *reg;
-	bool should_use_highmem = false;
+	phys_addr_t lowmem_limit = 0;
 
 	/*
 	 * Let's use our own (unoptimized) equivalent of __pa() that is
@@ -1213,43 +1212,18 @@ void __init sanity_check_meminfo(void)
 	for_each_memblock(memory, reg) {
 		phys_addr_t block_start = reg->base;
 		phys_addr_t block_end = reg->base + reg->size;
-		phys_addr_t size_limit = reg->size;
 
-		if (reg->base >= vmalloc_limit)
-			highmem = 1;
-		else
-			size_limit = vmalloc_limit - reg->base;
-
-
-		if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
-
-			if (highmem) {
-				pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
-					  &block_start, &block_end);
-				memblock_remove(reg->base, reg->size);
-				should_use_highmem = true;
-				continue;
-			}
-
-			if (reg->size > size_limit) {
-				phys_addr_t overlap_size = reg->size - size_limit;
-
-				pr_notice("Truncating RAM at %pa-%pa",
-					  &block_start, &block_end);
-				block_end = vmalloc_limit;
-				pr_cont(" to -%pa", &block_end);
-				memblock_remove(vmalloc_limit, overlap_size);
-				should_use_highmem = true;
-			}
-		}
-
-		if (!highmem) {
-			if (block_end > arm_lowmem_limit) {
-				if (reg->size > size_limit)
-					arm_lowmem_limit = vmalloc_limit;
-				else
-					arm_lowmem_limit = block_end;
-			}
+		if (reg->base < vmalloc_limit) {
+			if (block_end > lowmem_limit)
+				/*
+				 * Compare as u64 to ensure vmalloc_limit does
+				 * not get truncated. block_end should always
+				 * fit in phys_addr_t so there should be no
+				 * issue with assignment.
+				 */
+				lowmem_limit = min_t(u64,
+						     vmalloc_limit,
+						     block_end);
 
 			/*
 			 * Find the first non-pmd-aligned page, and point
@@ -1268,14 +1242,13 @@ void __init sanity_check_meminfo(void)
 				if (!IS_ALIGNED(block_start, PMD_SIZE))
 					memblock_limit = block_start;
 				else if (!IS_ALIGNED(block_end, PMD_SIZE))
-					memblock_limit = arm_lowmem_limit;
+					memblock_limit = lowmem_limit;
 			}
 
 		}
 	}
 
-	if (should_use_highmem)
-		pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+	arm_lowmem_limit = lowmem_limit;
 
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
 
@@ -1289,6 +1262,18 @@ void __init sanity_check_meminfo(void)
 	if (!memblock_limit)
 		memblock_limit = arm_lowmem_limit;
 
+	if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
+		if (memblock_end_of_DRAM() > arm_lowmem_limit) {
+			phys_addr_t end = memblock_end_of_DRAM();
+
+			pr_notice("Ignoring RAM at %pa-%pa\n",
+				  &memblock_limit, &end);
+			pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+
+			memblock_remove(memblock_limit, end - memblock_limit);
+		}
+	}
+
 	memblock_set_current_limit(memblock_limit);
 }

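Note: the min_t(u64, ...) added above guards a real 32-bit pitfall. Without CONFIG_ARM_LPAE, phys_addr_t is 32 bits wide, while vmalloc_limit is computed as a u64 and can legitimately equal 4 GiB. A standalone sketch of the hazard (toy code, not from the kernel; min_t simplified to a cast-and-compare macro):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t phys_addr_t;             /* non-LPAE ARM: 32-bit physical addresses */

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	uint64_t vmalloc_limit = 1ULL << 32;  /* 4 GiB: needs more than 32 bits */
	phys_addr_t block_end = 0xc0000000;   /* 3 GiB */

	/* Comparing in phys_addr_t truncates 1ULL << 32 to 0, so the
	 * "limit" collapses to 0 and lowmem would vanish. */
	printf("%#x\n", min_t(phys_addr_t, vmalloc_limit, block_end));

	/* Comparing in u64 first, as the patch does, keeps the real value;
	 * the result is bounded by block_end, so assigning it back to a
	 * phys_addr_t is safe. */
	printf("%#llx\n", (unsigned long long)min_t(uint64_t, vmalloc_limit, block_end));
	return 0;
}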
8 changes: 4 additions & 4 deletions arch/arm/mm/nommu.c
@@ -85,7 +85,7 @@ static unsigned long irbar_read(void)
 }
 
 /* MPU initialisation functions */
-void __init sanity_check_meminfo_mpu(void)
+void __init adjust_lowmem_bounds_mpu(void)
 {
 	phys_addr_t phys_offset = PHYS_OFFSET;
 	phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
@@ -274,7 +274,7 @@ void __init mpu_setup(void)
 	}
 }
 #else
-static void sanity_check_meminfo_mpu(void) {}
+static void adjust_lowmem_bounds_mpu(void) {}
 static void __init mpu_setup(void) {}
 #endif /* CONFIG_ARM_MPU */

@@ -295,10 +295,10 @@ void __init arm_mm_memblock_reserve(void)
 #endif
 }
 
-void __init sanity_check_meminfo(void)
+void __init adjust_lowmem_bounds(void)
 {
 	phys_addr_t end;
-	sanity_check_meminfo_mpu();
+	adjust_lowmem_bounds_mpu();
 	end = memblock_end_of_DRAM();
 	high_memory = __va(end - 1) + 1;
 	memblock_set_current_limit(end);
13 changes: 13 additions & 0 deletions arch/arm64/include/asm/asm-uaccess.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_ASM_UACCESS_H
+#define __ASM_ASM_UACCESS_H
+
+/*
+ * Remove the address tag from a virtual address, if present.
+ */
+	.macro	clear_address_tag, dst, addr
+	tst	\addr, #(1 << 55)
+	bic	\dst, \addr, #(0xff << 56)
+	csel	\dst, \dst, \addr, eq
+	.endm
+
+#endif
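Note: for readers who don't speak AArch64 assembly, here is a rough C model of what the macro computes (a sketch, not kernel code). The tst/csel pair makes the bic result conditional on bit 55, which distinguishes TTBR0 (user) from TTBR1 (kernel) addresses, so the top-byte tag is only stripped from user pointers:

#include <stdint.h>

/* C model of clear_address_tag: strip the tag byte (bits 63:56) only
 * when bit 55 is clear, i.e. only for user-half addresses; kernel
 * addresses pass through untouched. */
static uint64_t clear_address_tag(uint64_t addr)
{
	uint64_t untagged = addr & ~(0xffULL << 56);	/* bic */

	return (addr & (1ULL << 55)) ? addr : untagged;	/* tst + csel */
}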
4 changes: 4 additions & 0 deletions arch/arm64/include/asm/sysreg.h
@@ -94,6 +94,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
 
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+			 (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
6 changes: 3 additions & 3 deletions arch/arm64/include/asm/uaccess.h
@@ -105,9 +105,9 @@ static inline void set_fs(mm_segment_t fs)
 })
 
 /*
- * When dealing with data aborts or instruction traps we may end up with
- * a tagged userland pointer. Clear the tag to get a sane pointer to pass
- * on to access_ok(), for instance.
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
 */
 #define untagged_addr(addr)		sign_extend64(addr, 55)
 
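Note: sign_extend64(addr, 55) does the same job on the C side: shifting the tag byte out and arithmetic-shifting back replicates bit 55 across bits 63:56, so user pointers (bit 55 clear) lose their tag while kernel pointers keep their all-ones top bits. A minimal sketch, with sign_extend64 re-implemented here to match the semantics of the kernel helper in linux/bitops.h:

#include <stdint.h>

/* Same semantics as the kernel's sign_extend64(value, index). */
static inline int64_t sign_extend64(uint64_t value, int index)
{
	int shift = 63 - index;

	return (int64_t)(value << shift) >> shift;
}

/* e.g. a user pointer carrying tag 0x5a in its top byte:
 *   sign_extend64(0x5a000000deadbeefULL, 55) == 0x00000000deadbeef
 * while a kernel pointer (bit 55 set) keeps its 0xff top byte. */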
6 changes: 4 additions & 2 deletions arch/arm64/kernel/entry.S
@@ -30,6 +30,7 @@
 #include <asm/irq.h>
 #include <asm/memory.h>
 #include <asm/thread_info.h>
+#include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 
 /*
@@ -369,12 +370,13 @@ el1_da:
 	/*
 	 * Data abort handling
 	 */
-	mrs	x0, far_el1
+	mrs	x3, far_el1
 	enable_dbg
 	// re-enable interrupts if they were enabled in the aborted context
 	tbnz	x23, #7, 1f			// PSR_I_BIT
 	enable_irq
 1:
+	clear_address_tag x0, x3
 	mov	x2, sp				// struct pt_regs
 	bl	do_mem_abort
 
@@ -535,7 +537,7 @@ el0_da:
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
 	ct_user_exit
-	bic	x0, x26, #(0xff << 56)
+	clear_address_tag x0, x26
 	mov	x1, x25
 	mov	x2, sp
 	bl	do_mem_abort
3 changes: 2 additions & 1 deletion arch/arm64/kernel/hw_breakpoint.c
@@ -36,6 +36,7 @@
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
+#include <asm/uaccess.h>
 
 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -696,7 +697,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
 
 		/* Check if the watchpoint value matches. */
 		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
-		if (val != (addr & ~alignment_mask))
+		if (val != (untagged_addr(addr) & ~alignment_mask))
 			goto unlock;
 
 		/* Possible match, check the byte address select to confirm. */
4 changes: 2 additions & 2 deletions arch/arm64/kernel/traps.c
@@ -435,7 +435,7 @@ int cpu_enable_cache_maint_trap(void *__unused)
 }
 
 #define __user_cache_maint(insn, address, res)			\
-	if (untagged_addr(address) >= user_addr_max())		\
+	if (address >= user_addr_max())				\
 		res = -EFAULT;					\
 	else							\
 		asm volatile (					\
@@ -458,7 +458,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 	int ret = 0;
 
-	address = (rt == 31) ? 0 : regs->regs[rt];
+	address = (rt == 31) ? 0 : untagged_addr(regs->regs[rt]);
 
 	switch (crm) {
 	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
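Note: the point of moving untagged_addr() out of the macro and into user_cache_maint_handler() is ordering: the address is untagged once, where it is read out of the trapped register, so the user_addr_max() bound check and the emulated cache op agree on the same pointer. A simplified model of the fixed flow (hypothetical helper names, not the kernel's API):

#include <stdint.h>

/* sign-extend from bit 55, as untagged_addr() does */
static inline uint64_t untag(uint64_t a)
{
	return (uint64_t)(((int64_t)(a << 8)) >> 8);
}

/* do_cache_op stands in for the inline asm in __user_cache_maint. */
static int emulate_cache_maint(uint64_t reg_val, uint64_t user_max,
			       int (*do_cache_op)(uint64_t))
{
	uint64_t address = untag(reg_val);	/* strip the tag exactly once */

	if (address >= user_max)		/* check the untagged address... */
		return -14;			/* -EFAULT */
	return do_cache_op(address);		/* ...and operate on the same one */
}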
11 changes: 7 additions & 4 deletions arch/arm64/kvm/hyp-init.S
@@ -102,10 +102,13 @@ __do_hyp_init:
 	tlbi	alle2
 	dsb	sy
 
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
 
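Note: plugging in the SCTLR_EL2_RES1 definition from the sysreg.h hunk above, the constant the ldr loads on a little-endian system can be checked by hand. The bit positions for C, SA and I below are quoted from the same header (not shown in this hunk); this is a worked check under those assumed values, not kernel code:

#include <stdio.h>

#define SCTLR_ELx_M	(1 << 0)
#define SCTLR_ELx_A	(1 << 1)
#define SCTLR_ELx_C	(1 << 2)
#define SCTLR_ELx_SA	(1 << 3)
#define SCTLR_ELx_I	(1 << 12)

#define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
			 SCTLR_ELx_SA | SCTLR_ELx_I)

#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
			 (1 << 28) | (1 << 29))

int main(void)
{
	/* RES1 = 0x30c50830; FLAGS & ~A = M|C|SA|I = 0x100d; the A
	 * (alignment-check) bit stays clear, as the comment explains. */
	printf("%#x\n", SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A));	/* 0x30c5183d */
	return 0;
}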
14 changes: 14 additions & 0 deletions arch/powerpc/include/asm/topology.h
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 
+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be, except bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
 #else
 
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
3 changes: 3 additions & 0 deletions arch/powerpc/kernel/process.c
@@ -1659,6 +1659,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
 #endif
+	current->thread.load_fp = 0;
 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
 	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1667,6 +1668,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.vr_save_area = NULL;
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
+	current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1678,6 +1680,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.tm_tfhar = 0;
 	current->thread.tm_texasr = 0;
 	current->thread.tm_tfiar = 0;
+	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
4 changes: 2 additions & 2 deletions arch/powerpc/kernel/setup_64.c
@@ -595,7 +595,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
 				    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -606,7 +606,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	if (cpu_to_node(from) == cpu_to_node(to))
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
 		return LOCAL_DISTANCE;
 	else
 		return REMOTE_DISTANCE;
