Merge tag 'v4.9.74' into linux-4.9.x-unofficial_grsec
This is the 4.9.74 stable release

Signed-off-by: Mathias Krause <minipli@googlemail.com>

Conflicts:
	arch/x86/Kconfig
	arch/x86/include/asm/mmu_context.h
	arch/x86/kernel/cpu/common.c
	arch/x86/mm/tlb.c
	drivers/usb/usbip/vhci_rx.c
minipli committed Jan 3, 2018
2 parents 244c130 + 07bcb24 commit da56f7b
Showing 94 changed files with 537 additions and 437 deletions.
Makefile: 4 additions, 1 deletion
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 73
+SUBLEVEL = 74
EXTRAVERSION =
NAME = Roaring Lionus

@@ -790,6 +790,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)

+# Make sure -fstack-check isn't enabled (like gentoo apparently did)
+KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
+
# conserve stack if available
KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)

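For context on the -fno-stack-check guard above: with -fstack-check, gcc emits probe writes ahead of large stack-frame allocations so that a guard page faults early. Kernel stacks are small fixed-size allocations with no guard page below them, so those probes can land outside the valid stack. A minimal userspace illustration (hypothetical file name; compare the generated assembly with and without the flag):

```c
/* big_frame.c - illustration only, not kernel code.
 * Compare: gcc -S -O2 -fstack-check big_frame.c
 * against: gcc -S -O2 -fno-stack-check big_frame.c
 * The first variant emits extra stack-probe writes before allocating
 * the large frame; in-kernel, such probes may touch memory below the
 * fixed-size stack, which is why the Makefile forces the flag off. */
void consume(char *buf);

void big_frame(void)
{
	char buf[64 * 1024];	/* far larger than one 4 KiB page */

	consume(buf);		/* keep buf live so the frame isn't elided */
}
```
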
arch/x86/Kconfig: 1 addition, 1 deletion
@@ -45,7 +45,7 @@ config X86
select ARCH_USE_CMPXCHG_LOCKREF if X86_64
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
-select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
+select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_IPC_PARSE_VERSION if X86_32
select BUILDTIME_EXTABLE_SORT
arch/x86/include/asm/disabled-features.h: 3 additions, 1 deletion
@@ -21,11 +21,13 @@
# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
+# define DISABLE_PCID 0
#else
# define DISABLE_VME 0
# define DISABLE_K6_MTRR 0
# define DISABLE_CYRIX_ARR 0
# define DISABLE_CENTAUR_MCR 0
+# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
@@ -43,7 +45,7 @@
#define DISABLED_MASK1 0
#define DISABLED_MASK2 0
#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
-#define DISABLED_MASK4 0
+#define DISABLED_MASK4 (DISABLE_PCID)
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
#define DISABLED_MASK7 0
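The mask above is plain bit arithmetic: assuming the 4.9 cpufeatures.h layout, where X86_FEATURE_PCID is capability word 4, bit 17 (CPUID leaf 1, ECX), DISABLE_PCID selects that bit inside DISABLED_MASK4, which lets cpu_has() checks fold away at compile time on 32-bit builds. A standalone sketch of the arithmetic:

```c
#include <stdio.h>

/* Sketch of the disabled-features bit math, assuming the 4.9 layout
 * where X86_FEATURE_PCID = 4*32 + 17 (capability word 4, bit 17,
 * i.e. CPUID.01H:ECX bit 17). DISABLED_MASK4 covers word 4, so a
 * cpu_has(X86_FEATURE_PCID) test compiles to constant "false" when
 * this bit is set in the mask. */
#define X86_FEATURE_PCID	(4*32 + 17)
#define DISABLE_PCID		(1u << (X86_FEATURE_PCID & 31))

int main(void)
{
	printf("word %d, bit %d, DISABLE_PCID = 0x%08x\n",
	       X86_FEATURE_PCID / 32, X86_FEATURE_PCID & 31, DISABLE_PCID);
	return 0;	/* prints: word 4, bit 17, DISABLE_PCID = 0x00020000 */
}
```
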
arch/x86/include/asm/hardirq.h: 1 addition, 1 deletion
@@ -22,8 +22,8 @@ typedef struct {
#ifdef CONFIG_SMP
unsigned int irq_resched_count;
unsigned int irq_call_count;
-unsigned int irq_tlb_count;
#endif
+unsigned int irq_tlb_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
unsigned int irq_thermal_count;
#endif
arch/x86/include/asm/mmu.h: 0 additions, 6 deletions
@@ -45,12 +45,6 @@ typedef struct {
#endif
} mm_context_t;

-#ifdef CONFIG_SMP
void leave_mm(int cpu);
-#else
-static inline void leave_mm(int cpu)
-{
-}
-#endif

#endif /* _ASM_X86_MMU_H */
arch/x86/include/asm/mmu_context.h: 0 additions, 2 deletions
@@ -130,10 +130,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
}
#endif

-#ifdef CONFIG_SMP
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
}

static inline int init_new_context(struct task_struct *tsk,
arch/x86/include/asm/tlbflush.h: 13 additions, 86 deletions
@@ -7,6 +7,7 @@
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
+#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
unsigned long type)
@@ -65,10 +66,8 @@ static inline void invpcid_flush_all_nonglobals(void)
#endif

struct tlb_state {
-#ifdef CONFIG_SMP
struct mm_struct *active_mm;
int state;
-#endif

/*
* Access to this CR4 shadow and to H/W CR4 is protected by
@@ -248,6 +247,14 @@ static inline void __flush_tlb_all(void)
__flush_tlb_global();
else
__flush_tlb();
+
+/*
+* Note: if we somehow had PCID but not PGE, then this wouldn't work --
+* we'd end up flushing kernel translations for the current ASID but
+* we might fail to flush kernel translations for other cached ASIDs.
+*
+* To avoid this issue, we force PCID off if PGE is off.
+*/
}

static inline void __flush_tlb_one(unsigned long addr)
@@ -261,7 +268,6 @@ static inline void __flush_tlb_one(unsigned long addr)
/*
* TLB flushing:
*
-* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
@@ -273,84 +279,6 @@ static inline void __flush_tlb_one(unsigned long addr)
* and page-granular flushes are available only on i486 and up.
*/

-#ifndef CONFIG_SMP
-
-/* "_up" is for UniProcessor.
-*
-* This is a helper for other header functions. *Not* intended to be called
-* directly. All global TLB flushes need to either call this, or to bump the
-* vm statistics themselves.
-*/
-static inline void __flush_tlb_up(void)
-{
-count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-__flush_tlb();
-}
-
-static inline void flush_tlb_all(void)
-{
-count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
-__flush_tlb_all();
-}
-
-static inline void flush_tlb(void)
-{
-__flush_tlb_up();
-}
-
-static inline void local_flush_tlb(void)
-{
-__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-if (mm == current->active_mm)
-__flush_tlb_up();
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-unsigned long addr)
-{
-if (vma->vm_mm == current->active_mm)
-__flush_tlb_one(addr);
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-unsigned long start, unsigned long end)
-{
-if (vma->vm_mm == current->active_mm)
-__flush_tlb_up();
-}
-
-static inline void flush_tlb_mm_range(struct mm_struct *mm,
-unsigned long start, unsigned long end, unsigned long vmflag)
-{
-if (mm == current->active_mm)
-__flush_tlb_up();
-}
-
-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
-struct mm_struct *mm,
-unsigned long start,
-unsigned long end)
-{
-}
-
-static inline void reset_lazy_tlbstate(void)
-{
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-unsigned long end)
-{
-flush_tlb_all();
-}
-
-#else /* SMP */
-
-#include <asm/smp.h>
-
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
Expand All @@ -359,13 +287,14 @@ static inline void flush_tlb_kernel_range(unsigned long start,
flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
-extern void flush_tlb_current_task(void);
-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

-#define flush_tlb() flush_tlb_current_task()
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
+{
+flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
+}

void native_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
@@ -380,8 +309,6 @@ static inline void reset_lazy_tlbstate(void)
this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}

-#endif /* SMP */
-
#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, mm, start, end) \
native_flush_tlb_others(mask, mm, start, end)
arch/x86/kernel/cpu/bugs.c: 8 additions, 0 deletions
@@ -19,6 +19,14 @@

void __init check_bugs(void)
{
+#ifdef CONFIG_X86_32
+/*
+* Regardless of whether PCID is enumerated, the SDM says
+* that it can't be enabled in 32-bit mode.
+*/
+setup_clear_cpu_cap(X86_FEATURE_PCID);
+#endif
+
identify_boot_cpu();
#ifndef CONFIG_SMP
pr_info("CPU: ");
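The capability cleared here corresponds to CPUID.01H:ECX bit 17; per the SDM, CR4.PCIDE can only be set in long mode, hence the unconditional clearing on 32-bit kernels. A userspace probe for the enumeration bit, as a rough illustration (using the compiler-provided cpuid.h):

```c
#include <cpuid.h>
#include <stdio.h>

/* Userspace peek at the bit this hunk masks off: PCID support is
 * enumerated in CPUID.01H:ECX bit 17. Enumeration alone is not enough
 * for a 32-bit kernel, since CR4.PCIDE may only be set in long mode. */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;	/* CPUID leaf 1 unavailable */
	printf("PCID %senumerated by this CPU\n",
	       (ecx & (1u << 17)) ? "" : "not ");
	return 0;
}
```
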
arch/x86/kernel/cpu/common.c: 30 additions, 5 deletions
@@ -306,6 +306,14 @@ early_param("pax_nouderef", setup_pax_nouderef);
#ifdef CONFIG_X86_64
static __init int setup_disable_pcid(char *arg)
{
+/* require an exact match without trailing characters */
+if (strlen(arg))
+return 0;
+
+/* do not emit a message if the feature is not present */
+if (!boot_cpu_has(X86_FEATURE_PCID))
+return 1;
+
setup_clear_cpu_cap(X86_FEATURE_PCID);
setup_clear_cpu_cap(X86_FEATURE_INVPCID);

@@ -314,15 +322,34 @@ static __init int setup_disable_pcid(char *arg)
pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
#endif

+pr_info("nopcid: PCID feature disabled\n");
return 1;
}
__setup("nopcid", setup_disable_pcid);
#endif

+static void setup_pcid(struct cpuinfo_x86 *c)
+{
+if (cpu_has(c, X86_FEATURE_PCID)) {
+if (cpu_has(c, X86_FEATURE_PGE)) {
+cr4_set_bits(X86_CR4_PCIDE);
+} else {
+/*
+* flush_tlb_all(), as currently implemented, won't
+* work if PCID is on but PGE is not. Since that
+* combination doesn't exist on real hardware, there's
+* no reason to try to fully support it, but it's
+* polite to avoid corrupting data if we're on
+* an improperly configured VM.
+*/
+clear_cpu_cap(c, X86_FEATURE_PCID);
+}
+}
+
#ifdef CONFIG_X86_64
if (cpu_has(c, X86_FEATURE_PCID)) {
-printk("PAX: PCID detected\n");
cr4_set_bits(X86_CR4_PCIDE);
+BUG_ON(!(__read_cr4() & X86_CR4_PCIDE));
} else
clear_cpu_cap(c, X86_FEATURE_INVPCID);

Expand Down Expand Up @@ -355,9 +382,8 @@ static void setup_pcid(struct cpuinfo_x86 *c)
printk("PAX: strong UDEREF enabled\n");
}
#endif
-
-}
#endif
+}

/*
* Protection Keys are not available in 32-bit mode.
@@ -1129,9 +1155,8 @@ static void identify_cpu(struct cpuinfo_x86 *c)
#endif
#endif

-#ifdef CONFIG_X86_64
+/* Set up PCID */
setup_pcid(c);
-#endif

/*
* The vendor-specific functions might have changed features.
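The strlen() check added to setup_disable_pcid() exists because __setup() handlers receive only the remainder of the boot parameter after the registered prefix: "nopcid" arrives as an empty string, while "nopcidfoo" arrives as "foo", and returning 0 rejects the partial match. A userspace sketch of that prefix-stripping convention (helper names here are hypothetical):

```c
#include <stdio.h>
#include <string.h>

/* Sketch of why the strlen(arg) test gives an exact match: the
 * kernel's __setup() machinery strips the registered prefix
 * ("nopcid") and passes the handler the remainder. Names are
 * hypothetical, for illustration only. */
static int handle_nopcid(const char *arg)
{
	if (strlen(arg))
		return 0;	/* trailing characters: not our option */
	return 1;		/* exact "nopcid": consume the parameter */
}

int main(void)
{
	const char *params[] = { "nopcid", "nopcidfoo" };
	const char *prefix = "nopcid";

	for (int i = 0; i < 2; i++) {
		const char *rest = params[i] + strlen(prefix);
		printf("%-10s -> handler(\"%s\") = %d\n",
		       params[i], rest, handle_nopcid(rest));
	}
	return 0;
}
```
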
arch/x86/kernel/reboot.c: 4 additions, 0 deletions
@@ -111,6 +111,10 @@ void __noreturn machine_real_restart(unsigned int type)
load_cr3(initial_page_table);
#else
write_cr3(real_mode_header->trampoline_pgd);
+
+/* Exiting long mode will fail if CR4.PCIDE is set. */
+if (static_cpu_has(X86_FEATURE_PCID))
+cr4_clear_bits(X86_CR4_PCIDE);
#endif

/* Jump to the identity-mapped low memory code */
arch/x86/kernel/smpboot.c: 0 additions, 9 deletions
@@ -115,25 +115,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
spin_lock_irqsave(&rtc_lock, flags);
CMOS_WRITE(0xa, 0xf);
spin_unlock_irqrestore(&rtc_lock, flags);
-local_flush_tlb();
-pr_debug("1.\n");
*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
start_eip >> 4;
pr_debug("2.\n");
*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
start_eip & 0xf;
pr_debug("3.\n");
}

static inline void smpboot_restore_warm_reset_vector(void)
{
unsigned long flags;

-/*
-* Install writable page 0 entry to set BIOS data area.
-*/
-local_flush_tlb();
-
/*
* Paranoid: Set warm reset code and vector here back
* to default values.
arch/x86/kernel/vm86_32.c: 1 addition, 1 deletion
@@ -189,7 +189,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
pte_unmap_unlock(pte, ptl);
out:
up_write(&mm->mmap_sem);
-flush_tlb();
+flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
}


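The replacement flush is scoped to the region mark_screen_rdonly() actually modifies; assuming 4 KiB pages, 32 pages from 0xA0000 span exactly the 128 KiB legacy VGA window. A quick check of the arithmetic:

```c
#include <stdio.h>

/* Sanity-check the flushed span, assuming 4 KiB pages: 32 pages from
 * 0xA0000 end at 0xC0000 (exclusive), i.e. the 128 KiB legacy VGA
 * window whose PTEs were just made read-only. */
int main(void)
{
	unsigned long start = 0xA0000UL, pages = 32, page_size = 4096;

	printf("flush [0x%lx, 0x%lx)\n", start, start + pages * page_size);
	return 0;	/* prints: flush [0xa0000, 0xc0000) */
}
```
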
arch/x86/mm/init.c: 0 additions, 2 deletions
@@ -834,10 +834,8 @@ void __init zone_sizes_init(void)
}

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
-#ifdef CONFIG_SMP
.active_mm = &init_mm,
.state = 0,
-#endif
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
};
EXPORT_SYMBOL_GPL(cpu_tlbstate);
(Diff truncated: the remaining 81 of 94 changed files are not shown.)
