Merge tag 'v4.9.51' into linux-4.9.x-unofficial_grsec
This is the 4.9.51 stable release

Signed-off-by: Mathias Krause <minipli@googlemail.com>

Conflicts:
	arch/x86/kernel/process_64.c
minipli committed Sep 20, 2017
2 parents de4a808 + 089d772 commit 154063b
Showing 79 changed files with 1,245 additions and 668 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 50
+SUBLEVEL = 51
 EXTRAVERSION =
 NAME = Roaring Lionus
5 changes: 3 additions & 2 deletions arch/x86/include/asm/elf.h
@@ -201,6 +201,7 @@ void set_personality_ia32(bool);
 
 #define ELF_CORE_COPY_REGS(pr_reg, regs)	\
 do {						\
+	unsigned long base;			\
 	unsigned v;				\
 	(pr_reg)[0] = (regs)->r15;		\
 	(pr_reg)[1] = (regs)->r14;		\
@@ -223,8 +224,8 @@ do {						\
 	(pr_reg)[18] = (regs)->flags;		\
 	(pr_reg)[19] = (regs)->sp;		\
 	(pr_reg)[20] = (regs)->ss;		\
-	(pr_reg)[21] = current->thread.fsbase;	\
-	(pr_reg)[22] = current->thread.gsbase;	\
+	rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base;		\
+	rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base;	\
 	asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v;	\
 	asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v;	\
 	asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v;	\
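Why this hunk matters: with the lazy FS/GS tracking introduced below in process_64.c, the cached current->thread.fsbase/gsbase can be stale, so the core-dump macro now reads MSR_FS_BASE and MSR_KERNEL_GS_BASE directly. A minimal userspace sketch (illustrative only, not part of this commit) of why the GS base is live per-thread state that a core dump must capture:

/* Userspace illustration (not part of the patch): the GS base is real
 * per-thread state that user code sets with arch_prctl(); a core dump
 * must capture the live value, which is why ELF_CORE_COPY_REGS now does
 * rdmsrl() instead of trusting the cached thread.gsbase. */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/prctl.h>		/* ARCH_SET_GS, ARCH_GET_GS */

int main(void)
{
	unsigned long value = 0xdeadbeef, loaded, base = 0;

	/* Point the user GS base at 'value' (GS is unused by glibc). */
	if (syscall(SYS_arch_prctl, ARCH_SET_GS, (unsigned long)&value)) {
		perror("arch_prctl");
		return 1;
	}

	asm volatile("movq %%gs:0, %0" : "=r" (loaded));	/* read via GS */
	syscall(SYS_arch_prctl, ARCH_GET_GS, &base);

	printf("gs base=%#lx, *gs:0=%#lx\n", base, loaded);
	return 0;
}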
236 changes: 131 additions & 105 deletions arch/x86/kernel/process_64.c
@@ -137,6 +137,123 @@ void release_thread(struct task_struct *dead_task)
 	}
 }
 
+enum which_selector {
+	FS,
+	GS
+};
+
+/*
+ * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
+ * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
+ * It's forcibly inlined because it'll generate better code and this function
+ * is hot.
+ */
+static __always_inline void save_base_legacy(struct task_struct *prev_p,
+					     unsigned short selector,
+					     enum which_selector which)
+{
+	if (likely(selector == 0)) {
+		/*
+		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
+		 * be the pre-existing saved base or it could be zero. On AMD
+		 * (with X86_BUG_NULL_SEG), the segment base could be almost
+		 * anything.
+		 *
+		 * This branch is very hot (it's hit twice on almost every
+		 * context switch between 64-bit programs), and avoiding
+		 * the RDMSR helps a lot, so we just assume that whatever
+		 * value is already saved is correct. This matches historical
+		 * Linux behavior, so it won't break existing applications.
+		 *
+		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
+		 * report that the base is zero, it needs to actually be zero:
+		 * see the corresponding logic in load_seg_legacy.
+		 */
+	} else {
+		/*
+		 * If the selector is 1, 2, or 3, then the base is zero on
+		 * !X86_BUG_NULL_SEG CPUs and could be anything on
+		 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
+		 * has never attempted to preserve the base across context
+		 * switches.
+		 *
+		 * If selector > 3, then it refers to a real segment, and
+		 * saving the base isn't necessary.
+		 */
+		if (which == FS)
+			prev_p->thread.fsbase = 0;
+		else
+			prev_p->thread.gsbase = 0;
+	}
+}
+
+static __always_inline void save_fsgs(struct task_struct *task)
+{
+	savesegment(fs, task->thread.fsindex);
+	savesegment(gs, task->thread.gsindex);
+	save_base_legacy(task, task->thread.fsindex, FS);
+	save_base_legacy(task, task->thread.gsindex, GS);
+}
+
+static __always_inline void loadseg(enum which_selector which,
+				    unsigned short sel)
+{
+	if (which == FS)
+		loadsegment(fs, sel);
+	else
+		load_gs_index(sel);
+}
+
+static __always_inline void load_seg_legacy(unsigned short prev_index,
+					    unsigned long prev_base,
+					    unsigned short next_index,
+					    unsigned long next_base,
+					    enum which_selector which)
+{
+	if (likely(next_index <= 3)) {
+		/*
+		 * The next task is using 64-bit TLS, is not using this
+		 * segment at all, or is having fun with arcane CPU features.
+		 */
+		if (next_base == 0) {
+			/*
+			 * Nasty case: on AMD CPUs, we need to forcibly zero
+			 * the base.
+			 */
+			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
+				loadseg(which, __USER_DS);
+				loadseg(which, next_index);
+			} else {
+				/*
+				 * We could try to exhaustively detect cases
+				 * under which we can skip the segment load,
+				 * but there's really only one case that matters
+				 * for performance: if both the previous and
+				 * next states are fully zeroed, we can skip
+				 * the load.
+				 *
+				 * (This assumes that prev_base == 0 has no
+				 * false positives. This is the case on
+				 * Intel-style CPUs.)
+				 */
+				if (likely(prev_index | next_index | prev_base))
+					loadseg(which, next_index);
+			}
+		} else {
+			if (prev_index != next_index)
+				loadseg(which, next_index);
+			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
+			       next_base);
+		}
+	} else {
+		/*
+		 * The next task is using a real segment. Loading the selector
+		 * is sufficient.
+		 */
+		loadseg(which, next_index);
+	}
+}
+
 int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 		unsigned long arg, struct task_struct *p, unsigned long tls)
 {
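A note on the `prev_index | next_index | prev_base` test in load_seg_legacy above: OR-ing the three values yields zero only when every one of them is zero, so a single branch implements "skip the segment load only in the fully-zeroed fast path". A standalone sketch of the idiom (illustrative only, not part of this commit):

/* Illustration of the single-branch zero test used in load_seg_legacy:
 * the OR is zero iff all inputs are zero, so one branch covers
 * "anything nonzero -> reload the segment". */
#include <assert.h>

static int need_segment_load(unsigned short prev_index,
			     unsigned short next_index,
			     unsigned long prev_base)
{
	return (prev_index | next_index | prev_base) != 0;
}

int main(void)
{
	assert(!need_segment_load(0, 0, 0));		/* fully zeroed: skip */
	assert(need_segment_load(0, 0, 0x1000));	/* stale base: reload */
	assert(need_segment_load(3, 0, 0));		/* selector set: reload */
	return 0;
}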
@@ -220,10 +337,19 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
 		    unsigned long new_sp,
 		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
 {
+	WARN_ON_ONCE(regs != current_pt_regs());
+
+	if (static_cpu_has(X86_BUG_NULL_SEG)) {
+		/* Loading zero below won't clear the base. */
+		loadsegment(fs, __USER_DS);
+		load_gs_index(__USER_DS);
+	}
+
 	loadsegment(fs, 0);
 	loadsegment(es, _ds);
 	loadsegment(ds, _ds);
 	load_gs_index(0);
+
 	regs->ip = new_ip;
 	regs->sp = new_sp;
 	regs->cs = _cs;
@@ -268,7 +394,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
 	struct tss_struct *tss = cpu_tss + cpu;
-	unsigned prev_fsindex, prev_gsindex;
 	fpu_switch_t fpu_switch;
 
 	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
@@ -278,8 +403,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 *
 	 * (e.g. xen_load_tls())
 	 */
-	savesegment(fs, prev_fsindex);
-	savesegment(gs, prev_gsindex);
+	save_fsgs(prev_p);
 
 	/*
 	 * Load TLS before restoring any segments so that segment loads
@@ -322,108 +446,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (unlikely(next->ss != prev->ss))
 		loadsegment(ss, next->ss);
 
-	/*
-	 * Switch FS and GS.
-	 *
-	 * These are even more complicated than DS and ES: they have
-	 * 64-bit bases are that controlled by arch_prctl. The bases
-	 * don't necessarily match the selectors, as user code can do
-	 * any number of things to cause them to be inconsistent.
-	 *
-	 * We don't promise to preserve the bases if the selectors are
-	 * nonzero. We also don't promise to preserve the base if the
-	 * selector is zero and the base doesn't match whatever was
-	 * most recently passed to ARCH_SET_FS/GS. (If/when the
-	 * FSGSBASE instructions are enabled, we'll need to offer
-	 * stronger guarantees.)
-	 *
-	 * As an invariant,
-	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
-	 * impossible.
-	 */
-	if (next->fsindex) {
-		/* Loading a nonzero value into FS sets the index and base. */
-		loadsegment(fs, next->fsindex);
-	} else {
-		if (next->fsbase) {
-			/* Next index is zero but next base is nonzero. */
-			if (prev_fsindex)
-				loadsegment(fs, 0);
-			wrmsrl(MSR_FS_BASE, next->fsbase);
-		} else {
-			/* Next base and index are both zero. */
-			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
-				/*
-				 * We don't know the previous base and can't
-				 * find out without RDMSR. Forcibly clear it.
-				 */
-				loadsegment(fs, __USER_DS);
-				loadsegment(fs, 0);
-			} else {
-				/*
-				 * If the previous index is zero and ARCH_SET_FS
-				 * didn't change the base, then the base is
-				 * also zero and we don't need to do anything.
-				 */
-				if (prev->fsbase || prev_fsindex)
-					loadsegment(fs, 0);
-			}
-		}
-	}
-	/*
-	 * Save the old state and preserve the invariant.
-	 * NB: if prev_fsindex == 0, then we can't reliably learn the base
-	 * without RDMSR because Intel user code can zero it without telling
-	 * us and AMD user code can program any 32-bit value without telling
-	 * us.
-	 */
-	if (prev_fsindex)
-		prev->fsbase = 0;
-	prev->fsindex = prev_fsindex;
-
-	if (next->gsindex) {
-		/* Loading a nonzero value into GS sets the index and base. */
-		load_gs_index(next->gsindex);
-	} else {
-		if (next->gsbase) {
-			/* Next index is zero but next base is nonzero. */
-			if (prev_gsindex)
-				load_gs_index(0);
-			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
-		} else {
-			/* Next base and index are both zero. */
-			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
-				/*
-				 * We don't know the previous base and can't
-				 * find out without RDMSR. Forcibly clear it.
-				 *
-				 * This contains a pointless SWAPGS pair.
-				 * Fixing it would involve an explicit check
-				 * for Xen or a new pvop.
-				 */
-				load_gs_index(__USER_DS);
-				load_gs_index(0);
-			} else {
-				/*
-				 * If the previous index is zero and ARCH_SET_GS
-				 * didn't change the base, then the base is
-				 * also zero and we don't need to do anything.
-				 */
-				if (prev->gsbase || prev_gsindex)
-					load_gs_index(0);
-			}
-		}
-	}
-	/*
-	 * Save the old state and preserve the invariant.
-	 * NB: if prev_gsindex == 0, then we can't reliably learn the base
-	 * without RDMSR because Intel user code can zero it without telling
-	 * us and AMD user code can program any 32-bit value without telling
-	 * us.
-	 */
-	if (prev_gsindex)
-		prev->gsbase = 0;
-	prev->gsindex = prev_gsindex;
+	load_seg_legacy(prev->fsindex, prev->fsbase,
+			next->fsindex, next->fsbase, FS);
+	load_seg_legacy(prev->gsindex, prev->gsbase,
+			next->gsindex, next->gsbase, GS);
 
 	switch_fpu_finish(next_fpu, fpu_switch);
 
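The rewrite above exists because user code can change FS/GS state without the kernel noticing; save_fsgs() therefore re-reads the selectors with savesegment() on every switch instead of trusting cached values. A minimal userspace sketch (illustrative only, not part of this commit) showing that a plain MOV alters a segment register with no kernel involvement:

/* Userspace illustration (not part of the patch): a MOV to %gs executes
 * entirely in user mode, so values the kernel cached at the last
 * arch_prctl() or clone() can go stale at any time. */
#include <stdio.h>

int main(void)
{
	unsigned short before, after;

	asm volatile("movw %%gs, %0" : "=r" (before));
	asm volatile("movw %w1, %%gs" : : "r" (0));	/* null selector: legal at CPL3 */
	asm volatile("movw %%gs, %0" : "=r" (after));

	printf("gs selector before=%#hx after=%#hx\n", before, after);
	return 0;
}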
2 changes: 2 additions & 0 deletions drivers/md/raid5.c
@@ -5852,6 +5852,8 @@ static void raid5_do_work(struct work_struct *work)
 
 	spin_unlock_irq(&conf->device_lock);
 
+	r5l_flush_stripe_to_raid(conf->log);
+
 	async_tx_issue_pending_all();
 	blk_finish_plug(&plug);
 
6 changes: 3 additions & 3 deletions drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -317,12 +317,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 
 	if (v != MBOX_OWNER_DRV) {
 		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
-		t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+		t4_record_mbox(adap, cmd, size, access, ret);
 		return ret;
 	}
 
 	/* Copy in the new mailbox command and send it on its way ... */
-	t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
+	t4_record_mbox(adap, cmd, size, access, 0);
 	for (i = 0; i < size; i += 8)
 		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
 
@@ -371,7 +371,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
 	}
 
 	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
-	t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+	t4_record_mbox(adap, cmd, size, access, ret);
 	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
 		*(const u8 *)cmd, mbox);
 	t4_report_fw_error(adap);
3 changes: 3 additions & 0 deletions drivers/net/ethernet/freescale/fman/mac.c
@@ -622,6 +622,9 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
 		goto no_mem;
 	}
 
+	pdev->dev.of_node = node;
+	pdev->dev.parent = priv->dev;
+
 	ret = platform_device_add_data(pdev, &data, sizeof(data));
 	if (ret)
 		goto err;
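For context, the two added assignments must happen after platform_device_alloc() and before the device is registered, so OF matching and device parenting are in place when the device becomes visible. A condensed sketch of that sequence (illustrative only; the device name and error handling are assumptions, not the driver's actual code):

#include <linux/of.h>
#include <linux/platform_device.h>

/* Sketch: create a child platform device and wire up its OF node and
 * parent before registering it. */
static struct platform_device *example_add_child(struct device *parent,
						 struct device_node *node)
{
	struct platform_device *pdev;

	pdev = platform_device_alloc("dpaa-ethernet", 0);
	if (!pdev)
		return NULL;

	/* The fix: attach the OF node and parent before platform_device_add(),
	 * so the device is never visible without them. */
	pdev->dev.of_node = node;
	pdev->dev.parent = parent;

	if (platform_device_add(pdev)) {
		platform_device_put(pdev);
		return NULL;
	}
	return pdev;
}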
2 changes: 1 addition & 1 deletion drivers/net/ethernet/freescale/gianfar.c
@@ -3690,7 +3690,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
 		u32 tempval1 = gfar_read(&regs->maccfg1);
 		u32 tempval = gfar_read(&regs->maccfg2);
 		u32 ecntrl = gfar_read(&regs->ecntrl);
-		u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
+		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
 
 		if (phydev->duplex != priv->oldduplex) {
 			if (!(phydev->duplex))
6 changes: 6 additions & 0 deletions drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4172,6 +4172,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
 			return -EINVAL;
 		if (!info->linking)
 			break;
+		if (netdev_has_any_upper_dev(upper_dev))
+			return -EINVAL;
 		/* HW limitation forbids to put ports to multiple bridges. */
 		if (netif_is_bridge_master(upper_dev) &&
 		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
@@ -4185,6 +4187,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
 		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
 		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
 			return -EINVAL;
+		if (!info->linking)
+			break;
+		if (netdev_has_any_upper_dev(upper_dev))
+			return -EINVAL;
 		break;
 	case NETDEV_CHANGEUPPER:
 		upper_dev = info->upper_dev;
2 changes: 1 addition & 1 deletion drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
 	seg_hdr->cookie = MPI_COREDUMP_COOKIE;
 	seg_hdr->segNum = seg_number;
 	seg_hdr->segSize = seg_size;
-	memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+	strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
 }
 
 /*
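The one-word fix above addresses an out-of-bounds read: desc is a NUL-terminated string that may be shorter than the fixed byte count memcpy() copied unconditionally. A standalone sketch (illustrative only; the struct is a simplified stand-in for the driver's segment header):

/* Illustration (not from the patch): why the memcpy() was wrong.  'desc'
 * may be a short string, so copying sizeof()-1 bytes reads past its end;
 * strncpy() stops at the NUL and zero-pads the remainder. */
#include <stdio.h>
#include <string.h>

struct seg_hdr {
	char description[16];
};

int main(void)
{
	struct seg_hdr hdr = { { 0 } };
	const char *desc = "CORE";	/* only 5 bytes, including the NUL */

	/* memcpy(hdr.description, desc, sizeof(hdr.description) - 1)
	 * would read 15 bytes from a 5-byte string literal. */
	strncpy(hdr.description, desc, sizeof(hdr.description) - 1);
	printf("%s\n", hdr.description);
	return 0;
}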
7 changes: 6 additions & 1 deletion drivers/net/hyperv/netvsc_drv.c
@@ -1084,7 +1084,12 @@ static void netvsc_link_change(struct work_struct *w)
 	bool notify = false, reschedule = false;
 	unsigned long flags, next_reconfig, delay;
 
-	rtnl_lock();
+	/* if changes are happening, comeback later */
+	if (!rtnl_trylock()) {
+		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+		return;
+	}
+
 	if (ndev_ctx->start_remove)
 		goto out_unlock;
 
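The change above converts an unconditional rtnl_lock() into a trylock-and-reschedule, so the workqueue worker never blocks waiting for RTNL; it requeues itself instead. The same pattern in plain pthreads (an analogy, not kernel code):

/* Analogy (not kernel code): if the lock is contended, requeue the work
 * instead of blocking the worker thread on it. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

static void reschedule_work(void)
{
	/* stand-in for schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT) */
	puts("lock busy, coming back later");
}

static void link_change_work(void)
{
	if (pthread_mutex_trylock(&cfg_lock) != 0) {
		reschedule_work();	/* don't block; retry on the next tick */
		return;
	}
	puts("processing link change");
	pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
	link_change_work();
	return 0;
}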