Skip to content
This repository has been archived by the owner on Nov 13, 2019. It is now read-only.

Commit

Permalink
Merge branch 'staging/neutrino-msm-mata-4.4' of https://github.com/0ctobot/neutrino-staging into neutrino-msm-mata-4.4
Browse files Browse the repository at this point in the history

* staging/neutrino-msm-mata-4.4: (27 commits)
  smp: Avoid sending needless IPI in smp_call_function_many()
  smp, cpumask: Use non-atomic cpumask_{set,clear}_cpu()
  smp: Avoid using two cache lines for struct call_single_data
  cpuidle: Don't enable all governors by default
  cpuidle:lpm: Fix div by zero bug
  drivers: cpuidle: lpm-levels: Correct missing list initialize
  ANDROID: clean up uninitilized variable
  msm: ipa: avoid printing UL data stall
  msm: sps: Whitelist %p and %pa addresses in ipc logs for debugging.
  net: rmnet_data: Fix incorrect netlink handling
  net: rmnet_data: Skip UL aggregation for non linear packets
  net: rmnet_data: Check for endpoint validity when demuxing
  net: rmnet_data: Fix comments on code review
  net: rmnet_data: Fix assignments, reads, and logic
  net: rmnet_data: Skip UL aggregation for ping packets
  net: rmnet_data: Optimize the UL aggregation skip logic
  net: rmnet_data: Always try to linearize when UL aggregation is on
  net: rmnet_data: Switch aggregation from delayed work to hrtimer
  net: rmnet_data: Go back to worker thread for UL aggregation
  net: rmnet_data: Use hrtimer for UL aggregation timer
  net: rmnet_data: Remove invalid error message
  net: rmnet_data: Check for context when freeing packing in TX path
  net: rmnet_data: compute rx hash before napi_gro_receive
  rmnet_ipa: Fix netdev watchdog triggering on suspend
  Revert "msm: kgsl: Replace scm call api with its atomic version"
  soc: qcom: scm: Add a noretry variant for scm_call2
  msm: kgsl: Replace scm call api with its noretry version

Signed-off-by: Adam W. Willis <return.of.octobot@gmail.com>
  • Loading branch information
0ctobot committed Dec 16, 2018
2 parents 6ed48d2 + cc1d5f0 commit 1e6f159
Show file tree
Hide file tree
Showing 34 changed files with 366 additions and 307 deletions.
6 changes: 3 additions & 3 deletions arch/mips/kernel/smp.c
Original file line number Diff line number Diff line change
Expand Up @@ -469,12 +469,12 @@ EXPORT_SYMBOL(dump_send_ipi);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);

void tick_broadcast(const struct cpumask *mask)
{
atomic_t *count;
struct call_single_data *csd;
call_single_data_t *csd;
int cpu;

for_each_cpu(cpu, mask) {
Expand All @@ -495,7 +495,7 @@ static void tick_broadcast_callee(void *info)

static int __init tick_broadcast_init(void)
{
struct call_single_data *csd;
call_single_data_t *csd;
int cpu;

for (cpu = 0; cpu < NR_CPUS; cpu++) {
Expand Down
2 changes: 1 addition & 1 deletion block/blk-softirq.c
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ static void trigger_softirq(void *data)
static int raise_blk_irq(int cpu, struct request *rq)
{
if (cpu_online(cpu)) {
struct call_single_data *data = &rq->csd;
call_single_data_t *data = &rq->csd;

data->func = trigger_softirq;
data->info = rq;
Expand Down
2 changes: 1 addition & 1 deletion drivers/block/null_blk.c
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
struct nullb_cmd {
struct list_head list;
struct llist_node ll_list;
struct call_single_data csd;
call_single_data_t csd;
struct request *rq;
struct bio *bio;
unsigned int tag;
Expand Down
2 changes: 0 additions & 2 deletions drivers/cpuidle/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,9 @@ config CPU_IDLE_MULTIPLE_DRIVERS

config CPU_IDLE_GOV_LADDER
bool "Ladder governor (for periodic timer tick)"
default y

config CPU_IDLE_GOV_MENU
bool "Menu governor (for tickless system)"
default y

config DT_IDLE_STATES
bool
Expand Down
10 changes: 5 additions & 5 deletions drivers/cpuidle/coupled.c
Original file line number Diff line number Diff line change
Expand Up @@ -119,13 +119,13 @@ struct cpuidle_coupled {

#define CPUIDLE_COUPLED_NOT_IDLE (-1)

static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
static DEFINE_PER_CPU(call_single_data_t, cpuidle_coupled_poke_cb);

/*
* The cpuidle_coupled_poke_pending mask is used to avoid calling
* __smp_call_function_single with the per cpu call_single_data struct already
* __smp_call_function_single with the per cpu call_single_data_t struct already
* in use. This prevents a deadlock where two cpus are waiting for each others
* call_single_data struct to be available
* call_single_data_t struct to be available
*/
static cpumask_t cpuidle_coupled_poke_pending;

Expand Down Expand Up @@ -339,7 +339,7 @@ static void cpuidle_coupled_handle_poke(void *info)
*/
static void cpuidle_coupled_poke(int cpu)
{
struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);

if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
smp_call_function_single_async(cpu, csd);
Expand Down Expand Up @@ -651,7 +651,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev)
{
int cpu;
struct cpuidle_device *other_dev;
struct call_single_data *csd;
call_single_data_t *csd;
struct cpuidle_coupled *coupled;

if (cpumask_empty(&dev->coupled_cpus))
Expand Down
4 changes: 3 additions & 1 deletion drivers/cpuidle/lpm-levels-of.c
Original file line number Diff line number Diff line change
Expand Up @@ -745,7 +745,8 @@ static int calculate_residency(struct power_params *base_pwr,
((int32_t)(next_pwr->ss_power * next_pwr->time_overhead_us)
- (int32_t)(base_pwr->ss_power * base_pwr->time_overhead_us));

residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);
if (base_pwr->ss_power != next_pwr->ss_power)
residency /= (int32_t)(base_pwr->ss_power - next_pwr->ss_power);

if (residency < 0) {
pr_err("%s: residency < 0 for LPM\n",
Expand Down Expand Up @@ -896,6 +897,7 @@ struct lpm_cluster *parse_cluster(struct device_node *node,
if (ret)
goto failed_parse_params;

INIT_LIST_HEAD(&c->list);
INIT_LIST_HEAD(&c->child);
c->parent = parent;
spin_lock_init(&c->sync_lock);
Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/msm/adreno_a5xx.c
Original file line number Diff line number Diff line change
Expand Up @@ -2266,7 +2266,7 @@ static int a5xx_microcode_load(struct adreno_device *adreno_dev)
desc.args[1] = 13;
desc.arginfo = SCM_ARGS(2);

ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT, 0xA), &desc);
ret = scm_call2_noretry(SCM_SIP_FNID(SCM_SVC_BOOT, 0xA), &desc);
if (ret) {
pr_err("SCM resume call failed with error %d\n", ret);
return ret;
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/ethernet/cavium/liquidio/lio_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -1860,7 +1860,7 @@ static void liquidio_napi_drv_callback(void *arg)
if (droq->cpu_id == this_cpu) {
napi_schedule(&droq->napi);
} else {
struct call_single_data *csd = &droq->csd;
call_single_data_t *csd = &droq->csd;

csd->func = napi_schedule_wrapper;
csd->info = &droq->napi;
Expand Down
2 changes: 1 addition & 1 deletion drivers/net/ethernet/cavium/liquidio/octeon_droq.h
Original file line number Diff line number Diff line change
Expand Up @@ -322,7 +322,7 @@ struct octeon_droq {

u32 cpu_id;

struct call_single_data csd;
call_single_data_t csd;
};

#define OCT_DROQ_SIZE (sizeof(struct octeon_droq))
Expand Down
11 changes: 9 additions & 2 deletions drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
Original file line number Diff line number Diff line change
Expand Up @@ -1186,7 +1186,11 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)

/*
 * ipa3_wwan_tx_timeout() - netdev watchdog callback for the rmnet device.
 * @dev: network device whose TX queue stalled.
 *
 * The diff artifact left the old unconditional IPAWANERR() line in place
 * above the new local declaration; the resolved version below only logs a
 * UL data stall when packets are actually outstanding, avoiding needless
 * error spam on benign watchdog fires (per the "avoid printing UL data
 * stall" commit in this merge).
 */
static void ipa3_wwan_tx_timeout(struct net_device *dev)
{
	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);

	/* Only complain when the hardware really has packets pending. */
	if (atomic_read(&wwan_ptr->outstanding_pkts) != 0)
		IPAWANERR("[%s] data stall in UL, %d outstanding\n",
			dev->name, atomic_read(&wwan_ptr->outstanding_pkts));
}

/**
Expand Down Expand Up @@ -2440,6 +2444,7 @@ static int rmnet_ipa_ap_suspend(struct device *dev)

/* Make sure that there is no Tx operation ongoing */
netif_stop_queue(netdev);
netif_device_detach(netdev);
ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
ret = 0;

Expand All @@ -2465,8 +2470,10 @@ static int rmnet_ipa_ap_resume(struct device *dev)
struct net_device *netdev = IPA_NETDEV();

IPAWANDBG("Enter...\n");
if (netdev)
if (netdev){
netif_wake_queue(netdev);
netif_device_attach(netdev);
}
IPAWANDBG("Exit\n");

return 0;
Expand Down
4 changes: 2 additions & 2 deletions drivers/platform/msm/sps/bam.c
Original file line number Diff line number Diff line change
Expand Up @@ -834,7 +834,7 @@ static inline void bam_write_reg(void *base, enum bam_regs reg,
return;
}
iowrite32(val, dev->base + offset);
SPS_DBG(dev, "sps:bam 0x%pK(va) write reg 0x%x w_val 0x%x.\n",
SPS_DBG(dev, "sps:bam 0x%pP(va) write reg 0x%x w_val 0x%x.\n",
dev->base, offset, val);
}

Expand Down Expand Up @@ -870,7 +870,7 @@ static inline void bam_write_reg_field(void *base, enum bam_regs reg,
tmp &= ~mask; /* clear written bits */
val = tmp | (val << shift);
iowrite32(val, dev->base + offset);
SPS_DBG(dev, "sps:bam 0x%pK(va) write reg 0x%x w_val 0x%x.\n",
SPS_DBG(dev, "sps:bam 0x%pP(va) write reg 0x%x w_val 0x%x.\n",
dev->base, offset, val);
}

Expand Down
2 changes: 1 addition & 1 deletion drivers/platform/msm/sps/sps_bam.c
Original file line number Diff line number Diff line change
Expand Up @@ -249,7 +249,7 @@ static irqreturn_t bam_isr(int irq, void *ctxt)
&ready);
} else {
SPS_DBG1(dev,
"sps:bam_isr: BAM is not ready and thus skip IRQ for bam:%pa IRQ #:%d.\n",
"sps:bam_isr: BAM is not ready and thus skip IRQ for bam:%paP IRQ #:%d.\n",
BAM_ID(dev), irq);
}
} else {
Expand Down
70 changes: 44 additions & 26 deletions drivers/soc/qcom/scm.c
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
/* Copyright (c) 2010-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
Expand Down Expand Up @@ -635,28 +635,7 @@ static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
return 0;
}

/**
* scm_call2() - Invoke a syscall in the secure world
* @fn_id: The function ID for this syscall
* @desc: Descriptor structure containing arguments and return values
*
* Sends a command to the SCM and waits for the command to finish processing.
* This should *only* be called in pre-emptible context.
*
* A note on cache maintenance:
* Note that any buffers that are expected to be accessed by the secure world
* must be flushed before invoking scm_call and invalidated in the cache
* immediately after scm_call returns. An important point that must be noted
* is that on ARMV8 architectures, invalidation actually also causes a dirty
* cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of
* paramount importance that the buffer be flushed before invoking scm_call2,
* even if you don't care about the contents of that buffer.
*
* Note that cache maintenance on the argument buffer (desc->args) is taken care
* of by scm_call2; however, callers are responsible for any other cached
* buffers passed over to the secure world.
*/
int scm_call2(u32 fn_id, struct scm_desc *desc)
static int __scm_call2(u32 fn_id, struct scm_desc *desc, bool retry)
{
int arglen = desc->arginfo & 0xf;
int ret, retry_count = 0;
Expand All @@ -670,7 +649,6 @@ int scm_call2(u32 fn_id, struct scm_desc *desc)
return ret;

x0 = fn_id | scm_version_mask;

do {
mutex_lock(&scm_lock);

Expand Down Expand Up @@ -700,13 +678,15 @@ int scm_call2(u32 fn_id, struct scm_desc *desc)
mutex_unlock(&scm_lmh_lock);

mutex_unlock(&scm_lock);
if (!retry)
goto out;

if (ret == SCM_V2_EBUSY)
msleep(SCM_EBUSY_WAIT_MS);
if (retry_count == 33)
pr_warn("scm: secure world has been busy for 1 second!\n");
} while (ret == SCM_V2_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));

} while (ret == SCM_V2_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
out:
if (ret < 0)
pr_err("scm_call failed: func id %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
x0, ret, desc->ret[0], desc->ret[1], desc->ret[2]);
Expand All @@ -717,8 +697,46 @@ int scm_call2(u32 fn_id, struct scm_desc *desc)
return scm_remap_error(ret);
return 0;
}

/**
 * scm_call2() - Invoke a syscall in the secure world
 * @fn_id: The function ID for this syscall
 * @desc: Descriptor structure containing arguments and return values
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 * This should *only* be called in pre-emptible context.
 *
 * Retries automatically (with msleep backoff) while the secure world
 * returns SCM_V2_EBUSY; use scm_call2_noretry() to fail fast instead.
 *
 * A note on cache maintenance:
 * Note that any buffers that are expected to be accessed by the secure world
 * must be flushed before invoking scm_call and invalidated in the cache
 * immediately after scm_call returns. An important point that must be noted
 * is that on ARMV8 architectures, invalidation actually also causes a dirty
 * cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of
 * paramount importance that the buffer be flushed before invoking scm_call2,
 * even if you don't care about the contents of that buffer.
 *
 * Note that cache maintenance on the argument buffer (desc->args) is taken care
 * of by scm_call2; however, callers are responsible for any other cached
 * buffers passed over to the secure world.
 */
int scm_call2(u32 fn_id, struct scm_desc *desc)
{
	return __scm_call2(fn_id, desc, true);
}
EXPORT_SYMBOL(scm_call2);

/**
 * scm_call2_noretry() - Invoke a syscall in the secure world
 * @fn_id: The function ID for this syscall
 * @desc: Descriptor structure containing arguments and return values
 *
 * Similar to scm_call2 except that there is no retry mechanism
 * implemented: if the secure world reports SCM_V2_EBUSY the error is
 * returned to the caller immediately instead of sleeping and retrying.
 * Same cache-maintenance obligations as scm_call2() apply.
 */
int scm_call2_noretry(u32 fn_id, struct scm_desc *desc)
{
	return __scm_call2(fn_id, desc, false);
}
EXPORT_SYMBOL(scm_call2_noretry);

/**
* scm_call2_atomic() - Invoke a syscall in the secure world
*
Expand Down
2 changes: 1 addition & 1 deletion drivers/tty/serial/msm_serial_hs.c
Original file line number Diff line number Diff line change
Expand Up @@ -2000,7 +2000,7 @@ static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)

uport = &(msm_uport->uport);
msm_uport->notify = *notify;
MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
MSM_HS_INFO("rx_cb: addr=0x%paP, size=0x%x, flags=0x%x\n",
&addr, notify->data.transfer.iovec.size,
notify->data.transfer.iovec.flags);

Expand Down
2 changes: 1 addition & 1 deletion drivers/usb/gadget/function/f_fs.c
Original file line number Diff line number Diff line change
Expand Up @@ -1275,7 +1275,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
unsigned long value)
{
struct ffs_epfile *epfile = file->private_data;
int ret;
int ret = 0;

ENTER();

Expand Down
2 changes: 1 addition & 1 deletion include/linux/blkdev.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ enum rq_cmd_type_bits {
struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
call_single_data_t csd;
unsigned long fifo_time;
};

Expand Down
11 changes: 11 additions & 0 deletions include/linux/cpumask.h
Original file line number Diff line number Diff line change
Expand Up @@ -307,6 +307,12 @@ static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * __cpumask_set_cpu - set a cpu in a cpumask (non-atomic)
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 *
 * Non-atomic variant of cpumask_set_cpu(); only safe when concurrent
 * updates to @dstp are excluded by the caller (e.g. a mask that is
 * private to the current context).
 */
static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
{
	__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
* cpumask_clear_cpu - clear a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
Expand All @@ -317,6 +323,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
 * __cpumask_clear_cpu - clear a cpu in a cpumask (non-atomic)
 * @cpu: cpu number (< nr_cpu_ids)
 * @dstp: the cpumask pointer
 *
 * Non-atomic variant of cpumask_clear_cpu(); only safe when concurrent
 * updates to @dstp are excluded by the caller.
 */
static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
{
	__clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
}

/**
* cpumask_test_cpu - test for a cpu in a cpumask
* @cpu: cpu number (< nr_cpu_ids)
Expand Down
4 changes: 2 additions & 2 deletions include/linux/netdevice.h
Original file line number Diff line number Diff line change
Expand Up @@ -2610,8 +2610,8 @@ struct softnet_data {
struct sk_buff *completion_queue;

#ifdef CONFIG_RPS
/* Elements below can be accessed between CPUs for RPS */
struct call_single_data csd ____cacheline_aligned_in_smp;
/* Elements below can be accessed between CPUs for RPS/RFS */
call_single_data_t csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
unsigned int input_queue_head;
Expand Down
8 changes: 6 additions & 2 deletions include/linux/smp.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,17 @@
#include <linux/llist.h>

/* Signature of the function executed on the target CPU by the IPI path. */
typedef void (*smp_call_func_t)(void *info);
struct __call_single_data {
	struct llist_node llist;	/* entry on the per-cpu call queue */
	smp_call_func_t func;		/* function to invoke on the target CPU */
	void *info;			/* opaque argument passed to @func */
	unsigned int flags;		/* CSD_FLAG_* state (lock/synchronous) */
};

/* Use __aligned() to avoid to use 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

Expand Down Expand Up @@ -48,7 +52,7 @@ void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
smp_call_func_t func, void *info, bool wait,
gfp_t gfp_flags);

int smp_call_function_single_async(int cpu, struct call_single_data *csd);
int smp_call_function_single_async(int cpu, call_single_data_t *csd);

#ifdef CONFIG_SMP

Expand Down
Loading

0 comments on commit 1e6f159

Please sign in to comment.