Backport memory and CPU hotplug to 6.0.6.0 (openzfs#242)
ZFS currently doesn't react to hotplugging cpu or memory into the
system in any way. This patch changes that by adding logic to the ARC
that allows the system to take advantage of new memory that is added
for caching purposes. It also adds logic to the taskq infrastructure
to support dynamically expanding the number of threads allocated to a
taskq.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Co-authored-by: Matthew Ahrens <matthew.ahrens@delphix.com>
Co-authored-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
Closes openzfs#11212
pcd1193182 authored Dec 14, 2020
1 parent 1045699 commit 5592f89
Showing 14 changed files with 290 additions and 36 deletions.
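For background: both the configure check and the SPL changes below are built on the kernel's multi-instance CPU hotplug ("cpuhp") API, added in Linux 4.6. A minimal, self-contained sketch of the shape of that API (the example_* names are illustrative placeholders, not part of this commit):

	#include <linux/cpuhotplug.h>

	/*
	 * Illustrative placeholders; the real callbacks in this commit are
	 * spl_taskq_expand() and spl_taskq_prepare_down().
	 */
	static int
	example_online(unsigned int cpu, struct hlist_node *node)
	{
		/* Runs once per registered instance each time a CPU comes up. */
		return (0);
	}

	static int
	example_prep_down(unsigned int cpu, struct hlist_node *node)
	{
		/* Runs once per registered instance when a CPU goes down. */
		return (0);
	}

	static int example_state;	/* dynamic state id returned by the kernel */
	static struct hlist_node example_node;

	static int
	example_register(void)
	{
		/* Ask the kernel for a dynamically allocated "online" state... */
		example_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
		    "example:online", example_online, example_prep_down);
		if (example_state < 0)
			return (example_state);
		/* ...then attach an instance; its node is passed to the callbacks. */
		return (cpuhp_state_add_instance_nocalls(example_state,
		    &example_node));
	}

	static void
	example_unregister(void)
	{
		cpuhp_state_remove_instance_nocalls(example_state, &example_node);
		cpuhp_remove_multi_state(example_state);
	}

The state id returned by cpuhp_setup_state_multi() is what spl-taskq.c stores in spl_taskq_cpuhp_state below, and each TASKQ_THREADS_CPU_PCT taskq registers itself as one instance via its tq_hp_cb_node.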
26 changes: 26 additions & 0 deletions config/kernel-hotplug.m4
@@ -0,0 +1,26 @@
dnl #
dnl # 4.6 API change
dnl # Added CPU hotplug APIs
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_CPU_HOTPLUG], [
	ZFS_LINUX_TEST_SRC([cpu_hotplug], [
		#include <linux/cpuhotplug.h>
	],[
		enum cpuhp_state state = CPUHP_ONLINE;
		int (*fp)(unsigned int, struct hlist_node *) = NULL;
		cpuhp_state_add_instance_nocalls(0, (struct hlist_node *)NULL);
		cpuhp_state_remove_instance_nocalls(0, (struct hlist_node *)NULL);
		cpuhp_setup_state_multi(state, "", fp, fp);
		cpuhp_remove_multi_state(0);
	])
])

AC_DEFUN([ZFS_AC_KERNEL_CPU_HOTPLUG], [
	AC_MSG_CHECKING([whether CPU hotplug APIs exist])
	ZFS_LINUX_TEST_RESULT([cpu_hotplug], [
		AC_MSG_RESULT(yes)
		AC_DEFINE(HAVE_CPU_HOTPLUG, 1, [yes])
	],[
		AC_MSG_RESULT(no)
	])
])
2 changes: 2 additions & 0 deletions config/kernel.m4
@@ -122,6 +122,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
	ZFS_AC_KERNEL_SRC_TOTALHIGH_PAGES
	ZFS_AC_KERNEL_SRC_KSTRTOUL
	ZFS_AC_KERNEL_SRC_PERCPU
	ZFS_AC_KERNEL_SRC_CPU_HOTPLUG

	AC_MSG_CHECKING([for available kernel interfaces])
	ZFS_LINUX_TEST_COMPILE_ALL([kabi])
@@ -218,6 +219,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
	ZFS_AC_KERNEL_TOTALHIGH_PAGES
	ZFS_AC_KERNEL_KSTRTOUL
	ZFS_AC_KERNEL_PERCPU
	ZFS_AC_KERNEL_CPU_HOTPLUG
])

dnl #
5 changes: 5 additions & 0 deletions include/os/linux/spl/sys/taskq.h
@@ -85,6 +85,8 @@ typedef struct taskq {
	int		tq_nthreads;	/* # of existing threads */
	int		tq_nspawn;	/* # of threads being spawned */
	int		tq_maxthreads;	/* # of threads maximum */
	/* If PERCPU flag is set, percent of NCPUs to have as threads */
	int		tq_cpu_pct;
	int		tq_pri;		/* priority */
	int		tq_minalloc;	/* min taskq_ent_t pool size */
	int		tq_maxalloc;	/* max taskq_ent_t pool size */
@@ -100,6 +102,9 @@ typedef struct taskq {
	spl_wait_queue_head_t	tq_work_waitq;	/* new work waitq */
	spl_wait_queue_head_t	tq_wait_waitq;	/* wait waitq */
	tq_lock_role_t		tq_lock_class;	/* class when taking tq_lock */
	/* list node for the cpu hotplug callback */
	struct hlist_node	tq_hp_cb_node;
	boolean_t		tq_hp_support;
} taskq_t;

typedef struct taskq_ent {
1 change: 1 addition & 0 deletions include/sys/arc.h
@@ -305,6 +305,7 @@ int arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg);
uint64_t arc_all_memory(void);
uint64_t arc_default_max(uint64_t min, uint64_t allmem);
uint64_t arc_target_bytes(void);
void arc_set_limits(uint64_t);
void arc_init(void);
void arc_fini(void);

2 changes: 2 additions & 0 deletions include/sys/arc_impl.h
@@ -926,6 +926,8 @@ extern int arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg);
extern uint64_t arc_free_memory(void);
extern int64_t arc_available_memory(void);
extern void arc_tuning_update(boolean_t);
extern void arc_register_hotplug(void);
extern void arc_unregister_hotplug(void);

extern int param_set_arc_long(ZFS_MODULE_PARAM_ARGS);
extern int param_set_arc_int(ZFS_MODULE_PARAM_ARGS);
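The Linux implementation of arc_register_hotplug()/arc_unregister_hotplug() is in one of the file diffs not expanded in this view. The natural Linux mechanism is the kernel's memory notifier chain, which lets the ARC limits set up via arc_set_limits() be recomputed when new memory comes online; a minimal sketch of that pattern, hedged — the callback body here is illustrative, not necessarily the commit's exact code:

	#include <linux/memory.h>
	#include <linux/notifier.h>

	/* Illustrative: recompute the ARC limits once new memory is online. */
	static int
	arc_hotplug_callback(struct notifier_block *self, unsigned long action,
	    void *arg)
	{
		if (action == MEM_ONLINE)
			arc_set_limits(arc_all_memory());
		return (NOTIFY_OK);
	}

	static struct notifier_block arc_hotplug_nb = {
		.notifier_call = arc_hotplug_callback,
	};

	void
	arc_register_hotplug(void)
	{
		register_memory_notifier(&arc_hotplug_nb);
	}

	void
	arc_unregister_hotplug(void)
	{
		unregister_memory_notifier(&arc_hotplug_nb);
	}

Both arc_set_limits() and arc_all_memory() are declared in the header diffs above; on kernels without CONFIG_MEMORY_HOTPLUG the register/unregister calls are no-op stubs, matching the empty FreeBSD implementations below.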
10 changes: 10 additions & 0 deletions module/os/freebsd/zfs/arc_os.c
@@ -243,3 +243,13 @@ arc_lowmem_fini(void)
	if (arc_event_lowmem != NULL)
		EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
}

void
arc_register_hotplug(void)
{
}

void
arc_unregister_hotplug(void)
{
}
132 changes: 127 additions & 5 deletions module/os/linux/spl/spl-taskq.c
@@ -29,14 +29,17 @@
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>
#ifdef HAVE_CPU_HOTPLUG
#include <linux/cpuhotplug.h>
#endif

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");


int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
module_param(spl_taskq_thread_dynamic, int, 0444);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
@@ -60,6 +63,11 @@ EXPORT_SYMBOL(system_delay_taskq);
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

#ifdef HAVE_CPU_HOTPLUG
/* Multi-callback id for cpu hotplugging. */
static int spl_taskq_cpuhp_state;
#endif

/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
@@ -1025,13 +1033,14 @@ taskq_thread_create(taskq_t *tq)
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri,
taskq_create(const char *name, int threads_arg, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq;
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;
	unsigned long irqflags;
	int nthreads = threads_arg;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
@@ -1042,15 +1051,27 @@ taskq_create(const char *name, int nthreads, pri_t pri,
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MIN(threads_arg, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
	}

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
	if (tq == NULL)
		return (NULL);

	tq->tq_hp_support = B_FALSE;
#ifdef HAVE_CPU_HOTPLUG
	if (flags & TASKQ_THREADS_CPU_PCT) {
		tq->tq_hp_support = B_TRUE;
		if (cpuhp_state_add_instance_nocalls(spl_taskq_cpuhp_state,
		    &tq->tq_hp_cb_node) != 0) {
			kmem_free(tq, sizeof (*tq));
			return (NULL);
		}
	}
#endif

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
@@ -1059,6 +1080,7 @@
	tq->tq_nthreads = 0;
	tq->tq_nspawn = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_cpu_pct = threads_arg;
	tq->tq_pri = pri;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
@@ -1132,6 +1154,12 @@ taskq_destroy(taskq_t *tq)
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, flags);

#ifdef HAVE_CPU_HOTPLUG
	if (tq->tq_hp_support) {
		VERIFY0(cpuhp_state_remove_instance_nocalls(
		    spl_taskq_cpuhp_state, &tq->tq_hp_cb_node));
	}
#endif
	/*
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for dynamic taskq.
@@ -1199,7 +1227,6 @@ taskq_destroy(taskq_t *tq)
}
EXPORT_SYMBOL(taskq_destroy);


static unsigned int spl_taskq_kick = 0;

/*
@@ -1256,12 +1283,96 @@ module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
MODULE_PARM_DESC(spl_taskq_kick,
    "Write nonzero to kick stuck taskqs to spawn more threads");

#ifdef HAVE_CPU_HOTPLUG
/*
 * This callback will be called exactly once for each core that comes online,
 * for each dynamic taskq. We attempt to expand taskqs that have
 * TASKQ_THREADS_CPU_PCT set. We need to redo the percentage calculation every
 * time, to correctly determine whether or not to add a thread.
 */
static int
spl_taskq_expand(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;
	int err = 0;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus() + 1) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads > tq->tq_nthreads) {
		ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads + 1);
		taskq_thread_t *tqt = taskq_thread_create(tq);
		if (tqt == NULL)
			err = -1;
	}

out:
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (err);
}
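To make the arithmetic above concrete (numbers mine, not from the commit): a non-dynamic taskq created with tq_cpu_pct = 100 on a 4-CPU system starts with tq_maxthreads = 4 * 100 / 100 = 4. When a fifth CPU comes up, the + 1 compensates for the incoming CPU apparently not yet being counted by num_online_cpus() when the callback runs, so tq_maxthreads becomes (4 + 1) * 100 / 100 = 5 and exactly one thread is spawned per online event — which is what the ASSERT3U checks. Taskqs running with TASKQ_DYNAMIC and spl_taskq_thread_dynamic skip the spawn here and instead grow on demand up to the new tq_maxthreads.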

/*
 * While we don't support offlining CPUs, it is possible that CPUs will fail
 * to online successfully. We do need to be able to handle this case
 * gracefully.
 */
static int
spl_taskq_prepare_down(unsigned int cpu, struct hlist_node *node)
{
	taskq_t *tq = list_entry(node, taskq_t, tq_hp_cb_node);
	unsigned long flags;

	ASSERT(tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (!(tq->tq_flags & TASKQ_ACTIVE))
		goto out;

	ASSERT(tq->tq_flags & TASKQ_THREADS_CPU_PCT);
	int nthreads = MIN(tq->tq_cpu_pct, 100);
	nthreads = MAX(((num_online_cpus()) * nthreads) / 100, 1);
	tq->tq_maxthreads = nthreads;

	if (!((tq->tq_flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic) &&
	    tq->tq_maxthreads < tq->tq_nthreads) {
		ASSERT3U(tq->tq_maxthreads, ==, tq->tq_nthreads - 1);
		taskq_thread_t *tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		struct task_struct *thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		return (0);
	}

out:
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	return (0);
}
#endif
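One subtlety in spl_taskq_prepare_down() worth noting: kthread_stop() blocks until the target thread exits, so it cannot be called while holding tq_lock with interrupts disabled — hence the lock is dropped and the function returns early after stopping the chosen worker.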

int
spl_taskq_init(void)
{
	init_rwsem(&tq_list_sem);
	tsd_create(&taskq_tsd, NULL);

#ifdef HAVE_CPU_HOTPLUG
	spl_taskq_cpuhp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
	    "fs/spl_taskq:online", spl_taskq_expand, spl_taskq_prepare_down);
#endif

	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)
@@ -1270,13 +1381,19 @@ spl_taskq_init(void)
	system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_delay_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		return (1);
	}

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
#ifdef HAVE_CPU_HOTPLUG
		cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
#endif
		taskq_destroy(system_taskq);
		taskq_destroy(system_delay_taskq);
		return (1);
@@ -1305,4 +1422,9 @@ spl_taskq_fini(void)
	system_taskq = NULL;

	tsd_destroy(&taskq_tsd);

#ifdef HAVE_CPU_HOTPLUG
	cpuhp_remove_multi_state(spl_taskq_cpuhp_state);
	spl_taskq_cpuhp_state = 0;
#endif
}
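Finally, a hedged sketch of the consumer side of all this (the taskq name and percentage are illustrative, not from this commit): passing TASKQ_THREADS_CPU_PCT makes the second argument of taskq_create() a percentage of online CPUs rather than a thread count, and the hotplug callbacks above then keep tq_maxthreads in step with the CPU count.

	#include <sys/taskq.h>

	static taskq_t *example_tq;

	void
	example_setup(void)
	{
		/*
		 * 75 is a percentage here, not a thread count: on a 4-CPU
		 * system this yields 3 threads. taskq_create() stashes the
		 * 75 in tq_cpu_pct, so the cpuhp callbacks can raise
		 * tq_maxthreads as CPUs come online (and lower it again if
		 * a CPU fails to come up).
		 */
		example_tq = taskq_create("z_example", 75, maxclsyspri,
		    50, INT_MAX, TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
	}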
(The remaining seven file diffs, including the Linux-side ARC changes, are not expanded in this view.)