From 703371d8c734bc2cc6350f1bca014f08245dcc69 Mon Sep 17 00:00:00 2001
From: Andrey Vesnovaty
Date: Wed, 28 Aug 2013 05:09:25 +0300
Subject: [PATCH] Evenly distribute the taskq threads across available CPUs

The problem is described in commit
aeeb4e0c0ae75b99ebbaa3056f0afc8e12949532. However, instead of disabling
CPU binding altogether, we keep the last used CPU index across calls to
taskq_create() and thus achieve an even distribution of the taskq
threads across all available CPUs.

The implementation is based on the assumption that task queue
initialization is performed serially.

Signed-off-by: Andrey Vesnovaty
Signed-off-by: Brian Behlendorf
Closes #336
---
 man/man5/spl-module-parameters.5 | 12 ++++++++++++
 module/spl/spl-taskq.c           |  9 +++++++++
 2 files changed, 21 insertions(+)

diff --git a/man/man5/spl-module-parameters.5 b/man/man5/spl-module-parameters.5
index 3c134f775..9b351762c 100644
--- a/man/man5/spl-module-parameters.5
+++ b/man/man5/spl-module-parameters.5
@@ -124,3 +124,15 @@ Spin a maximum of N times to acquire lock
 .sp
 .ne -4
 Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBspl_taskq_thread_bind\fR (int)
+.ad
+.RS 12n
+Bind taskq thread to CPU
+.sp
+Default value: \fB0\fR.
+.RE
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index 48feb1d22..0cb2ceeaf 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -34,6 +34,10 @@
 
 #define SS_DEBUG_SUBSYS SS_TASKQ
 
+int spl_taskq_thread_bind = 0;
+module_param(spl_taskq_thread_bind, int, 0644);
+MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
+
 /* Global system-wide dynamic task queue available for all consumers */
 taskq_t *system_taskq;
 EXPORT_SYMBOL(system_taskq);
@@ -781,6 +785,7 @@ taskq_t *
 taskq_create(const char *name, int nthreads, pri_t pri,
 	     int minalloc, int maxalloc, uint_t flags)
 {
+	static int last_used_cpu = 0;
 	taskq_t *tq;
 	taskq_thread_t *tqt;
 	int rc = 0, i, j = 0;
@@ -843,6 +848,10 @@
 		    "%s/%d", name, i);
 		if (tqt->tqt_thread) {
 			list_add(&tqt->tqt_thread_list, &tq->tq_thread_list);
+			if (spl_taskq_thread_bind) {
+				last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
+				kthread_bind(tqt->tqt_thread, last_used_cpu);
+			}
 			set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(pri));
 			wake_up_process(tqt->tqt_thread);
 			j++;
-- 
2.40.0
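
For reference, the round-robin selection above can be exercised outside
the kernel. The following stand-alone C sketch is not part of the patch:
NUM_CPUS is an assumed stand-in for the kernel's num_online_cpus(), and
next_bind_cpu() is a hypothetical helper that mirrors the static
last_used_cpu logic the patch adds to taskq_create(). It shows why
serialized calls spread threads evenly across CPUs:

#include <stdio.h>

/* Assumed stand-in for the kernel's num_online_cpus(). */
#define NUM_CPUS 4

/*
 * Hypothetical helper mirroring the patch's logic in taskq_create():
 * a static index survives across calls, so each new thread is bound
 * to the next CPU in line.  As in the patch, this is only safe when
 * callers are serialized, since the update is not atomic.
 */
static int
next_bind_cpu(void)
{
	static int last_used_cpu = 0;

	last_used_cpu = (last_used_cpu + 1) % NUM_CPUS;
	return (last_used_cpu);
}

int
main(void)
{
	int i;

	/* Eight "threads" over four CPUs: prints 1 2 3 0 1 2 3 0. */
	for (i = 0; i < 8; i++)
		printf("thread %d -> cpu %d\n", i, next_bind_cpu());

	return (0);
}

Because the counter is static and unsynchronized, two concurrent callers
could race on last_used_cpu; the serial-initialization assumption stated
in the commit message is what makes the plain increment safe. Once the
module is loaded, the parameter should also appear at
/sys/module/spl/parameters/spl_taskq_thread_bind (the standard sysfs
location for a module_param with mode 0644), though flipping it at
runtime only affects taskqs created afterwards.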