granicus.if.org Git - zfs/commitdiff
Evenly distribute the taskq threads across available CPUs
authorAndrey Vesnovaty <andrey.vesnovaty@gmail.com>
Wed, 28 Aug 2013 02:09:25 +0000 (05:09 +0300)
committerBrian Behlendorf <behlendorf1@llnl.gov>
Fri, 25 Apr 2014 22:29:18 +0000 (15:29 -0700)
The problem is described in commit aeeb4e0c0ae75b99ebbaa3056f0afc8e12949532.
However, instead of disabling the binding to CPU altogether we just keep the
last CPU index across calls to taskq_create() and thus achieve even
distribution of the taskq threads across all available CPUs.

The implementation is based on the assumption that task queue
initialization is performed in a serial manner.

Signed-off-by: Andrey Vesnovaty <andrey.vesnovaty@gmail.com>
Signed-off-by: Andrey Vesnovaty <andreyv@infinidat.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #336

man/man5/spl-module-parameters.5
module/spl/spl-taskq.c

index 3c134f7757ad349da89dd43591438d7df2020917..9b351762cbc9281e0bba466a8541456489e765f9 100644 (file)
@@ -124,3 +124,15 @@ Spin a maximum of N times to acquire lock
 .sp
 .ne -4
 Default value: \fB0\fR.
+.RE
+
+.sp
+.ne 2
+.na
+\fBspl_taskq_thread_bind\fR (int)
+.ad
+.RS 12n
+Bind taskq thread to CPU
+.sp
+Default value: \fB0\fR.
+.RE
index 48feb1d220eb12d41d990ab00d9b224d90955eed..0cb2ceeaf15f0818b36ca520e4a75c90318d5300 100644 (file)
 
 #define SS_DEBUG_SUBSYS SS_TASKQ
 
+int spl_taskq_thread_bind = 0;
+module_param(spl_taskq_thread_bind, int, 0644);
+MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
+
 /* Global system-wide dynamic task queue available for all consumers */
 taskq_t *system_taskq;
 EXPORT_SYMBOL(system_taskq);
@@ -781,6 +785,7 @@ taskq_t *
 taskq_create(const char *name, int nthreads, pri_t pri,
     int minalloc, int maxalloc, uint_t flags)
 {
+       static int last_used_cpu = 0;
        taskq_t *tq;
        taskq_thread_t *tqt;
        int rc = 0, i, j = 0;
@@ -843,6 +848,10 @@ taskq_create(const char *name, int nthreads, pri_t pri,
                    "%s/%d", name, i);
                if (tqt->tqt_thread) {
                        list_add(&tqt->tqt_thread_list, &tq->tq_thread_list);
+                       if (spl_taskq_thread_bind) {
+                               last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
+                               kthread_bind(tqt->tqt_thread, last_used_cpu);
+                       }
                        set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(pri));
                        wake_up_process(tqt->tqt_thread);
                        j++;