Create a new thread during recursive taskq dispatch if necessary
author    Tim Chase <tim@chase2k.com>
          Thu, 27 Aug 2015 16:13:20 +0000 (11:13 -0500)
committer Brian Behlendorf <behlendorf1@llnl.gov>
          Mon, 31 Aug 2015 22:52:06 +0000 (15:52 -0700)
When dynamic taskq is enabled and all threads for a taskq are occupied,
a recursive dispatch can cause a deadlock if the calling thread depends
on the recursively-dispatched task for its return condition.

This patch attempts to create a new thread for the recursive dispatch
when no taskq threads are available.

Signed-off-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #472
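
For context, here is a minimal sketch of the deadlock-prone pattern this
change addresses, assuming the standard SPL taskq API (taskq_dispatch(),
taskq_wait_id(), TQ_SLEEP). The helpers parent_func() and child_func() are
hypothetical and not part of the patch:

static void
child_func(void *arg)
{
        /* Work the parent task blocks on. */
}

static void
parent_func(void *arg)
{
        taskq_t *tq = arg;
        taskqid_t id;

        /* Recursive dispatch: this task is already running on tq. */
        id = taskq_dispatch(tq, child_func, NULL, TQ_SLEEP);

        /*
         * If every thread of tq is busy running parent_func(), then
         * child_func() never starts and this wait blocks forever unless
         * an additional thread is spawned for the recursive dispatch.
         */
        taskq_wait_id(tq, id);
}

The hunks below apply the same guard, threadlimit && taskq_member(tq, current),
to all three dispatch paths (taskq_dispatch, taskq_dispatch_delay and
taskq_dispatch_ent) so a dispatch made from within the taskq at its thread
limit triggers taskq_thread_spawn().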

module/spl/spl-taskq.c

index 7a756af37aed97ea9e10d48e677e926e05436f24..2202aced78b8c30a8ee79b53ef6c8f7879948101 100644 (file)
@@ -53,6 +53,7 @@ EXPORT_SYMBOL(system_taskq);
 /* Private dedicated taskq for creating new taskq threads on demand. */
 static taskq_t *dynamic_taskq;
 static taskq_thread_t *taskq_thread_create(taskq_t *);
+static int taskq_thread_spawn(taskq_t *tq, int seq_tasks);
 
 static int
 task_km_flags(uint_t flags)
@@ -533,6 +534,7 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 {
        taskq_ent_t *t;
        taskqid_t rc = 0;
+       boolean_t threadlimit = B_FALSE;
 
        ASSERT(tq);
        ASSERT(func);
@@ -574,7 +576,13 @@ taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
 
        wake_up(&tq->tq_work_waitq);
 out:
+       threadlimit = (tq->tq_nactive == tq->tq_nthreads);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+
+       /* Spawn additional taskq threads if required. */
+       if (threadlimit && taskq_member(tq, current))
+               (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+
        return (rc);
 }
 EXPORT_SYMBOL(taskq_dispatch);
@@ -585,6 +593,7 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
 {
        taskqid_t rc = 0;
        taskq_ent_t *t;
+       boolean_t threadlimit = B_FALSE;
 
        ASSERT(tq);
        ASSERT(func);
@@ -617,7 +626,13 @@ taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
 
        spin_unlock(&t->tqent_lock);
 out:
+       threadlimit = (tq->tq_nactive == tq->tq_nthreads);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+
+       /* Spawn additional taskq threads if required. */
+       if (threadlimit && taskq_member(tq, current))
+               (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+
        return (rc);
 }
 EXPORT_SYMBOL(taskq_dispatch_delay);
@@ -626,6 +641,8 @@ void
 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
 {
+       boolean_t threadlimit = B_FALSE;
+
        ASSERT(tq);
        ASSERT(func);
 
@@ -661,7 +678,12 @@ taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
 
        wake_up(&tq->tq_work_waitq);
 out:
+       threadlimit = (tq->tq_nactive == tq->tq_nthreads);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+
+       /* Spawn additional taskq threads if required. */
+       if (threadlimit && taskq_member(tq, current))
+               (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
 }
 EXPORT_SYMBOL(taskq_dispatch_ent);