}
EXPORT_SYMBOL(taskq_wait);
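+/*
+ * Lock-held variant of taskq_member(). Returns non-zero if thread 't'
+ * is a worker thread of taskq 'tq'. The caller must hold tq->tq_lock.
+ */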
-int
-taskq_member(taskq_t *tq, void *t)
+static int
+taskq_member_impl(taskq_t *tq, void *t)
{
	struct list_head *l;
	taskq_thread_t *tqt;
	int found = 0;
ASSERT(tq);
ASSERT(t);
+ ASSERT(spin_is_locked(&tq->tq_lock));
- spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
list_for_each(l, &tq->tq_thread_list) {
tqt = list_entry(l, taskq_thread_t, tqt_thread_list);
	if (tqt->tqt_thread == (struct task_struct *)t) {
		found = 1;
		break;
	}
}
+ return (found);
+}
+
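+/*
+ * Public wrapper for taskq_member_impl() which acquires tq->tq_lock
+ * around the thread-list scan.
+ */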
+int
+taskq_member(taskq_t *tq, void *t)
+{
+ int found;
+
+ spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+ found = taskq_member_impl(tq, t);
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
return (found);
}
EXPORT_SYMBOL(taskq_cancel_id);
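+
+/* Forward declaration; taskq_thread_spawn() is defined later in this file. */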
+static int taskq_thread_spawn(taskq_t *tq, int seq_tasks);
+
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
wake_up(&tq->tq_work_waitq);
out:
+ /* Spawn additional taskq threads if required. */
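+	/*
+	 * All worker threads are busy and the dispatching thread is itself
+	 * one of this taskq's workers, so a task dispatched here could wait
+	 * behind its own dispatcher; spawn another thread to service it.
+	 */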
+ if (tq->tq_nactive == tq->tq_nthreads &&
+ taskq_member_impl(tq, current))
+ (void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
return (rc);
}
spin_unlock(&t->tqent_lock);
out:
+ /* Spawn additional taskq threads if required. */
+ if (tq->tq_nactive == tq->tq_nthreads &&
+ taskq_member_impl(tq, current))
+	(void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
return (rc);
}
wake_up(&tq->tq_work_waitq);
out:
+ /* Spawn additional taskq threads if required. */
+ if (tq->tq_nactive == tq->tq_nthreads &&
+ taskq_member_impl(tq, current))
+	(void) taskq_thread_spawn(tq, spl_taskq_thread_sequential + 1);
+
spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
}
EXPORT_SYMBOL(taskq_dispatch_ent);