granicus.if.org Git - zfs/commitdiff
Implement taskq_dispatch_prealloc() interface
authorPrakash Surya <surya1@llnl.gov>
Tue, 6 Dec 2011 18:04:51 +0000 (10:04 -0800)
committerBrian Behlendorf <behlendorf1@llnl.gov>
Wed, 14 Dec 2011 00:10:57 +0000 (16:10 -0800)
This patch implements the taskq_dispatch_prealloc() interface which
was introduced by the following illumos-gate commit.  It allows for
a preallocated taskq_ent_t to be used when dispatching items to a
taskq.  This eliminates a memory allocation which helps minimize
lock contention in the taskq when dispatching functions.

    commit 5aeb94743e3be0c51e86f73096334611ae3a058e
    Author: Garrett D'Amore <garrett@nexenta.com>
    Date:   Wed Jul 27 07:13:44 2011 -0700

    734 taskq_dispatch_prealloc() desired
    943 zio_interrupt ends up calling taskq_dispatch with TQ_SLEEP

Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #65

include/sys/taskq.h
module/spl/spl-taskq.c

index 4ea29cb3bb3b21c3ef31f8cce8c8dbf6729508d7..54d869afec852cd3da51ca58db40e74df13e2220 100644 (file)
@@ -51,8 +51,11 @@ typedef struct taskq_ent {
         taskqid_t               tqent_id;
         task_func_t             *tqent_func;
         void                    *tqent_arg;
+        uintptr_t               tqent_flags;
 } taskq_ent_t;
 
+#define TQENT_FLAG_PREALLOC     0x1
+
 /*
  * Flags for taskq_dispatch. TQ_SLEEP/TQ_NOSLEEP should be same as
  * KM_SLEEP/KM_NOSLEEP.  TQ_NOQUEUE/TQ_NOALLOC are set particularly
@@ -100,6 +103,9 @@ typedef struct taskq_thread {
 extern taskq_t *system_taskq;
 
 extern taskqid_t __taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
+extern void __taskq_dispatch_ent(taskq_t *, task_func_t, void *, uint_t, taskq_ent_t *);
+extern int __taskq_empty_ent(taskq_ent_t *);
+extern void __taskq_init_ent(taskq_ent_t *);
 extern taskq_t *__taskq_create(const char *, int, pri_t, int, int, uint_t);
 extern void __taskq_destroy(taskq_t *);
 extern void __taskq_wait_id(taskq_t *, taskqid_t);
@@ -113,6 +119,9 @@ void spl_taskq_fini(void);
 #define taskq_wait_id(tq, id)              __taskq_wait_id(tq, id)
 #define taskq_wait(tq)                     __taskq_wait(tq)
 #define taskq_dispatch(tq, f, p, fl)       __taskq_dispatch(tq, f, p, fl)
+#define taskq_dispatch_ent(tq, f, p, fl, t) __taskq_dispatch_ent(tq, f, p, fl, t)
+#define taskq_empty_ent(t)                 __taskq_empty_ent(t)
+#define taskq_init_ent(t)                  __taskq_init_ent(t)
 #define taskq_create(n, th, p, mi, ma, fl) __taskq_create(n, th, p, mi, ma, fl)
 #define taskq_create_proc(n, th, p, mi, ma, pr, fl)    \
        __taskq_create(n, th, p, mi, ma, fl)
index 5c22544b8b14c53b9234da28ba0af5e59e463ae6..b2b0e6ca86e2d59b44c84850a7b0227833ac4419 100644 (file)
@@ -57,6 +57,9 @@ retry:
         /* Acquire taskq_ent_t's from free list if available */
         if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                 t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
+
+                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
+
                 list_del_init(&t->tqent_list);
                 SRETURN(t);
         }
@@ -93,11 +96,7 @@ retry:
         spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
 
         if (t) {
-                spin_lock_init(&t->tqent_lock);
-                INIT_LIST_HEAD(&t->tqent_list);
-                t->tqent_id = 0;
-                t->tqent_func = NULL;
-                t->tqent_arg = NULL;
+                taskq_init_ent(t);
                 tq->tq_nalloc++;
         }
 
@@ -136,12 +135,18 @@ task_done(taskq_t *tq, taskq_ent_t *t)
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
 
+       /* For prealloc'd tasks, we don't free anything. */
+       if ((!(tq->tq_flags & TASKQ_DYNAMIC)) &&
+           (t->tqent_flags & TQENT_FLAG_PREALLOC))
+               return;
+
        list_del_init(&t->tqent_list);
 
         if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->tqent_id = 0;
                t->tqent_func = NULL;
                t->tqent_arg = NULL;
+               t->tqent_flags = 0;
                 list_add_tail(&t->tqent_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
@@ -281,6 +286,9 @@ __taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
        tq->tq_next_id++;
         t->tqent_func = func;
         t->tqent_arg = arg;
+
+       ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
+
        spin_unlock(&t->tqent_lock);
 
        wake_up(&tq->tq_work_waitq);
@@ -289,6 +297,72 @@ out:
        SRETURN(rc);
 }
 EXPORT_SYMBOL(__taskq_dispatch);
+
+void
+__taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
+   taskq_ent_t *t)
+{
+       SENTRY;
+
+       ASSERT(tq);
+       ASSERT(func);
+       ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
+
+       spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
+
+       /* Taskq being destroyed and all tasks drained */
+       if (!(tq->tq_flags & TQ_ACTIVE)) {
+               t->tqent_id = 0;
+               goto out;
+       }
+
+       spin_lock(&t->tqent_lock);
+
+       /*
+        * Mark it as a prealloc'd task.  This is important
+        * to ensure that we don't free it later.
+        */
+       t->tqent_flags |= TQENT_FLAG_PREALLOC;
+
+       /* Queue to the priority list instead of the pending list */
+       if (flags & TQ_FRONT)
+               list_add_tail(&t->tqent_list, &tq->tq_prio_list);
+       else
+               list_add_tail(&t->tqent_list, &tq->tq_pend_list);
+
+       t->tqent_id = tq->tq_next_id;
+       tq->tq_next_id++;
+       t->tqent_func = func;
+       t->tqent_arg = arg;
+
+       spin_unlock(&t->tqent_lock);
+
+       wake_up(&tq->tq_work_waitq);
+out:
+       spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
+       SEXIT;
+}
+EXPORT_SYMBOL(__taskq_dispatch_ent);
+
+int
+__taskq_empty_ent(taskq_ent_t *t)
+{
+       return list_empty(&t->tqent_list);
+}
+EXPORT_SYMBOL(__taskq_empty_ent);
+
+void
+__taskq_init_ent(taskq_ent_t *t)
+{
+       spin_lock_init(&t->tqent_lock);
+       INIT_LIST_HEAD(&t->tqent_list);
+       t->tqent_id = 0;
+       t->tqent_func = NULL;
+       t->tqent_arg = NULL;
+       t->tqent_flags = 0;
+}
+EXPORT_SYMBOL(__taskq_init_ent);
+
 /*
  * Returns the lowest incomplete taskqid_t.  The taskqid_t may
  * be queued on the pending list, on the priority list,  or on
@@ -407,6 +481,10 @@ taskq_thread(void *args)
                if (pend_list) {
                         t = list_entry(pend_list->next, taskq_ent_t, tqent_list);
                         list_del_init(&t->tqent_list);
+                       /* In order to support recursively dispatching a
+                        * preallocated taskq_ent_t, tqent_id must be
+                        * stored prior to executing tqent_func. */
+                       id = t->tqent_id;
                        tqt->tqt_ent = t;
                        taskq_insert_in_order(tq, tqt);
                         tq->tq_nactive++;
@@ -419,7 +497,6 @@ taskq_thread(void *args)
                         tq->tq_nactive--;
                        list_del_init(&tqt->tqt_active_list);
                        tqt->tqt_ent = NULL;
-                       id = t->tqent_id;
                         task_done(tq, t);
 
                        /* When the current lowest outstanding taskqid is
@@ -570,6 +647,9 @@ __taskq_destroy(taskq_t *tq)
 
         while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
+
+               ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
+
                list_del_init(&t->tqent_list);
                 task_free(tq, t);
         }