From e7e5f78e7bf6dc86337483f4d9f01becc017d185 Mon Sep 17 00:00:00 2001
From: Prakash Surya <surya1@llnl.gov>
Date: Fri, 16 Dec 2011 09:44:31 -0800
Subject: [PATCH] Swap taskq_ent_t with taskqid_t in taskq_thread_t

The taskq_t's active thread list is sorted by each taskq_thread_t's
tqt_ent->tqent_id field. The list is kept sorted solely by inserting
new taskq_thread_t's at their correct sorted position; it is never
re-sorted after the fact. This means that once a taskq_thread_t is
inserted, any change to its tqt_ent->tqent_id field puts the list at
risk of no longer being sorted.
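
To illustrate the failure mode, here is a minimal userspace sketch
(plain C, not SPL code): an insert-only sorted list silently loses
its ordering the moment a key mutates after insertion.

    /*
     * Minimal userspace sketch (not SPL code) of the invariant at
     * stake: an insert-only sorted list stays sorted only while the
     * keys used at insertion time never change.
     */
    #include <stdio.h>

    struct node {
            unsigned long key;      /* stands in for tqt_ent->tqent_id */
            struct node *next;
    };

    /* Insert in ascending key order, as taskq_insert_in_order() does. */
    static void
    insert_in_order(struct node **head, struct node *n)
    {
            struct node **pp = head;

            while (*pp != NULL && (*pp)->key < n->key)
                    pp = &(*pp)->next;

            n->next = *pp;
            *pp = n;
    }

    int
    main(void)
    {
            struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
            struct node *head = NULL, *n;

            insert_in_order(&head, &b);
            insert_in_order(&head, &a);
            insert_in_order(&head, &c);     /* list is now 1 2 3 */

            /*
             * Mutating a key after insertion breaks the invariant;
             * nothing ever re-sorts the list. This is exactly what a
             * recursive re-dispatch does to tqt_ent->tqent_id.
             */
            b.key = 4;

            for (n = head; n != NULL; n = n->next)
                    printf("%lu ", n->key); /* prints "1 4 3" */
            printf("\n");

            return (0);
    }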

Prior to the introduction of the taskq_dispatch_prealloc() interface,
this was not a problem: a taskq_ent_t actively being serviced under
the old interface always has a static tqent_id field. Thus, once a
taskq_thread_t is added to the taskq_t's active thread list, its
tqt_ent->tqent_id field remains constant.

Now, this is no longer the case. When using the
taskq_dispatch_prealloc() interface, any given taskq_ent_t actively
being serviced _may_ have its tqent_id value incremented. This
happens when the preallocated taskq_ent_t structure is recursively
dispatched. Thus, a taskq_thread_t could have its tqt_ent->tqent_id
field silently modified out from under it. If this were to happen to
a taskq_thread_t on a taskq_t's active thread list, the order of the
list would be compromised, as the list _may_ no longer be sorted.
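
For reference, a recursive re-dispatch looks roughly like the
fragment below. This is a hedged sketch, not code from this patch:
the (tq, func, arg, flags, ent) shape of taskq_dispatch_ent() is
assumed from the prealloc interface, and the entry is assumed to
have been initialized already.

    /*
     * Hedged sketch of a recursive re-dispatch (not code from this
     * patch). The (tq, func, arg, flags, ent) signature is assumed
     * from the prealloc interface and may differ in-tree.
     */
    static void
    my_func(void *arg)
    {
            taskq_ent_t *t = arg;   /* the entry currently being serviced */

            /*
             * Re-dispatching the same preallocated entry assigns it
             * a new, higher tqent_id while the worker servicing it
             * still sits on the taskq_t's active thread list.
             */
            taskq_dispatch_ent(system_taskq, my_func, t, TQ_SLEEP, t);
    }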

To get around this, the taskq_thread_t's taskq_ent_t pointer was
replaced with its own static copy of the tqent_id. So, as a
taskq_ent_t is pulled off the taskq_t's pending list, a static copy
of its tqent_id is made, and this copy is used to sort the active
thread list. Using a static copy is key to preserving the order of
the active thread list: even if the underlying taskq_ent_t is
recursively dispatched (and has its tqent_id modified), the static
copy stored inside the taskq_thread_t remains constant.
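
Condensed, the worker's servicing path now works as sketched below
(a paraphrase of taskq_thread() from the diff that follows, with
dequeueing and bookkeeping elided):

    /* Under tq->tq_lock, with t just taken off tq->tq_pend_list: */
    tqt->tqt_id = t->tqent_id;      /* static copy of the id */
    taskq_insert_in_order(tq, tqt); /* sorted by the copy, not by t */
    spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

    /* May re-dispatch t and bump t->tqent_id; tqt->tqt_id is unchanged. */
    t->tqent_func(t->tqent_arg);

    spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
    list_del_init(&tqt->tqt_active_list);
    if (tq->tq_lowest_id == tqt->tqt_id)
            tq->tq_lowest_id = taskq_lowest_id(tq);
    tqt->tqt_id = 0;                /* this thread is idle again */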

Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #71
---
 include/sys/taskq.h    |  2 +-
 module/spl/spl-taskq.c | 18 ++++++++----------
 2 files changed, 9 insertions(+), 11 deletions(-)

diff --git a/include/sys/taskq.h b/include/sys/taskq.h
index 54d869a..0a71433 100644
--- a/include/sys/taskq.h
+++ b/include/sys/taskq.h
@@ -96,7 +96,7 @@ typedef struct taskq_thread {
 	struct list_head       tqt_active_list;
 	struct task_struct     *tqt_thread;
 	taskq_t                *tqt_tq;
-	taskq_ent_t            *tqt_ent;
+	taskqid_t              tqt_id;
 } taskq_thread_t;
 
 /* Global system-wide dynamic task queue available for all consumers */
diff --git a/module/spl/spl-taskq.c b/module/spl/spl-taskq.c
index b2b0e6c..ccb713c 100644
--- a/module/spl/spl-taskq.c
+++ b/module/spl/spl-taskq.c
@@ -393,8 +393,8 @@ taskq_lowest_id(taskq_t *tq)
 	if (!list_empty(&tq->tq_active_list)) {
 		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
 		                 tqt_active_list);
-		ASSERT(tqt->tqt_ent != NULL);
-		lowest_id = MIN(lowest_id, tqt->tqt_ent->tqent_id);
+		ASSERT(tqt->tqt_id != 0);
+		lowest_id = MIN(lowest_id, tqt->tqt_id);
 	}
 
 	SRETURN(lowest_id);
@@ -417,7 +417,7 @@ taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
 
 	list_for_each_prev(l, &tq->tq_active_list) {
 		w = list_entry(l, taskq_thread_t, tqt_active_list);
-		if (w->tqt_ent->tqent_id < tqt->tqt_ent->tqent_id) {
+		if (w->tqt_id < tqt->tqt_id) {
 			list_add(&tqt->tqt_active_list, l);
 			break;
 		}
@@ -433,7 +433,6 @@ taskq_thread(void *args)
 {
         DECLARE_WAITQUEUE(wait, current);
         sigset_t blocked;
-	taskqid_t id;
 	taskq_thread_t *tqt = args;
         taskq_t *tq;
         taskq_ent_t *t;
@@ -484,8 +483,7 @@ taskq_thread(void *args)
 			/* In order to support recursively dispatching a
 			 * preallocated taskq_ent_t, tqent_id must be
 			 * stored prior to executing tqent_func. */
-			id = t->tqent_id;
-			tqt->tqt_ent = t;
+			tqt->tqt_id = t->tqent_id;
 			taskq_insert_in_order(tq, tqt);
                         tq->tq_nactive++;
 			spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
@@ -496,16 +494,16 @@ taskq_thread(void *args)
 			spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                         tq->tq_nactive--;
 			list_del_init(&tqt->tqt_active_list);
-			tqt->tqt_ent = NULL;
                         task_done(tq, t);
 
 			/* When the current lowest outstanding taskqid is
 			 * done calculate the new lowest outstanding id */
-			if (tq->tq_lowest_id == id) {
+			if (tq->tq_lowest_id == tqt->tqt_id) {
 				tq->tq_lowest_id = taskq_lowest_id(tq);
-				ASSERT(tq->tq_lowest_id > id);
+				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
 			}
 
+			tqt->tqt_id = 0;
                         wake_up_all(&tq->tq_wait_waitq);
 		}
 
@@ -582,7 +580,7 @@ __taskq_create(const char *name, int nthreads, pri_t pri,
 		INIT_LIST_HEAD(&tqt->tqt_thread_list);
 		INIT_LIST_HEAD(&tqt->tqt_active_list);
 		tqt->tqt_tq = tq;
-		tqt->tqt_ent = NULL;
+		tqt->tqt_id = 0;
 
 		tqt->tqt_thread = kthread_create(taskq_thread, tqt,
 		                                 "%s/%d", name, i);
-- 
2.40.0