1 /*
2  *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3  *  Copyright (C) 2007 The Regents of the University of California.
4  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5  *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6  *  UCRL-CODE-235197
7  *
8  *  This file is part of the SPL, Solaris Porting Layer.
9  *  For details, see <http://zfsonlinux.org/>.
10  *
11  *  The SPL is free software; you can redistribute it and/or modify it
12  *  under the terms of the GNU General Public License as published by the
13  *  Free Software Foundation; either version 2 of the License, or (at your
14  *  option) any later version.
15  *
16  *  The SPL is distributed in the hope that it will be useful, but WITHOUT
17  *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18  *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
19  *  for more details.
20  *
21  *  You should have received a copy of the GNU General Public License along
22  *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
23  *
24  *  Solaris Porting Layer (SPL) Task Queue Implementation.
25  */
26
27 #include <sys/taskq.h>
28 #include <sys/kmem.h>
29 #include <sys/tsd.h>
30
31 int spl_taskq_thread_bind = 0;
32 module_param(spl_taskq_thread_bind, int, 0644);
33 MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");
34
35
36 int spl_taskq_thread_dynamic = 1;
37 module_param(spl_taskq_thread_dynamic, int, 0644);
38 MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");
39
40 int spl_taskq_thread_priority = 1;
41 module_param(spl_taskq_thread_priority, int, 0644);
42 MODULE_PARM_DESC(spl_taskq_thread_priority,
43         "Allow non-default priority for taskq threads");
44
45 int spl_taskq_thread_sequential = 4;
46 module_param(spl_taskq_thread_sequential, int, 0644);
47 MODULE_PARM_DESC(spl_taskq_thread_sequential,
48         "Create new taskq threads after N sequential tasks");
49
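/*
 * For illustration only: these parameters are normally tuned from user
 * space rather than in code, either at module load time or, because they
 * are registered with 0644 permissions, at runtime through sysfs.  The
 * values below are hypothetical examples, not recommendations:
 *
 *	# /etc/modprobe.d/spl.conf
 *	options spl spl_taskq_thread_dynamic=0 spl_taskq_thread_sequential=8
 *
 *	# At runtime:
 *	echo 1 > /sys/module/spl/parameters/spl_taskq_thread_bind
 */
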
50 /* Global system-wide dynamic task queue available for all consumers */
51 taskq_t *system_taskq;
52 EXPORT_SYMBOL(system_taskq);
53
54 /* Private dedicated taskq for creating new taskq threads on demand. */
55 static taskq_t *dynamic_taskq;
56 static taskq_thread_t *taskq_thread_create(taskq_t *);
57
58 /* List of all taskqs */
59 LIST_HEAD(tq_list);
60 DECLARE_RWSEM(tq_list_sem);
61 static uint_t taskq_tsd;
62
63 static int
64 task_km_flags(uint_t flags)
65 {
66         if (flags & TQ_NOSLEEP)
67                 return (KM_NOSLEEP);
68
69         if (flags & TQ_PUSHPAGE)
70                 return (KM_PUSHPAGE);
71
72         return (KM_SLEEP);
73 }
74
75 /*
76  * taskq_find_by_name - Find the largest instance number of a named taskq.
77  */
78 static int
79 taskq_find_by_name(const char *name)
80 {
81         struct list_head *tql;
82         taskq_t *tq;
83
84         list_for_each_prev(tql, &tq_list) {
85                 tq = list_entry(tql, taskq_t, tq_taskqs);
86                 if (strcmp(name, tq->tq_name) == 0)
87                         return (tq->tq_instance);
88         }
89         return (-1);
90 }
91
92 /*
93  * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t
94  * which is not attached to the free, work, or pending taskq lists.
95  */
96 static taskq_ent_t *
97 task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
98 {
99         taskq_ent_t *t;
100         int count = 0;
101
102         ASSERT(tq);
103         ASSERT(spin_is_locked(&tq->tq_lock));
104 retry:
105         /* Acquire taskq_ent_t's from free list if available */
106         if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
107                 t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
108
109                 ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
110                 ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
111                 ASSERT(!timer_pending(&t->tqent_timer));
112
113                 list_del_init(&t->tqent_list);
114                 return (t);
115         }
116
117         /* Free list is empty and memory allocations are prohibited */
118         if (flags & TQ_NOALLOC)
119                 return (NULL);
120
121         /* Hit maximum taskq_ent_t pool size */
122         if (tq->tq_nalloc >= tq->tq_maxalloc) {
123                 if (flags & TQ_NOSLEEP)
124                         return (NULL);
125
126                 /*
127                  * Sleep periodically polling the free list for an available
128                  * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
129                  * but we cannot block forever waiting for a taskq_ent_t to
130                  * show up in the free list, otherwise a deadlock can happen.
131                  *
132                  * Therefore, we need to allocate a new task even if the number
133                  * of allocated tasks is above tq->tq_maxalloc, but we still
134                  * end up delaying the task allocation by one second, thereby
135                  * throttling the task dispatch rate.
136                  */
137                 spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
138                 schedule_timeout(HZ / 100);
139                 spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
140                     tq->tq_lock_class);
141                 if (count < 100) {
142                         count++;
143                         goto retry;
144                 }
145         }
146
147         spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
148         t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
149         spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);
150
151         if (t) {
152                 taskq_init_ent(t);
153                 tq->tq_nalloc++;
154         }
155
156         return (t);
157 }
158
159 /*
160  * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
161  * to already be removed from the free, work, or pending taskq lists.
162  */
163 static void
164 task_free(taskq_t *tq, taskq_ent_t *t)
165 {
166         ASSERT(tq);
167         ASSERT(t);
168         ASSERT(spin_is_locked(&tq->tq_lock));
169         ASSERT(list_empty(&t->tqent_list));
170         ASSERT(!timer_pending(&t->tqent_timer));
171
172         kmem_free(t, sizeof (taskq_ent_t));
173         tq->tq_nalloc--;
174 }
175
176 /*
177  * NOTE: Must be called with tq->tq_lock held, either destroys the
178  * taskq_ent_t if too many exist or moves it to the free list for later use.
179  */
180 static void
181 task_done(taskq_t *tq, taskq_ent_t *t)
182 {
183         ASSERT(tq);
184         ASSERT(t);
185         ASSERT(spin_is_locked(&tq->tq_lock));
186
187         /* Wake tasks blocked in taskq_wait_id() */
188         wake_up_all(&t->tqent_waitq);
189
190         list_del_init(&t->tqent_list);
191
192         if (tq->tq_nalloc <= tq->tq_minalloc) {
193                 t->tqent_id = TASKQID_INVALID;
194                 t->tqent_func = NULL;
195                 t->tqent_arg = NULL;
196                 t->tqent_flags = 0;
197
198                 list_add_tail(&t->tqent_list, &tq->tq_free_list);
199         } else {
200                 task_free(tq, t);
201         }
202 }
203
204 /*
205  * When a delayed task timer expires remove it from the delay list and
206  * add it, in task id order, to the priority list for immediate processing.
207  */
208 static void
209 task_expire(unsigned long data)
210 {
211         taskq_ent_t *w, *t = (taskq_ent_t *)data;
212         taskq_t *tq = t->tqent_taskq;
213         struct list_head *l;
214         unsigned long flags;
215
216         spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
217
218         if (t->tqent_flags & TQENT_FLAG_CANCEL) {
219                 ASSERT(list_empty(&t->tqent_list));
220                 spin_unlock_irqrestore(&tq->tq_lock, flags);
221                 return;
222         }
223
224         t->tqent_birth = jiffies;
225         /*
226          * The priority list must be maintained in strict task id order
227          * from lowest to highest for lowest_id to be easily calculable.
228          */
229         list_del(&t->tqent_list);
230         list_for_each_prev(l, &tq->tq_prio_list) {
231                 w = list_entry(l, taskq_ent_t, tqent_list);
232                 if (w->tqent_id < t->tqent_id) {
233                         list_add(&t->tqent_list, l);
234                         break;
235                 }
236         }
237         if (l == &tq->tq_prio_list)
238                 list_add(&t->tqent_list, &tq->tq_prio_list);
239
240         spin_unlock_irqrestore(&tq->tq_lock, flags);
241
242         wake_up(&tq->tq_work_waitq);
243 }
244
245 /*
246  * Returns the lowest incomplete taskqid_t.  The taskqid_t may
247  * be queued on the pending list, on the priority list, on the
248  * delay list, or on the work list currently being handled, but
249  * it is not 100% complete yet.
250  */
251 static taskqid_t
252 taskq_lowest_id(taskq_t *tq)
253 {
254         taskqid_t lowest_id = tq->tq_next_id;
255         taskq_ent_t *t;
256         taskq_thread_t *tqt;
257
258         ASSERT(tq);
259         ASSERT(spin_is_locked(&tq->tq_lock));
260
261         if (!list_empty(&tq->tq_pend_list)) {
262                 t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
263                 lowest_id = MIN(lowest_id, t->tqent_id);
264         }
265
266         if (!list_empty(&tq->tq_prio_list)) {
267                 t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
268                 lowest_id = MIN(lowest_id, t->tqent_id);
269         }
270
271         if (!list_empty(&tq->tq_delay_list)) {
272                 t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
273                 lowest_id = MIN(lowest_id, t->tqent_id);
274         }
275
276         if (!list_empty(&tq->tq_active_list)) {
277                 tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
278                     tqt_active_list);
279                 ASSERT(tqt->tqt_id != TASKQID_INVALID);
280                 lowest_id = MIN(lowest_id, tqt->tqt_id);
281         }
282
283         return (lowest_id);
284 }
285
286 /*
287  * Insert a taskq thread into the active list keeping it sorted by taskqid.
288  */
289 static void
290 taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
291 {
292         taskq_thread_t *w;
293         struct list_head *l;
294
295         ASSERT(tq);
296         ASSERT(tqt);
297         ASSERT(spin_is_locked(&tq->tq_lock));
298
299         list_for_each_prev(l, &tq->tq_active_list) {
300                 w = list_entry(l, taskq_thread_t, tqt_active_list);
301                 if (w->tqt_id < tqt->tqt_id) {
302                         list_add(&tqt->tqt_active_list, l);
303                         break;
304                 }
305         }
306         if (l == &tq->tq_active_list)
307                 list_add(&tqt->tqt_active_list, &tq->tq_active_list);
308 }
309
310 /*
311  * Find and return a task from the given list if it exists.  The list
312  * must be in lowest to highest task id order.
313  */
314 static taskq_ent_t *
315 taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
316 {
317         struct list_head *l;
318         taskq_ent_t *t;
319
320         ASSERT(spin_is_locked(&tq->tq_lock));
321
322         list_for_each(l, lh) {
323                 t = list_entry(l, taskq_ent_t, tqent_list);
324
325                 if (t->tqent_id == id)
326                         return (t);
327
328                 if (t->tqent_id > id)
329                         break;
330         }
331
332         return (NULL);
333 }
334
335 /*
336  * Find an already dispatched task given the task id regardless of what
337  * state it is in.  If a task is still pending or executing it will be
338  * returned and 'active' set appropriately.  If the task has already
339  * been run then NULL is returned.
340  */
341 static taskq_ent_t *
342 taskq_find(taskq_t *tq, taskqid_t id, int *active)
343 {
344         taskq_thread_t *tqt;
345         struct list_head *l;
346         taskq_ent_t *t;
347
348         ASSERT(spin_is_locked(&tq->tq_lock));
349         *active = 0;
350
351         t = taskq_find_list(tq, &tq->tq_delay_list, id);
352         if (t)
353                 return (t);
354
355         t = taskq_find_list(tq, &tq->tq_prio_list, id);
356         if (t)
357                 return (t);
358
359         t = taskq_find_list(tq, &tq->tq_pend_list, id);
360         if (t)
361                 return (t);
362
363         list_for_each(l, &tq->tq_active_list) {
364                 tqt = list_entry(l, taskq_thread_t, tqt_active_list);
365                 if (tqt->tqt_id == id) {
366                         t = tqt->tqt_task;
367                         *active = 1;
368                         return (t);
369                 }
370         }
371
372         return (NULL);
373 }
374
375 /*
376  * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
377  * taskq_wait() functions below.
378  *
379  * Taskq waiting is accomplished by tracking the lowest outstanding task
380  * id and the next available task id.  As tasks are dispatched they are
381  * added to the tail of the pending, priority, or delay lists.  As worker
382  * threads become available the tasks are removed from the heads of these
383  * lists and linked to the worker threads.  This ensures the lists are
384  * kept sorted by lowest to highest task id.
385  *
386  * Therefore the lowest outstanding task id can be quickly determined by
387  * checking the head item from all of these lists.  This value is stored
388  * with the taskq as the lowest id.  It only needs to be recalculated when
389  * either the task with the current lowest id completes or is canceled.
390  *
391  * By blocking until the lowest task id exceeds the passed task id the
392  * taskq_wait_outstanding() function can be easily implemented.  Similarly,
393  * by blocking until the lowest task id matches the next task id taskq_wait()
394  * can be implemented.
395  *
396  * Callers should be aware that when there are multiple worker threads it
397  * is possible for larger task ids to complete before smaller ones.  Also
398  * when the taskq contains delay tasks with small task ids callers may
399  * block for a considerable length of time waiting for them to expire and
400  * execute.
401  */
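
/*
 * For illustration, a minimal sketch of how the three wait primitives
 * described above differ.  my_func and my_arg are hypothetical:
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *
 *	taskq_wait_id(tq, id);		(that one task has completed)
 *	taskq_wait_outstanding(tq, id);	(all task ids up to 'id' have completed)
 *	taskq_wait(tq);			(the taskq is completely empty)
 */
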
402 static int
403 taskq_wait_id_check(taskq_t *tq, taskqid_t id)
404 {
405         int active = 0;
406         int rc;
407         unsigned long flags;
408
409         spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
410         rc = (taskq_find(tq, id, &active) == NULL);
411         spin_unlock_irqrestore(&tq->tq_lock, flags);
412
413         return (rc);
414 }
415
416 /*
417  * The taskq_wait_id() function blocks until the passed task id completes.
418  * This does not guarantee that all lower task ids have completed.
419  */
420 void
421 taskq_wait_id(taskq_t *tq, taskqid_t id)
422 {
423         wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
424 }
425 EXPORT_SYMBOL(taskq_wait_id);
426
427 static int
428 taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
429 {
430         int rc;
431         unsigned long flags;
432
433         spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
434         rc = (id < tq->tq_lowest_id);
435         spin_unlock_irqrestore(&tq->tq_lock, flags);
436
437         return (rc);
438 }
439
440 /*
441  * The taskq_wait_outstanding() function will block until all tasks with a
442  * lower taskqid than the passed 'id' have been completed.  Note that all
443  * task ids are assigned monotonically at dispatch time.  Zero may be
444  * passed for the id to indicate all tasks dispatched up to this point,
445  * but not after, should be waited for.
446  */
447 void
448 taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
449 {
450         id = id ? id : tq->tq_next_id - 1;
451         wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
452 }
453 EXPORT_SYMBOL(taskq_wait_outstanding);
454
455 static int
456 taskq_wait_check(taskq_t *tq)
457 {
458         int rc;
459         unsigned long flags;
460
461         spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
462         rc = (tq->tq_lowest_id == tq->tq_next_id);
463         spin_unlock_irqrestore(&tq->tq_lock, flags);
464
465         return (rc);
466 }
467
468 /*
469  * The taskq_wait() function will block until the taskq is empty.
470  * This means that if a taskq re-dispatches work to itself taskq_wait()
471  * callers will block indefinitely.
472  */
473 void
474 taskq_wait(taskq_t *tq)
475 {
476         wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
477 }
478 EXPORT_SYMBOL(taskq_wait);
479
480 int
481 taskq_member(taskq_t *tq, kthread_t *t)
482 {
483         return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
484 }
485 EXPORT_SYMBOL(taskq_member);
486
487 /*
488  * Cancel an already dispatched task given the task id.  Still pending tasks
489  * will be immediately canceled, and if the task is active the function will
490  * block until it completes.  Preallocated tasks which are canceled must be
491  * freed by the caller.
492  */
493 int
494 taskq_cancel_id(taskq_t *tq, taskqid_t id)
495 {
496         taskq_ent_t *t;
497         int active = 0;
498         int rc = ENOENT;
499         unsigned long flags;
500
501         ASSERT(tq);
502
503         spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
504         t = taskq_find(tq, id, &active);
505         if (t && !active) {
506                 list_del_init(&t->tqent_list);
507                 t->tqent_flags |= TQENT_FLAG_CANCEL;
508
509                 /*
510                  * When canceling the lowest outstanding task id we
511                  * must recalculate the new lowest outstanding id.
512                  */
513                 if (tq->tq_lowest_id == t->tqent_id) {
514                         tq->tq_lowest_id = taskq_lowest_id(tq);
515                         ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
516                 }
517
518                 /*
519                  * The task_expire() function takes the tq->tq_lock so drop
520                  * the lock before synchronously cancelling the timer.
521                  */
522                 if (timer_pending(&t->tqent_timer)) {
523                         spin_unlock_irqrestore(&tq->tq_lock, flags);
524                         del_timer_sync(&t->tqent_timer);
525                         spin_lock_irqsave_nested(&tq->tq_lock, flags,
526                             tq->tq_lock_class);
527                 }
528
529                 if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))
530                         task_done(tq, t);
531
532                 rc = 0;
533         }
534         spin_unlock_irqrestore(&tq->tq_lock, flags);
535
536         if (active) {
537                 taskq_wait_id(tq, id);
538                 rc = EBUSY;
539         }
540
541         return (rc);
542 }
543 EXPORT_SYMBOL(taskq_cancel_id);
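
/*
 * A sketch, for illustration only, of canceling a previously dispatched
 * task.  my_func and my_arg are hypothetical; the return values follow
 * the behavior described above:
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	int error = taskq_cancel_id(tq, id);
 *
 *	error == 0:      the task was still pending and has been canceled
 *	error == EBUSY:  the task was running; the call blocked until it finished
 *	error == ENOENT: the task had already completed
 */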
544
545 static int taskq_thread_spawn(taskq_t *tq);
546
547 taskqid_t
548 taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
549 {
550         taskq_ent_t *t;
551         taskqid_t rc = TASKQID_INVALID;
552         unsigned long irqflags;
553
554         ASSERT(tq);
555         ASSERT(func);
556
557         spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
558
559         /* Taskq being destroyed and all tasks drained */
560         if (!(tq->tq_flags & TASKQ_ACTIVE))
561                 goto out;
562
563         /* Do not queue the task unless there is an idle thread for it */
564         ASSERT(tq->tq_nactive <= tq->tq_nthreads);
565         if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
566                 /* Dynamic taskq may be able to spawn another thread */
567                 if (!(tq->tq_flags & TASKQ_DYNAMIC) || taskq_thread_spawn(tq) == 0)
568                         goto out;
569         }
570
571         if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
572                 goto out;
573
574         spin_lock(&t->tqent_lock);
575
576         /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
577         if (flags & TQ_NOQUEUE)
578                 list_add(&t->tqent_list, &tq->tq_prio_list);
579         /* Queue to the priority list instead of the pending list */
580         else if (flags & TQ_FRONT)
581                 list_add_tail(&t->tqent_list, &tq->tq_prio_list);
582         else
583                 list_add_tail(&t->tqent_list, &tq->tq_pend_list);
584
585         t->tqent_id = rc = tq->tq_next_id;
586         tq->tq_next_id++;
587         t->tqent_func = func;
588         t->tqent_arg = arg;
589         t->tqent_taskq = tq;
590         t->tqent_timer.data = 0;
591         t->tqent_timer.function = NULL;
592         t->tqent_timer.expires = 0;
593         t->tqent_birth = jiffies;
594
595         ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
596
597         spin_unlock(&t->tqent_lock);
598
599         wake_up(&tq->tq_work_waitq);
600 out:
601         /* Spawn additional taskq threads if required. */
602         if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
603                 (void) taskq_thread_spawn(tq);
604
605         spin_unlock_irqrestore(&tq->tq_lock, irqflags);
606         return (rc);
607 }
608 EXPORT_SYMBOL(taskq_dispatch);
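
/*
 * A minimal usage sketch (my_func and my_arg are hypothetical).  TQ_SLEEP
 * may block while a taskq_ent_t is allocated, so contexts which cannot
 * sleep should pass TQ_NOSLEEP and check the return value:
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_NOSLEEP);
 *	if (id == TASKQID_INVALID) {
 *		(dispatch failed, handle the error)
 *	}
 */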
609
610 taskqid_t
611 taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
612     uint_t flags, clock_t expire_time)
613 {
614         taskqid_t rc = TASKQID_INVALID;
615         taskq_ent_t *t;
616         unsigned long irqflags;
617
618         ASSERT(tq);
619         ASSERT(func);
620
621         spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);
622
623         /* Taskq being destroyed and all tasks drained */
624         if (!(tq->tq_flags & TASKQ_ACTIVE))
625                 goto out;
626
627         if ((t = task_alloc(tq, flags, &irqflags)) == NULL)
628                 goto out;
629
630         spin_lock(&t->tqent_lock);
631
632         /* Queue to the delay list for subsequent execution */
633         list_add_tail(&t->tqent_list, &tq->tq_delay_list);
634
635         t->tqent_id = rc = tq->tq_next_id;
636         tq->tq_next_id++;
637         t->tqent_func = func;
638         t->tqent_arg = arg;
639         t->tqent_taskq = tq;
640         t->tqent_timer.data = (unsigned long)t;
641         t->tqent_timer.function = task_expire;
642         t->tqent_timer.expires = (unsigned long)expire_time;
643         add_timer(&t->tqent_timer);
644
645         ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
646
647         spin_unlock(&t->tqent_lock);
648 out:
649         /* Spawn additional taskq threads if required. */
650         if (tq->tq_nactive == tq->tq_nthreads)
651                 (void) taskq_thread_spawn(tq);
652         spin_unlock_irqrestore(&tq->tq_lock, irqflags);
653         return (rc);
654 }
655 EXPORT_SYMBOL(taskq_dispatch_delay);
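
/*
 * For illustration: 'expire_time' is an absolute time in clock ticks
 * (jiffies), not a relative delay.  Assuming the SPL's ddi_get_lbolt()
 * and hz definitions, a caller wanting the hypothetical my_func to run
 * roughly five seconds from now might use:
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, my_func, my_arg, TQ_SLEEP,
 *	    ddi_get_lbolt() + 5 * hz);
 */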
656
657 void
658 taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
659     taskq_ent_t *t)
660 {
661         unsigned long irqflags;
662         ASSERT(tq);
663         ASSERT(func);
664
665         spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
666             tq->tq_lock_class);
667
668         /* Taskq being destroyed and all tasks drained */
669         if (!(tq->tq_flags & TASKQ_ACTIVE)) {
670                 t->tqent_id = TASKQID_INVALID;
671                 goto out;
672         }
673
674         if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
675                 /* Dynamic taskq may be able to spawn another thread */
676                 if (!(tq->tq_flags & TASKQ_DYNAMIC) || taskq_thread_spawn(tq) == 0)
677                         goto out2;
678                 flags |= TQ_FRONT;
679         }
680
681         spin_lock(&t->tqent_lock);
682
683         /*
684          * Mark it as a prealloc'd task.  This is important
685          * to ensure that we don't free it later.
686          */
687         t->tqent_flags |= TQENT_FLAG_PREALLOC;
688
689         /* Queue to the priority list instead of the pending list */
690         if (flags & TQ_FRONT)
691                 list_add_tail(&t->tqent_list, &tq->tq_prio_list);
692         else
693                 list_add_tail(&t->tqent_list, &tq->tq_pend_list);
694
695         t->tqent_id = tq->tq_next_id;
696         tq->tq_next_id++;
697         t->tqent_func = func;
698         t->tqent_arg = arg;
699         t->tqent_taskq = tq;
700         t->tqent_birth = jiffies;
701
702         spin_unlock(&t->tqent_lock);
703
704         wake_up(&tq->tq_work_waitq);
705 out:
706         /* Spawn additional taskq threads if required. */
707         if (tq->tq_nactive == tq->tq_nthreads)
708                 (void) taskq_thread_spawn(tq);
709 out2:
710         spin_unlock_irqrestore(&tq->tq_lock, irqflags);
711 }
712 EXPORT_SYMBOL(taskq_dispatch_ent);
713
714 int
715 taskq_empty_ent(taskq_ent_t *t)
716 {
717         return (list_empty(&t->tqent_list));
718 }
719 EXPORT_SYMBOL(taskq_empty_ent);
720
721 void
722 taskq_init_ent(taskq_ent_t *t)
723 {
724         spin_lock_init(&t->tqent_lock);
725         init_waitqueue_head(&t->tqent_waitq);
726         init_timer(&t->tqent_timer);
727         INIT_LIST_HEAD(&t->tqent_list);
728         t->tqent_id = 0;
729         t->tqent_func = NULL;
730         t->tqent_arg = NULL;
731         t->tqent_flags = 0;
732         t->tqent_taskq = NULL;
733 }
734 EXPORT_SYMBOL(taskq_init_ent);
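
/*
 * A sketch of the preallocated entry interface, assuming a caller which
 * embeds the taskq_ent_t in its own structure.  my_data_t, my_func, and
 * md are hypothetical; the caller owns the entry and must keep it
 * allocated until the task has run:
 *
 *	typedef struct my_data {
 *		taskq_ent_t	md_tqent;
 *		int		md_value;
 *	} my_data_t;
 *
 *	taskq_init_ent(&md->md_tqent);
 *	taskq_dispatch_ent(tq, my_func, md, 0, &md->md_tqent);
 */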
735
736 /*
737  * Return the next pending task; preference is given to tasks on the
738  * priority list which were dispatched with TQ_FRONT.
739  */
740 static taskq_ent_t *
741 taskq_next_ent(taskq_t *tq)
742 {
743         struct list_head *list;
744
745         ASSERT(spin_is_locked(&tq->tq_lock));
746
747         if (!list_empty(&tq->tq_prio_list))
748                 list = &tq->tq_prio_list;
749         else if (!list_empty(&tq->tq_pend_list))
750                 list = &tq->tq_pend_list;
751         else
752                 return (NULL);
753
754         return (list_entry(list->next, taskq_ent_t, tqent_list));
755 }
756
757 /*
758  * Spawns a new thread for the specified taskq.
759  */
760 static void
761 taskq_thread_spawn_task(void *arg)
762 {
763         taskq_t *tq = (taskq_t *)arg;
764         unsigned long flags;
765
766         if (taskq_thread_create(tq) == NULL) {
767                 /* restore the spawning count if thread creation failed */
768                 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
769                 tq->tq_nspawn--;
770                 spin_unlock_irqrestore(&tq->tq_lock, flags);
771         }
772 }
773
774 /*
775  * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
776  * current number of threads is insufficient to handle the pending tasks.
777  * These new threads must be created by the dedicated dynamic_taskq to avoid
778  * deadlocks between thread creation and memory reclaim.  The system_taskq
779  * which is also a dynamic taskq cannot be safely used for this.
780  */
781 static int
782 taskq_thread_spawn(taskq_t *tq)
783 {
784         int spawning = 0;
785
786         if (!(tq->tq_flags & TASKQ_DYNAMIC))
787                 return (0);
788
789         if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
790             (tq->tq_flags & TASKQ_ACTIVE)) {
791                 spawning = (++tq->tq_nspawn);
792                 taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
793                     tq, TQ_NOSLEEP);
794         }
795
796         return (spawning);
797 }
798
799 /*
800  * Threads in a dynamic taskq should only exit once it has been completely
801  * drained and no other threads are actively servicing tasks.  This prevents
802  * threads from being created and destroyed more than is required.
803  *
804  * The first thread in the thread list is treated as the primary thread.
805  * There is nothing special about the primary thread, but to prevent all of
806  * the taskq pids from changing we opt to make it long running.
807  */
808 static int
809 taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
810 {
811         ASSERT(spin_is_locked(&tq->tq_lock));
812
813         if (!(tq->tq_flags & TASKQ_DYNAMIC))
814                 return (0);
815
816         if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
817             tqt_thread_list) == tqt)
818                 return (0);
819
820         return
821             ((tq->tq_nspawn == 0) &&    /* No threads are being spawned */
822             (tq->tq_nactive == 0) &&    /* No threads are handling tasks */
823             (tq->tq_nthreads > 1) &&    /* More than 1 thread is running */
824             (!taskq_next_ent(tq)) &&    /* There are no pending tasks */
825             (spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */
826 }
827
828 static int
829 taskq_thread(void *args)
830 {
831         DECLARE_WAITQUEUE(wait, current);
832         sigset_t blocked;
833         taskq_thread_t *tqt = args;
834         taskq_t *tq;
835         taskq_ent_t *t;
836         int seq_tasks = 0;
837         unsigned long flags;
838
839         ASSERT(tqt);
840         ASSERT(tqt->tqt_tq);
841         tq = tqt->tqt_tq;
842         current->flags |= PF_NOFREEZE;
843
844         (void) spl_fstrans_mark();
845
846         sigfillset(&blocked);
847         sigprocmask(SIG_BLOCK, &blocked, NULL);
848         flush_signals(current);
849
850         tsd_set(taskq_tsd, tq);
851         spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
852         /*
853          * If we are dynamically spawned, decrease the spawning count. Note
854          * that we could be created during taskq_create(), in which case we
855          * shouldn't do the decrement. That is fine because taskq_create()
856          * will reset tq_nspawn later.
857          */
858         if (tq->tq_flags & TASKQ_DYNAMIC)
859                 tq->tq_nspawn--;
860
861         /* Immediately exit if more threads than allowed were created. */
862         if (tq->tq_nthreads >= tq->tq_maxthreads)
863                 goto error;
864
865         tq->tq_nthreads++;
866         list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
867         wake_up(&tq->tq_wait_waitq);
868         set_current_state(TASK_INTERRUPTIBLE);
869
870         while (!kthread_should_stop()) {
871
872                 if (list_empty(&tq->tq_pend_list) &&
873                     list_empty(&tq->tq_prio_list)) {
874
875                         if (taskq_thread_should_stop(tq, tqt)) {
876                                 wake_up_all(&tq->tq_wait_waitq);
877                                 break;
878                         }
879
880                         add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
881                         spin_unlock_irqrestore(&tq->tq_lock, flags);
882
883                         schedule();
884                         seq_tasks = 0;
885
886                         spin_lock_irqsave_nested(&tq->tq_lock, flags,
887                             tq->tq_lock_class);
888                         remove_wait_queue(&tq->tq_work_waitq, &wait);
889                 } else {
890                         __set_current_state(TASK_RUNNING);
891                 }
892
893                 if ((t = taskq_next_ent(tq)) != NULL) {
894                         list_del_init(&t->tqent_list);
895
896                         /*
897                          * In order to support recursively dispatching a
898                          * preallocated taskq_ent_t, tqent_id must be
899                          * stored prior to executing tqent_func.
900                          */
901                         tqt->tqt_id = t->tqent_id;
902                         tqt->tqt_task = t;
903
904                         /*
905                          * We must store a copy of the flags prior to
906                          * servicing the task (servicing a prealloc'd task
907                          * returns the ownership of the tqent back to
908                          * the caller of taskq_dispatch). Thus,
909                          * tqent_flags _may_ change within the call.
910                          */
911                         tqt->tqt_flags = t->tqent_flags;
912
913                         taskq_insert_in_order(tq, tqt);
914                         tq->tq_nactive++;
915                         spin_unlock_irqrestore(&tq->tq_lock, flags);
916
917                         /* Perform the requested task */
918                         t->tqent_func(t->tqent_arg);
919
920                         spin_lock_irqsave_nested(&tq->tq_lock, flags,
921                             tq->tq_lock_class);
922                         tq->tq_nactive--;
923                         list_del_init(&tqt->tqt_active_list);
924                         tqt->tqt_task = NULL;
925
926                         /* For prealloc'd tasks, we don't free anything. */
927                         if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
928                                 task_done(tq, t);
929
930                         /*
931                          * When the current lowest outstanding taskqid is
932                          * done, calculate the new lowest outstanding id.
933                          */
934                         if (tq->tq_lowest_id == tqt->tqt_id) {
935                                 tq->tq_lowest_id = taskq_lowest_id(tq);
936                                 ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);
937                         }
938
939                         /* Spawn additional taskq threads if required. */
940                         if ((++seq_tasks) > spl_taskq_thread_sequential &&
941                             taskq_thread_spawn(tq))
942                                 seq_tasks = 0;
943
944                         tqt->tqt_id = TASKQID_INVALID;
945                         tqt->tqt_flags = 0;
946                         wake_up_all(&tq->tq_wait_waitq);
947                 } else {
948                         if (taskq_thread_should_stop(tq, tqt))
949                                 break;
950                 }
951
952                 set_current_state(TASK_INTERRUPTIBLE);
953
954         }
955
956         __set_current_state(TASK_RUNNING);
957         tq->tq_nthreads--;
958         list_del_init(&tqt->tqt_thread_list);
959 error:
960         kmem_free(tqt, sizeof (taskq_thread_t));
961         spin_unlock_irqrestore(&tq->tq_lock, flags);
962
963         tsd_set(taskq_tsd, NULL);
964
965         return (0);
966 }
967
968 static taskq_thread_t *
969 taskq_thread_create(taskq_t *tq)
970 {
971         static int last_used_cpu = 0;
972         taskq_thread_t *tqt;
973
974         tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
975         INIT_LIST_HEAD(&tqt->tqt_thread_list);
976         INIT_LIST_HEAD(&tqt->tqt_active_list);
977         tqt->tqt_tq = tq;
978         tqt->tqt_id = TASKQID_INVALID;
979
980         tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
981             "%s", tq->tq_name);
982         if (tqt->tqt_thread == NULL) {
983                 kmem_free(tqt, sizeof (taskq_thread_t));
984                 return (NULL);
985         }
986
987         if (spl_taskq_thread_bind) {
988                 last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
989                 kthread_bind(tqt->tqt_thread, last_used_cpu);
990         }
991
992         if (spl_taskq_thread_priority)
993                 set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));
994
995         wake_up_process(tqt->tqt_thread);
996
997         return (tqt);
998 }
999
1000 taskq_t *
1001 taskq_create(const char *name, int nthreads, pri_t pri,
1002     int minalloc, int maxalloc, uint_t flags)
1003 {
1004         taskq_t *tq;
1005         taskq_thread_t *tqt;
1006         int count = 0, rc = 0, i;
1007         unsigned long irqflags;
1008
1009         ASSERT(name != NULL);
1010         ASSERT(minalloc >= 0);
1011         ASSERT(maxalloc <= INT_MAX);
1012         ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */
1013
1014         /* Scale the number of threads using nthreads as a percentage */
1015         if (flags & TASKQ_THREADS_CPU_PCT) {
1016                 ASSERT(nthreads <= 100);
1017                 ASSERT(nthreads >= 0);
1018                 nthreads = MIN(nthreads, 100);
1019                 nthreads = MAX(nthreads, 0);
1020                 nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
1021         }
1022
1023         tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);
1024         if (tq == NULL)
1025                 return (NULL);
1026
1027         spin_lock_init(&tq->tq_lock);
1028         INIT_LIST_HEAD(&tq->tq_thread_list);
1029         INIT_LIST_HEAD(&tq->tq_active_list);
1030         tq->tq_name = strdup(name);
1031         tq->tq_nactive = 0;
1032         tq->tq_nthreads = 0;
1033         tq->tq_nspawn = 0;
1034         tq->tq_maxthreads = nthreads;
1035         tq->tq_pri = pri;
1036         tq->tq_minalloc = minalloc;
1037         tq->tq_maxalloc = maxalloc;
1038         tq->tq_nalloc = 0;
1039         tq->tq_flags = (flags | TASKQ_ACTIVE);
1040         tq->tq_next_id = TASKQID_INITIAL;
1041         tq->tq_lowest_id = TASKQID_INITIAL;
1042         INIT_LIST_HEAD(&tq->tq_free_list);
1043         INIT_LIST_HEAD(&tq->tq_pend_list);
1044         INIT_LIST_HEAD(&tq->tq_prio_list);
1045         INIT_LIST_HEAD(&tq->tq_delay_list);
1046         init_waitqueue_head(&tq->tq_work_waitq);
1047         init_waitqueue_head(&tq->tq_wait_waitq);
1048         tq->tq_lock_class = TQ_LOCK_GENERAL;
1049         INIT_LIST_HEAD(&tq->tq_taskqs);
1050
1051         if (flags & TASKQ_PREPOPULATE) {
1052                 spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
1053                     tq->tq_lock_class);
1054
1055                 for (i = 0; i < minalloc; i++)
1056                         task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
1057                             &irqflags));
1058
1059                 spin_unlock_irqrestore(&tq->tq_lock, irqflags);
1060         }
1061
1062         if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)
1063                 nthreads = 1;
1064
1065         for (i = 0; i < nthreads; i++) {
1066                 tqt = taskq_thread_create(tq);
1067                 if (tqt == NULL)
1068                         rc = 1;
1069                 else
1070                         count++;
1071         }
1072
1073         /* Wait for all threads to be started before potential destroy */
1074         wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
1075         /*
1076          * taskq_thread() may have decremented tq_nspawn, but these initial
1077          * threads were not dynamically spawned, so reset the count to 0.
1078          */
1079         tq->tq_nspawn = 0;
1080
1081         if (rc) {
1082                 taskq_destroy(tq);
1083                 tq = NULL;
1084         } else {
1085                 down_write(&tq_list_sem);
1086                 tq->tq_instance = taskq_find_by_name(name) + 1;
1087                 list_add_tail(&tq->tq_taskqs, &tq_list);
1088                 up_write(&tq_list_sem);
1089         }
1090
1091         return (tq);
1092 }
1093 EXPORT_SYMBOL(taskq_create);
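
/*
 * A minimal end-to-end sketch of the taskq lifecycle.  The name, thread
 * count, and flags are illustrative only, and my_func/my_arg are
 * hypothetical (maxclsyspri is the priority used elsewhere in this file):
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *	    4, INT_MAX, TASKQ_PREPOPULATE);
 *
 *	(void) taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *
 *	taskq_wait(tq);		(wait for all dispatched tasks to finish)
 *	taskq_destroy(tq);	(then tear the taskq down)
 */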
1094
1095 void
1096 taskq_destroy(taskq_t *tq)
1097 {
1098         struct task_struct *thread;
1099         taskq_thread_t *tqt;
1100         taskq_ent_t *t;
1101         unsigned long flags;
1102
1103         ASSERT(tq);
1104         spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1105         tq->tq_flags &= ~TASKQ_ACTIVE;
1106         spin_unlock_irqrestore(&tq->tq_lock, flags);
1107
1108         /*
1109          * When TASKQ_ACTIVE is clear new tasks may not be added nor may
1110          * new worker threads be spawned for a dynamic taskq.
1111          */
1112         if (dynamic_taskq != NULL)
1113                 taskq_wait_outstanding(dynamic_taskq, 0);
1114
1115         taskq_wait(tq);
1116
1117         /* remove taskq from global list used by the kstats */
1118         down_write(&tq_list_sem);
1119         list_del(&tq->tq_taskqs);
1120         up_write(&tq_list_sem);
1121
1122         spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1123         /* wait for spawning threads to insert themselves into the list */
1124         while (tq->tq_nspawn) {
1125                 spin_unlock_irqrestore(&tq->tq_lock, flags);
1126                 schedule_timeout_interruptible(1);
1127                 spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
1128         }
1129
1130         /*
1131          * Signal each thread to exit and block until it does.  Each thread
1132          * is responsible for removing itself from the list and freeing its
1133          * taskq_thread_t.  This allows for idle threads to opt to remove
1134          * themselves from the taskq.  They can be recreated as needed.
1135          */
1136         while (!list_empty(&tq->tq_thread_list)) {
1137                 tqt = list_entry(tq->tq_thread_list.next,
1138                     taskq_thread_t, tqt_thread_list);
1139                 thread = tqt->tqt_thread;
1140                 spin_unlock_irqrestore(&tq->tq_lock, flags);
1141
1142                 kthread_stop(thread);
1143
1144                 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1145                     tq->tq_lock_class);
1146         }
1147
1148         while (!list_empty(&tq->tq_free_list)) {
1149                 t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);
1150
1151                 ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
1152
1153                 list_del_init(&t->tqent_list);
1154                 task_free(tq, t);
1155         }
1156
1157         ASSERT0(tq->tq_nthreads);
1158         ASSERT0(tq->tq_nalloc);
1159         ASSERT0(tq->tq_nspawn);
1160         ASSERT(list_empty(&tq->tq_thread_list));
1161         ASSERT(list_empty(&tq->tq_active_list));
1162         ASSERT(list_empty(&tq->tq_free_list));
1163         ASSERT(list_empty(&tq->tq_pend_list));
1164         ASSERT(list_empty(&tq->tq_prio_list));
1165         ASSERT(list_empty(&tq->tq_delay_list));
1166
1167         spin_unlock_irqrestore(&tq->tq_lock, flags);
1168
1169         strfree(tq->tq_name);
1170         kmem_free(tq, sizeof (taskq_t));
1171 }
1172 EXPORT_SYMBOL(taskq_destroy);
1173
1174
1175 static unsigned int spl_taskq_kick = 0;
1176
1177 /*
1178  * 2.6.36 API Change
1179  * module_param_cb was introduced to take a kernel_param_ops structure and
1180  * module_param_call was marked obsolete.  The set and get operations were
1181  * also changed to take a 'const struct kernel_param *'.
1182  */
1183 static int
1184 #ifdef module_param_cb
1185 param_set_taskq_kick(const char *val, const struct kernel_param *kp)
1186 #else
1187 param_set_taskq_kick(const char *val, struct kernel_param *kp)
1188 #endif
1189 {
1190         int ret;
1191         taskq_t *tq;
1192         taskq_ent_t *t;
1193         unsigned long flags;
1194
1195         ret = param_set_uint(val, kp);
1196         if (ret < 0 || !spl_taskq_kick)
1197                 return (ret);
1198         /* reset value */
1199         spl_taskq_kick = 0;
1200
1201         down_read(&tq_list_sem);
1202         list_for_each_entry(tq, &tq_list, tq_taskqs) {
1203                 spin_lock_irqsave_nested(&tq->tq_lock, flags,
1204                     tq->tq_lock_class);
1205                 /* Check if the first pending task is older than 5 seconds */
1206                 t = taskq_next_ent(tq);
1207                 if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
1208                         (void) taskq_thread_spawn(tq);
1209                         printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
1210                             tq->tq_name, tq->tq_instance);
1211                 }
1212                 spin_unlock_irqrestore(&tq->tq_lock, flags);
1213         }
1214         up_read(&tq_list_sem);
1215         return (ret);
1216 }
1217
1218 #ifdef module_param_cb
1219 static const struct kernel_param_ops param_ops_taskq_kick = {
1220         .set = param_set_taskq_kick,
1221         .get = param_get_uint,
1222 };
1223 module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
1224 #else
1225 module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
1226     &spl_taskq_kick, 0644);
1227 #endif
1228 MODULE_PARM_DESC(spl_taskq_kick,
1229     "Write nonzero to kick stuck taskqs to spawn more threads");
1230
1231 int
1232 spl_taskq_init(void)
1233 {
1234         tsd_create(&taskq_tsd, NULL);
1235
1236         system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
1237             maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
1238         if (system_taskq == NULL)
1239                 return (1);
1240
1241         dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
1242             maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
1243         if (dynamic_taskq == NULL) {
1244                 taskq_destroy(system_taskq);
1245                 return (1);
1246         }
1247
1248         /*
1249          * This is used to annotate tq_lock, so
1250          *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
1251          * does not trigger a lockdep warning re: possible recursive locking
1252          */
1253         dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;
1254
1255         return (0);
1256 }
1257
1258 void
1259 spl_taskq_fini(void)
1260 {
1261         taskq_destroy(dynamic_taskq);
1262         dynamic_taskq = NULL;
1263
1264         taskq_destroy(system_taskq);
1265         system_taskq = NULL;
1266
1267         tsd_destroy(&taskq_tsd);
1268 }