/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/

#include <sys/taskq.h>
#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_TASKQ

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
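
/*
 * A minimal usage sketch, compiled out below.  my_func() and my_data are
 * hypothetical names, not part of the SPL.  It assumes the public
 * taskq_dispatch() wrapper from <sys/taskq.h>, which expands to the
 * __taskq_dispatch() implementation defined later in this file.
 */
#if 0
static void
my_func(void *arg)
{
        /* Runs later in the context of a system_taskq worker thread */
}

static void
example_system_dispatch(void *my_data)
{
        taskqid_t id;

        /* A return value of 0 means the task could not be dispatched */
        id = taskq_dispatch(system_taskq, my_func, my_data, TQ_SLEEP);
        ASSERT(id != 0);
}
#endif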

typedef struct spl_task {
        spinlock_t              t_lock;
        struct list_head        t_list;
        taskqid_t               t_id;
        task_func_t             *t_func;
        void                    *t_arg;
} spl_task_t;
/*
 * NOTE: Must be called with tq->tq_lock held.  Returns an spl_task_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static spl_task_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        spl_task_t *t;
        int count = 0;
        SENTRY;

        ASSERT(tq);
        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));               /* One set */
        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire spl_task_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                SRETURN(t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                SRETURN(NULL);
        /* Hit the maximum spl_task_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        SRETURN(NULL);

                /* Sleep periodically, polling the free list for an available
                 * spl_task_t.  If a full second passes without finding one,
                 * give up and return NULL to the caller. */
                if (flags & TQ_SLEEP) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        /* The task state must be set to a sleeping state
                         * first, or schedule_timeout() returns immediately
                         * without waiting. */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(HZ / 100);
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        if (count < 100)
                                SGOTO(retry, count++);

                        SRETURN(NULL);
                }

                /* Unreachable, neither TQ_SLEEP nor TQ_NOSLEEP set */
                PANIC("Neither TQ_SLEEP nor TQ_NOSLEEP set");
        }

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof(spl_task_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t) {
                spin_lock_init(&t->t_lock);
                INIT_LIST_HEAD(&t->t_list);
                t->t_id = 0;
                t->t_func = NULL;
                t->t_arg = NULL;
                tq->tq_nalloc++;
        }

        SRETURN(t);
}
/*
 * NOTE: Must be called with tq->tq_lock held, expects the spl_task_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, spl_task_t *t)
{
        SENTRY;

        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->t_list));

        kmem_free(t, sizeof(spl_task_t));
        tq->tq_nalloc--;

        SEXIT;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * spl_task_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, spl_task_t *t)
{
        SENTRY;
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_del_init(&t->t_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->t_id = 0;
                t->t_func = NULL;
                t->t_arg = NULL;
                list_add_tail(&t->t_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }

        SEXIT;
}
/*
 * As tasks are submitted to the task queue they are assigned a
 * monotonically increasing taskqid and added to the tail of the pending
 * list.  As worker threads become available the tasks are removed from
 * the head of the pending or priority list, giving preference to the
 * priority list.  The tasks are then added to the work list, preserving
 * the ordering by taskqid.  Finally, as tasks complete they are removed
 * from the work list.  This means that the pending and work lists are
 * always kept sorted by taskqid.  Thus the lowest outstanding
 * incomplete taskqid can be determined simply by checking the min
 * taskqid for each head item on the pending, priority, and work lists.
 * This value is stored in tq->tq_lowest_id and only updated to the new
 * lowest id when the previous lowest id completes.  All taskqids lower
 * than tq->tq_lowest_id must have completed.  It is also possible
 * that larger taskqids have completed because they may be processed in
 * parallel by several worker threads.  However, this is not a problem
 * because the behavior of taskq_wait_id() is to block until all
 * previously submitted taskqids have completed.
 *
 * XXX: Taskqid wrapping is not handled.  However, taskqids are
 * 64-bit values, so even if a taskq is processing 2^24 (16,777,216)
 * taskqids per second it will still take 2^40 seconds, about 34,865
 * years, before the wrap occurs.  I can live with that for now.
 */
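/*
 * A worked example with hypothetical ids (not taken from a real trace):
 * suppose tasks 1-6 have been dispatched (tq_next_id == 7), tasks 1 and
 * 2 have completed, tasks 3 and 4 are being handled by workers, and
 * tasks 5 and 6 are still queued:
 *
 *   work list head:    3   (3 and 4 in flight, list sorted by id)
 *   pending list head: 5
 *   priority list:     empty
 *
 * Then tq_lowest_id = MIN(3, 5) = 3, so a taskq_wait_id(tq, 2) caller
 * is released (2 < 3) while a taskq_wait_id(tq, 4) caller stays blocked
 * even if task 4 happens to finish before task 3.
 */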
static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(rc);
}

void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        SENTRY;
        ASSERT(tq);

        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));

        SEXIT;
}
EXPORT_SYMBOL(__taskq_wait_id);

void
__taskq_wait(taskq_t *tq)
{
        taskqid_t id;
        SENTRY;
        ASSERT(tq);

        /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        __taskq_wait_id(tq, id);

        SEXIT;
}
EXPORT_SYMBOL(__taskq_wait);
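
/*
 * A usage sketch of the wait interfaces, compiled out.  my_func() is the
 * hypothetical function from the sketch near the top of this file, and
 * taskq_wait()/taskq_wait_id() are the public <sys/taskq.h> wrappers for
 * the functions above.
 */
#if 0
static void
example_wait(taskq_t *tq, void *my_data)
{
        taskqid_t id;

        id = taskq_dispatch(tq, my_func, my_data, TQ_SLEEP);
        if (id != 0)
                taskq_wait_id(tq, id);  /* Block until this id completes */

        taskq_wait(tq);         /* Block until all current ids complete */
}
#endif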

int
__taskq_member(taskq_t *tq, void *t)
{
        int i;
        SENTRY;

        ASSERT(tq);
        ASSERT(t);

        for (i = 0; i < tq->tq_nthreads; i++)
                if (tq->tq_threads[i] == (struct task_struct *)t)
                        SRETURN(1);

        SRETURN(0);
}
EXPORT_SYMBOL(__taskq_member);
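
/*
 * A sketch, compiled out, of the common reason to ask about membership:
 * waiting on a taskq from inside one of its own worker threads would
 * deadlock.  taskq_member() is the public <sys/taskq.h> wrapper, and
 * 'current' is the usual Linux task_struct pointer.
 */
#if 0
static void
example_safe_wait(taskq_t *tq)
{
        if (!taskq_member(tq, current))
                taskq_wait(tq);
}
#endif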

taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        spl_task_t *t;
        taskqid_t rc = 0;
        SENTRY;

        ASSERT(tq);
        ASSERT(func);

        /* Solaris assumes TQ_SLEEP if not passed explicitly */
        if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
                flags |= TQ_SLEEP;

        if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
                PANIC("May schedule while atomic: %s/0x%08x/%d\n",
                    current->comm, preempt_count(), current->pid);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
                SGOTO(out, rc = 0);

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                SGOTO(out, rc = 0);

        if ((t = task_alloc(tq, flags)) == NULL)
                SGOTO(out, rc = 0);

        spin_lock(&t->t_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->t_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->t_list, &tq->tq_pend_list);

        t->t_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->t_func = func;
        t->t_arg = arg;
        spin_unlock(&t->t_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        SRETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
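
/*
 * A dispatch sketch, compiled out: under TQ_NOSLEEP the spl_task_t
 * allocation may fail rather than block, so the 0 return must be
 * handled by the caller.  my_func() is the hypothetical function from
 * the earlier sketch.
 */
#if 0
static int
example_dispatch_nosleep(taskq_t *tq, void *my_data)
{
        taskqid_t id;

        /* Will not sleep, making it safe to call from atomic context */
        id = taskq_dispatch(tq, my_func, my_data, TQ_NOSLEEP);
        if (id == 0)
                return (-EBUSY);        /* Caller must retry or fall back */

        return (0);
}
#endif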

/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may be
 * queued on the pending list, on the priority list, or on the work
 * list currently being handled; in all cases it has not yet completed.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        spl_task_t *t;
        SENTRY;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        if (!list_empty(&tq->tq_work_list)) {
                t = list_entry(tq->tq_work_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        SRETURN(lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing
 * taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, spl_task_t *t)
{
        spl_task_t *w;
        struct list_head *l;

        SENTRY;
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_for_each_prev(l, &tq->tq_work_list) {
                w = list_entry(l, spl_task_t, t_list);
                if (w->t_id < t->t_id) {
                        list_add(&t->t_list, l);
                        break;
                }
        }
        if (l == &tq->tq_work_list)
                list_add(&t->t_list, &tq->tq_work_list);

        SEXIT;
}
static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskqid_t id;
        taskq_t *tq = args;
        spl_task_t *t;
        struct list_head *pend_list;
        SENTRY;

        ASSERT(tq);
        current->flags |= PF_NOFREEZE;

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nthreads++;
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                add_wait_queue(&tq->tq_work_waitq, &wait);
                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule();
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                remove_wait_queue(&tq->tq_work_waitq, &wait);

                if (!list_empty(&tq->tq_prio_list))
                        pend_list = &tq->tq_prio_list;
                else if (!list_empty(&tq->tq_pend_list))
                        pend_list = &tq->tq_pend_list;
                else
                        pend_list = NULL;

                if (pend_list) {
                        t = list_entry(pend_list->next, spl_task_t, t_list);
                        list_del_init(&t->t_list);
                        taskq_insert_in_order(tq, t);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

                        /* Perform the requested task */
                        t->t_func(t->t_arg);

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        tq->tq_nactive--;
                        id = t->t_id;
                        task_done(tq, t);

                        /* When the current lowest outstanding taskqid is
                         * done, calculate the new lowest outstanding id */
                        if (tq->tq_lowest_id == id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT(tq->tq_lowest_id > id);
                        }

                        wake_up_all(&tq->tq_wait_waitq);
                }

                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(0);
}

taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
               int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        struct task_struct *t;
        int rc = 0, i, j = 0;
        SENTRY;

        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }

        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
        if (tq == NULL)
                SRETURN(NULL);

        tq->tq_threads = kmem_alloc(nthreads * sizeof(t), KM_SLEEP);
        if (tq->tq_threads == NULL) {
                kmem_free(tq, sizeof(*tq));
                SRETURN(NULL);
        }

        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_name      = name;
        tq->tq_nactive   = 0;
        tq->tq_nthreads  = 0;
        tq->tq_pri       = pri;
        tq->tq_minalloc  = minalloc;
        tq->tq_maxalloc  = maxalloc;
        tq->tq_nalloc    = 0;
        tq->tq_flags     = (flags | TQ_ACTIVE);
        tq->tq_next_id   = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_work_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE)
                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        for (i = 0; i < nthreads; i++) {
                t = kthread_create(taskq_thread, tq, "%s/%d", name, i);
                if (t) {
                        tq->tq_threads[i] = t;
                        kthread_bind(t, i % num_online_cpus());
                        set_user_nice(t, PRIO_TO_NICE(pri));
                        wake_up_process(t);
                        j++;
                } else {
                        tq->tq_threads[i] = NULL;
                        rc = 1;
                }
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

        if (rc) {
                __taskq_destroy(tq);
                tq = NULL;
        }

        SRETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);
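
/*
 * A create/destroy sketch, compiled out, mirroring how spl_taskq_init()
 * below sets up system_taskq; the queue name and my_func()/my_data are
 * hypothetical.  TASKQ_PREPOPULATE preallocates minalloc (here 4)
 * spl_task_t structures, so early dispatches need not call kmem_alloc().
 */
#if 0
static void
example_create_destroy(void *my_data)
{
        taskq_t *tq;

        tq = taskq_create("example_taskq", num_online_cpus(), minclsyspri,
                          4, 512, TASKQ_PREPOPULATE);
        if (tq == NULL)
                return;

        (void) taskq_dispatch(tq, my_func, my_data, TQ_SLEEP);

        /* Drains all pending and active tasks before freeing the queue */
        taskq_destroy(tq);
}
#endif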

void
__taskq_destroy(taskq_t *tq)
{
        spl_task_t *t;
        int i, nthreads;
        SENTRY;

        ASSERT(tq);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /* Clearing TQ_ACTIVE prevents new tasks being added to pending */
        __taskq_wait(tq);

        nthreads = tq->tq_nthreads;
        for (i = 0; i < nthreads; i++)
                if (tq->tq_threads[i])
                        kthread_stop(tq->tq_threads[i]);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                task_free(tq, t);
        }

        ASSERT(tq->tq_nthreads == 0);
        ASSERT(tq->tq_nalloc == 0);
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_work_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        kmem_free(tq->tq_threads, nthreads * sizeof(struct task_struct *));
        kmem_free(tq, sizeof(taskq_t));

        SEXIT;
}
EXPORT_SYMBOL(__taskq_destroy);

int
spl_taskq_init(void)
{
        SENTRY;

        /* Solaris creates a dynamic taskq of up to 64 threads; however, in
         * a Linux environment one thread per core is usually about right */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
                                    minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                SRETURN(1);

        SRETURN(0);
}

void
spl_taskq_fini(void)
{
        SENTRY;
        taskq_destroy(system_taskq);
        SEXIT;
}