/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Task Queue Implementation.
\*****************************************************************************/
#include <sys/taskq.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_TASKQ

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
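
/*
 * Example usage (an illustrative sketch only; my_cb and my_data are
 * hypothetical names, and taskq_dispatch()/taskq_wait_id() are assumed
 * to be the usual wrappers declared in sys/taskq.h):
 *
 *   static void my_cb(void *my_data)
 *   {
 *           ... process my_data ...
 *   }
 *
 *   taskqid_t id = taskq_dispatch(system_taskq, my_cb, my_data, TQ_SLEEP);
 *   if (id != 0)
 *           taskq_wait_id(system_taskq, id);
 */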

typedef struct spl_task {
        spinlock_t              t_lock;
        struct list_head        t_list;
        taskqid_t               t_id;
        task_func_t             *t_func;
        void                    *t_arg;
} spl_task_t;

/*
 * NOTE: Must be called with tq->tq_lock held, returns an spl_task_t
 * which is not attached to the free, work, or pending taskq lists.
 */
static spl_task_t *
task_alloc(taskq_t *tq, uint_t flags)
{
        spl_task_t *t;
        int count = 0;

        ASSERT(tq);
        ASSERT(flags & (TQ_SLEEP | TQ_NOSLEEP));               /* One set */
        ASSERT(!((flags & TQ_SLEEP) && (flags & TQ_NOSLEEP))); /* Not both */
        ASSERT(spin_is_locked(&tq->tq_lock));
retry:
        /* Acquire spl_task_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                SRETURN(t);
        }

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)
                SRETURN(NULL);

        /* Hit maximum spl_task_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)
                        SRETURN(NULL);

                /* Sleep briefly, polling the free list for an available
                 * spl_task_t.  If a full second passes and one has still
                 * not been found, give up and return NULL to the caller. */
                if (flags & TQ_SLEEP) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        /* Without an explicit state schedule_timeout()
                         * returns immediately rather than sleeping */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_timeout(HZ / 100);
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        if (count < 100)
                                SGOTO(retry, count++);

                        SRETURN(NULL);
                }

                /* Unreachable, neither TQ_SLEEP nor TQ_NOSLEEP is set */
                PANIC("Neither TQ_SLEEP nor TQ_NOSLEEP set");
        }
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        t = kmem_alloc(sizeof(spl_task_t), flags & (TQ_SLEEP | TQ_NOSLEEP));
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        if (t) {
                spin_lock_init(&t->t_lock);
                INIT_LIST_HEAD(&t->t_list);
                t->t_id = 0;
                t->t_func = NULL;
                t->t_arg = NULL;
                tq->tq_nalloc++;
        }

        SRETURN(t);
}

/*
 * NOTE: Must be called with tq->tq_lock held, expects the spl_task_t
 * to already be removed from the free, work, or pending taskq lists.
 */
static void
task_free(taskq_t *tq, spl_task_t *t)
{
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
        ASSERT(list_empty(&t->t_list));

        kmem_free(t, sizeof(spl_task_t));
        tq->tq_nalloc--;
}

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * spl_task_t if too many exist or moves it to the free list for later use.
 */
static void
task_done(taskq_t *tq, spl_task_t *t)
{
        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));

        list_del_init(&t->t_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->t_id = 0;
                t->t_func = NULL;
                t->t_arg = NULL;
                list_add_tail(&t->t_list, &tq->tq_free_list);
        } else {
                task_free(tq, t);
        }
}
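
/*
 * Caching up to tq_minalloc completed spl_task_t's on tq_free_list is a
 * deliberate trade: it avoids a kmem_alloc()/kmem_free() cycle for every
 * dispatched task in the steady state at the cost of a little idle memory.
 */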

/*
 * As tasks are submitted to the task queue they are assigned a
 * monotonically increasing taskqid and added to the tail of the pending
 * list.  As worker threads become available the tasks are removed from
 * the head of the pending or priority list, giving preference to the
 * priority list.  The tasks are then added to the work list, preserving
 * the ordering by taskqid.  Finally, as tasks complete they are removed
 * from the work list.  This means that the pending and work lists are
 * always kept sorted by taskqid.  Thus the lowest outstanding
 * incomplete taskqid can be determined simply by checking the min
 * taskqid for each head item on the pending, priority, and work list.
 * This value is stored in tq->tq_lowest_id and only updated to the new
 * lowest id when the previous lowest id completes.  All taskqids lower
 * than tq->tq_lowest_id must have completed.  It is also possible that
 * larger taskqids have completed because they may be processed in
 * parallel by several worker threads.  However, this is not a problem
 * because the behavior of taskq_wait_id() is to block until all
 * previously submitted taskqids have completed.
 *
 * XXX: Taskqid wrapping is not handled.  However, taskqids are 64-bit
 * values, so even at a rate of 2^24 (16,777,216) tasks per second it
 * would take 2^40 seconds, roughly 34,865 years, before a wrap occurs.
 * I can live with that for now.
 */
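
/*
 * For example (an illustrative walk-through): if taskqids 7, 8, and 9 are
 * outstanding and a worker completes 9 first, tq_lowest_id remains 7 and
 * taskq_wait_id(tq, 8) continues to block.  Only when 7 completes is
 * tq_lowest_id recomputed from the list heads, and once it advances past
 * 8 the waiter is woken.
 */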

static int
taskq_wait_check(taskq_t *tq, taskqid_t id)
{
        int rc;

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(rc);
}

void
__taskq_wait_id(taskq_t *tq, taskqid_t id)
{
        ASSERT(tq);

        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq, id));
}
EXPORT_SYMBOL(__taskq_wait_id);

void
__taskq_wait(taskq_t *tq)
{
        taskqid_t id;

        ASSERT(tq);

        /* Wait for the largest outstanding taskqid */
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        id = tq->tq_next_id - 1;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        __taskq_wait_id(tq, id);
}
EXPORT_SYMBOL(__taskq_wait);

int
__taskq_member(taskq_t *tq, void *t)
{
        int i;

        ASSERT(tq);
        ASSERT(t);

        for (i = 0; i < tq->tq_nthreads; i++)
                if (tq->tq_threads[i] == (struct task_struct *)t)
                        SRETURN(1);

        SRETURN(0);
}
EXPORT_SYMBOL(__taskq_member);

taskqid_t
__taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
        spl_task_t *t;
        taskqid_t rc = 0;

        /* Solaris assumes TQ_SLEEP if not passed explicitly */
        if (!(flags & (TQ_SLEEP | TQ_NOSLEEP)))
                flags |= TQ_SLEEP;

        if (unlikely(in_atomic() && (flags & TQ_SLEEP)))
                PANIC("May schedule while atomic: %s/0x%08x/%d\n",
                      current->comm, preempt_count(), current->pid);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TQ_ACTIVE))
                SGOTO(out, rc = 0);

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads))
                SGOTO(out, rc = 0);

        if ((t = task_alloc(tq, flags)) == NULL)
                SGOTO(out, rc = 0);

        spin_lock(&t->t_lock);

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->t_list, &tq->tq_prio_list);
        else
                list_add_tail(&t->t_list, &tq->tq_pend_list);

        t->t_id = rc = tq->tq_next_id;
        tq->tq_next_id++;
        t->t_func = func;
        t->t_arg = arg;
        spin_unlock(&t->t_lock);

        wake_up(&tq->tq_work_waitq);
out:
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        SRETURN(rc);
}
EXPORT_SYMBOL(__taskq_dispatch);
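
/*
 * Example (a sketch; my_cb is a hypothetical task_func_t): even a TQ_SLEEP
 * dispatch may fail, either because the taskq is being destroyed or because
 * task_alloc() gave up after polling for a full second, so a zero return
 * must always be handled:
 *
 *   taskqid_t id = taskq_dispatch(tq, my_cb, arg, TQ_NOSLEEP);
 *   if (id == 0)
 *           ... fall back or retry, no task was queued ...
 */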

/*
 * Returns the lowest incomplete taskqid_t.  The taskqid_t may
 * be queued on the pending list, on the priority list, or on
 * the work list currently being handled, but it is not 100%
 * complete yet.
 */
static taskqid_t
taskq_lowest_id(taskq_t *tq)
{
        taskqid_t lowest_id = tq->tq_next_id;
        spl_task_t *t;

        ASSERT(tq);
        ASSERT(spin_is_locked(&tq->tq_lock));

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        if (!list_empty(&tq->tq_work_list)) {
                t = list_entry(tq->tq_work_list.next, spl_task_t, t_list);
                lowest_id = MIN(lowest_id, t->t_id);
        }

        SRETURN(lowest_id);
}

/*
 * Insert a task into a list keeping the list sorted by increasing
 * taskqid.
 */
static void
taskq_insert_in_order(taskq_t *tq, spl_task_t *t)
{
        spl_task_t *w;
        struct list_head *l;

        ASSERT(tq);
        ASSERT(t);
        ASSERT(spin_is_locked(&tq->tq_lock));
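
        /*
         * Walk the work list backwards; the task being inserted almost
         * always carries the highest taskqid seen so far, so the reverse
         * scan typically terminates on the first comparison.
         */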
        list_for_each_prev(l, &tq->tq_work_list) {
                w = list_entry(l, spl_task_t, t_list);
                if (w->t_id < t->t_id) {
                        list_add(&t->t_list, l);
                        break;
                }
        }

        if (l == &tq->tq_work_list)
                list_add(&t->t_list, &tq->tq_work_list);
}

static int
taskq_thread(void *args)
{
        DECLARE_WAITQUEUE(wait, current);
        sigset_t blocked;
        taskqid_t id;
        taskq_t *tq = args;
        spl_task_t *t;
        struct list_head *pend_list;

        ASSERT(tq);

        current->flags |= PF_NOFREEZE;
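
        /* Block all signals; a taskq worker services arbitrary callers
         * and must not be interrupted or killed by userspace signals. */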
        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_nthreads++;
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                add_wait_queue(&tq->tq_work_waitq, &wait);
                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
                        schedule();
                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                } else {
                        __set_current_state(TASK_RUNNING);
                }

                remove_wait_queue(&tq->tq_work_waitq, &wait);

                if (!list_empty(&tq->tq_prio_list))
                        pend_list = &tq->tq_prio_list;
                else if (!list_empty(&tq->tq_pend_list))
                        pend_list = &tq->tq_pend_list;
                else
                        pend_list = NULL;

                if (pend_list) {
                        t = list_entry(pend_list->next, spl_task_t, t_list);
                        list_del_init(&t->t_list);
                        taskq_insert_in_order(tq, t);
                        tq->tq_nactive++;
                        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
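
                        /* The callback runs with tq_lock dropped so the
                         * other workers may dequeue and run tasks
                         * concurrently with this one. */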
                        /* Perform the requested task */
                        t->t_func(t->t_arg);

                        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
                        tq->tq_nactive--;
                        id = t->t_id;
                        task_done(tq, t);

                        /* When the current lowest outstanding taskqid is
                         * done, calculate the new lowest outstanding id */
                        if (tq->tq_lowest_id == id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT(tq->tq_lowest_id > id);
                        }

                        wake_up_all(&tq->tq_wait_waitq);
                }

                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);
        tq->tq_nthreads--;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        SRETURN(0);
}

taskq_t *
__taskq_create(const char *name, int nthreads, pri_t pri,
               int minalloc, int maxalloc, uint_t flags)
{
        taskq_t *tq;
        struct task_struct *t;
        int rc = 0, i, j = 0;

        ASSERT(name != NULL);
        ASSERT(pri <= maxclsyspri);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE | TASKQ_DYNAMIC))); /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
        }
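
        /*
         * For example, with TASKQ_THREADS_CPU_PCT on an 8-CPU machine,
         * nthreads = 75 yields MAX((8 * 75) / 100, 1) = 6 worker threads,
         * while nthreads = 0 still yields a single thread.
         */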

        tq = kmem_alloc(sizeof(*tq), KM_SLEEP);
        if (tq == NULL)
                SRETURN(NULL);

        tq->tq_threads = kmem_alloc(nthreads * sizeof(t), KM_SLEEP);
        if (tq->tq_threads == NULL) {
                kmem_free(tq, sizeof(*tq));
                SRETURN(NULL);
        }

        spin_lock_init(&tq->tq_lock);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_name      = name;
        tq->tq_nactive   = 0;
        tq->tq_nthreads  = 0;
        tq->tq_pri       = pri;
        tq->tq_minalloc  = minalloc;
        tq->tq_maxalloc  = maxalloc;
        tq->tq_nalloc    = 0;
        tq->tq_flags     = (flags | TQ_ACTIVE);
        tq->tq_next_id   = 1;
        tq->tq_lowest_id = 1;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_work_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);

        if (flags & TASKQ_PREPOPULATE)
                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_SLEEP | TQ_NEW));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        for (i = 0; i < nthreads; i++) {
                t = kthread_create(taskq_thread, tq, "%s/%d", name, i);
                /* kthread_create() returns an ERR_PTR(), never NULL,
                 * on failure so it must be checked with IS_ERR() */
                if (!IS_ERR(t)) {
                        tq->tq_threads[i] = t;
                        kthread_bind(t, i % num_online_cpus());
                        set_user_nice(t, PRIO_TO_NICE(pri));
                        wake_up_process(t);
                        j++;
                } else {
                        tq->tq_threads[i] = NULL;
                        rc = 1;
                }
        }

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == j);

        if (rc) {
                __taskq_destroy(tq);
                tq = NULL;
        }

        SRETURN(tq);
}
EXPORT_SYMBOL(__taskq_create);
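
/*
 * Example (a sketch; the values shown are arbitrary): create a four thread
 * queue, pre-populated with 8 cached spl_task_t's and capped at 64, via the
 * taskq_create() wrapper:
 *
 *   taskq_t *tq = taskq_create("my_taskq", 4, minclsyspri,
 *                              8, 64, TASKQ_PREPOPULATE);
 *   if (tq == NULL)
 *           return (1);
 */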

void
__taskq_destroy(taskq_t *tq)
{
        spl_task_t *t;
        int i, nthreads;

        ASSERT(tq);
        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);
        tq->tq_flags &= ~TQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);

        /* Clearing TQ_ACTIVE above prevents new tasks from being added to
         * the pending list while the worker threads are stopped below. */
        nthreads = tq->tq_nthreads;
        for (i = 0; i < nthreads; i++)
                if (tq->tq_threads[i])
                        kthread_stop(tq->tq_threads[i]);

        spin_lock_irqsave(&tq->tq_lock, tq->tq_lock_flags);

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, spl_task_t, t_list);
                list_del_init(&t->t_list);
                task_free(tq, t);
        }

        ASSERT(tq->tq_nthreads == 0);
        ASSERT(tq->tq_nalloc == 0);
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_work_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));

        spin_unlock_irqrestore(&tq->tq_lock, tq->tq_lock_flags);
        kmem_free(tq->tq_threads, nthreads * sizeof(struct task_struct *));
        kmem_free(tq, sizeof(taskq_t));
}
EXPORT_SYMBOL(__taskq_destroy);

int
spl_taskq_init(void)
{
        /* Solaris creates a dynamic taskq of up to 64 threads, however in
         * a Linux environment one thread per core is usually about right */
        system_taskq = taskq_create("spl_system_taskq", num_online_cpus(),
                                    minclsyspri, 4, 512, TASKQ_PREPOPULATE);
        if (system_taskq == NULL)
                SRETURN(1);

        SRETURN(0);
}

void
spl_taskq_fini(void)
{
        taskq_destroy(system_taskq);
}