 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 * Solaris Porting Layer (SPL) Task Queue Implementation.

#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/trace_spl.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
	"Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
	"Create new taskq threads after N sequential tasks");
/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

/* List of all taskqs */
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;

task_km_flags(uint_t flags)
	if (flags & TQ_NOSLEEP)
	if (flags & TQ_PUSHPAGE)

 * taskq_find_by_name - Find the largest instance number of a named taskq.
taskq_find_by_name(const char *name)
	struct list_head *tql = NULL;

	list_for_each_prev(tql, &tq_list) {
		tq = list_entry(tql, taskq_t, tq_taskqs);
		if (strcmp(name, tq->tq_name) == 0)
			return (tq->tq_instance);
 * NOTE: Must be called with tq->tq_lock held, returns a taskq_ent_t which
 * is not attached to the free, work, or pending taskq lists.
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
	/* Acquire taskq_ent_t's from free list if available */
	if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
		ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
		ASSERT(!timer_pending(&t->tqent_timer));

		list_del_init(&t->tqent_list);

	/* Free list is empty and memory allocations are prohibited */
	if (flags & TQ_NOALLOC)

	/* Hit maximum taskq_ent_t pool size */
	if (tq->tq_nalloc >= tq->tq_maxalloc) {
		if (flags & TQ_NOSLEEP)
		 * Sleep periodically polling the free list for an available
		 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
		 * but we cannot block forever waiting for a taskq_ent_t to
		 * show up in the free list, otherwise a deadlock can happen.
		 *
		 * Therefore, we need to allocate a new task even if the number
		 * of allocated tasks is above tq->tq_maxalloc, but we still
		 * end up delaying the task allocation by one second, thereby
		 * throttling the task dispatch rate.
		spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
		schedule_timeout(HZ / 100);
		spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,

	spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
	t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
	spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
task_free(taskq_t *tq, taskq_ent_t *t)
	ASSERT(list_empty(&t->tqent_list));
	ASSERT(!timer_pending(&t->tqent_timer));

	kmem_free(t, sizeof (taskq_ent_t));

 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
task_done(taskq_t *tq, taskq_ent_t *t)
	/* Wake tasks blocked in taskq_wait_id() */
	wake_up_all(&t->tqent_waitq);

	list_del_init(&t->tqent_list);

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		t->tqent_id = TASKQID_INVALID;
		t->tqent_func = NULL;

		list_add_tail(&t->tqent_list, &tq->tq_free_list);
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list, in task id order, for immediate processing.
task_expire_impl(taskq_ent_t *t)
	taskq_t *tq = t->tqent_taskq;
	struct list_head *l = NULL;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

	if (t->tqent_flags & TQENT_FLAG_CANCEL) {
		ASSERT(list_empty(&t->tqent_list));
		spin_unlock_irqrestore(&tq->tq_lock, flags);

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	 * The priority list must be maintained in strict task id order
	 * from lowest to highest for lowest_id to be easily calculable.
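	 * As a worked example (added for clarity): inserting id 7 into a
	 * priority list already holding ids 2, 5, 9 scans from the tail; 9 is
	 * not lower than 7, 5 is, so 7 is linked after 5 and the list stays
	 * sorted as 2, 5, 7, 9.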
	list_del(&t->tqent_list);
	list_for_each_prev(l, &tq->tq_prio_list) {
		w = list_entry(l, taskq_ent_t, tqent_list);
		if (w->tqent_id < t->tqent_id) {
			list_add(&t->tqent_list, l);

	if (l == &tq->tq_prio_list)
		list_add(&t->tqent_list, &tq->tq_prio_list);

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	wake_up(&tq->tq_work_waitq);

task_expire(spl_timer_list_t tl)
	struct timer_list *tmr = (struct timer_list *)tl;
	taskq_ent_t *t = from_timer(t, tmr, tqent_timer);

 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
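 * For example (added for clarity): if ids 12 and 15 are still pending and
 * a worker thread is actively running id 10, the lowest outstanding id is
 * 10; when every list is empty the value falls back to tq_next_id.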
taskq_lowest_id(taskq_t *tq)
	taskqid_t lowest_id = tq->tq_next_id;

	if (!list_empty(&tq->tq_pend_list)) {
		t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);

	if (!list_empty(&tq->tq_prio_list)) {
		t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);

	if (!list_empty(&tq->tq_delay_list)) {
		t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
		lowest_id = MIN(lowest_id, t->tqent_id);

	if (!list_empty(&tq->tq_active_list)) {
		tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
		ASSERT(tqt->tqt_id != TASKQID_INVALID);
		lowest_id = MIN(lowest_id, tqt->tqt_id);

 * Insert a task into a list keeping the list sorted by increasing taskqid.
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
	struct list_head *l = NULL;

	list_for_each_prev(l, &tq->tq_active_list) {
		w = list_entry(l, taskq_thread_t, tqt_active_list);
		if (w->tqt_id < tqt->tqt_id) {
			list_add(&tqt->tqt_active_list, l);

	if (l == &tq->tq_active_list)
		list_add(&tqt->tqt_active_list, &tq->tq_active_list);

 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
	struct list_head *l = NULL;

	list_for_each(l, lh) {
		t = list_entry(l, taskq_ent_t, tqent_list);

		if (t->tqent_id == id)

		if (t->tqent_id > id)

 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
taskq_find(taskq_t *tq, taskqid_t id)
	struct list_head *l = NULL;

	t = taskq_find_list(tq, &tq->tq_delay_list, id);

	t = taskq_find_list(tq, &tq->tq_prio_list, id);

	t = taskq_find_list(tq, &tq->tq_pend_list, id);

	list_for_each(l, &tq->tq_active_list) {
		tqt = list_entry(l, taskq_thread_t, tqt_active_list);
		if (tqt->tqt_id == id) {
			 * Instead of returning tqt_task, we just return a
			 * non-NULL value to prevent misuse, since tqt_task
			 * only has two valid fields.
			return (ERR_PTR(-EBUSY));
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * run.
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (taskq_find(tq, id) == NULL);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
taskq_wait_id(taskq_t *tq, taskqid_t id)
	wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));
EXPORT_SYMBOL(taskq_wait_id);

taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (id < tq->tq_lowest_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task ids are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this
 * point, but not after, should be waited for.
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
	id = id ? id : tq->tq_next_id - 1;
	wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));
EXPORT_SYMBOL(taskq_wait_outstanding);

taskq_wait_check(taskq_t *tq)
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	rc = (tq->tq_lowest_id == tq->tq_next_id);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
taskq_wait(taskq_t *tq)
	wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));
EXPORT_SYMBOL(taskq_wait);
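/*
 * Minimal usage sketch (illustrative, not part of the original file;
 * my_func and my_arg are hypothetical):
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != TASKQID_INVALID)
 *		taskq_wait_id(tq, id);		(wait for this task only)
 *
 *	taskq_wait_outstanding(tq, 0);		(wait for everything dispatched
 *						 up to this point)
 *	taskq_wait(tq);				(wait until the taskq is empty)
 */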
taskq_member(taskq_t *tq, kthread_t *t)
	return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));
EXPORT_SYMBOL(taskq_member);

taskq_of_curthread(void)
	return (tsd_get(taskq_tsd));
EXPORT_SYMBOL(taskq_of_curthread);
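/*
 * Illustrative sketch (not part of the original file): a consumer can use
 * these helpers to detect that it is already running in the context of a
 * given taskq, e.g. to avoid waiting on its own taskq and deadlocking:
 *
 *	if (taskq_member(tq, curthread) || taskq_of_curthread() == tq)
 *		return;			(self-dispatch, do not wait)
 *	taskq_wait(tq);
 */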
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
taskq_cancel_id(taskq_t *tq, taskqid_t id)
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	t = taskq_find(tq, id);
	if (t && t != ERR_PTR(-EBUSY)) {
		list_del_init(&t->tqent_list);
		t->tqent_flags |= TQENT_FLAG_CANCEL;

		 * When canceling the lowest outstanding task id we
		 * must recalculate the new lowest outstanding id.
		if (tq->tq_lowest_id == t->tqent_id) {
			tq->tq_lowest_id = taskq_lowest_id(tq);
			ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);
		 * The task_expire() function takes the tq->tq_lock so drop
		 * the lock before synchronously cancelling the timer.
		if (timer_pending(&t->tqent_timer)) {
			spin_unlock_irqrestore(&tq->tq_lock, flags);
			del_timer_sync(&t->tqent_timer);
			spin_lock_irqsave_nested(&tq->tq_lock, flags,

		if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	if (t == ERR_PTR(-EBUSY)) {
		taskq_wait_id(tq, id);
EXPORT_SYMBOL(taskq_cancel_id);
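/*
 * Illustrative sketch (not part of the original file), assuming the usual
 * Solaris semantics where a nonzero return means the task could not be
 * cancelled before it ran; my_func and my_arg are hypothetical:
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	...
 *	if (taskq_cancel_id(tq, id) != 0) {
 *		(the task already ran, or was running and has now finished)
 *	}
 */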
static int taskq_thread_spawn(taskq_t *tq);

taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
	taskqid_t rc = TASKQID_INVALID;
	unsigned long irqflags;

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))
	/* Do not queue the task unless there is an idle thread for it */
	ASSERT(tq->tq_nactive <= tq->tq_nthreads);
	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
		    taskq_thread_spawn(tq) == 0)

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)

	spin_lock(&t->tqent_lock);

	/* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
	if (flags & TQ_NOQUEUE)
		list_add(&t->tqent_list, &tq->tq_prio_list);
	/* Queue to the priority list instead of the pending list */
	else if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = rc = tq->tq_next_id;
	t->tqent_func = func;
	t->tqent_timer.function = NULL;
	t->tqent_timer.expires = 0;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);

	/* Spawn additional taskq threads if required. */
	if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
EXPORT_SYMBOL(taskq_dispatch);
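/*
 * Minimal dispatch sketch (illustrative, not part of the original file;
 * my_func and my_arg are hypothetical). A TQ_NOSLEEP dispatch may fail
 * under memory pressure, and any dispatch fails once the taskq is being
 * destroyed, so the returned id should be checked:
 *
 *	taskqid_t id = taskq_dispatch(system_taskq, my_func, my_arg, TQ_SLEEP);
 *	if (id == TASKQID_INVALID)
 *		my_func(my_arg);	(fall back to running it inline)
 */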
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
	taskqid_t rc = TASKQID_INVALID;
	unsigned long irqflags;

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE))

	if ((t = task_alloc(tq, flags, &irqflags)) == NULL)

	spin_lock(&t->tqent_lock);

	/* Queue to the delay list for subsequent execution */
	list_add_tail(&t->tqent_list, &tq->tq_delay_list);

	t->tqent_id = rc = tq->tq_next_id;
	t->tqent_func = func;
	t->tqent_timer.function = task_expire;
	t->tqent_timer.expires = (unsigned long)expire_time;
	add_timer(&t->tqent_timer);

	ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

	spin_unlock(&t->tqent_lock);

	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);
	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
EXPORT_SYMBOL(taskq_dispatch_delay);
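/*
 * Illustrative sketch (not part of the original file): expire_time is an
 * absolute time in jiffies, so a relative delay is normally expressed
 * against the current time. The ddi_get_lbolt() and SEC_TO_TICK() helpers
 * are assumed to be available from the SPL:
 *
 *	taskqid_t id = taskq_dispatch_delay(tq, my_func, my_arg, TQ_SLEEP,
 *	    ddi_get_lbolt() + SEC_TO_TICK(5));	(run roughly 5 seconds later)
 */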
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
	unsigned long irqflags;

	spin_lock_irqsave_nested(&tq->tq_lock, irqflags,

	/* Taskq being destroyed and all tasks drained */
	if (!(tq->tq_flags & TASKQ_ACTIVE)) {
		t->tqent_id = TASKQID_INVALID;

	if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
		/* Dynamic taskq may be able to spawn another thread */
		if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
		    taskq_thread_spawn(tq) == 0)

	spin_lock(&t->tqent_lock);

	 * Make sure the entry is not on some other taskq; it is important to
	 * ASSERT() under lock
	ASSERT(taskq_empty_ent(t));

	 * Mark it as a prealloc'd task. This is important
	 * to ensure that we don't free it later.
	t->tqent_flags |= TQENT_FLAG_PREALLOC;

	/* Queue to the priority list instead of the pending list */
	if (flags & TQ_FRONT)
		list_add_tail(&t->tqent_list, &tq->tq_prio_list);
		list_add_tail(&t->tqent_list, &tq->tq_pend_list);

	t->tqent_id = tq->tq_next_id;
	t->tqent_func = func;

	t->tqent_birth = jiffies;
	DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

	spin_unlock(&t->tqent_lock);

	wake_up(&tq->tq_work_waitq);

	/* Spawn additional taskq threads if required. */
	if (tq->tq_nactive == tq->tq_nthreads)
		(void) taskq_thread_spawn(tq);

	spin_unlock_irqrestore(&tq->tq_lock, irqflags);
EXPORT_SYMBOL(taskq_dispatch_ent);

taskq_empty_ent(taskq_ent_t *t)
	return (list_empty(&t->tqent_list));
EXPORT_SYMBOL(taskq_empty_ent);

taskq_init_ent(taskq_ent_t *t)
	spin_lock_init(&t->tqent_lock);
	init_waitqueue_head(&t->tqent_waitq);
	timer_setup(&t->tqent_timer, NULL, 0);
	INIT_LIST_HEAD(&t->tqent_list);

	t->tqent_func = NULL;

	t->tqent_taskq = NULL;
EXPORT_SYMBOL(taskq_init_ent);
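/*
 * Illustrative sketch of the preallocated-entry pattern (not part of the
 * original file): a caller embeds a taskq_ent_t in its own structure so
 * that dispatch never needs to allocate memory. struct my_work and
 * my_func are hypothetical:
 *
 *	struct my_work {
 *		taskq_ent_t	mw_tqent;
 *		int		mw_data;
 *	};
 *
 *	taskq_init_ent(&work->mw_tqent);
 *	taskq_dispatch_ent(tq, my_func, work, TQ_SLEEP, &work->mw_tqent);
 */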
 * Return the next pending task; preference is given to tasks on the
 * priority list which were dispatched with TQ_FRONT.
taskq_next_ent(taskq_t *tq)
	struct list_head *list;

	if (!list_empty(&tq->tq_prio_list))
		list = &tq->tq_prio_list;
	else if (!list_empty(&tq->tq_pend_list))
		list = &tq->tq_pend_list;

	return (list_entry(list->next, taskq_ent_t, tqent_list));

 * Spawns a new thread for the specified taskq.
taskq_thread_spawn_task(void *arg)
	taskq_t *tq = (taskq_t *)arg;

	if (taskq_thread_create(tq) == NULL) {
		/* restore spawning count if failed */
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		spin_unlock_irqrestore(&tq->tq_lock, flags);
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to avoid
 * deadlocks between thread creation and memory reclaim. The system_taskq
 * which is also a dynamic taskq cannot be safely used for this.
taskq_thread_spawn(taskq_t *tq)
	if (!(tq->tq_flags & TASKQ_DYNAMIC))

	if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
	    (tq->tq_flags & TASKQ_ACTIVE)) {
		spawning = (++tq->tq_nspawn);
		taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,

 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to
 * prevent all of the taskq pids from changing we opt to make it long running.
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
	if (!(tq->tq_flags & TASKQ_DYNAMIC))

	if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
	    tqt_thread_list) == tqt)

	    ((tq->tq_nspawn == 0) && /* No threads are being spawned */
	    (tq->tq_nactive == 0) && /* No threads are handling tasks */
	    (tq->tq_nthreads > 1) && /* More than 1 thread is running */
	    (!taskq_next_ent(tq)) && /* There are no pending tasks */
	    (spl_taskq_thread_dynamic)); /* Dynamic taskqs are allowed */

taskq_thread(void *args)
	DECLARE_WAITQUEUE(wait, current);
	taskq_thread_t *tqt = args;
	taskq_ent_t dup_task = {};

	current->flags |= PF_NOFREEZE;

	(void) spl_fstrans_mark();

	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	tsd_set(taskq_tsd, tq);
	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	 * If we are dynamically spawned, decrease spawning count. Note that
	 * we could be created during taskq_create, in which case we shouldn't
	 * do the decrement. But it's fine because taskq_create will reset it
	 * to 0.
	if (tq->tq_flags & TASKQ_DYNAMIC)

	/* Immediately exit if more threads than allowed were created. */
	if (tq->tq_nthreads >= tq->tq_maxthreads)

	list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
	wake_up(&tq->tq_wait_waitq);
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (list_empty(&tq->tq_pend_list) &&
		    list_empty(&tq->tq_prio_list)) {

			if (taskq_thread_should_stop(tq, tqt)) {
				wake_up_all(&tq->tq_wait_waitq);

			add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
			spin_unlock_irqrestore(&tq->tq_lock, flags);

			spin_lock_irqsave_nested(&tq->tq_lock, flags,
			remove_wait_queue(&tq->tq_work_waitq, &wait);
			__set_current_state(TASK_RUNNING);

		if ((t = taskq_next_ent(tq)) != NULL) {
			list_del_init(&t->tqent_list);
			 * A TQENT_FLAG_PREALLOC task may be reused or freed
			 * during the task function call. Store tqent_id and
			 * tqent_flags.
			 *
			 * Also use an on-stack taskq_ent_t for tqt_task
			 * assignment in this case; we want to make sure
			 * to duplicate all fields, so the values are
			 * correct when it's accessed via DTRACE_PROBE*.
			tqt->tqt_id = t->tqent_id;
			tqt->tqt_flags = t->tqent_flags;

			if (t->tqent_flags & TQENT_FLAG_PREALLOC) {

			taskq_insert_in_order(tq, tqt);

			spin_unlock_irqrestore(&tq->tq_lock, flags);

			DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);

			/* Perform the requested task */
			t->tqent_func(t->tqent_arg);

			DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);

			spin_lock_irqsave_nested(&tq->tq_lock, flags,

			list_del_init(&tqt->tqt_active_list);
			tqt->tqt_task = NULL;

			/* For prealloc'd tasks, we don't free anything. */
			if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
			 * When the current lowest outstanding taskqid is
			 * done, calculate the new lowest outstanding id.
			if (tq->tq_lowest_id == tqt->tqt_id) {
				tq->tq_lowest_id = taskq_lowest_id(tq);
				ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);

			/* Spawn additional taskq threads if required. */
			if ((++seq_tasks) > spl_taskq_thread_sequential &&
			    taskq_thread_spawn(tq))

			tqt->tqt_id = TASKQID_INVALID;

			wake_up_all(&tq->tq_wait_waitq);

		if (taskq_thread_should_stop(tq, tqt))

		set_current_state(TASK_INTERRUPTIBLE);

	__set_current_state(TASK_RUNNING);

	list_del_init(&tqt->tqt_thread_list);

	kmem_free(tqt, sizeof (taskq_thread_t));
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	tsd_set(taskq_tsd, NULL);

static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
	static int last_used_cpu = 0;

	tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
	INIT_LIST_HEAD(&tqt->tqt_thread_list);
	INIT_LIST_HEAD(&tqt->tqt_active_list);

	tqt->tqt_id = TASKQID_INVALID;

	tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
	if (tqt->tqt_thread == NULL) {
		kmem_free(tqt, sizeof (taskq_thread_t));

	if (spl_taskq_thread_bind) {
		last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
		kthread_bind(tqt->tqt_thread, last_used_cpu);

	if (spl_taskq_thread_priority)
		set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

	wake_up_process(tqt->tqt_thread);

taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
	taskq_thread_t *tqt;
	int count = 0, rc = 0, i;
	unsigned long irqflags;

	ASSERT(name != NULL);
	ASSERT(minalloc >= 0);
	ASSERT(maxalloc <= INT_MAX);
	ASSERT(!(flags & (TASKQ_CPR_SAFE))); /* Unsupported */

	/* Scale the number of threads using nthreads as a percentage */
	if (flags & TASKQ_THREADS_CPU_PCT) {
		ASSERT(nthreads <= 100);
		ASSERT(nthreads >= 0);
		nthreads = MIN(nthreads, 100);
		nthreads = MAX(nthreads, 0);
		nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);

	tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);

	spin_lock_init(&tq->tq_lock);
	INIT_LIST_HEAD(&tq->tq_thread_list);
	INIT_LIST_HEAD(&tq->tq_active_list);
	tq->tq_name = kmem_strdup(name);
	tq->tq_nthreads = 0;
	tq->tq_maxthreads = nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_flags = (flags | TASKQ_ACTIVE);
	tq->tq_next_id = TASKQID_INITIAL;
	tq->tq_lowest_id = TASKQID_INITIAL;
	INIT_LIST_HEAD(&tq->tq_free_list);
	INIT_LIST_HEAD(&tq->tq_pend_list);
	INIT_LIST_HEAD(&tq->tq_prio_list);
	INIT_LIST_HEAD(&tq->tq_delay_list);
	init_waitqueue_head(&tq->tq_work_waitq);
	init_waitqueue_head(&tq->tq_wait_waitq);
	tq->tq_lock_class = TQ_LOCK_GENERAL;
	INIT_LIST_HEAD(&tq->tq_taskqs);

	if (flags & TASKQ_PREPOPULATE) {
		spin_lock_irqsave_nested(&tq->tq_lock, irqflags,

		for (i = 0; i < minalloc; i++)
			task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,

		spin_unlock_irqrestore(&tq->tq_lock, irqflags);

	if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)

	for (i = 0; i < nthreads; i++) {
		tqt = taskq_thread_create(tq);

	/* Wait for all threads to be started before potential destroy */
	wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
	 * The taskq_thread()s might have touched nspawn, but we don't want
	 * them to because they're not dynamically spawned. So we reset it
	 * to 0.
	down_write(&tq_list_sem);
	tq->tq_instance = taskq_find_by_name(name) + 1;
	list_add_tail(&tq->tq_taskqs, &tq_list);
	up_write(&tq_list_sem);
EXPORT_SYMBOL(taskq_create);
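/*
 * Illustrative creation sketch (not part of the original file): a fixed
 * four-thread taskq and a dynamic taskq scaled to 75% of the online CPUs.
 * The names are hypothetical:
 *
 *	taskq_t *tq = taskq_create("my_taskq", 4, maxclsyspri,
 *	    4, INT_MAX, TASKQ_PREPOPULATE);
 *
 *	taskq_t *pct = taskq_create("my_pct_taskq", 75, maxclsyspri,
 *	    boot_ncpus, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
 */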
taskq_destroy(taskq_t *tq)
	struct task_struct *thread;
	taskq_thread_t *tqt;
	unsigned long flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	tq->tq_flags &= ~TASKQ_ACTIVE;
	spin_unlock_irqrestore(&tq->tq_lock, flags);
	 * When TASKQ_ACTIVE is clear new tasks may not be added nor may
	 * new worker threads be spawned for dynamic taskqs.
	if (dynamic_taskq != NULL)
		taskq_wait_outstanding(dynamic_taskq, 0);

	/* remove taskq from global list used by the kstats */
	down_write(&tq_list_sem);
	list_del(&tq->tq_taskqs);
	up_write(&tq_list_sem);

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	/* wait for spawning threads to insert themselves into the list */
	while (tq->tq_nspawn) {
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		schedule_timeout_interruptible(1);
		spin_lock_irqsave_nested(&tq->tq_lock, flags,

	 * Signal each thread to exit and block until it does. Each thread
	 * is responsible for removing itself from the list and freeing its
	 * taskq_thread_t. This allows for idle threads to opt to remove
	 * themselves from the taskq. They can be recreated as needed.
	while (!list_empty(&tq->tq_thread_list)) {
		tqt = list_entry(tq->tq_thread_list.next,
		    taskq_thread_t, tqt_thread_list);
		thread = tqt->tqt_thread;
		spin_unlock_irqrestore(&tq->tq_lock, flags);

		kthread_stop(thread);

		spin_lock_irqsave_nested(&tq->tq_lock, flags,

	while (!list_empty(&tq->tq_free_list)) {
		t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

		ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

		list_del_init(&t->tqent_list);

	ASSERT0(tq->tq_nthreads);
	ASSERT0(tq->tq_nalloc);
	ASSERT0(tq->tq_nspawn);
	ASSERT(list_empty(&tq->tq_thread_list));
	ASSERT(list_empty(&tq->tq_active_list));
	ASSERT(list_empty(&tq->tq_free_list));
	ASSERT(list_empty(&tq->tq_pend_list));
	ASSERT(list_empty(&tq->tq_prio_list));
	ASSERT(list_empty(&tq->tq_delay_list));

	spin_unlock_irqrestore(&tq->tq_lock, flags);

	kmem_strfree(tq->tq_name);
	kmem_free(tq, sizeof (taskq_t));
EXPORT_SYMBOL(taskq_destroy);

static unsigned int spl_taskq_kick = 0;

 * module_param_cb is introduced to take kernel_param_ops and
 * module_param_call is marked as obsolete. Also set and get operations
 * were changed to take a 'const struct kernel_param *'.
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
param_set_taskq_kick(const char *val, struct kernel_param *kp)
	unsigned long flags;

	ret = param_set_uint(val, kp);
	if (ret < 0 || !spl_taskq_kick)

	down_read(&tq_list_sem);
	list_for_each_entry(tq, &tq_list, tq_taskqs) {
		spin_lock_irqsave_nested(&tq->tq_lock, flags,
		/* Check if the first pending task is older than 5 seconds */
		t = taskq_next_ent(tq);
		if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
			(void) taskq_thread_spawn(tq);
			printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
			    tq->tq_name, tq->tq_instance);

		spin_unlock_irqrestore(&tq->tq_lock, flags);

	up_read(&tq_list_sem);

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
	.set = param_set_taskq_kick,
	.get = param_get_uint,
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
    &spl_taskq_kick, 0644);

MODULE_PARM_DESC(spl_taskq_kick,
	"Write nonzero to kick stuck taskqs to spawn more threads");

spl_taskq_init(void)
	init_rwsem(&tq_list_sem);
	tsd_create(&taskq_tsd, NULL);

	system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_taskq == NULL)

	system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
	if (system_delay_taskq == NULL) {
		taskq_destroy(system_taskq);

	dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
	    maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
	if (dynamic_taskq == NULL) {
		taskq_destroy(system_taskq);
		taskq_destroy(system_delay_taskq);

	 * This is used to annotate tq_lock, so
	 * taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
	 * does not trigger a lockdep warning re: possible recursive locking
	dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

spl_taskq_fini(void)
	taskq_destroy(dynamic_taskq);
	dynamic_taskq = NULL;

	taskq_destroy(system_delay_taskq);
	system_delay_taskq = NULL;

	taskq_destroy(system_taskq);
	system_taskq = NULL;

	tsd_destroy(&taskq_tsd);