/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Task Queue Implementation.
 */

#include <sys/timer.h>
#include <sys/taskq.h>
#include <sys/kmem.h>
#include <sys/tsd.h>
#include <sys/trace_spl.h>

int spl_taskq_thread_bind = 0;
module_param(spl_taskq_thread_bind, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_bind, "Bind taskq thread to CPU by default");

int spl_taskq_thread_dynamic = 1;
module_param(spl_taskq_thread_dynamic, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_dynamic, "Allow dynamic taskq threads");

int spl_taskq_thread_priority = 1;
module_param(spl_taskq_thread_priority, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_priority,
        "Allow non-default priority for taskq threads");

int spl_taskq_thread_sequential = 4;
module_param(spl_taskq_thread_sequential, int, 0644);
MODULE_PARM_DESC(spl_taskq_thread_sequential,
        "Create new taskq threads after N sequential tasks");

/* Global system-wide dynamic task queue available for all consumers */
taskq_t *system_taskq;
EXPORT_SYMBOL(system_taskq);
/* Global dynamic task queue for long delay */
taskq_t *system_delay_taskq;
EXPORT_SYMBOL(system_delay_taskq);

/* Private dedicated taskq for creating new taskq threads on demand. */
static taskq_t *dynamic_taskq;
static taskq_thread_t *taskq_thread_create(taskq_t *);

/* List of all taskqs */
LIST_HEAD(tq_list);
struct rw_semaphore tq_list_sem;
static uint_t taskq_tsd;
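
/*
 * Map TQ_* dispatch flags to the corresponding KM_* flags used when a new
 * taskq_ent_t has to be allocated.
 */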
task_km_flags(uint_t flags)
        if (flags & TQ_NOSLEEP)

        if (flags & TQ_PUSHPAGE)

/*
 * taskq_find_by_name - Find the largest instance number of a named taskq.
 */
taskq_find_by_name(const char *name)
        struct list_head *tql = NULL;

        list_for_each_prev(tql, &tq_list) {
                tq = list_entry(tql, taskq_t, tq_taskqs);
                if (strcmp(name, tq->tq_name) == 0)
                        return (tq->tq_instance);

/*
 * NOTE: Must be called with tq->tq_lock held; returns a taskq_ent_t which
 * is not attached to the free, work, or pending taskq lists.
 */
task_alloc(taskq_t *tq, uint_t flags, unsigned long *irqflags)
        /* Acquire taskq_ent_t's from free list if available */
        if (!list_empty(&tq->tq_free_list) && !(flags & TQ_NEW)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));
                ASSERT(!(t->tqent_flags & TQENT_FLAG_CANCEL));
                ASSERT(!timer_pending(&t->tqent_timer));

                list_del_init(&t->tqent_list);

        /* Free list is empty and memory allocations are prohibited */
        if (flags & TQ_NOALLOC)

        /* Hit maximum taskq_ent_t pool size */
        if (tq->tq_nalloc >= tq->tq_maxalloc) {
                if (flags & TQ_NOSLEEP)

                /*
                 * Sleep periodically polling the free list for an available
                 * taskq_ent_t. Dispatching with TQ_SLEEP should always succeed
                 * but we cannot block forever waiting for a taskq_ent_t to
                 * show up in the free list, otherwise a deadlock can happen.
                 *
                 * Therefore, we need to allocate a new task even if the number
                 * of allocated tasks is above tq->tq_maxalloc, but we still
                 * end up delaying the task allocation by one second, thereby
                 * throttling the task dispatch rate.
                 */
                spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
                schedule_timeout(HZ / 100);
                spin_lock_irqsave_nested(&tq->tq_lock, *irqflags,
                    tq->tq_lock_class);

        spin_unlock_irqrestore(&tq->tq_lock, *irqflags);
        t = kmem_alloc(sizeof (taskq_ent_t), task_km_flags(flags));
        spin_lock_irqsave_nested(&tq->tq_lock, *irqflags, tq->tq_lock_class);

/*
 * NOTE: Must be called with tq->tq_lock held, expects the taskq_ent_t
 * to already be removed from the free, work, or pending taskq lists.
 */
task_free(taskq_t *tq, taskq_ent_t *t)
        ASSERT(list_empty(&t->tqent_list));
        ASSERT(!timer_pending(&t->tqent_timer));

        kmem_free(t, sizeof (taskq_ent_t));

/*
 * NOTE: Must be called with tq->tq_lock held, either destroys the
 * taskq_ent_t if too many exist or moves it to the free list for later use.
 */
task_done(taskq_t *tq, taskq_ent_t *t)
        /* Wake tasks blocked in taskq_wait_id() */
        wake_up_all(&t->tqent_waitq);

        list_del_init(&t->tqent_list);

        if (tq->tq_nalloc <= tq->tq_minalloc) {
                t->tqent_id = TASKQID_INVALID;
                t->tqent_func = NULL;

                list_add_tail(&t->tqent_list, &tq->tq_free_list);

/*
 * When a delayed task timer expires remove it from the delay list and
 * add it to the priority list for immediate processing.
 */
task_expire_impl(taskq_ent_t *t)
        taskq_t *tq = t->tqent_taskq;
        struct list_head *l = NULL;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);

        if (t->tqent_flags & TQENT_FLAG_CANCEL) {
                ASSERT(list_empty(&t->tqent_list));
                spin_unlock_irqrestore(&tq->tq_lock, flags);

        t->tqent_birth = jiffies;
        DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

        /*
         * The priority list must be maintained in strict task id order
         * from lowest to highest for lowest_id to be easily calculable.
         */
        list_del(&t->tqent_list);
        list_for_each_prev(l, &tq->tq_prio_list) {
                w = list_entry(l, taskq_ent_t, tqent_list);
                if (w->tqent_id < t->tqent_id) {
                        list_add(&t->tqent_list, l);

        if (l == &tq->tq_prio_list)
                list_add(&t->tqent_list, &tq->tq_prio_list);

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        wake_up(&tq->tq_work_waitq);

task_expire(spl_timer_list_t tl)
        struct timer_list *tmr = (struct timer_list *)tl;
        taskq_ent_t *t = from_timer(t, tmr, tqent_timer);

/*
 * Returns the lowest incomplete taskqid_t. The taskqid_t may
 * be queued on the pending list, on the priority list, on the
 * delay list, or on the work list currently being handled, but
 * it is not 100% complete yet.
 */
taskq_lowest_id(taskq_t *tq)
        taskqid_t lowest_id = tq->tq_next_id;

        if (!list_empty(&tq->tq_pend_list)) {
                t = list_entry(tq->tq_pend_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);

        if (!list_empty(&tq->tq_prio_list)) {
                t = list_entry(tq->tq_prio_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);

        if (!list_empty(&tq->tq_delay_list)) {
                t = list_entry(tq->tq_delay_list.next, taskq_ent_t, tqent_list);
                lowest_id = MIN(lowest_id, t->tqent_id);

        if (!list_empty(&tq->tq_active_list)) {
                tqt = list_entry(tq->tq_active_list.next, taskq_thread_t,
                    tqt_active_list);
                ASSERT(tqt->tqt_id != TASKQID_INVALID);
                lowest_id = MIN(lowest_id, tqt->tqt_id);

/*
 * Insert a task into a list keeping the list sorted by increasing taskqid.
 */
taskq_insert_in_order(taskq_t *tq, taskq_thread_t *tqt)
        struct list_head *l = NULL;

        list_for_each_prev(l, &tq->tq_active_list) {
                w = list_entry(l, taskq_thread_t, tqt_active_list);
                if (w->tqt_id < tqt->tqt_id) {
                        list_add(&tqt->tqt_active_list, l);

        if (l == &tq->tq_active_list)
                list_add(&tqt->tqt_active_list, &tq->tq_active_list);

/*
 * Find and return a task from the given list if it exists. The list
 * must be in lowest to highest task id order.
 */
taskq_find_list(taskq_t *tq, struct list_head *lh, taskqid_t id)
        struct list_head *l = NULL;

        list_for_each(l, lh) {
                t = list_entry(l, taskq_ent_t, tqent_list);

                if (t->tqent_id == id)

                if (t->tqent_id > id)

/*
 * Find an already dispatched task given the task id regardless of what
 * state it is in. If a task is still pending it will be returned.
 * If a task is executing, then -EBUSY will be returned instead.
 * If the task has already been run then NULL is returned.
 */
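/*
 * Callers distinguish these three cases explicitly; for example,
 * taskq_cancel_id() below treats ERR_PTR(-EBUSY) as "still running, must
 * wait" and NULL as "already completed".
 */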
taskq_find(taskq_t *tq, taskqid_t id)
        struct list_head *l = NULL;

        t = taskq_find_list(tq, &tq->tq_delay_list, id);

        t = taskq_find_list(tq, &tq->tq_prio_list, id);

        t = taskq_find_list(tq, &tq->tq_pend_list, id);

        list_for_each(l, &tq->tq_active_list) {
                tqt = list_entry(l, taskq_thread_t, tqt_active_list);
                if (tqt->tqt_id == id) {
                        /*
                         * Instead of returning tqt_task, we just return a non
                         * NULL value to prevent misuse, since tqt_task only
                         * has two valid fields.
                         */
                        return (ERR_PTR(-EBUSY));

/*
 * Theory for the taskq_wait_id(), taskq_wait_outstanding(), and
 * taskq_wait() functions below.
 *
 * Taskq waiting is accomplished by tracking the lowest outstanding task
 * id and the next available task id. As tasks are dispatched they are
 * added to the tail of the pending, priority, or delay lists. As worker
 * threads become available the tasks are removed from the heads of these
 * lists and linked to the worker threads. This ensures the lists are
 * kept sorted by lowest to highest task id.
 *
 * Therefore the lowest outstanding task id can be quickly determined by
 * checking the head item from all of these lists. This value is stored
 * with the taskq as the lowest id. It only needs to be recalculated when
 * either the task with the current lowest id completes or is canceled.
 *
 * By blocking until the lowest task id exceeds the passed task id the
 * taskq_wait_outstanding() function can be easily implemented. Similarly,
 * by blocking until the lowest task id matches the next task id taskq_wait()
 * can be implemented.
 *
 * Callers should be aware that when there are multiple worker threads it
 * is possible for larger task ids to complete before smaller ones. Also
 * when the taskq contains delay tasks with small task ids callers may
 * block for a considerable length of time waiting for them to expire and
 * execute.
 */
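
/*
 * Illustrative usage only (not part of this file): given a previously
 * created taskq 'tq' and a hypothetical task function 'my_func', a caller
 * might combine dispatch with the wait primitives roughly as follows:
 *
 *      taskqid_t id = taskq_dispatch(tq, my_func, arg, TQ_SLEEP);
 *      if (id != TASKQID_INVALID)
 *              taskq_wait_id(tq, id);          (wait for this task only)
 *      taskq_wait_outstanding(tq, 0);          (wait for all earlier dispatches)
 *      taskq_wait(tq);                         (wait until the taskq is empty)
 */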
taskq_wait_id_check(taskq_t *tq, taskqid_t id)
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (taskq_find(tq, id) == NULL);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

/*
 * The taskq_wait_id() function blocks until the passed task id completes.
 * This does not guarantee that all lower task ids have completed.
 */
taskq_wait_id(taskq_t *tq, taskqid_t id)
        wait_event(tq->tq_wait_waitq, taskq_wait_id_check(tq, id));

EXPORT_SYMBOL(taskq_wait_id);

taskq_wait_outstanding_check(taskq_t *tq, taskqid_t id)
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (id < tq->tq_lowest_id);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

/*
 * The taskq_wait_outstanding() function will block until all tasks with a
 * lower taskqid than the passed 'id' have been completed. Note that all
 * task ids are assigned monotonically at dispatch time. Zero may be
 * passed for the id to indicate that all tasks dispatched up to this point,
 * but not after, should be waited for.
 */
taskq_wait_outstanding(taskq_t *tq, taskqid_t id)
        id = id ? id : tq->tq_next_id - 1;
        wait_event(tq->tq_wait_waitq, taskq_wait_outstanding_check(tq, id));

EXPORT_SYMBOL(taskq_wait_outstanding);

taskq_wait_check(taskq_t *tq)
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        rc = (tq->tq_lowest_id == tq->tq_next_id);
        spin_unlock_irqrestore(&tq->tq_lock, flags);

/*
 * The taskq_wait() function will block until the taskq is empty.
 * This means that if a taskq re-dispatches work to itself taskq_wait()
 * callers will block indefinitely.
 */
taskq_wait(taskq_t *tq)
        wait_event(tq->tq_wait_waitq, taskq_wait_check(tq));

EXPORT_SYMBOL(taskq_wait);
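
/*
 * Returns nonzero if the given kernel thread is a worker thread belonging
 * to 'tq'. Membership is tracked through the taskq_tsd thread-specific
 * data set in taskq_thread().
 */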
taskq_member(taskq_t *tq, kthread_t *t)
        return (tq == (taskq_t *)tsd_get_by_thread(taskq_tsd, t));

EXPORT_SYMBOL(taskq_member);
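
/*
 * Returns the taskq the current thread is servicing, or NULL if the caller
 * is not a taskq worker thread.
 */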
taskq_of_curthread(void)
        return (tsd_get(taskq_tsd));

EXPORT_SYMBOL(taskq_of_curthread);

/*
 * Cancel an already dispatched task given the task id. Still pending tasks
 * will be immediately canceled, and if the task is active the function will
 * block until it completes. Preallocated tasks which are canceled must be
 * freed by the caller.
 */
taskq_cancel_id(taskq_t *tq, taskqid_t id)
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        t = taskq_find(tq, id);
        if (t && t != ERR_PTR(-EBUSY)) {
                list_del_init(&t->tqent_list);
                t->tqent_flags |= TQENT_FLAG_CANCEL;

                /*
                 * When canceling the lowest outstanding task id we
                 * must recalculate the new lowest outstanding id.
                 */
                if (tq->tq_lowest_id == t->tqent_id) {
                        tq->tq_lowest_id = taskq_lowest_id(tq);
                        ASSERT3S(tq->tq_lowest_id, >, t->tqent_id);

                /*
                 * The task_expire() function takes the tq->tq_lock so drop
                 * the lock before synchronously cancelling the timer.
                 */
                if (timer_pending(&t->tqent_timer)) {
                        spin_unlock_irqrestore(&tq->tq_lock, flags);
                        del_timer_sync(&t->tqent_timer);
                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);

                if (!(t->tqent_flags & TQENT_FLAG_PREALLOC))

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        if (t == ERR_PTR(-EBUSY)) {
                taskq_wait_id(tq, id);

EXPORT_SYMBOL(taskq_cancel_id);

static int taskq_thread_spawn(taskq_t *tq);

taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
        taskqid_t rc = TASKQID_INVALID;
        unsigned long irqflags;

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))

        /* Do not queue the task unless there is an idle thread for it */
        ASSERT(tq->tq_nactive <= tq->tq_nthreads);
        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
                /* Dynamic taskq may be able to spawn another thread */
                if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
                    taskq_thread_spawn(tq) == 0)

        if ((t = task_alloc(tq, flags, &irqflags)) == NULL)

        spin_lock(&t->tqent_lock);

        /* Queue to the front of the list to enforce TQ_NOQUEUE semantics */
        if (flags & TQ_NOQUEUE)
                list_add(&t->tqent_list, &tq->tq_prio_list);
        /* Queue to the priority list instead of the pending list */
        else if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = rc = tq->tq_next_id;
        t->tqent_func = func;
        t->tqent_timer.function = NULL;
        t->tqent_timer.expires = 0;

        t->tqent_birth = jiffies;
        DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);

        /* Spawn additional taskq threads if required. */
        if (!(flags & TQ_NOQUEUE) && tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);

        spin_unlock_irqrestore(&tq->tq_lock, irqflags);

EXPORT_SYMBOL(taskq_dispatch);
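
/*
 * Dispatch a task whose execution is deferred until 'expire_time', an
 * absolute expiration time used to arm the entry's timer. The entry waits
 * on the delay list until the timer fires and task_expire() moves it to
 * the priority list for execution.
 */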
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
        taskqid_t rc = TASKQID_INVALID;
        unsigned long irqflags;

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags, tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE))

        if ((t = task_alloc(tq, flags, &irqflags)) == NULL)

        spin_lock(&t->tqent_lock);

        /* Queue to the delay list for subsequent execution */
        list_add_tail(&t->tqent_list, &tq->tq_delay_list);

        t->tqent_id = rc = tq->tq_next_id;
        t->tqent_func = func;
        t->tqent_timer.function = task_expire;
        t->tqent_timer.expires = (unsigned long)expire_time;
        add_timer(&t->tqent_timer);

        ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

        spin_unlock(&t->tqent_lock);

        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);
        spin_unlock_irqrestore(&tq->tq_lock, irqflags);

EXPORT_SYMBOL(taskq_dispatch_delay);
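
/*
 * Like taskq_dispatch(), but reuses a caller-provided, preallocated
 * taskq_ent_t (see taskq_init_ent()) so this path never allocates memory.
 * The entry is flagged TQENT_FLAG_PREALLOC so the worker thread will not
 * free it when the task completes.
 */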
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint_t flags,
    taskq_ent_t *t)
        unsigned long irqflags;

        spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
            tq->tq_lock_class);

        /* Taskq being destroyed and all tasks drained */
        if (!(tq->tq_flags & TASKQ_ACTIVE)) {
                t->tqent_id = TASKQID_INVALID;

        if ((flags & TQ_NOQUEUE) && (tq->tq_nactive == tq->tq_nthreads)) {
                /* Dynamic taskq may be able to spawn another thread */
                if (!(tq->tq_flags & TASKQ_DYNAMIC) ||
                    taskq_thread_spawn(tq) == 0)

        spin_lock(&t->tqent_lock);

        /*
         * Make sure the entry is not on some other taskq; it is important to
         * ASSERT() under lock.
         */
        ASSERT(taskq_empty_ent(t));

        /*
         * Mark it as a prealloc'd task. This is important
         * to ensure that we don't free it later.
         */
        t->tqent_flags |= TQENT_FLAG_PREALLOC;

        /* Queue to the priority list instead of the pending list */
        if (flags & TQ_FRONT)
                list_add_tail(&t->tqent_list, &tq->tq_prio_list);
                list_add_tail(&t->tqent_list, &tq->tq_pend_list);

        t->tqent_id = tq->tq_next_id;
        t->tqent_func = func;

        t->tqent_birth = jiffies;
        DTRACE_PROBE1(taskq_ent__birth, taskq_ent_t *, t);

        spin_unlock(&t->tqent_lock);

        wake_up(&tq->tq_work_waitq);

        /* Spawn additional taskq threads if required. */
        if (tq->tq_nactive == tq->tq_nthreads)
                (void) taskq_thread_spawn(tq);

        spin_unlock_irqrestore(&tq->tq_lock, irqflags);

EXPORT_SYMBOL(taskq_dispatch_ent);
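
/*
 * Returns nonzero if the preallocated entry is not currently queued on any
 * taskq list.
 */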
taskq_empty_ent(taskq_ent_t *t)
        return (list_empty(&t->tqent_list));

EXPORT_SYMBOL(taskq_empty_ent);
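
/*
 * Initialize a caller-provided taskq_ent_t for later use with
 * taskq_dispatch_ent().
 */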
taskq_init_ent(taskq_ent_t *t)
        spin_lock_init(&t->tqent_lock);
        init_waitqueue_head(&t->tqent_waitq);
        timer_setup(&t->tqent_timer, NULL, 0);
        INIT_LIST_HEAD(&t->tqent_list);
        t->tqent_func = NULL;
        t->tqent_taskq = NULL;

EXPORT_SYMBOL(taskq_init_ent);

/*
 * Return the next pending task; preference is given to tasks on the
 * priority list, which were dispatched with TQ_FRONT.
 */
taskq_next_ent(taskq_t *tq)
        struct list_head *list;

        if (!list_empty(&tq->tq_prio_list))
                list = &tq->tq_prio_list;
        else if (!list_empty(&tq->tq_pend_list))
                list = &tq->tq_pend_list;

        return (list_entry(list->next, taskq_ent_t, tqent_list));

/*
 * Spawns a new thread for the specified taskq.
 */
taskq_thread_spawn_task(void *arg)
        taskq_t *tq = (taskq_t *)arg;

        if (taskq_thread_create(tq) == NULL) {
                /* Restore the spawning count if thread creation failed. */
                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);
                spin_unlock_irqrestore(&tq->tq_lock, flags);

/*
 * Spawn additional threads for dynamic taskqs (TASKQ_DYNAMIC) when the
 * current number of threads is insufficient to handle the pending tasks.
 * These new threads must be created by the dedicated dynamic_taskq to
 * avoid deadlocks between thread creation and memory reclaim. The
 * system_taskq, which is also a dynamic taskq, cannot be safely used for
 * this.
 */
taskq_thread_spawn(taskq_t *tq)
        if (!(tq->tq_flags & TASKQ_DYNAMIC))

        if ((tq->tq_nthreads + tq->tq_nspawn < tq->tq_maxthreads) &&
            (tq->tq_flags & TASKQ_ACTIVE)) {
                spawning = (++tq->tq_nspawn);
                taskq_dispatch(dynamic_taskq, taskq_thread_spawn_task,
                    tq, TQ_NOSLEEP);

/*
 * Threads in a dynamic taskq should only exit once it has been completely
 * drained and no other threads are actively servicing tasks. This prevents
 * threads from being created and destroyed more than is required.
 *
 * The first thread in the thread list is treated as the primary thread.
 * There is nothing special about the primary thread but in order to avoid
 * all the taskq pids from changing we opt to make it long running.
 */
taskq_thread_should_stop(taskq_t *tq, taskq_thread_t *tqt)
        if (!(tq->tq_flags & TASKQ_DYNAMIC))

        if (list_first_entry(&(tq->tq_thread_list), taskq_thread_t,
            tqt_thread_list) == tqt)

            ((tq->tq_nspawn == 0) &&            /* No threads are being spawned */
            (tq->tq_nactive == 0) &&            /* No threads are handling tasks */
            (tq->tq_nthreads > 1) &&            /* More than 1 thread is running */
            (!taskq_next_ent(tq)) &&            /* There are no pending tasks */
            (spl_taskq_thread_dynamic));        /* Dynamic taskqs are allowed */

taskq_thread(void *args)
        DECLARE_WAITQUEUE(wait, current);
        taskq_thread_t *tqt = args;
        taskq_ent_t dup_task = {};

        current->flags |= PF_NOFREEZE;

        (void) spl_fstrans_mark();

        sigfillset(&blocked);
        sigprocmask(SIG_BLOCK, &blocked, NULL);
        flush_signals(current);

        tsd_set(taskq_tsd, tq);
        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        /*
         * If we are dynamically spawned, decrease spawning count. Note that
         * we could be created during taskq_create, in which case we shouldn't
         * do the decrement. But it's fine because taskq_create will reset
         * tq_nspawn to 0 afterwards.
         */
        if (tq->tq_flags & TASKQ_DYNAMIC)

        /* Immediately exit if more threads than allowed were created. */
        if (tq->tq_nthreads >= tq->tq_maxthreads)

        list_add_tail(&tqt->tqt_thread_list, &tq->tq_thread_list);
        wake_up(&tq->tq_wait_waitq);
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {

                if (list_empty(&tq->tq_pend_list) &&
                    list_empty(&tq->tq_prio_list)) {

                        if (taskq_thread_should_stop(tq, tqt)) {
                                wake_up_all(&tq->tq_wait_waitq);

                        add_wait_queue_exclusive(&tq->tq_work_waitq, &wait);
                        spin_unlock_irqrestore(&tq->tq_lock, flags);

                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                        remove_wait_queue(&tq->tq_work_waitq, &wait);

                        __set_current_state(TASK_RUNNING);

                if ((t = taskq_next_ent(tq)) != NULL) {
                        list_del_init(&t->tqent_list);
                        /*
                         * A TQENT_FLAG_PREALLOC task may be reused or freed
                         * during the task function call. Store tqent_id and
                         * tqent_flags here.
                         *
                         * Also use an on stack taskq_ent_t for tqt_task
                         * assignment in this case; we want to make sure
                         * to duplicate all fields, so the values are
                         * correct when it's accessed via DTRACE_PROBE*.
                         */
                        tqt->tqt_id = t->tqent_id;
                        tqt->tqt_flags = t->tqent_flags;

                        if (t->tqent_flags & TQENT_FLAG_PREALLOC) {

                        taskq_insert_in_order(tq, tqt);
                        spin_unlock_irqrestore(&tq->tq_lock, flags);

                        DTRACE_PROBE1(taskq_ent__start, taskq_ent_t *, t);

                        /* Perform the requested task */
                        t->tqent_func(t->tqent_arg);

                        DTRACE_PROBE1(taskq_ent__finish, taskq_ent_t *, t);

                        spin_lock_irqsave_nested(&tq->tq_lock, flags,
                            tq->tq_lock_class);
                        list_del_init(&tqt->tqt_active_list);
                        tqt->tqt_task = NULL;

                        /* For prealloc'd tasks, we don't free anything. */
                        if (!(tqt->tqt_flags & TQENT_FLAG_PREALLOC))
                        /*
                         * When the current lowest outstanding taskqid is
                         * done, calculate the new lowest outstanding id.
                         */
                        if (tq->tq_lowest_id == tqt->tqt_id) {
                                tq->tq_lowest_id = taskq_lowest_id(tq);
                                ASSERT3S(tq->tq_lowest_id, >, tqt->tqt_id);

                        /* Spawn additional taskq threads if required. */
                        if ((++seq_tasks) > spl_taskq_thread_sequential &&
                            taskq_thread_spawn(tq))

                        tqt->tqt_id = TASKQID_INVALID;
                        wake_up_all(&tq->tq_wait_waitq);

                        if (taskq_thread_should_stop(tq, tqt))

                set_current_state(TASK_INTERRUPTIBLE);

        __set_current_state(TASK_RUNNING);
        list_del_init(&tqt->tqt_thread_list);
        kmem_free(tqt, sizeof (taskq_thread_t));
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        tsd_set(taskq_tsd, NULL);
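
/*
 * Allocate a taskq_thread_t and spawn its backing kernel thread. CPU
 * binding (round-robin) and thread priority are applied according to the
 * spl_taskq_thread_bind and spl_taskq_thread_priority module parameters.
 */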
static taskq_thread_t *
taskq_thread_create(taskq_t *tq)
        static int last_used_cpu = 0;

        tqt = kmem_alloc(sizeof (*tqt), KM_PUSHPAGE);
        INIT_LIST_HEAD(&tqt->tqt_thread_list);
        INIT_LIST_HEAD(&tqt->tqt_active_list);
        tqt->tqt_id = TASKQID_INVALID;

        tqt->tqt_thread = spl_kthread_create(taskq_thread, tqt,
            "%s", tq->tq_name);
        if (tqt->tqt_thread == NULL) {
                kmem_free(tqt, sizeof (taskq_thread_t));

        if (spl_taskq_thread_bind) {
                last_used_cpu = (last_used_cpu + 1) % num_online_cpus();
                kthread_bind(tqt->tqt_thread, last_used_cpu);

        if (spl_taskq_thread_priority)
                set_user_nice(tqt->tqt_thread, PRIO_TO_NICE(tq->tq_pri));

        wake_up_process(tqt->tqt_thread);

taskq_create(const char *name, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
        taskq_thread_t *tqt;
        int count = 0, rc = 0, i;
        unsigned long irqflags;

        ASSERT(name != NULL);
        ASSERT(minalloc >= 0);
        ASSERT(maxalloc <= INT_MAX);
        ASSERT(!(flags & (TASKQ_CPR_SAFE)));    /* Unsupported */

        /* Scale the number of threads using nthreads as a percentage */
        if (flags & TASKQ_THREADS_CPU_PCT) {
                ASSERT(nthreads <= 100);
                ASSERT(nthreads >= 0);
                nthreads = MIN(nthreads, 100);
                nthreads = MAX(nthreads, 0);
                nthreads = MAX((num_online_cpus() * nthreads) / 100, 1);
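
                /*
                 * For example, on an 8-CPU system nthreads = 75 yields
                 * MAX((8 * 75) / 100, 1) = 6 worker threads.
                 */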
        tq = kmem_alloc(sizeof (*tq), KM_PUSHPAGE);

        spin_lock_init(&tq->tq_lock);
        INIT_LIST_HEAD(&tq->tq_thread_list);
        INIT_LIST_HEAD(&tq->tq_active_list);
        tq->tq_name = kmem_strdup(name);
        tq->tq_nthreads = 0;
        tq->tq_maxthreads = nthreads;
        tq->tq_minalloc = minalloc;
        tq->tq_maxalloc = maxalloc;
        tq->tq_flags = (flags | TASKQ_ACTIVE);
        tq->tq_next_id = TASKQID_INITIAL;
        tq->tq_lowest_id = TASKQID_INITIAL;
        INIT_LIST_HEAD(&tq->tq_free_list);
        INIT_LIST_HEAD(&tq->tq_pend_list);
        INIT_LIST_HEAD(&tq->tq_prio_list);
        INIT_LIST_HEAD(&tq->tq_delay_list);
        init_waitqueue_head(&tq->tq_work_waitq);
        init_waitqueue_head(&tq->tq_wait_waitq);
        tq->tq_lock_class = TQ_LOCK_GENERAL;
        INIT_LIST_HEAD(&tq->tq_taskqs);

        if (flags & TASKQ_PREPOPULATE) {
                spin_lock_irqsave_nested(&tq->tq_lock, irqflags,
                    tq->tq_lock_class);

                for (i = 0; i < minalloc; i++)
                        task_done(tq, task_alloc(tq, TQ_PUSHPAGE | TQ_NEW,
                            &irqflags));

                spin_unlock_irqrestore(&tq->tq_lock, irqflags);

        if ((flags & TASKQ_DYNAMIC) && spl_taskq_thread_dynamic)

        for (i = 0; i < nthreads; i++) {
                tqt = taskq_thread_create(tq);

        /* Wait for all threads to be started before potential destroy */
        wait_event(tq->tq_wait_waitq, tq->tq_nthreads == count);
        /*
         * taskq_thread might have touched nspawn, but we don't want them to
         * because they're not dynamically spawned. So we reset it to 0.
         */

                down_write(&tq_list_sem);
                tq->tq_instance = taskq_find_by_name(name) + 1;
                list_add_tail(&tq->tq_taskqs, &tq_list);
                up_write(&tq_list_sem);

EXPORT_SYMBOL(taskq_create);

taskq_destroy(taskq_t *tq)
        struct task_struct *thread;
        taskq_thread_t *tqt;
        unsigned long flags;

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        tq->tq_flags &= ~TASKQ_ACTIVE;
        spin_unlock_irqrestore(&tq->tq_lock, flags);

        /*
         * When TASKQ_ACTIVE is clear new tasks may not be added, nor may
         * new worker threads be spawned for a dynamic taskq.
         */
        if (dynamic_taskq != NULL)
                taskq_wait_outstanding(dynamic_taskq, 0);

        /* remove taskq from global list used by the kstats */
        down_write(&tq_list_sem);
        list_del(&tq->tq_taskqs);
        up_write(&tq_list_sem);

        spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
        /* wait for spawning threads to insert themselves into the list */
        while (tq->tq_nspawn) {
                spin_unlock_irqrestore(&tq->tq_lock, flags);
                schedule_timeout_interruptible(1);
                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);

        /*
         * Signal each thread to exit and block until it does. Each thread
         * is responsible for removing itself from the list and freeing its
         * taskq_thread_t. This allows idle threads to opt to remove
         * themselves from the taskq. They can be recreated as needed.
         */
        while (!list_empty(&tq->tq_thread_list)) {
                tqt = list_entry(tq->tq_thread_list.next,
                    taskq_thread_t, tqt_thread_list);
                thread = tqt->tqt_thread;
                spin_unlock_irqrestore(&tq->tq_lock, flags);

                kthread_stop(thread);

                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);

        while (!list_empty(&tq->tq_free_list)) {
                t = list_entry(tq->tq_free_list.next, taskq_ent_t, tqent_list);

                ASSERT(!(t->tqent_flags & TQENT_FLAG_PREALLOC));

                list_del_init(&t->tqent_list);

        ASSERT0(tq->tq_nthreads);
        ASSERT0(tq->tq_nalloc);
        ASSERT0(tq->tq_nspawn);
        ASSERT(list_empty(&tq->tq_thread_list));
        ASSERT(list_empty(&tq->tq_active_list));
        ASSERT(list_empty(&tq->tq_free_list));
        ASSERT(list_empty(&tq->tq_pend_list));
        ASSERT(list_empty(&tq->tq_prio_list));
        ASSERT(list_empty(&tq->tq_delay_list));

        spin_unlock_irqrestore(&tq->tq_lock, flags);

        kmem_strfree(tq->tq_name);
        kmem_free(tq, sizeof (taskq_t));

EXPORT_SYMBOL(taskq_destroy);

static unsigned int spl_taskq_kick = 0;

/*
 * module_param_cb was introduced to take a kernel_param_ops structure and
 * module_param_call was marked obsolete. The set and get operations were
 * also changed to take a 'const struct kernel_param *'.
 */
#ifdef module_param_cb
param_set_taskq_kick(const char *val, const struct kernel_param *kp)
#else
param_set_taskq_kick(const char *val, struct kernel_param *kp)
#endif
        unsigned long flags;

        ret = param_set_uint(val, kp);
        if (ret < 0 || !spl_taskq_kick)

        down_read(&tq_list_sem);
        list_for_each_entry(tq, &tq_list, tq_taskqs) {
                spin_lock_irqsave_nested(&tq->tq_lock, flags,
                    tq->tq_lock_class);
                /* Check if the first pending task is older than 5 seconds */
                t = taskq_next_ent(tq);
                if (t && time_after(jiffies, t->tqent_birth + 5*HZ)) {
                        (void) taskq_thread_spawn(tq);
                        printk(KERN_INFO "spl: Kicked taskq %s/%d\n",
                            tq->tq_name, tq->tq_instance);
                spin_unlock_irqrestore(&tq->tq_lock, flags);
        up_read(&tq_list_sem);

#ifdef module_param_cb
static const struct kernel_param_ops param_ops_taskq_kick = {
        .set = param_set_taskq_kick,
        .get = param_get_uint,
};
module_param_cb(spl_taskq_kick, &param_ops_taskq_kick, &spl_taskq_kick, 0644);
#else
module_param_call(spl_taskq_kick, param_set_taskq_kick, param_get_uint,
        &spl_taskq_kick, 0644);
#endif
MODULE_PARM_DESC(spl_taskq_kick,
        "Write nonzero to kick stuck taskqs to spawn more threads");

spl_taskq_init(void)
        init_rwsem(&tq_list_sem);
        tsd_create(&taskq_tsd, NULL);

        system_taskq = taskq_create("spl_system_taskq", MAX(boot_ncpus, 64),
            maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
        if (system_taskq == NULL)

        system_delay_taskq = taskq_create("spl_delay_taskq", MAX(boot_ncpus, 4),
            maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE|TASKQ_DYNAMIC);
        if (system_delay_taskq == NULL) {
                taskq_destroy(system_taskq);

        dynamic_taskq = taskq_create("spl_dynamic_taskq", 1,
            maxclsyspri, boot_ncpus, INT_MAX, TASKQ_PREPOPULATE);
        if (dynamic_taskq == NULL) {
                taskq_destroy(system_taskq);
                taskq_destroy(system_delay_taskq);

        /*
         * This is used to annotate tq_lock, so
         *   taskq_dispatch -> taskq_thread_spawn -> taskq_dispatch
         * does not trigger a lockdep warning re: possible recursive locking.
         */
        dynamic_taskq->tq_lock_class = TQ_LOCK_DYNAMIC;

spl_taskq_fini(void)
        taskq_destroy(dynamic_taskq);
        dynamic_taskq = NULL;

        taskq_destroy(system_delay_taskq);
        system_delay_taskq = NULL;

        taskq_destroy(system_taskq);
        system_taskq = NULL;

        tsd_destroy(&taskq_tsd);