/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * Kernel task queues: general-purpose asynchronous task scheduling.
 *
 * A common problem in kernel programming is the need to schedule tasks
 * to be performed later, by another thread. There are several reasons
 * you may want or need to do this:
 *
 * (1) The task isn't time-critical, but your current code path is.
 *
 * (2) The task may require grabbing locks that you already hold.
 *
 * (3) The task may need to block (e.g. to wait for memory), but you
 *     cannot block in your current context.
 *
 * (4) Your code path can't complete because of some condition, but you can't
 *     sleep or fail, so you queue the task for later execution when the
 *     condition disappears.
 *
 * (5) You just want a simple way to launch multiple tasks in parallel.
 *
 * Task queues provide such a facility. In its simplest form (used when
 * performance is not a critical consideration) a task queue consists of a
 * single list of tasks, together with one or more threads to service the
 * list. There are some cases when this simple queue is not sufficient:
 *
 * (1) The task queues are very hot and there is a need to avoid data and lock
 *     contention over global resources.
 *
 * (2) Some tasks may depend on other tasks to complete, so they can't be put
 *     in the same list managed by the same thread.
 *
 * (3) Some tasks may block for a long time, and this should not block other
 *     tasks in the queue.
 *
 * To provide useful service in such cases we define a "dynamic task queue",
 * which has an individual thread for each of the tasks. These threads are
 * dynamically created as they are needed and destroyed when they are not in
 * use. The API for managing task pools is the same as for managing task
 * queues, except for the taskq creation flag TASKQ_DYNAMIC, which indicates
 * that dynamic task pool behavior is desired.
 *
 * Dynamic task queues may also place tasks in a normal queue (called the
 * "backing queue") when the task pool runs out of resources. Users of task
 * queues may disallow such queued scheduling by specifying TQ_NOQUEUE in
 * the dispatch flags.
 *
 * The backing task queue is also used for scheduling internal tasks needed
 * for dynamic task queue maintenance.
 *
 * INTERFACES:
 *
 * taskq_t *taskq_create(name, nthreads, pri_t pri, minalloc, maxalloc, flags);
 *
 *	Create a taskq with the specified properties.
 *	Possible 'flags':
 *
 *	  TASKQ_DYNAMIC: Create a task pool for task management. If this flag
 *		is specified, 'nthreads' specifies the maximum number of
 *		threads in the task queue. Task execution order for dynamic
 *		task queues is not predictable.
 *
 *		If this flag is not specified (default case) a
 *		single-list task queue is created with 'nthreads' threads
 *		servicing it. Entries in this queue are managed by
 *		taskq_ent_alloc() and taskq_ent_free() which try to keep the
 *		task population between 'minalloc' and 'maxalloc', but the
 *		latter limit is only advisory for TQ_SLEEP dispatches and the
 *		former limit is only advisory for TQ_NOALLOC dispatches. If
 *		TASKQ_PREPOPULATE is set in 'flags', the taskq will be
 *		prepopulated with 'minalloc' task structures.
 *
 *		Since non-DYNAMIC taskqs are queues, tasks are guaranteed to be
 *		executed in the order they are scheduled if nthreads == 1.
 *		If nthreads > 1, task execution order is not predictable.
 *
 *	  TASKQ_PREPOPULATE: Prepopulate task queue with threads.
 *		Also prepopulate the task queue with 'minalloc' task structures.
 *
 *	  TASKQ_CPR_SAFE: This flag specifies that users of the task queue will
 *		use their own protocol for handling CPR issues. This flag is
 *		not supported for DYNAMIC task queues.
 *
 *	The 'pri' field specifies the default priority for the threads that
 *	service all scheduled tasks.
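 *
 *	For illustration, creating a simple prepopulated queue might look
 *	like this (a sketch; the name and the numeric limits are made up):
 *
 *		taskq_t *tq = taskq_create("my_taskq", 4, minclsyspri,
 *		    4, 64, TASKQ_PREPOPULATE);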
 *
 * void taskq_destroy(tq):
 *
 *	Waits for any scheduled tasks to complete, then destroys the taskq.
 *	Caller should guarantee that no new tasks are scheduled in the closing
 *	taskq.
 *
 * taskqid_t taskq_dispatch(tq, func, arg, flags):
 *
 *	Dispatches the task "func(arg)" to taskq. The 'flags' indicates whether
 *	the caller is willing to block for memory. The function returns an
 *	opaque value which is zero iff dispatch fails. If flags is TQ_NOSLEEP
 *	or TQ_NOALLOC and the task can't be dispatched, taskq_dispatch() fails
 *	and returns (taskqid_t)0.
 *
 *	ASSUMES: func != NULL.
 *
 *	Possible 'flags':
 *
 *	  TQ_NOSLEEP: Do not wait for resources; may fail.
 *
 *	  TQ_NOALLOC: Do not allocate memory; may fail. May only be used with
 *		non-dynamic task queues.
 *
 *	  TQ_NOQUEUE: Do not enqueue a task if it can't be dispatched due to
 *		lack of available resources; fail instead. If this flag is not
 *		set, and the task pool is exhausted, the task may be scheduled
 *		in the backing queue. This flag may ONLY be used with dynamic
 *		task queues.
 *
 *		NOTE: This flag should always be used when a task queue is used
 *		for tasks that may depend on each other for completion.
 *		Enqueueing dependent tasks may create deadlocks.
 *
 *	  TQ_SLEEP: May block waiting for resources. May still fail for
 *		dynamic task queues if TQ_NOQUEUE is also specified, otherwise
 *		it always succeeds.
 *
 *	NOTE: Dynamic task queues are much more likely to fail in
 *	taskq_dispatch() (especially if TQ_NOQUEUE was specified), so it
 *	is important to have backup strategies handling such failures.
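 *
 *	For example, a dispatch with a fallback might look like this (a
 *	sketch; 'my_func' and 'arg' are illustrative):
 *
 *		if (taskq_dispatch(tq, my_func, arg, TQ_NOSLEEP) ==
 *		    (taskqid_t)0)
 *			my_func(arg);
 *
 *	where a failed dispatch falls back to running the task directly.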
 *
 * void taskq_wait(tq):
 *
 *	Waits for all previously scheduled tasks to complete.
 *
 *	NOTE: It does not stop any new task dispatches.
 *	      Do NOT call taskq_wait() from a task: it will cause deadlock.
 *
 * void taskq_suspend(tq)
 *
 *	Suspend all task execution. Tasks already scheduled for a dynamic task
 *	queue will still be executed, but all newly scheduled tasks will be
 *	suspended until taskq_resume() is called.
 *
 * int taskq_suspended(tq)
 *
 *	Returns 1 if taskq is suspended and 0 otherwise. It is intended to
 *	ASSERT that the task queue is suspended.
 *
 * void taskq_resume(tq)
 *
 *	Resume task queue execution.
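 *
 *	For instance, a suspend/resume pair might bracket a reconfiguration
 *	(an illustrative sketch):
 *
 *		taskq_suspend(tq);
 *		ASSERT(taskq_suspended(tq));
 *		(update state that the tasks depend on)
 *		taskq_resume(tq);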
 *
 * int taskq_member(tq, thread)
 *
 *	Returns 1 if 'thread' belongs to taskq 'tq' and 0 otherwise. The
 *	intended use is to ASSERT that a given function is called in taskq
 *	context only.
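 *
 *	For example (illustrative):
 *
 *		ASSERT(taskq_member(tq, curthread));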
 *
 * taskq_t *system_taskq;
 *
 *	Global system-wide dynamic task queue for common uses. It may be used
 *	by any subsystem that needs to schedule tasks and does not need to
 *	manage its own task queues. It is initialized quite early during
 *	system boot.
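 *
 *	Such a subsystem can dispatch without creating its own queue (a
 *	sketch; 'my_func' and 'arg' are illustrative):
 *
 *		(void) taskq_dispatch(system_taskq, my_func, arg, TQ_SLEEP);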
 *
 * ARCHITECTURE AND IMPLEMENTATION:
 *
 * This is a schematic representation of the task queue structures.
 *
 * +-------------+
 * |tq_lock | +---< taskq_ent_free()
 * |... | | tqent: tqent:
 * +-------------+ | +------------+ +------------+
 * | tq_freelist |-->| tqent_next |--> ... ->| tqent_next |
 * +-------------+ +------------+ +------------+
 * |... | | ... | | ... |
 * +-------------+ +------------+ +------------+
 * | | +-------------->taskq_ent_alloc()
 * +--------------------------------------------------------------------------+
 * | | | tqent tqent |
 * | +---------------------+ +--> +------------+ +--> +------------+ |
 * | | ... | | | func, arg | | | func, arg | |
 * +>+---------------------+ <---|-+ +------------+ <---|-+ +------------+ |
 * | tq_task.tqent_next | ----+ | | tqent_next | --->+ | | tqent_next |--+
 * +---------------------+ | +------------+ ^ | +------------+
 * +-| tq_task.tqent_prev | +--| tqent_prev | | +--| tqent_prev | ^
 * | +---------------------+ +------------+ | +------------+ |
 * | |... | | ... | | | ... | |
 * | +---------------------+ +------------+ | +------------+ |
 * +--------------------------------------+--------------+ TQ_APPEND() -+
 * |... | taskq_thread()-----+
 * | tq_buckets |--+-------> [ NULL ] (for regular task queues)
 *
 * | DYNAMIC TASK QUEUES:
 *
 * +-> taskq_bucket[nCPU] taskq_bucket_dispatch()
 * +-------------------+ ^
 * +--->| tqbucket_lock | |
 * | +-------------------+ +--------+ +--------+
 * | | tqbucket_freelist |-->| tqent |-->...| tqent | ^
 * | +-------------------+<--+--------+<--...+--------+ |
 * | | ... | | thread | | thread | |
 * | +-------------------+ +--------+ +--------+ |
 * | +-------------------+ |
 * taskq_dispatch()--+--->| tqbucket_lock | TQ_APPEND()------+
 * TQ_HASH() | +-------------------+ +--------+ +--------+
 * | | tqbucket_freelist |-->| tqent |-->...| tqent |
 * | +-------------------+<--+--------+<--...+--------+
 * | | ... | | thread | | thread |
 * | +-------------------+ +--------+ +--------+
 *
 * Task queues use the tq_task field to link new entries in the queue. The
 * queue is a circular doubly-linked list. Entries are put at the end of the
 * list with TQ_APPEND() and processed from the front of the list by
 * taskq_thread() in FIFO order. Task queue entries are cached in the free
 * list managed by the taskq_ent_alloc() and taskq_ent_free() functions.
 *
 *	All threads used by task queues mark the t_taskq field of the thread
 *	to point to the task queue.
 *
 * Dynamic Task Queues Implementation.
 *
 * For dynamic task queues there is a 1-to-1 mapping between a thread and a
 * taskq_ent_t structure. Each entry is serviced by its own thread and each
 * thread is controlled by a single entry.
 *
 * Entries are distributed over a set of buckets. To avoid using modulo
 * arithmetic the number of buckets is 2^n and is determined as the nearest
 * power-of-two rounddown of the number of CPUs in the system. The tunable
 * variable 'taskq_maxbuckets' limits the maximum number of buckets. Each
 * entry is attached to a bucket for its lifetime and can't migrate to other
 * buckets.
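 *
 * With a power-of-two bucket count, the bucket index can be derived with a
 * mask rather than a modulo. An illustrative sketch, where 'h' stands for
 * whatever value TQ_HASH() produced:
 *
 *	b = &tq->tq_buckets[h & (tq->tq_nbuckets - 1)];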
 *
 * Entries that have scheduled tasks are not placed in any list. The dispatch
 * function sets their "func" and "arg" fields and signals the corresponding
 * thread to execute the task. Once the thread executes the task it clears the
 * "func" field and places the entry on the bucket cache of free entries
 * pointed to by the "tqbucket_freelist" field. ALL entries on the free list
 * should have the "func" field equal to NULL. The free list is a circular
 * doubly-linked list identical in structure to the tq_task list above, but
 * entries are taken from it in LIFO order - the last freed entry is the first
 * to be allocated. The taskq_bucket_dispatch() function gets the most recently
 * used entry from the free list, sets its "func" and "arg" fields and signals
 * a worker thread.
 *
 * After executing each task a per-entry thread taskq_d_thread() places its
 * entry on the bucket free list and goes to a timed sleep. If it wakes up
 * without getting a new task it removes the entry from the free list and
 * destroys itself. The thread sleep time is controlled by a tunable variable
 * `taskq_thread_timeout'.
 *
 * Various statistics are kept in the bucket which allow for later analysis
 * of taskq usage patterns. Also, a global copy of taskq creation and death
 * statistics is kept in the global taskq data structure. Since thread
 * creation and death happen rarely, updating such global data does not
 * present a performance problem.
 *
 * NOTE: Threads are not bound to any CPU and there is absolutely no
 *       association between the bucket and actual thread CPU, so buckets are
 *       used only to split resources and reduce resource contention. Having
 *       threads attached to the CPU denoted by a bucket may reduce the number
 *       of times a job switches between CPUs.
 *
 *       The current algorithm creates a thread whenever a bucket has no free
 *       entries. It would be nice to know how many threads are in the running
 *       state and not create threads if all CPUs are busy with existing
 *       tasks, but it is unclear how such a strategy can be implemented.
 *
 *       Currently buckets are created statically as an array attached to the
 *       task queue. On some systems with nCPUs < max_ncpus this may waste
 *       system memory. One solution would be to allocate buckets when they
 *       are first touched, but it is not clear how useful this would be.
 *
 * SUSPEND/RESUME implementation.
 *
 *	Before executing a task, taskq_thread() (which services non-dynamic
 *	task queues) obtains the taskq's thread lock as a reader. The
 *	taskq_suspend() function takes the same lock as a writer, blocking
 *	all non-dynamic task execution. The taskq_resume() function releases
 *	the lock, allowing taskq_thread to continue execution.
 *
 *	For dynamic task queues, each bucket is marked as TQBUCKET_SUSPEND by
 *	the taskq_suspend() function. After that taskq_bucket_dispatch() always
 *	fails, so that taskq_dispatch() will either enqueue tasks for a
 *	suspended backing queue or fail if TQ_NOQUEUE is specified in the
 *	dispatch flags.
 *
 *	NOTE: taskq_suspend() does not immediately block any tasks already
 *	      scheduled for dynamic task queues. It only suspends new tasks
 *	      scheduled after taskq_suspend() was called.
 *
 *	The taskq_member() function works by comparing a thread's t_taskq
 *	pointer with the passed taskq pointer.
 *
 * LOCKS and LOCK Hierarchy:
 *
 *	There are two locks used in task queues.
 *
 *	1) The task queue structure has a lock, protecting global task queue
 *	   state.
 *
 *	2) Each per-CPU bucket has a lock for bucket management.
 *
 *	If both locks are needed, the task queue lock should be taken only
 *	after the bucket lock.
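 *
 *	Code that needs both therefore takes them in this order (an
 *	illustrative sketch; 'b' is a pointer to some taskq_bucket_t):
 *
 *		mutex_enter(&b->tqbucket_lock);
 *		mutex_enter(&tq->tq_lock);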
 *
 * DEBUG FACILITIES.
 *
 * For DEBUG kernels it is possible to induce random failures in the
 * taskq_dispatch() function when it is given the TQ_NOSLEEP argument. The
 * values of the taskq_dmtbf and taskq_smtbf tunables control the mean time
 * between induced failures for dynamic and static task queues respectively.
 *
 * Setting TASKQ_STATISTIC to 0 will disable per-bucket statistics.
 *
 * TUNABLES.
 *
 *	system_taskq_size	- Size of the global system_taskq.
 *				  This value is multiplied by nCPUs to
 *				  determine the actual size.
 *				  Default value: 1
 *
 *	taskq_thread_timeout	- Maximum idle time for taskq_d_thread()
 *				  Default value: 5 minutes
 *
 *	taskq_maxbuckets	- Maximum number of buckets in any task queue
 *				  Default value: 128
 *
 *	taskq_search_depth	- Maximum # of buckets searched for a free
 *				  entry
 *				  Default value: 4
 *
 *	taskq_dmtbf		- Mean time between induced dispatch failures
 *				  for dynamic task queues.
 *				  Default value: UINT_MAX (no induced failures)
 *
 *	taskq_smtbf		- Mean time between induced dispatch failures
 *				  for static task queues.
 *				  Default value: UINT_MAX (no induced failures)
 *
 * CONDITIONAL compilation.
 *
 *	TASKQ_STATISTIC	- If set, enables bucket statistics (default).
 */

#include <sys/taskq_impl.h>
#include <sys/proc.h>
#include <sys/kmem.h>
#include <sys/callb.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/sysmacros.h>
#include <sys/sdt.h>
#include <sys/mutex.h>
#include <sys/kernel.h>
#include <sys/limits.h>

static kmem_cache_t *taskq_ent_cache, *taskq_cache;

/* Global system task queue for common use */
taskq_t *system_taskq;

/*
 * Maximum number of entries in the global system taskq is
 * system_taskq_size * max_ncpus.
 */
#define	SYSTEM_TASKQ_SIZE 1
int system_taskq_size = SYSTEM_TASKQ_SIZE;

/*
 * Dynamic task queue threads that don't get any work within
 * taskq_thread_timeout destroy themselves.
 */
#define	TASKQ_THREAD_TIMEOUT (60 * 5)
int taskq_thread_timeout = TASKQ_THREAD_TIMEOUT;

#define	TASKQ_MAXBUCKETS 128
int taskq_maxbuckets = TASKQ_MAXBUCKETS;

/*
 * When a bucket has no available entries other buckets are tried. The
 * taskq_search_depth parameter limits the number of buckets that we search
 * before failing. This is mostly useful in systems with many CPUs where we
 * may spend too much time scanning busy buckets.
 */
#define	TASKQ_SEARCH_DEPTH 4
int taskq_search_depth = TASKQ_SEARCH_DEPTH;

/*
 * Hashing function: mix various bits of x. May be pretty much anything.
 */
#define	TQ_HASH(x) ((x) ^ ((x) >> 11) ^ ((x) >> 17) ^ ((x) ^ 27))

/*
 * We do not create any new threads when the system is low on memory and start
 * throttling memory allocations. The following macro tries to estimate such
 * a condition.
 */
#define	ENOUGH_MEMORY() (freemem > throttlefree)

static taskq_t *taskq_create_common(const char *, int, int, pri_t, int,
    int, uint_t);
static void taskq_thread(void *);
static int taskq_constructor(void *, void *, int);
static void taskq_destructor(void *, void *);
static int taskq_ent_constructor(void *, void *, int);
static void taskq_ent_destructor(void *, void *);
static taskq_ent_t *taskq_ent_alloc(taskq_t *, int);
static void taskq_ent_free(taskq_t *, taskq_ent_t *);

/*
 * Collect per-bucket statistics when TASKQ_STATISTIC is defined.
 */
#define	TASKQ_STATISTIC 1

#if TASKQ_STATISTIC
#define	TQ_STAT(b, x)	b->tqbucket_stat.x++
#else
#define	TQ_STAT(b, x)
#endif

/*
 * Random fault injection.
 */
uint_t taskq_random;
uint_t taskq_dmtbf = UINT_MAX;    /* mean time between injected failures */
uint_t taskq_smtbf = UINT_MAX;    /* mean time between injected failures */

/*
 * TQ_NOSLEEP dispatches on dynamic task queues are always allowed to fail.
 *
 * TQ_NOSLEEP dispatches on static task queues can't arbitrarily fail because
 * they could prepopulate the cache and make sure that they do not use more
 * than minalloc entries. So, fault injection in this case ensures that
 * either TASKQ_PREPOPULATE is not set or there are more entries allocated
 * than is specified by minalloc. TQ_NOALLOC dispatches are always allowed
 * to fail, but for simplicity we treat them identically to TQ_NOSLEEP
 * dispatches.
 */
#ifdef DEBUG
#define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)		\
	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
	if ((flag & TQ_NOSLEEP) &&				\
	    taskq_random < 1771875 / taskq_dmtbf) {		\
		mutex_exit(&tq->tq_lock);			\
		return ((taskqid_t)0);				\
	}

#define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)		\
	taskq_random = (taskq_random * 2416 + 374441) % 1771875;\
	if ((flag & (TQ_NOSLEEP | TQ_NOALLOC)) &&		\
	    (!(tq->tq_flags & TASKQ_PREPOPULATE) ||		\
	    (tq->tq_nalloc > tq->tq_minalloc)) &&		\
	    (taskq_random < (1771875 / taskq_smtbf))) {		\
		mutex_exit(&tq->tq_lock);			\
		return ((taskqid_t)0);				\
	}
#else
#define	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flag)
#define	TASKQ_D_RANDOM_DISPATCH_FAILURE(tq, flag)
#endif

#define	IS_EMPTY(l) (((l).tqent_prev == (l).tqent_next) &&	\
	((l).tqent_prev == &(l)))

/*
 * Append `tqe' at the end of the doubly-linked list denoted by l.
 */
#define	TQ_APPEND(l, tqe) {					\
	tqe->tqent_next = &l;					\
	tqe->tqent_prev = l.tqent_prev;				\
	tqe->tqent_next->tqent_prev = tqe;			\
	tqe->tqent_prev->tqent_next = tqe;			\
}

/*
 * Schedule a task specified by func and arg into the task queue entry tqe.
 */
#define	TQ_ENQUEUE(tq, tqe, func, arg) {			\
	ASSERT(MUTEX_HELD(&tq->tq_lock));			\
	TQ_APPEND(tq->tq_task, tqe);				\
	tqe->tqent_func = (func);				\
	tqe->tqent_arg = (arg);					\
	tq->tq_tasks++;						\
	if (tq->tq_tasks - tq->tq_executed > tq->tq_maxtasks)	\
		tq->tq_maxtasks = tq->tq_tasks - tq->tq_executed; \
	cv_signal(&tq->tq_dispatch_cv);				\
	DTRACE_PROBE2(taskq__enqueue, taskq_t *, tq, taskq_ent_t *, tqe); \
}

/*
 * Do-nothing task which may be used to prepopulate thread caches.
 */
/*ARGSUSED*/
static void
nulltask(void *unused)
{
}

/*ARGSUSED*/
static int
taskq_constructor(void *buf, void *cdrarg, int kmflags)
{
	taskq_t *tq = buf;

	bzero(tq, sizeof (taskq_t));

	mutex_init(&tq->tq_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&tq->tq_threadlock, NULL, RW_DEFAULT, NULL);
	cv_init(&tq->tq_dispatch_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&tq->tq_wait_cv, NULL, CV_DEFAULT, NULL);

	tq->tq_task.tqent_next = &tq->tq_task;
	tq->tq_task.tqent_prev = &tq->tq_task;

	return (0);
}

/*ARGSUSED*/
static void
taskq_destructor(void *buf, void *cdrarg)
{
	taskq_t *tq = buf;

	mutex_destroy(&tq->tq_lock);
	rw_destroy(&tq->tq_threadlock);
	cv_destroy(&tq->tq_dispatch_cv);
	cv_destroy(&tq->tq_wait_cv);
}

/*ARGSUSED*/
static int
taskq_ent_constructor(void *buf, void *cdrarg, int kmflags)
{
	taskq_ent_t *tqe = buf;

	tqe->tqent_thread = NULL;
	cv_init(&tqe->tqent_cv, NULL, CV_DEFAULT, NULL);

	return (0);
}

/*ARGSUSED*/
static void
taskq_ent_destructor(void *buf, void *cdrarg)
{
	taskq_ent_t *tqe = buf;

	ASSERT(tqe->tqent_thread == NULL);
	cv_destroy(&tqe->tqent_cv);
}

/*
 * Create global system dynamic task queue.
 */
void
system_taskq_init(void)
{
	system_taskq = taskq_create_common("system_taskq", 0,
	    system_taskq_size * max_ncpus, minclsyspri, 4, 512,
	    TASKQ_PREPOPULATE | TASKQ_NOINSTANCE);
}

void
system_taskq_fini(void)
{
	taskq_destroy(system_taskq);
}

static void
taskq_init(void *dummy __unused)
{
	taskq_ent_cache = kmem_cache_create("taskq_ent_cache",
	    sizeof (taskq_ent_t), 0, taskq_ent_constructor,
	    taskq_ent_destructor, NULL, NULL, NULL, 0);
	taskq_cache = kmem_cache_create("taskq_cache", sizeof (taskq_t),
	    0, taskq_constructor, taskq_destructor, NULL, NULL, NULL, 0);
	system_taskq_init();
}

static void
taskq_fini(void *dummy __unused)
{
	system_taskq_fini();
	kmem_cache_destroy(taskq_cache);
	kmem_cache_destroy(taskq_ent_cache);
}

/*
 * taskq_ent_alloc()
 *
 * Allocates a new taskq_ent_t structure either from the free list or from the
 * cache. Returns NULL if it can't be allocated.
 *
 * Assumes: tq->tq_lock is held.
 */
static taskq_ent_t *
taskq_ent_alloc(taskq_t *tq, int flags)
{
	int kmflags = (flags & TQ_NOSLEEP) ? KM_NOSLEEP : KM_SLEEP;
	taskq_ent_t *tqe;

	ASSERT(MUTEX_HELD(&tq->tq_lock));

	/*
	 * TQ_NOALLOC allocations are allowed to use the freelist, even if
	 * we are below tq_minalloc.
	 */
	if ((tqe = tq->tq_freelist) != NULL &&
	    ((flags & TQ_NOALLOC) || tq->tq_nalloc >= tq->tq_minalloc)) {
		tq->tq_freelist = tqe->tqent_next;
	} else {
		if (flags & TQ_NOALLOC)
			return (NULL);

		mutex_exit(&tq->tq_lock);
		if (tq->tq_nalloc >= tq->tq_maxalloc) {
			if (kmflags & KM_NOSLEEP) {
				mutex_enter(&tq->tq_lock);
				return (NULL);
			}
			/*
			 * We don't want to exceed tq_maxalloc, but we can't
			 * wait for other tasks to complete (and thus free up
			 * task structures) without risking deadlock with
			 * the caller. So, we just delay for one second
			 * to throttle the allocation rate.
			 */
			delay(hz);
		}
		tqe = kmem_cache_alloc(taskq_ent_cache, kmflags);
		mutex_enter(&tq->tq_lock);
		if (tqe != NULL)
			tq->tq_nalloc++;
	}
	return (tqe);
}

/*
 * taskq_ent_free()
 *
 * Frees a taskq_ent_t structure by either putting it on the free list or
 * freeing it to the cache.
 *
 * Assumes: tq->tq_lock is held.
 */
static void
taskq_ent_free(taskq_t *tq, taskq_ent_t *tqe)
{
	ASSERT(MUTEX_HELD(&tq->tq_lock));

	if (tq->tq_nalloc <= tq->tq_minalloc) {
		tqe->tqent_next = tq->tq_freelist;
		tq->tq_freelist = tqe;
	} else {
		tq->tq_nalloc--;
		mutex_exit(&tq->tq_lock);
		kmem_cache_free(taskq_ent_cache, tqe);
		mutex_enter(&tq->tq_lock);
	}
}

/*
 * Dispatch a task.
 *
 * Assumes: func != NULL
 *
 * Returns: NULL if dispatch failed.
 *	    non-NULL if task dispatched successfully.
 *	    The actual return value is the pointer to the taskq entry that
 *	    was used to dispatch the task. This is useful for debugging.
 */
taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *tqe = NULL;

	ASSERT(func != NULL);
	ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));

	/*
	 * TQ_NOQUEUE flag can't be used with non-dynamic task queues.
	 */
	ASSERT(!(flags & TQ_NOQUEUE));

	/*
	 * Enqueue the task to the underlying queue.
	 */
	mutex_enter(&tq->tq_lock);

	TASKQ_S_RANDOM_DISPATCH_FAILURE(tq, flags);

	if ((tqe = taskq_ent_alloc(tq, flags)) == NULL) {
		mutex_exit(&tq->tq_lock);
		return ((taskqid_t)NULL);
	}
	TQ_ENQUEUE(tq, tqe, func, arg);
	mutex_exit(&tq->tq_lock);
	return ((taskqid_t)tqe);
}

/*
 * Wait for all pending tasks to complete.
 * Calling taskq_wait from a task will cause deadlock.
 */
void
taskq_wait(taskq_t *tq)
{
	mutex_enter(&tq->tq_lock);
	while (tq->tq_task.tqent_next != &tq->tq_task || tq->tq_active != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);
	mutex_exit(&tq->tq_lock);
}

/*
 * Suspend execution of tasks.
 *
 * Tasks in the queue part will be suspended immediately upon return from this
 * function. Pending tasks in the dynamic part will continue to execute, but
 * all new tasks will be suspended.
 */
void
taskq_suspend(taskq_t *tq)
{
	rw_enter(&tq->tq_threadlock, RW_WRITER);

	/*
	 * Mark task queue as being suspended. Needed for taskq_suspended().
	 */
	mutex_enter(&tq->tq_lock);
	ASSERT(!(tq->tq_flags & TASKQ_SUSPENDED));
	tq->tq_flags |= TASKQ_SUSPENDED;
	mutex_exit(&tq->tq_lock);
}

/*
 * Returns 1 if tq is suspended, 0 otherwise.
 */
int
taskq_suspended(taskq_t *tq)
{
	return ((tq->tq_flags & TASKQ_SUSPENDED) != 0);
}

/*
 * Resume taskq execution.
 */
void
taskq_resume(taskq_t *tq)
{
	ASSERT(RW_WRITE_HELD(&tq->tq_threadlock));

	mutex_enter(&tq->tq_lock);
	ASSERT(tq->tq_flags & TASKQ_SUSPENDED);
	tq->tq_flags &= ~TASKQ_SUSPENDED;
	mutex_exit(&tq->tq_lock);

	rw_exit(&tq->tq_threadlock);
}

int
taskq_member(taskq_t *tq, kthread_t *thread)
{
	int i;

	if (tq->tq_nthreads == 1)
		return (tq->tq_thread == thread);

	mutex_enter(&tq->tq_lock);
	for (i = 0; i < tq->tq_nthreads; i++) {
		if (tq->tq_threadlist[i] == thread) {
			mutex_exit(&tq->tq_lock);
			return (1);
		}
	}
	mutex_exit(&tq->tq_lock);

	return (0);
}

/*
 * Worker thread for processing task queue.
 */
static void
taskq_thread(void *arg)
{
	taskq_t *tq = arg;
	taskq_ent_t *tqe;
	callb_cpr_t cprinfo;
	hrtime_t start, end;

	CALLB_CPR_INIT(&cprinfo, &tq->tq_lock, callb_generic_cpr, tq->tq_name);

	mutex_enter(&tq->tq_lock);
	while (tq->tq_flags & TASKQ_ACTIVE) {
		if ((tqe = tq->tq_task.tqent_next) == &tq->tq_task) {
			if (--tq->tq_active == 0)
				cv_broadcast(&tq->tq_wait_cv);
			if (tq->tq_flags & TASKQ_CPR_SAFE) {
				cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
			} else {
				CALLB_CPR_SAFE_BEGIN(&cprinfo);
				cv_wait(&tq->tq_dispatch_cv, &tq->tq_lock);
				CALLB_CPR_SAFE_END(&cprinfo, &tq->tq_lock);
			}
			tq->tq_active++;
			continue;
		}
		/* Unlink the entry from the front of the queue */
		tqe->tqent_prev->tqent_next = tqe->tqent_next;
		tqe->tqent_next->tqent_prev = tqe->tqent_prev;
		mutex_exit(&tq->tq_lock);

		rw_enter(&tq->tq_threadlock, RW_READER);
		start = gethrtime();
		DTRACE_PROBE2(taskq__exec__start, taskq_t *, tq,
		    taskq_ent_t *, tqe);
		tqe->tqent_func(tqe->tqent_arg);
		DTRACE_PROBE2(taskq__exec__end, taskq_t *, tq,
		    taskq_ent_t *, tqe);
		end = gethrtime();
		rw_exit(&tq->tq_threadlock);

		mutex_enter(&tq->tq_lock);
		tq->tq_totaltime += end - start;
		tq->tq_executed++;

		taskq_ent_free(tq, tqe);
	}
	tq->tq_nthreads--;
	cv_broadcast(&tq->tq_wait_cv);
	ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));
	CALLB_CPR_EXIT(&cprinfo);	/* drops tq->tq_lock */
	thread_exit();
}

/*
 * Taskq creation. May sleep for memory.
 * Always use automatically generated instances to avoid kstat name space
 * collisions.
 */
taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc,
    int maxalloc, uint_t flags)
{
	return (taskq_create_common(name, 0, nthreads, pri, minalloc,
	    maxalloc, flags | TASKQ_NOINSTANCE));
}

static taskq_t *
taskq_create_common(const char *name, int instance, int nthreads, pri_t pri,
    int minalloc, int maxalloc, uint_t flags)
{
	taskq_t *tq = kmem_cache_alloc(taskq_cache, KM_SLEEP);
	uint_t ncpus = ((boot_max_ncpus == -1) ? max_ncpus : boot_max_ncpus);
	uint_t bsize;	/* # of buckets - always power of 2 */

	ASSERT(instance == 0);
	ASSERT(flags == (TASKQ_PREPOPULATE | TASKQ_NOINSTANCE));

	/*
	 * TASKQ_CPR_SAFE and TASKQ_DYNAMIC flags are mutually exclusive.
	 */
	ASSERT((flags & (TASKQ_DYNAMIC | TASKQ_CPR_SAFE)) !=
	    (TASKQ_DYNAMIC | TASKQ_CPR_SAFE));

	ASSERT(tq->tq_buckets == NULL);

	bsize = 1 << (highbit(ncpus) - 1);
	bsize = MIN(bsize, taskq_maxbuckets);

	tq->tq_maxsize = nthreads;

	(void) strncpy(tq->tq_name, name, TASKQ_NAMELEN + 1);
	tq->tq_name[TASKQ_NAMELEN] = '\0';
	/* Make sure the name conforms to the rules for C identifiers */
	strident_canon(tq->tq_name, TASKQ_NAMELEN);

	tq->tq_flags = flags | TASKQ_ACTIVE;
	tq->tq_active = nthreads;
	tq->tq_nthreads = nthreads;
	tq->tq_minalloc = minalloc;
	tq->tq_maxalloc = maxalloc;
	tq->tq_nbuckets = bsize;

	if (flags & TASKQ_PREPOPULATE) {
		mutex_enter(&tq->tq_lock);
		while (minalloc-- > 0)
			taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));
		mutex_exit(&tq->tq_lock);
	}

	if (nthreads == 1) {
		tq->tq_thread = thread_create(NULL, 0, taskq_thread, tq,
		    0, NULL, TS_RUN, pri);
	} else {
		kthread_t **tpp = kmem_alloc(sizeof (kthread_t *) * nthreads,
		    KM_SLEEP);

		tq->tq_threadlist = tpp;

		mutex_enter(&tq->tq_lock);
		while (nthreads-- > 0) {
			*tpp = thread_create(NULL, 0, taskq_thread, tq,
			    0, NULL, TS_RUN, pri);
			tpp++;
		}
		mutex_exit(&tq->tq_lock);
	}

	return (tq);
}

/*
 * taskq_destroy().
 *
 * Assumes: by the time taskq_destroy is called no one will use this task queue
 * in any way and no one will try to dispatch entries in it.
 */
void
taskq_destroy(taskq_t *tq)
{
	taskq_bucket_t *b = tq->tq_buckets;
	int bid = 0;

	ASSERT(!(tq->tq_flags & TASKQ_CPR_SAFE));

	/*
	 * Wait for any pending entries to complete.
	 */
	taskq_wait(tq);

	mutex_enter(&tq->tq_lock);
	ASSERT((tq->tq_task.tqent_next == &tq->tq_task) &&
	    (tq->tq_active == 0));

	if ((tq->tq_nthreads > 1) && (tq->tq_threadlist != NULL))
		kmem_free(tq->tq_threadlist, sizeof (kthread_t *) *
		    tq->tq_nthreads);

	tq->tq_flags &= ~TASKQ_ACTIVE;
	cv_broadcast(&tq->tq_dispatch_cv);
	while (tq->tq_nthreads != 0)
		cv_wait(&tq->tq_wait_cv, &tq->tq_lock);

	tq->tq_minalloc = 0;
	while (tq->tq_nalloc != 0)
		taskq_ent_free(tq, taskq_ent_alloc(tq, TQ_SLEEP));

	mutex_exit(&tq->tq_lock);

	/*
	 * Mark each bucket as closing and wakeup all sleeping threads.
	 */
	for (; (b != NULL) && (bid < tq->tq_nbuckets); b++, bid++) {
		taskq_ent_t *tqe;

		mutex_enter(&b->tqbucket_lock);

		b->tqbucket_flags |= TQBUCKET_CLOSE;
		/* Wakeup all sleeping threads */
		for (tqe = b->tqbucket_freelist.tqent_next;
		    tqe != &b->tqbucket_freelist; tqe = tqe->tqent_next)
			cv_signal(&tqe->tqent_cv);

		ASSERT(b->tqbucket_nalloc == 0);

		/*
		 * At this point we waited for all pending jobs to complete
		 * (in both the task queue and the bucket) and no new jobs
		 * should arrive. Wait for all threads to die.
		 */
		while (b->tqbucket_nfree > 0)
			cv_wait(&b->tqbucket_cv, &b->tqbucket_lock);
		mutex_exit(&b->tqbucket_lock);
		mutex_destroy(&b->tqbucket_lock);
		cv_destroy(&b->tqbucket_cv);
	}

	if (tq->tq_buckets != NULL) {
		ASSERT(tq->tq_flags & TASKQ_DYNAMIC);
		kmem_free(tq->tq_buckets,
		    sizeof (taskq_bucket_t) * tq->tq_nbuckets);

		/* Cleanup fields before returning tq to the cache */
		tq->tq_buckets = NULL;
		tq->tq_tcreates = 0;
		tq->tq_tdeaths = 0;
	} else {
		ASSERT(!(tq->tq_flags & TASKQ_DYNAMIC));
	}

	tq->tq_totaltime = 0;
	tq->tq_tasks = 0;
	tq->tq_maxtasks = 0;
	tq->tq_executed = 0;
	kmem_cache_free(taskq_cache, tq);
}

SYSINIT(sol_taskq, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, taskq_init, NULL);
SYSUNINIT(sol_taskq, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, taskq_fini, NULL);