/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>
static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;

struct taskqueue_busy {
	struct task		*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_lock_spin(&(tq)->tq_mutex);		\
		else						\
			mtx_lock(&(tq)->tq_mutex);		\
	} while (0)

#define	TQ_UNLOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_unlock_spin(&(tq)->tq_mutex);	\
		else						\
			mtx_unlock(&(tq)->tq_mutex);		\
	} while (0)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex, 0);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
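
/*
 * Illustrative usage sketch (compiled out): consumers normally set up a
 * timeout task through the TIMEOUT_TASK_INIT() wrapper in <sys/taskqueue.h>
 * rather than calling _timeout_task_init() directly.  The handler has the
 * usual task_fn_t shape; every "example_*" name below is invented for
 * illustration only.
 */
#if 0
static struct taskqueue *example_tq;	/* created elsewhere */
static struct timeout_task example_tt;

static void
example_task_fn(void *context, int pending)
{
	/* "pending" counts enqueues coalesced since the last run. */
}

static void
example_timeout_setup(void)
{
	TIMEOUT_TASK_INIT(example_tq, &example_tt, 0, example_task_fn, NULL);
}
#endif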

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}
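
/*
 * Illustrative usage sketch (compiled out): the common pattern pairs
 * taskqueue_create() with taskqueue_thread_enqueue and a call to
 * taskqueue_start_threads(); passing the address of the queue pointer as
 * the enqueue context lets the queue wake its own service thread.
 * "example_tq" is an invented name.
 */
#if 0
static struct taskqueue *example_tq;

static void
example_create(void)
{
	example_tq = taskqueue_create("example", M_WAITOK,
	    taskqueue_thread_enqueue, &example_tq);
	taskqueue_start_threads(&example_tq, 1, PWAIT, "example taskq");
}
#endif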

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		/* Rouse a service thread so it notices the queue is dying. */
		wakeup_one(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}
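
/*
 * Illustrative usage sketch (compiled out): taskqueue_free() waits for the
 * service threads and armed callouts, but callers should still drain or
 * cancel any tasks they own first, since the task structures usually live
 * in the caller's own storage.  The example_* names are invented.
 */
#if 0
static void
example_destroy(void)
{
	taskqueue_drain(example_tq, &example_task);
	taskqueue_free(example_tq);
	example_tq = NULL;
}
#endif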

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;
		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	return (0);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	TQ_UNLOCK(queue);

	return (res);
}
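
/*
 * Illustrative usage sketch (compiled out): a task is initialized once with
 * TASK_INIT() and may then be enqueued repeatedly; an enqueue that arrives
 * while the task is still pending only bumps ta_pending, so the handler
 * runs once with the coalesced count.  The example_* names are invented and
 * reuse the sketches above.
 */
#if 0
static struct task example_task;

static void
example_submit(void)
{
	/* Typically done once, e.g. at attach time. */
	TASK_INIT(&example_task, 0, example_task_fn, NULL);

	/* May be called as often as needed; duplicates coalesce. */
	taskqueue_enqueue(example_tq, &example_task);
}
#endif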

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
		}
		callout_reset(&timeout_task->c, ticks, taskqueue_timeout_func,
		    timeout_task);
	}
	TQ_UNLOCK(queue);
	return (res);
}
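
/*
 * Illustrative usage sketch (compiled out): the delay is given in ticks, so
 * "hz" is roughly one second.  Re-arming while the callout is still pending
 * does not queue a second execution; the return value reports how many
 * requests were already outstanding.  Names reuse the invented example_*
 * sketches above.
 */
#if 0
static void
example_defer(void)
{
	taskqueue_enqueue_timeout(example_tq, &example_tt, hz);
}
#endif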

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}
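
/*
 * Illustrative usage sketch (compiled out): blocking only suppresses the
 * queue's tq_enqueue callback (the wakeup); tasks submitted meanwhile stay
 * queued and are kicked off by taskqueue_unblock() via TQ_FLAGS_PENDING.
 * A task already running when taskqueue_block() is called is not stopped.
 */
#if 0
static void
example_quiesce(void)
{
	taskqueue_block(example_tq);
	/* ...reconfigure state the task handlers depend on... */
	taskqueue_unblock(example_tq);
}
#endif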

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}
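
/*
 * Illustrative usage sketch (compiled out): EBUSY from taskqueue_cancel()
 * means the handler is executing right now; following up with
 * taskqueue_drain() waits for it, so this pair is only safe from a context
 * that may sleep.  Names reuse the invented example_* sketches above.
 */
#if 0
static void
example_stop(void)
{
	if (taskqueue_cancel(example_tq, &example_task, NULL) != 0)
		taskqueue_drain(example_tq, &example_task);
}
#endif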

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
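
/*
 * Illustrative usage sketch (compiled out): with count > 1 the loop above
 * names each kthread "<name>_<i>", giving a small worker pool that drains
 * one shared queue.  The priority argument is a scheduling priority such
 * as PWAIT; "example_tq" is the invented name from the sketches above.
 */
#if 0
static void
example_pool(void)
{
	/* Four workers: "example taskq_0" ... "example taskq_3". */
	taskqueue_start_threads(&example_tq, 4, PWAIT, "example taskq");
}
#endif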

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we need
		 * to check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);
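
/*
 * Illustrative usage sketch (compiled out): TASKQUEUE_DEFINE_THREAD(thread)
 * instantiates the system-wide "taskqueue_thread" queue with its own
 * service kthread; code that has no queue of its own can enqueue work
 * there directly.  "example_task" is the invented name from the sketches
 * above.
 */
#if 0
static void
example_use_system_queue(void)
{
	taskqueue_enqueue(taskqueue_thread, &example_task);
}
#endif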

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	TQ_LOCK(queue);
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	TQ_UNLOCK(queue);
	return (ret);
}