/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)
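
/*
 * A queue created with taskqueue_create_fast() uses an MTX_SPIN mutex
 * (tq_spin != 0), so TQ_LOCK()/TQ_UNLOCK() select the spin-mutex
 * primitives for it; ordinary queues use a sleep mutex.
 */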

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
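
/*
 * Consumers normally initialize a timeout task with the
 * TIMEOUT_TASK_INIT() macro from <sys/taskqueue.h>, which expands to a
 * call of _timeout_task_init() above.
 */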

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (!queue)
		return NULL;

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return queue;
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}
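
/*
 * A minimal usage sketch (illustrative only, not compiled here); the
 * handler and variable names are hypothetical:
 *
 *	struct task my_task;
 *	struct taskqueue *my_tq;
 *
 *	my_tq = taskqueue_create("my_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &my_tq);
 *	taskqueue_start_threads(&my_tq, 1, PWAIT, "my taskq");
 *	TASK_INIT(&my_task, 0, my_handler, my_arg);
 *	taskqueue_enqueue(my_tq, &my_task);
 *	...
 *	taskqueue_drain(my_tq, &my_task);
 *	taskqueue_free(my_tq);
 */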

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}
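
/*
 * Free a queue created with taskqueue_create()/taskqueue_create_fast():
 * mark it inactive, wait for its threads and armed timeouts to finish,
 * then release the queue's resources.
 */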

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}
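
/*
 * Worked example of the ordering above: enqueueing tasks of priority
 * 0, 10 and 5 (in that order) leaves the queue as 10, 5, 0 -- sorted by
 * descending ta_priority, FIFO among equal priorities.
 */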

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}
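
/*
 * Sketch of deferred dispatch (hypothetical names, cf. the sketch after
 * taskqueue_create() above):
 *
 *	struct timeout_task my_tt;
 *
 *	TIMEOUT_TASK_INIT(my_tq, &my_tt, 0, my_handler, my_arg);
 *	taskqueue_enqueue_timeout(my_tq, &my_tt, hz);
 *
 * With ticks == hz the task fires roughly one second later; ticks == 0
 * enqueues it immediately.  The return value is the number of instances
 * that were already queued or armed.
 */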

static void
taskqueue_drain_running(struct taskqueue *queue)
{

	while (!TAILQ_EMPTY(&queue->tq_active))
		TQ_SLEEP(queue, &queue->tq_active, &queue->tq_mutex,
		    PWAIT, "-", 0);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
	if (TAILQ_EMPTY(&queue->tq_active))
		wakeup(&queue->tq_active);
}
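
/*
 * The on-stack taskqueue_busy record linked onto tq_active above is how
 * task_is_running() and the drain functions spot a task whose handler is
 * executing while the queue lock is dropped.
 */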

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{
	struct task *task;

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	task = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (task != NULL)
		while (task->ta_pending != 0)
			TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	taskqueue_drain_running(queue);
	KASSERT(STAILQ_EMPTY(&queue->tq_queue),
	    ("taskqueue queue is not empty after draining"));
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
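
/*
 * Example call (hypothetical driver context): start four worker threads
 * named after the device, e.g. "em0 taskq":
 *
 *	taskqueue_start_threads(&sc->sc_tq, 4, PWAIT, "%s taskq",
 *	    device_get_nameunit(dev));
 */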

static void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);
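
/*
 * TASKQUEUE_DEFINE_THREAD(thread) instantiates the system-wide
 * "taskqueue_thread" queue, serviced by a dedicated kernel thread and
 * fed through taskqueue_thread_enqueue() above.
 */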

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}