/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>
static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");

static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);
struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

/*
 * Sentinel tb_running value used to mark a drain waiter on tq_active;
 * it cannot alias a real task pointer.
 */
struct task * const TB_DRAIN_WAITER = (struct task *)0x1;
struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};
#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	TQ_LOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_lock_spin(&(tq)->tq_mutex);		\
		else						\
			mtx_lock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_unlock_spin(&(tq)->tq_mutex);	\
		else						\
			mtx_unlock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{

	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}
static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL)
		return (NULL);

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	/* These built-in enqueue hooks are safe to call without the lock. */
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return (queue);
}
struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{

	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, "taskqueue"));
}
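
/*
 * Usage sketch (illustrative only, not compiled here): a typical private
 * queue serviced by one kernel thread.  The "mydriver" names and
 * mydriver_task_fn are hypothetical.
 *
 *	static struct taskqueue *mydriver_tq;
 *	static struct task mydriver_task;
 *
 *	static void
 *	mydriver_task_fn(void *context, int pending)
 *	{
 *		(pending counts enqueues coalesced since the last run)
 *	}
 *
 *	mydriver_tq = taskqueue_create("mydriver", M_WAITOK,
 *	    taskqueue_thread_enqueue, &mydriver_tq);
 *	taskqueue_start_threads(&mydriver_tq, 1, PWAIT, "mydriver taskq");
 *	TASK_INIT(&mydriver_task, 0, mydriver_task_fn, NULL);
 *	taskqueue_enqueue(mydriver_tq, &mydriver_task);
 */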
void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}
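
/*
 * Usage sketch (illustrative only): callbacks run in the context of each
 * queue thread, so they are typically registered before
 * taskqueue_start_threads() so that the INIT callback fires in every
 * thread.  The callback names and "sc" context are hypothetical.
 *
 *	taskqueue_set_callback(mydriver_tq, TASKQUEUE_CALLBACK_TYPE_INIT,
 *	    mydriver_thread_init_cb, sc);
 *	taskqueue_set_callback(mydriver_tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN,
 *	    mydriver_thread_fini_cb, sc);
 */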
/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}
void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}
static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}
int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}
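
/*
 * Usage sketch (illustrative only): a timeout task defers the enqueue by
 * "ticks".  Re-arming before the callout fires does not queue the task
 * twice.  The "mydriver" names are hypothetical.
 *
 *	static struct timeout_task mydriver_tt;
 *
 *	TIMEOUT_TASK_INIT(mydriver_tq, &mydriver_tt, 0, mydriver_task_fn, sc);
 *	taskqueue_enqueue_timeout(mydriver_tq, &mydriver_tt, hz);
 */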
static void
taskqueue_task_nop_fn(void *context, int pending)
{
}
/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}
/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return;

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wakeup any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}
void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		/* Run the task with the queue unlocked. */
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}
void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}
static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}
int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}
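
/*
 * Usage sketch (illustrative only): cancellation does not wait for a task
 * that is already running; EBUSY reports that case, and a drain is then
 * needed to guarantee completion.
 *
 *	if (taskqueue_cancel(mydriver_tq, &mydriver_task, NULL) == EBUSY)
 *		taskqueue_drain(mydriver_tq, &mydriver_task);
 */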
int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}
void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	taskqueue_drain_tq_queue(queue);
	taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}
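
/*
 * Usage sketch (illustrative only): a typical detach path drains each task
 * so nothing is queued or running, then destroys the queue.  The ordering
 * shown is an assumption of the sketch, not a requirement enforced here.
 *
 *	taskqueue_drain_timeout(mydriver_tq, &mydriver_tt);
 *	taskqueue_drain(mydriver_tq, &mydriver_task);
 *	taskqueue_free(mydriver_tq);
 */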
void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}
static void
taskqueue_swi_enqueue(void *context)
{

	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{

	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{

	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{

	taskqueue_run(taskqueue_swi_giant);
}
static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n", __func__,
				    (unsigned long long)td->td_tid, error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}
int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}
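
/*
 * Usage sketch (illustrative only): pin two queue threads to CPUs 0 and 1.
 * The "dev" handle is hypothetical.
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	CPU_SET(1, &mask);
 *	taskqueue_start_threads_cpuset(&mydriver_tq, 2, PWAIT, &mask,
 *	    "%s taskq", device_get_nameunit(dev));
 */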
static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we need
		 * to check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	wakeup_one(tq);
}
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);
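
/*
 * Usage sketch (illustrative only): the definitions above create the
 * system-wide taskqueue_swi, taskqueue_swi_giant and taskqueue_thread
 * queues, so a task can be handed to the shared thread queue directly
 * instead of creating a private one.
 *
 *	TASK_INIT(&mydriver_task, 0, mydriver_task_fn, sc);
 *	taskqueue_enqueue(taskqueue_thread, &mydriver_task);
 */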
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{

	return (_taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue"));
}
/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{

	return (taskqueue_enqueue(queue, task));
}
static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{

	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{

	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
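
/*
 * Usage sketch (illustrative only): draining a task from one of the queue's
 * own threads would sleep waiting on itself, so a caller that might run in
 * either context can test for membership first.
 *
 *	if (!taskqueue_member(mydriver_tq, curthread))
 *		taskqueue_drain(mydriver_tq, &mydriver_task);
 */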