/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>
static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
struct taskqueue_busy {
	struct task		*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_PENDING	(1 << 2)

static void	taskqueue_run_locked(struct taskqueue *);
static __inline void
TQ_LOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_lock_spin(&tq->tq_mutex);
	else
		mtx_lock(&tq->tq_mutex);
}

static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
	if (tq->tq_spin)
		mtx_unlock_spin(&tq->tq_mutex);
	else
		mtx_unlock(&tq->tq_mutex);
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}
static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL)
		return (NULL);

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue");
}
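/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * consumer creates a queue serviced by a private kernel thread.  The
 * "sc" softc layout and "my_handler" function are hypothetical names.
 *
 *	TASK_INIT(&sc->sc_task, 0, my_handler, sc);
 *	sc->sc_tq = taskqueue_create("my_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->sc_tq);
 *	taskqueue_start_threads(&sc->sc_tq, 1, PWAIT, "my taskq");
 *	taskqueue_enqueue(sc->sc_tq, &sc->sc_task);
 */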
/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}
void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_run_locked(queue);
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	TQ_LOCK(queue);

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task,
			    ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	else
		queue->tq_flags |= TQ_FLAGS_PENDING;

	TQ_UNLOCK(queue);

	return (0);
}
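/*
 * Example (added for illustration): given two tasks initialized as
 *
 *	TASK_INIT(&t_low, 0, fn, arg);
 *	TASK_INIT(&t_high, 10, fn, arg);
 *
 * enqueueing both leaves t_high ahead of t_low, because the queue is
 * kept sorted by descending ta_priority and tasks run from the head.
 */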
void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (queue->tq_flags & TQ_FLAGS_PENDING) {
		queue->tq_flags &= ~TQ_FLAGS_PENDING;
		queue->tq_enqueue(queue->tq_context);
	}
	TQ_UNLOCK(queue);
}
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct task *task;
	int pending;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	tb.tb_running = NULL;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

	while (STAILQ_FIRST(&queue->tq_queue)) {
		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);
	}
	TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
}
void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}
static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	mtx_assert(&queue->tq_mutex, MA_OWNED);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
	if (queue->tq_spin) {		/* XXX */
		mtx_lock_spin(&queue->tq_mutex);
		while (task->ta_pending != 0 || task_is_running(queue, task))
			msleep_spin(task, &queue->tq_mutex, "-", 0);
		mtx_unlock_spin(&queue->tq_mutex);
	} else {
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

		mtx_lock(&queue->tq_mutex);
		while (task->ta_pending != 0 || task_is_running(queue, task))
			msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
		mtx_unlock(&queue->tq_mutex);
	}
}
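/*
 * Note (added): taskqueue_drain() sleeps until the task is neither
 * pending nor running, so it must not be called from the handler of
 * the task being drained; doing so would wait on itself and deadlock.
 */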
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
			const char *name, ...)
{
	va_list ap;
	struct thread *td;
	struct taskqueue *tq;
	int i, error;
	char ktname[MAXCOMLEN + 1];

	if (count <= 0)
		return (EINVAL);

	tq = *tqp;

	va_start(ap, name);
	vsnprintf(ktname, sizeof(ktname), name, ap);
	va_end(ap);

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	mtx_assert(&tq->tq_mutex, MA_OWNED);
	wakeup_one(tq);
}
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);
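/*
 * Note (added): TASKQUEUE_DEFINE_THREAD(thread) instantiates the global
 * "taskqueue_thread" queue, serviced by a dedicated kernel thread, so a
 * caller with an initialized task can simply do:
 *
 *	taskqueue_enqueue(taskqueue_thread, &my_task);
 */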
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue");
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return taskqueue_enqueue(queue, task);
}
static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	TQ_LOCK(queue);
	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	TQ_UNLOCK(queue);
	return (ret);
}