/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct task * const TB_DRAIN_WAITER = (struct task *)0x1;

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_lock_spin(&(tq)->tq_mutex);			\
		else							\
			mtx_lock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)							\
	do {								\
		if ((tq)->tq_spin)					\
			mtx_unlock_spin(&(tq)->tq_mutex);		\
		else							\
			mtx_unlock(&(tq)->tq_mutex);			\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
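
/*
 * Illustrative sketch (not part of this file): a consumer would
 * normally initialize and arm a timeout task through the
 * TIMEOUT_TASK_INIT() wrapper in <sys/taskqueue.h>, which calls
 * _timeout_task_init().  "sc" and "my_timeout_fn" are hypothetical:
 *
 *	TIMEOUT_TASK_INIT(taskqueue_thread, &sc->my_tt, 0,
 *	    my_timeout_fn, sc);
 *	taskqueue_enqueue_timeout(taskqueue_thread, &sc->my_tt, hz);
 *
 * The task then runs on the system "thread" taskqueue roughly hz
 * ticks (one second) later.
 */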

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}

static struct taskqueue *
_taskqueue_create(const char *name __unused, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context,
		 int mtxflags, const char *mtxname)
{
	struct taskqueue *queue;

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL)
		return (NULL);

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
			MTX_DEF, "taskqueue"));
}
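
/*
 * Illustrative usage sketch following the taskqueue(9) pattern; the
 * "sc" softc and "my_task_fn" handler are hypothetical:
 *
 *	TASK_INIT(&sc->my_task, 0, my_task_fn, sc);
 *	sc->my_tq = taskqueue_create("my_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->my_tq);
 *	taskqueue_start_threads(&sc->my_tq, 1, PWAIT, "my_tq thread");
 *	taskqueue_enqueue(sc->my_tq, &sc->my_task);
 *
 * Note the double indirection: taskqueue_thread_enqueue() takes a
 * struct taskqueue ** as its context, so the queue pointer can be
 * filled in after creation.
 */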

void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}
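
/*
 * Illustrative sketch; "my_tq_init_cb" is a hypothetical callback.
 * Callbacks are typically registered before taskqueue_start_threads():
 *
 *	taskqueue_set_callback(sc->my_tq, TASKQUEUE_CALLBACK_TYPE_INIT,
 *	    my_tq_init_cb, sc);
 *
 * The INIT callback then runs in each queue thread before it starts
 * processing tasks, and the SHUTDOWN callback as each thread exits.
 */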

/*
 * Signal the taskqueue threads to terminate, and wait until all of
 * them (and any armed timeout callouts) have drained.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with the lock released. */
	return (0);
}
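
/*
 * Note on coalescing: if a task is enqueued again before it has run,
 * taskqueue_enqueue_locked() only bumps ta_pending, so the handler
 * fires once and can observe how many enqueues it absorbed.
 * Hypothetical illustration:
 *
 *	taskqueue_enqueue(tq, &t);
 *	taskqueue_enqueue(tq, &t);	(second call before t runs)
 *
 * t's ta_func later runs exactly once, with pending == 2.
 */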

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we cannot use
	 * taskqueue_enqueue_locked() directly (it drops the lock
	 * anyway), so just insert the barrier at the tail while we
	 * hold the queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}
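
/*
 * Practical consequence of the barrier (informational): after
 * taskqueue_drain_all() returns, every task that was queued when the
 * drain started has at least begun execution, and every task that was
 * running has finished; tasks enqueued concurrently with the drain may
 * still be pending.
 */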

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return;

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wake up any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}
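
/*
 * The marker protocol above relies on TB_DRAIN_WAITER being a
 * recognizable sentinel: taskqueue_run_locked() wakes the head of
 * tq_active whenever it is such a marker, so multiple concurrent
 * drainers are released one after another in FIFO order.
 */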

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}

static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!callout_stop(&timeout_task->c);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}
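
/*
 * Typical teardown sketch (per the taskqueue(9) pattern); "sc" and
 * "my_task" are hypothetical.  Cancel any pending invocation, then
 * wait out an in-flight one before freeing the enclosing structure:
 *
 *	if (taskqueue_cancel(tq, &sc->my_task, NULL) != 0)
 *		taskqueue_drain(tq, &sc->my_task);
 *	free(sc, M_DEVBUF);
 */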

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	taskqueue_drain_tq_queue(queue);
	taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}

static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/*
			 * It should be safe to continue;
			 * taskqueue_free() will do the right thing.
			 */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;		/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it will just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}
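
/*
 * Illustrative sketch of pinning queue threads to CPU 0; CPU_ZERO()
 * and CPU_SET() come from <sys/cpuset.h>, and "sc" is hypothetical:
 *
 *	cpuset_t mask;
 *
 *	CPU_ZERO(&mask);
 *	CPU_SET(0, &mask);
 *	taskqueue_start_threads_cpuset(&sc->my_tq, 1, PWAIT, &mask,
 *	    "my_tq thread");
 */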

static inline void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we
		 * must re-check whether TQ_FLAGS_ACTIVE was cleared in
		 * the meantime, which would mean we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);

	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* Rendezvous with the thread that asked us to terminate. */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;

	wakeup_one(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);
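
/*
 * Illustrative sketch: a subsystem can define its own singleton queue
 * with the same macro family; "my_subsys" is a hypothetical name:
 *
 *	TASKQUEUE_DEFINE_THREAD(my_subsys);
 *
 * This creates and starts "taskqueue_my_subsys" at SYSINIT time, and
 * TASKQUEUE_DECLARE(my_subsys) in a header exports it to other files.
 */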

struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
		 taskqueue_enqueue_fn enqueue, void *context)
{
	return (_taskqueue_create(name, mflags, enqueue, context,
			MTX_SPIN, "fast_taskqueue"));
}

/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
	return (taskqueue_enqueue(queue, task));
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
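
/*
 * Illustrative use of taskqueue_member(): a function that may be
 * called from a task handler can guard against draining its own
 * queue (a self-deadlock); "my_task" is hypothetical:
 *
 *	if (!taskqueue_member(tq, curthread))
 *		taskqueue_drain(tq, &sc->my_task);
 */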