/*-
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>
static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);
struct taskqueue_busy {
	struct task	*tb_running;
	TAILQ_ENTRY(taskqueue_busy) tb_link;
};

struct task * const TB_DRAIN_WAITER = (struct task *)0x1;
struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	TAILQ_HEAD(, taskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)
#define	DT_CALLOUT_ARMED	(1 << 0)

#define	TQ_LOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_lock_spin(&(tq)->tq_mutex);		\
		else						\
			mtx_lock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_unlock_spin(&(tq)->tq_mutex);	\
		else						\
			mtx_unlock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)
void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}
static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{

	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}
static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}
struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, name);
}
void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}
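/*
 * Editorial usage sketch (not part of the original file): the INIT callback
 * registered here runs once in each taskqueue thread as it starts (see
 * taskqueue_thread_loop() below) and the SHUTDOWN callback runs as the
 * thread exits.  'my_tq_init_cb' and 'sc' are hypothetical names.
 *
 *	static void
 *	my_tq_init_cb(void *arg)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		(void)sc;	// e.g. set up per-thread state for 'sc' here
 *	}
 *
 *	taskqueue_set_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT,
 *	    my_tq_init_cb, sc);
 */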
/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}
static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task)
{
	struct task *ins;
	struct task *prev;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));
	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise the case when all tasks have the same priority.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = NULL;
		for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
		     prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev)
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
		else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}
int
grouptaskqueue_enqueue(struct taskqueue *queue, struct task *task)
{

	TQ_LOCK(queue);
	if (task->ta_pending) {
		TQ_UNLOCK(queue);
		return (0);
	}
	STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	task->ta_pending = 1;
	TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	return (0);
}
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task);
	/* The lock is released inside. */

	return (res);
}
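/*
 * Editorial usage sketch (not part of the original file): a consumer
 * typically initializes a task once and enqueues it whenever work is
 * pending; repeated enqueues before the handler runs are coalesced and
 * reported through the 'pending' argument.  'my_softc', 'my_task_fn' and
 * 'sc' are hypothetical names.
 *
 *	struct my_softc {
 *		struct task	sc_task;
 *	};
 *
 *	static void
 *	my_task_fn(void *arg, int pending)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		(void)sc;	// process up to 'pending' coalesced requests
 *	}
 *
 *	TASK_INIT(&sc->sc_task, 0, my_task_fn, sc);
 *	taskqueue_enqueue(taskqueue_thread, &sc->sc_task);
 */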
static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t);
	/* The lock is released inside. */
}
int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, int ticks)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (ticks == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (ticks < 0)
				ticks = -ticks; /* Ignore overflow. */
		}
		if (ticks > 0) {
			callout_reset(&timeout_task->c, ticks,
			    taskqueue_timeout_func, timeout_task);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}
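/*
 * Editorial usage sketch (not part of the original file): a deferred task
 * is declared as a struct timeout_task, initialized once with
 * TIMEOUT_TASK_INIT() and then scheduled some number of ticks out.  The
 * names 'sc', 'sc_tt' and 'my_timeout_fn' are hypothetical.
 *
 *	TIMEOUT_TASK_INIT(tq, &sc->sc_tt, 0, my_timeout_fn, sc);
 *	taskqueue_enqueue_timeout(tq, &sc->sc_tt, hz);	// ~1 second later
 *
 * Passing ticks == 0 enqueues the task immediately; a negative value
 * schedules |ticks| out but, as the code above shows, will not push back
 * a callout that is already armed.
 */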
static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, USHRT_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}
/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return;

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wakeup any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}
void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}
static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct taskqueue_busy tb;
	struct taskqueue_busy *tb_first;
	struct task *task;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * zero its pending count.
		 */
		task = STAILQ_FIRST(&queue->tq_queue);
		KASSERT(task != NULL, ("task is NULL"));
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(task);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}
void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

static int
task_is_running(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (1);
	}
	return (0);
}
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{

	if (task->ta_pending > 0)
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	return (task_is_running(queue, task) ? EBUSY : 0);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}
int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_is_running(queue, task))
		TQ_SLEEP(queue, task, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	taskqueue_drain_tq_queue(queue);
	taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}
void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);
}
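/*
 * Editorial teardown sketch (not part of the original file): before a
 * consumer frees the storage backing its tasks it should stop submitting
 * new work, drain what is outstanding, and only then destroy the queue.
 * 'sc' and its members are hypothetical.
 *
 *	taskqueue_drain_timeout(sc->sc_tq, &sc->sc_tt);	// callout + task
 *	taskqueue_drain(sc->sc_tq, &sc->sc_task);	// a single task
 *	taskqueue_drain_all(sc->sc_tq);			// everything queued
 *	taskqueue_free(sc->sc_tq);			// reaps the threads
 */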
static void
taskqueue_swi_enqueue(void *context)
{
	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{
	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{
	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{
	taskqueue_run(taskqueue_swi_giant);
}
static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n",
				    __func__,
				    (unsigned long long) td->td_tid,
				    error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, name, ap);
	va_end(ap);
	return (error);
}
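/*
 * Editorial creation sketch (not part of the original file): the usual
 * pattern is to create a queue whose enqueue hook wakes a private thread
 * and then start that thread.  Passing taskqueue_thread_enqueue together
 * with a pointer to the queue variable is what lets the hook find and wake
 * the right thread.  'sc', 'sc_tq' and 'sc_dev' are hypothetical.
 *
 *	sc->sc_tq = taskqueue_create("mydev_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &sc->sc_tq);
 *	taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
 *	    device_get_nameunit(sc->sc_dev));
 */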
static void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}
void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}
void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_one(tq);
}
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
		 swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
		     INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
		 swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
		     NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);
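/*
 * Editorial note (not part of the original file): the definitions above
 * instantiate the system-wide queues.  TASKQUEUE_DEFINE creates the queue
 * and registers a SYSINIT that runs the given init expression (here,
 * swi_add() to hook the software interrupt), while TASKQUEUE_DEFINE_THREAD
 * additionally starts a dedicated kernel thread, yielding the global
 * 'taskqueue_thread' used in the enqueue sketch earlier in this file.
 */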
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}
static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{
	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{
	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
	swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
	SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}
struct taskqgroup_cpu {
	LIST_HEAD(, grouptask)	tgc_tasks;
	struct taskqueue	*tgc_taskq;
	int			tgc_cnt;
	int			tgc_cpu;
};

struct taskqgroup {
	struct taskqgroup_cpu	tqg_queue[MAXCPU];
	struct mtx		tqg_lock;
	char			*tqg_name;
	int			tqg_adjusting;
	int			tqg_stride;
	int			tqg_cnt;
};

struct taskq_bind_task {
	struct task	bt_task;
	int		bt_cpuid;
};
static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
{
	struct taskqgroup_cpu *qcpu;

	qcpu = &qgroup->tqg_queue[idx];
	LIST_INIT(&qcpu->tgc_tasks);
	qcpu->tgc_taskq = taskqueue_create_fast(NULL, M_WAITOK,
	    taskqueue_thread_enqueue, &qcpu->tgc_taskq);
	taskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
	    "%s_%d", qgroup->tqg_name, idx);
	qcpu->tgc_cpu = idx * qgroup->tqg_stride;
}

static void
taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx)
{

	taskqueue_free(qgroup->tqg_queue[idx].tgc_taskq);
}
/*
 * Find the taskq with least # of tasks that doesn't currently have any
 * other queues from the uniq identifier.
 */
static int
taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
{
	struct grouptask *n;
	int i, idx, mincnt;
	int strict;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);
	if (qgroup->tqg_cnt == 0)
		return (0);
	idx = -1;
	mincnt = INT_MAX;
	/*
	 * Two passes: first scan for a queue with the least tasks that
	 * does not already service this uniq id.  If that fails simply find
	 * the queue with the least total tasks.
	 */
	for (strict = 1; mincnt == INT_MAX; strict = 0) {
		for (i = 0; i < qgroup->tqg_cnt; i++) {
			if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
				continue;
			if (strict) {
				LIST_FOREACH(n,
				    &qgroup->tqg_queue[i].tgc_tasks, gt_list)
					if (n->gt_uniq == uniq)
						break;
				if (n != NULL)
					continue;
			}
			mincnt = qgroup->tqg_queue[i].tgc_cnt;
			idx = i;
		}
	}
	if (idx == -1)
		panic("taskqgroup_find: Failed to pick a qid.");

	return (idx);
}
void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int irq, char *name)
{
	cpuset_t mask;
	int qid;

	gtask->gt_uniq = uniq;
	gtask->gt_name = name;
	gtask->gt_irq = irq;
	gtask->gt_cpu = -1;
	mtx_lock(&qgroup->tqg_lock);
	qid = taskqgroup_find(qgroup, uniq);
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (irq != -1 && smp_started) {
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
		mtx_unlock(&qgroup->tqg_lock);
		intr_setaffinity(irq, &mask);
	} else
		mtx_unlock(&qgroup->tqg_lock);
}
int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int cpu, int irq, char *name)
{
	cpuset_t mask;
	int i, qid;

	qid = -1;
	gtask->gt_uniq = uniq;
	gtask->gt_name = name;
	gtask->gt_irq = irq;
	gtask->gt_cpu = cpu;
	mtx_lock(&qgroup->tqg_lock);
	if (smp_started) {
		for (i = 0; i < qgroup->tqg_cnt; i++)
			if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
				qid = i;
				break;
			}
		if (qid == -1) {
			mtx_unlock(&qgroup->tqg_lock);
			return (EINVAL);
		}
	} else
		qid = 0;
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (irq != -1 && smp_started) {
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
		mtx_unlock(&qgroup->tqg_lock);
		intr_setaffinity(irq, &mask);
	} else
		mtx_unlock(&qgroup->tqg_lock);
	return (0);
}
void
taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	int i;

	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
			break;
	if (i == qgroup->tqg_cnt)
		panic("taskqgroup_detach: task not in group\n");
	qgroup->tqg_queue[i].tgc_cnt--;
	LIST_REMOVE(gtask, gt_list);
	mtx_unlock(&qgroup->tqg_lock);
	gtask->gt_taskqueue = NULL;
}
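/*
 * Editorial usage sketch (not part of the original file), assuming the
 * GROUPTASK_INIT()/GROUPTASK_ENQUEUE() helpers are available to the
 * consumer: a driver attaches a grouptask, optionally naming the interrupt
 * to co-locate with, and enqueues it from its interrupt handler.  'sc',
 * 'my_gtask_fn', 'tqg' and the irq value are hypothetical.
 *
 *	GROUPTASK_INIT(&sc->sc_gt, 0, my_gtask_fn, sc);
 *	taskqgroup_attach(tqg, &sc->sc_gt, sc, sc->sc_irq, "mydev rxq");
 *	...
 *	GROUPTASK_ENQUEUE(&sc->sc_gt);
 *	...
 *	taskqgroup_detach(tqg, &sc->sc_gt);
 */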
static void
taskqgroup_binder(void *ctx, int pending)
{
	struct taskq_bind_task *task = (struct taskq_bind_task *)ctx;
	cpuset_t mask;
	int error;

	CPU_ZERO(&mask);
	CPU_SET(task->bt_cpuid, &mask);
	error = cpuset_setthread(curthread->td_tid, &mask);
	thread_lock(curthread);
	sched_bind(curthread, task->bt_cpuid);
	thread_unlock(curthread);

	if (error)
		printf("taskqgroup_binder: setaffinity failed: %d\n",
		    error);
	free(task, M_DEVBUF);
}
static void
taskqgroup_bind(struct taskqgroup *qgroup)
{
	struct taskq_bind_task *task;
	int i;

	/*
	 * Bind taskqueue threads to specific CPUs, if they have been assigned
	 * one.
	 */
	for (i = 0; i < qgroup->tqg_cnt; i++) {
		task = malloc(sizeof (*task), M_DEVBUF, M_NOWAIT);
		TASK_INIT(&task->bt_task, 0, taskqgroup_binder, task);
		task->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
		taskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
		    &task->bt_task);
	}
}
static int
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
	LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
	cpuset_t mask;
	struct grouptask *gtask;
	int i, old_cnt, qid;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);

	if (cnt < 1 || cnt * stride > mp_ncpus || !smp_started) {
		printf("taskqgroup_adjust failed cnt: %d stride: %d mp_ncpus: %d smp_started: %d\n",
		    cnt, stride, mp_ncpus, smp_started);
		return (EINVAL);
	}
	if (qgroup->tqg_adjusting) {
		printf("taskqgroup_adjust failed: adjusting\n");
		return (EBUSY);
	}
	qgroup->tqg_adjusting = 1;
	old_cnt = qgroup->tqg_cnt;
	mtx_unlock(&qgroup->tqg_lock);
	/*
	 * Set up queue for tasks added before boot.
	 */
	if (old_cnt == 0) {
		LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks,
		    grouptask, gt_list);
		qgroup->tqg_queue[0].tgc_cnt = 0;
	}

	/*
	 * If new taskq threads have been added.
	 */
	for (i = old_cnt; i < cnt; i++)
		taskqgroup_cpu_create(qgroup, i);
	mtx_lock(&qgroup->tqg_lock);
	qgroup->tqg_cnt = cnt;
	qgroup->tqg_stride = stride;

	/*
	 * Adjust drivers to use new taskqs.
	 */
	for (i = 0; i < old_cnt; i++) {
		while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) {
			LIST_REMOVE(gtask, gt_list);
			qgroup->tqg_queue[i].tgc_cnt--;
			LIST_INSERT_HEAD(&gtask_head, gtask, gt_list);
		}
	}

	while ((gtask = LIST_FIRST(&gtask_head))) {
		LIST_REMOVE(gtask, gt_list);
		if (gtask->gt_cpu == -1)
			qid = taskqgroup_find(qgroup, gtask->gt_uniq);
		else {
			for (i = 0; i < qgroup->tqg_cnt; i++)
				if (qgroup->tqg_queue[i].tgc_cpu == gtask->gt_cpu) {
					qid = i;
					break;
				}
		}
		qgroup->tqg_queue[qid].tgc_cnt++;
		LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
		    gt_list);
		gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	}
	/*
	 * Set new CPU and IRQ affinity
	 */
	for (i = 0; i < cnt; i++) {
		qgroup->tqg_queue[i].tgc_cpu = i * qgroup->tqg_stride;
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
		LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
			if (gtask->gt_irq == -1)
				continue;
			intr_setaffinity(gtask->gt_irq, &mask);
		}
	}
	mtx_unlock(&qgroup->tqg_lock);

	/*
	 * If taskq thread count has been reduced.
	 */
	for (i = cnt; i < old_cnt; i++)
		taskqgroup_cpu_remove(qgroup, i);

	mtx_lock(&qgroup->tqg_lock);
	qgroup->tqg_adjusting = 0;

	taskqgroup_bind(qgroup);

	return (0);
}
int
taskqgroup_adjust(struct taskqgroup *qgroup, int cpu, int stride)
{
	int error;

	mtx_lock(&qgroup->tqg_lock);
	error = _taskqgroup_adjust(qgroup, cpu, stride);
	mtx_unlock(&qgroup->tqg_lock);

	return (error);
}
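/*
 * Editorial note (not part of the original file): a group is normally sized
 * once SMP is up, e.g. from a SYSINIT ordered after SI_SUB_SMP, so that one
 * queue is created per serviced CPU:
 *
 *	taskqgroup_adjust(tqg, mp_ncpus, 1);	// one queue per CPU, stride 1
 *
 * Tasks attached before the adjustment are parked on queue 0 and are
 * redistributed by _taskqgroup_adjust() above.
 */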
struct taskqgroup *
taskqgroup_create(char *name)
{
	struct taskqgroup *qgroup;

	qgroup = malloc(sizeof(*qgroup), M_TASKQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
	qgroup->tqg_name = name;
	LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);

	return (qgroup);
}

void
taskqgroup_destroy(struct taskqgroup *qgroup)
{

}