/*-
 * Copyright (c) 2000 Doug Rabson
 * Copyright (c) 2014 Jeff Roberson
 * Copyright (c) 2016 Matthew Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/gtaskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>
static MALLOC_DEFINE(M_GTASKQUEUE, "gtaskqueue", "Group Task Queues");
static void	gtaskqueue_thread_enqueue(void *);
static void	gtaskqueue_thread_loop(void *arg);
TASKQGROUP_DEFINE(softirq, mp_ncpus, 1);
TASKQGROUP_DEFINE(config, 1, 1);
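/*
 * Illustrative usage sketch (not part of this file): a consumer
 * typically initializes a grouptask, attaches it to one of the groups
 * defined above (the macro creates qgroup_softirq and qgroup_config),
 * and later fires it from its dispatch path.  The handler and softc
 * names below are hypothetical.
 *
 *	GROUPTASK_INIT(&sc->sc_gtask, 0, my_handler, sc);
 *	taskqgroup_attach(qgroup_softirq, &sc->sc_gtask, sc, -1, "my task");
 *	...
 *	GROUPTASK_ENQUEUE(&sc->sc_gtask);
 */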
struct gtaskqueue_busy {
	struct gtask	*tb_running;
	TAILQ_ENTRY(gtaskqueue_busy) tb_link;
};
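/*
 * Sentinel tb_running value used by drain waiters on tq_active to
 * distinguish themselves from threads that are running a real task.
 */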
static struct gtask * const TB_DRAIN_WAITER = (struct gtask *)0x1;
struct gtaskqueue {
	STAILQ_HEAD(, gtask)	tq_queue;
	gtaskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	TAILQ_HEAD(, gtaskqueue_busy) tq_active;
	struct mtx		tq_mutex;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	int			tq_callouts;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};
#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	TQ_LOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_lock_spin(&(tq)->tq_mutex);		\
		else						\
			mtx_lock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_unlock_spin(&(tq)->tq_mutex);	\
		else						\
			mtx_unlock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)
#ifdef INVARIANTS
static void
gtask_dump(struct gtask *gtask)
{
	printf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p ta_context=%p\n",
	    gtask, gtask->ta_flags, gtask->ta_priority, gtask->ta_func,
	    gtask->ta_context);
}
#endif
static __inline int
TQ_SLEEP(struct gtaskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
	if (tq->tq_spin)
		return (msleep_spin(p, m, wm, t));
	return (msleep(p, m, pri, wm, t));
}
static struct gtaskqueue *
_gtaskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
	struct gtaskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_GTASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_GTASKQUEUE);
		return (NULL);
	}

	STAILQ_INIT(&queue->tq_queue);
	TAILQ_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == gtaskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}
/*
 * Signal a taskqueue thread to terminate.
 */
static void
gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
	}
}
static void
gtaskqueue_free(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	gtaskqueue_terminate(queue->tq_threads, queue);
	KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_GTASKQUEUE);
	free(queue->tq_name, M_GTASKQUEUE);
	free(queue, M_GTASKQUEUE);
}
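/*
 * Enqueue a task if it is not already pending, then call the queue's
 * enqueue hook to kick a service thread unless the queue is blocked.
 */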
int
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
{
#ifdef INVARIANTS
	if (queue == NULL) {
		gtask_dump(gtask);
		panic("queue == NULL");
	}
#endif
	TQ_LOCK(queue);
	if (gtask->ta_flags & TASK_ENQUEUED) {
		TQ_UNLOCK(queue);
		return (0);
	}
	STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
	gtask->ta_flags |= TASK_ENQUEUED;
	TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	return (0);
}
static void
gtaskqueue_task_nop_fn(void *context)
{
}
/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
gtaskqueue_drain_tq_queue(struct gtaskqueue *queue)
{
	struct gtask t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return;

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	t_barrier.ta_flags |= TASK_ENQUEUED;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_flags & TASK_ENQUEUED)
		TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}
/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
gtaskqueue_drain_tq_active(struct gtaskqueue *queue)
{
	struct gtaskqueue_busy tb_marker, *tb_first;

	if (TAILQ_EMPTY(&queue->tq_active))
		return;

	/* Block taskq_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for all currently executing taskqueue threads
	 * to go idle.
	 */
	tb_marker.tb_running = TB_DRAIN_WAITER;
	TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
	while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
		TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
	TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

	/*
	 * Wakeup any other drain waiter that happened to queue up
	 * without any intervening active thread.
	 */
	tb_first = TAILQ_FIRST(&queue->tq_active);
	if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
		wakeup(tb_first);

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
}
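/*
 * Prevent the enqueue hook from running until gtaskqueue_unblock();
 * tasks queued in the meantime are held on tq_queue and kicked once
 * the queue is unblocked.
 */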
void
gtaskqueue_block(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
gtaskqueue_unblock(struct gtaskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}
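/*
 * Dispatch pending tasks until tq_queue is empty.  The queue lock is
 * dropped around each handler invocation; the on-stack gtaskqueue_busy
 * entry advertises the running task so cancel and drain can see it.
 */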
static void
gtaskqueue_run_locked(struct gtaskqueue *queue)
{
	struct gtaskqueue_busy tb;
	struct gtaskqueue_busy *tb_first;
	struct gtask *gtask;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;

	while (STAILQ_FIRST(&queue->tq_queue)) {
		TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

		/*
		 * Carefully remove the first task from the queue and
		 * clear its TASK_ENQUEUED flag.
		 */
		gtask = STAILQ_FIRST(&queue->tq_queue);
		KASSERT(gtask != NULL, ("task is NULL"));
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		gtask->ta_flags &= ~TASK_ENQUEUED;
		tb.tb_running = gtask;
		TQ_UNLOCK(queue);

		KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
		gtask->ta_func(gtask->ta_context);

		TQ_LOCK(queue);
		tb.tb_running = NULL;
		wakeup(gtask);

		TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
		tb_first = TAILQ_FIRST(&queue->tq_active);
		if (tb_first != NULL &&
		    tb_first->tb_running == TB_DRAIN_WAITER)
			wakeup(tb_first);
	}
}
static int
task_is_running(struct gtaskqueue *queue, struct gtask *gtask)
{
	struct gtaskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == gtask)
			return (1);
	}
	return (0);
}
static int
gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask)
{

	if (gtask->ta_flags & TASK_ENQUEUED)
		STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link);
	gtask->ta_flags &= ~TASK_ENQUEUED;
	return (task_is_running(queue, gtask) ? EBUSY : 0);
}
int
gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
{
	int error;

	TQ_LOCK(queue);
	error = gtaskqueue_cancel_locked(queue, gtask);
	TQ_UNLOCK(queue);

	return (error);
}
void
gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
		TQ_SLEEP(queue, gtask, &queue->tq_mutex, PWAIT, "-", 0);
	TQ_UNLOCK(queue);
}
void
gtaskqueue_drain_all(struct gtaskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	gtaskqueue_drain_tq_queue(queue);
	gtaskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}
static int
_gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct gtaskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_GTASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n", __func__,
				    (unsigned long long)td->td_tid, error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}

	return (0);
}
static int
gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap);
	va_end(ap);
	return (error);
}
static inline void
gtaskqueue_run_callback(struct gtaskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}
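/*
 * Service-thread main loop: run tasks while the queue is active, sleep
 * when the queue empties, and rendezvous with gtaskqueue_terminate()
 * on the way out.
 */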
static void
gtaskqueue_thread_loop(void *arg)
{
	struct gtaskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		gtaskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run() can drop tq_mutex, we need to
		 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
	}
	gtaskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}
static void
gtaskqueue_thread_enqueue(void *context)
{
	struct gtaskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_one(tq);
}
static struct gtaskqueue *
gtaskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
	return (_gtaskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue"));
}
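/*
 * Per-group bookkeeping: each group owns one taskqgroup_cpu (and thus
 * one single-threaded taskqueue) per service queue, plus the list of
 * grouptasks currently attached to it.
 */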
struct taskqgroup_cpu {
	LIST_HEAD(, grouptask)	tgc_tasks;
	struct gtaskqueue	*tgc_taskq;
	int			tgc_cnt;
	int			tgc_cpu;
};

struct taskqgroup {
	struct taskqgroup_cpu tqg_queue[MAXCPU];
	struct mtx		tqg_lock;
	char			*tqg_name;
	int			tqg_adjusting;
	int			tqg_stride;
	int			tqg_cnt;
};

struct taskq_bind_task {
	struct gtask	bt_task;
	int		bt_cpuid;
};
static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
{
	struct taskqgroup_cpu *qcpu;

	qcpu = &qgroup->tqg_queue[idx];
	LIST_INIT(&qcpu->tgc_tasks);
	qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK,
	    gtaskqueue_thread_enqueue, &qcpu->tgc_taskq);
	gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
	    "%s_%d", qgroup->tqg_name, idx);
	qcpu->tgc_cpu = cpu;
}
static void
taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx)
{

	gtaskqueue_free(qgroup->tqg_queue[idx].tgc_taskq);
}
/*
 * Find the taskq with the fewest tasks that does not already service
 * this uniq identifier.
 */
static int
taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
{
	struct grouptask *n;
	int i, idx, mincnt;
	int strict;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);
	if (qgroup->tqg_cnt == 0)
		return (0);
	idx = -1;
	mincnt = INT_MAX;
	/*
	 * Two passes: first scan for a queue with the fewest tasks that
	 * does not already service this uniq id.  If that fails, simply
	 * find the queue with the fewest total tasks.
	 */
	for (strict = 1; mincnt == INT_MAX; strict = 0) {
		for (i = 0; i < qgroup->tqg_cnt; i++) {
			if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
				continue;
			if (strict) {
				LIST_FOREACH(n,
				    &qgroup->tqg_queue[i].tgc_tasks, gt_list)
					if (n->gt_uniq == uniq)
						break;
				if (n != NULL)
					continue;
			}
			mincnt = qgroup->tqg_queue[i].tgc_cnt;
			idx = i;
		}
	}
	if (idx == -1)
		panic("taskqgroup_find: Failed to pick a qid.");

	return (idx);
}
/*
 * smp_started is unusable since it is not set for UP kernels or even for
 * SMP kernels when there is 1 CPU.  This is usually handled by adding a
 * (mp_ncpus == 1) test, but that would be broken here since we need to
 * synchronize with the SI_SUB_SMP ordering.  Even in the pure SMP case
 * smp_started only gives a fuzzy ordering relative to SI_SUB_SMP.
 *
 * So maintain our own flag.  It must be set after all CPUs are started
 * and before SI_SUB_SMP:SI_ORDER_ANY so that the SYSINIT for delayed
 * adjustment is properly delayed.  SI_ORDER_FOURTH is clearly before
 * SI_ORDER_ANY and unclearly after the CPUs are started.  It would be
 * simpler for adjustment to pass a flag indicating if it is delayed.
 */

static int tqg_smp_started;

static void
tqg_record_smp_started(void *arg)
{
	tqg_smp_started = 1;
}

SYSINIT(tqg_record_smp_started, SI_SUB_SMP, SI_ORDER_FOURTH,
    tqg_record_smp_started, NULL);
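/*
 * Attach a grouptask to the least-loaded queue in the group.  If an
 * IRQ is supplied and SMP is up, also bind that IRQ to the chosen
 * queue's CPU.
 */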
void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int irq, const char *name)
{
	cpuset_t mask;
	int qid, error;

	gtask->gt_uniq = uniq;
	snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s",
	    name ? name : "grouptask");
	gtask->gt_irq = irq;
	gtask->gt_cpu = -1;
	mtx_lock(&qgroup->tqg_lock);
	qid = taskqgroup_find(qgroup, uniq);
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	if (irq != -1 && tqg_smp_started) {
		gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
		CPU_ZERO(&mask);
		CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
		mtx_unlock(&qgroup->tqg_lock);
		error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
		if (error)
			printf("%s: setaffinity failed for %s: %d\n",
			    __func__, gtask->gt_name, error);
	} else
		mtx_unlock(&qgroup->tqg_lock);
}
static void
taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	cpuset_t mask;
	int qid, cpu, error;

	mtx_lock(&qgroup->tqg_lock);
	qid = taskqgroup_find(qgroup, gtask->gt_uniq);
	cpu = qgroup->tqg_queue[qid].tgc_cpu;
	if (gtask->gt_irq != -1) {
		mtx_unlock(&qgroup->tqg_lock);

		CPU_ZERO(&mask);
		CPU_SET(cpu, &mask);
		error = intr_setaffinity(gtask->gt_irq, CPU_WHICH_IRQ, &mask);
		mtx_lock(&qgroup->tqg_lock);
		if (error)
			printf("%s: %s setaffinity failed: %d\n",
			    __func__, gtask->gt_name, error);
	}
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	mtx_unlock(&qgroup->tqg_lock);
}
int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int cpu, int irq, char *name)
{
	cpuset_t mask;
	int i, qid, error;

	qid = -1;
	gtask->gt_uniq = uniq;
	snprintf(gtask->gt_name, GROUPTASK_NAMELEN, "%s",
	    name ? name : "grouptask");
	gtask->gt_irq = irq;
	gtask->gt_cpu = cpu;
	mtx_lock(&qgroup->tqg_lock);
	if (tqg_smp_started) {
		for (i = 0; i < qgroup->tqg_cnt; i++)
			if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
				qid = i;
				break;
			}
		if (qid == -1) {
			mtx_unlock(&qgroup->tqg_lock);
			printf("%s: qid not found for %s cpu=%d\n",
			    __func__, gtask->gt_name, cpu);
			return (EINVAL);
		}
	} else
		qid = 0;
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	cpu = qgroup->tqg_queue[qid].tgc_cpu;
	mtx_unlock(&qgroup->tqg_lock);

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	if (irq != -1 && tqg_smp_started) {
		error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
		if (error)
			printf("%s: setaffinity failed: %d\n", __func__, error);
	}
	return (0);
}
static int
taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup,
    struct grouptask *gtask)
{
	cpuset_t mask;
	int i, qid, irq, cpu, error;

	qid = -1;
	irq = gtask->gt_irq;
	cpu = gtask->gt_cpu;
	MPASS(tqg_smp_started);
	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
			qid = i;
			break;
		}
	if (qid == -1) {
		mtx_unlock(&qgroup->tqg_lock);
		printf("%s: qid not found for %s cpu=%d\n",
		    __func__, gtask->gt_name, cpu);
		return (EINVAL);
	}
	qgroup->tqg_queue[qid].tgc_cnt++;
	LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
	MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
	gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
	mtx_unlock(&qgroup->tqg_lock);

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);

	if (irq != -1) {
		error = intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
		if (error)
			printf("%s: setaffinity failed: %d\n", __func__, error);
	}
	return (0);
}
void
taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
	int i;

	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++)
		if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
			break;
	if (i == qgroup->tqg_cnt)
		panic("taskqgroup_detach: task %s not in group",
		    gtask->gt_name);
	qgroup->tqg_queue[i].tgc_cnt--;
	LIST_REMOVE(gtask, gt_list);
	mtx_unlock(&qgroup->tqg_lock);
	gtask->gt_taskqueue = NULL;
}
static void
taskqgroup_binder(void *ctx)
{
	struct taskq_bind_task *gtask = (struct taskq_bind_task *)ctx;
	cpuset_t mask;
	int error;

	CPU_ZERO(&mask);
	CPU_SET(gtask->bt_cpuid, &mask);
	error = cpuset_setthread(curthread->td_tid, &mask);
	thread_lock(curthread);
	sched_bind(curthread, gtask->bt_cpuid);
	thread_unlock(curthread);

	if (error)
		printf("%s: setaffinity failed: %d\n", __func__, error);
	free(gtask, M_DEVBUF);
}
static void
taskqgroup_bind(struct taskqgroup *qgroup)
{
	struct taskq_bind_task *gtask;
	int i;

	/*
	 * Bind taskqueue threads to specific CPUs, if they have been assigned
	 * one.
	 */
	if (qgroup->tqg_cnt == 1)
		return;

	for (i = 0; i < qgroup->tqg_cnt; i++) {
		gtask = malloc(sizeof(*gtask), M_DEVBUF, M_WAITOK);
		GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
		gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
		grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
		    &gtask->bt_task);
	}
}
static int
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
	LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
	struct grouptask *gtask;
	int i, k, old_cnt, old_cpu, cpu;

	mtx_assert(&qgroup->tqg_lock, MA_OWNED);

	if (cnt < 1 || cnt * stride > mp_ncpus || !tqg_smp_started) {
		printf("%s: failed cnt: %d stride: %d "
		    "mp_ncpus: %d tqg_smp_started: %d\n",
		    __func__, cnt, stride, mp_ncpus, tqg_smp_started);
		return (EINVAL);
	}
	if (qgroup->tqg_adjusting) {
		printf("%s failed: adjusting\n", __func__);
		return (EBUSY);
	}
	qgroup->tqg_adjusting = 1;
	old_cnt = qgroup->tqg_cnt;
	old_cpu = 0;
	if (old_cnt < cnt)
		old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu;
	mtx_unlock(&qgroup->tqg_lock);
	/*
	 * Set up queue for tasks added before boot.
	 */
	if (old_cnt == 0) {
		LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks,
		    grouptask, gt_list);
		qgroup->tqg_queue[0].tgc_cnt = 0;
	}

	/*
	 * If new taskq threads have been added.
	 */
	cpu = old_cpu;
	for (i = old_cnt; i < cnt; i++) {
		taskqgroup_cpu_create(qgroup, i, cpu);

		for (k = 0; k < stride; k++)
			cpu = CPU_NEXT(cpu);
	}
	mtx_lock(&qgroup->tqg_lock);
	qgroup->tqg_cnt = cnt;
	qgroup->tqg_stride = stride;

	/*
	 * Adjust drivers to use new taskqs.
	 */
	for (i = 0; i < old_cnt; i++) {
		while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks))) {
			LIST_REMOVE(gtask, gt_list);
			qgroup->tqg_queue[i].tgc_cnt--;
			LIST_INSERT_HEAD(&gtask_head, gtask, gt_list);
		}
	}
	mtx_unlock(&qgroup->tqg_lock);

	while ((gtask = LIST_FIRST(&gtask_head))) {
		LIST_REMOVE(gtask, gt_list);
		if (gtask->gt_cpu == -1)
			taskqgroup_attach_deferred(qgroup, gtask);
		else if (taskqgroup_attach_cpu_deferred(qgroup, gtask))
			taskqgroup_attach_deferred(qgroup, gtask);
	}

	mtx_lock(&qgroup->tqg_lock);
	for (i = 0; i < qgroup->tqg_cnt; i++) {
		MPASS(qgroup->tqg_queue[i].tgc_taskq != NULL);
		LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list)
			MPASS(gtask->gt_taskqueue != NULL);
	}
	mtx_unlock(&qgroup->tqg_lock);

	/*
	 * If taskq thread count has been reduced.
	 */
	for (i = cnt; i < old_cnt; i++)
		taskqgroup_cpu_remove(qgroup, i);

	taskqgroup_bind(qgroup);

	mtx_lock(&qgroup->tqg_lock);
	qgroup->tqg_adjusting = 0;

	return (0);
}
int
taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
	int error;

	mtx_lock(&qgroup->tqg_lock);
	error = _taskqgroup_adjust(qgroup, cnt, stride);
	mtx_unlock(&qgroup->tqg_lock);

	return (error);
}
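/*
 * Illustrative sketch: once all CPUs are up, a group can be grown to
 * one service queue per CPU (this mirrors what the TASKQGROUP_DEFINE
 * SYSINIT machinery arranges for the groups defined above):
 *
 *	taskqgroup_adjust(qgroup_softirq, mp_ncpus, 1);
 */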
struct taskqgroup *
taskqgroup_create(char *name)
{
	struct taskqgroup *qgroup;

	qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO);
	mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
	qgroup->tqg_name = name;
	LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);

	return (qgroup);
}
void
taskqgroup_destroy(struct taskqgroup *qgroup)
{

}
void
taskqgroup_config_gtask_init(void *ctx, struct grouptask *gtask,
    gtask_fn_t *fn, const char *name)
{

	GROUPTASK_INIT(gtask, 0, fn, ctx);
	taskqgroup_attach(qgroup_config, gtask, gtask, -1, name);
}