/*-
 * Copyright (c) 2000 Doug Rabson
 * Copyright (c) 2014 Jeff Roberson
 * Copyright (c) 2016 Matthew Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/gtaskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_GTASKQUEUE, "taskqueue", "Task Queues");
static void gtaskqueue_thread_enqueue(void *);
static void gtaskqueue_thread_loop(void *arg);

struct gtaskqueue_busy {
        struct gtask            *tb_running;
        TAILQ_ENTRY(gtaskqueue_busy) tb_link;
};

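/*
 * Sentinel tb_running value: marks a gtaskqueue_busy entry as a drain
 * waiter rather than a running task, so gtaskqueue_drain_tq_active() can
 * queue itself on tq_active and be woken once every earlier entry is gone.
 */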
static struct gtask * const TB_DRAIN_WAITER = (struct gtask *)0x1;

struct gtaskqueue {
        STAILQ_HEAD(, gtask)    tq_queue;
        gtaskqueue_enqueue_fn   tq_enqueue;
        void                    *tq_context;
        char                    *tq_name;
        TAILQ_HEAD(, gtaskqueue_busy) tq_active;
        struct mtx              tq_mutex;
        struct thread           **tq_threads;
        int                     tq_tcount;
        int                     tq_spin;
        int                     tq_flags;
        int                     tq_callouts;
        taskqueue_callback_fn   tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
        void                    *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define TQ_FLAGS_ACTIVE         (1 << 0)
#define TQ_FLAGS_BLOCKED        (1 << 1)
#define TQ_FLAGS_UNLOCKED_ENQUEUE       (1 << 2)

#define DT_CALLOUT_ARMED        (1 << 0)

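/*
 * A queue created with MTX_SPIN protects itself with a spin mutex so that
 * it can be driven from interrupt context; these wrappers pick the matching
 * lock, unlock, and sleep primitives based on tq_spin.
 */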
#define TQ_LOCK(tq)                                             \
        do {                                                    \
                if ((tq)->tq_spin)                              \
                        mtx_lock_spin(&(tq)->tq_mutex);         \
                else                                            \
                        mtx_lock(&(tq)->tq_mutex);              \
        } while (0)
#define TQ_ASSERT_LOCKED(tq)    mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define TQ_UNLOCK(tq)                                           \
        do {                                                    \
                if ((tq)->tq_spin)                              \
                        mtx_unlock_spin(&(tq)->tq_mutex);       \
                else                                            \
                        mtx_unlock(&(tq)->tq_mutex);            \
        } while (0)
#define TQ_ASSERT_UNLOCKED(tq)  mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

static __inline int
TQ_SLEEP(struct gtaskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{
        if (tq->tq_spin)
                return (msleep_spin(p, m, wm, t));
        return (msleep(p, m, pri, wm, t));
}

static struct gtaskqueue *
_gtaskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
        struct gtaskqueue *queue;
        char *tq_name;

        tq_name = malloc(TASKQUEUE_NAMELEN, M_GTASKQUEUE, mflags | M_ZERO);
        if (tq_name == NULL)
                return (NULL);

        snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

        queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO);
        if (queue == NULL) {
                free(tq_name, M_GTASKQUEUE);
                return (NULL);
        }

        STAILQ_INIT(&queue->tq_queue);
        TAILQ_INIT(&queue->tq_active);
        queue->tq_enqueue = enqueue;
        queue->tq_context = context;
        queue->tq_name = tq_name;
        queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
        queue->tq_flags |= TQ_FLAGS_ACTIVE;
        if (enqueue == gtaskqueue_thread_enqueue)
                queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
        mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

        return (queue);
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq)
{

        while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
                wakeup(tq);
                TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
        }
}

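/*
 * Tear down a queue: mark it inactive, wait for its threads and armed
 * callouts to drain via gtaskqueue_terminate(), then release all resources.
 */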
static void __unused
gtaskqueue_free(struct gtaskqueue *queue)
{

        TQ_LOCK(queue);
        queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
        gtaskqueue_terminate(queue->tq_threads, queue);
        KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
        KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
        mtx_destroy(&queue->tq_mutex);
        free(queue->tq_threads, M_GTASKQUEUE);
        free(queue->tq_name, M_GTASKQUEUE);
        free(queue, M_GTASKQUEUE);
}

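/*
 * Enqueue a task.  The TASK_ENQUEUED flag makes this idempotent: a task
 * that is already pending is left where it is, so concurrent enqueues of
 * the same task coalesce into a single execution.
 */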
int
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
{

        TQ_LOCK(queue);
        if (gtask->ta_flags & TASK_ENQUEUED) {
                TQ_UNLOCK(queue);
                return (0);
        }
        STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
        gtask->ta_flags |= TASK_ENQUEUED;
        TQ_UNLOCK(queue);
        if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
                queue->tq_enqueue(queue->tq_context);

        return (0);
}

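/*
 * No-op task function; used as the completion barrier by
 * gtaskqueue_drain_tq_queue() below.
 */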
static void
gtaskqueue_task_nop_fn(void *context)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
gtaskqueue_drain_tq_queue(struct gtaskqueue *queue)
{
        struct gtask t_barrier;

        if (STAILQ_EMPTY(&queue->tq_queue))
                return;

        /*
         * Enqueue our barrier after all current tasks, but with
         * the highest priority so that newly queued tasks cannot
         * pass it.  Because of the high priority, we can not use
         * taskqueue_enqueue_locked directly (which drops the lock
         * anyway) so just insert it at tail while we have the
         * queue lock.
         */
        GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier);
        STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
        t_barrier.ta_flags |= TASK_ENQUEUED;

        /*
         * Once the barrier has executed, all previously queued tasks
         * have completed or are currently executing.
         */
        while (t_barrier.ta_flags & TASK_ENQUEUED)
                TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
gtaskqueue_drain_tq_active(struct gtaskqueue *queue)
{
        struct gtaskqueue_busy tb_marker, *tb_first;

        if (TAILQ_EMPTY(&queue->tq_active))
                return;

        /* Block taskq_terminate(). */
        queue->tq_callouts++;

        /*
         * Wait for all currently executing taskqueue threads
         * to go idle.
         */
        tb_marker.tb_running = TB_DRAIN_WAITER;
        TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
        while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
                TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
        TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

        /*
         * Wakeup any other drain waiter that happened to queue up
         * without any intervening active thread.
         */
        tb_first = TAILQ_FIRST(&queue->tq_active);
        if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
                wakeup(tb_first);

        /* Release taskqueue_terminate(). */
        queue->tq_callouts--;
        if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
                wakeup_one(queue->tq_threads);
}

void
gtaskqueue_block(struct gtaskqueue *queue)
{

        TQ_LOCK(queue);
        queue->tq_flags |= TQ_FLAGS_BLOCKED;
        TQ_UNLOCK(queue);
}

void
gtaskqueue_unblock(struct gtaskqueue *queue)
{

        TQ_LOCK(queue);
        queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
        if (!STAILQ_EMPTY(&queue->tq_queue))
                queue->tq_enqueue(queue->tq_context);
        TQ_UNLOCK(queue);
}

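/*
 * Worker loop body: pop tasks off tq_queue one at a time and run them with
 * the queue unlocked.  Each worker publishes what it is running in a
 * gtaskqueue_busy record on tq_active so that drain and cancel can see it,
 * and wakes any drain waiter that reaches the head of that list.
 */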
static void
gtaskqueue_run_locked(struct gtaskqueue *queue)
{
        struct gtaskqueue_busy tb;
        struct gtaskqueue_busy *tb_first;
        struct gtask *gtask;

        KASSERT(queue != NULL, ("tq is NULL"));
        TQ_ASSERT_LOCKED(queue);
        tb.tb_running = NULL;

        while (STAILQ_FIRST(&queue->tq_queue)) {
                TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

                /*
                 * Carefully remove the first task from the queue and
                 * clear its TASK_ENQUEUED flag
                 */
                gtask = STAILQ_FIRST(&queue->tq_queue);
                KASSERT(gtask != NULL, ("task is NULL"));
                STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
                gtask->ta_flags &= ~TASK_ENQUEUED;
                tb.tb_running = gtask;
                TQ_UNLOCK(queue);

                KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
                gtask->ta_func(gtask->ta_context);

                TQ_LOCK(queue);
                tb.tb_running = NULL;
                wakeup(gtask);

                TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
                tb_first = TAILQ_FIRST(&queue->tq_active);
                if (tb_first != NULL &&
                    tb_first->tb_running == TB_DRAIN_WAITER)
                        wakeup(tb_first);
        }
}

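/*
 * Return non-zero if the given task is currently executing on one of this
 * queue's threads.  Called with the queue lock held.
 */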
static int
task_is_running(struct gtaskqueue *queue, struct gtask *gtask)
{
        struct gtaskqueue_busy *tb;

        TQ_ASSERT_LOCKED(queue);
        TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
                if (tb->tb_running == gtask)
                        return (1);
        }
        return (0);
}

static int
gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask)
{

        if (gtask->ta_flags & TASK_ENQUEUED)
                STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link);
        gtask->ta_flags &= ~TASK_ENQUEUED;
        return (task_is_running(queue, gtask) ? EBUSY : 0);
}

int
gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
{
        int error;

        TQ_LOCK(queue);
        error = gtaskqueue_cancel_locked(queue, gtask);
        TQ_UNLOCK(queue);

        return (error);
}

void
gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
{

        if (!queue->tq_spin)
                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

        TQ_LOCK(queue);
        while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
                TQ_SLEEP(queue, gtask, &queue->tq_mutex, PWAIT, "-", 0);
        TQ_UNLOCK(queue);
}

void
gtaskqueue_drain_all(struct gtaskqueue *queue)
{

        if (!queue->tq_spin)
                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

        TQ_LOCK(queue);
        gtaskqueue_drain_tq_queue(queue);
        gtaskqueue_drain_tq_active(queue);
        TQ_UNLOCK(queue);
}

static int
_gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
        char ktname[MAXCOMLEN + 1];
        struct thread *td;
        struct gtaskqueue *tq;
        int i, error;

        if (count <= 0)
                return (EINVAL);

        vsnprintf(ktname, sizeof(ktname), name, ap);
        tq = *tqp;

        tq->tq_threads = malloc(sizeof(struct thread *) * count, M_GTASKQUEUE,
            M_NOWAIT | M_ZERO);
        if (tq->tq_threads == NULL) {
                printf("%s: no memory for %s threads\n", __func__, ktname);
                return (ENOMEM);
        }

        for (i = 0; i < count; i++) {
                if (count == 1)
                        error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
                            &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
                else
                        error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
                            &tq->tq_threads[i], RFSTOPPED, 0,
                            "%s_%d", ktname, i);
                if (error) {
                        /* should be ok to continue, taskqueue_free will dtrt */
                        printf("%s: kthread_add(%s): error %d", __func__,
                            ktname, error);
                        tq->tq_threads[i] = NULL;       /* paranoid */
                } else
                        tq->tq_tcount++;
        }
        for (i = 0; i < count; i++) {
                if (tq->tq_threads[i] == NULL)
                        continue;
                td = tq->tq_threads[i];
                if (mask) {
                        error = cpuset_setthread(td->td_tid, mask);
                        /*
                         * Failing to pin is rarely an actual fatal error;
                         * it'll just affect performance.
                         */
                        if (error)
                                printf("%s: curthread=%llu: can't pin; "
                                    "error=%d\n", __func__,
                                    (unsigned long long)td->td_tid, error);
                }
                thread_lock(td);
                sched_prio(td, pri);
                sched_add(td, SRQ_BORING);
                thread_unlock(td);
        }

        return (0);
}

int
gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    const char *name, ...)
{
        va_list ap;
        int error;

        va_start(ap, name);
        error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap);
        va_end(ap);
        return (error);
}

static void
gtaskqueue_run_callback(struct gtaskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
        taskqueue_callback_fn tq_callback;

        TQ_ASSERT_UNLOCKED(tq);
        tq_callback = tq->tq_callbacks[cb_type];
        if (tq_callback != NULL)
                tq_callback(tq->tq_cb_contexts[cb_type]);
}

static void
gtaskqueue_thread_loop(void *arg)
{
        struct gtaskqueue **tqp, *tq;

        tqp = arg;
        tq = *tqp;
        gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
        TQ_LOCK(tq);
        while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
                gtaskqueue_run_locked(tq);
                /*
                 * Because taskqueue_run() can drop tq_mutex, we need to
                 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
                 * meantime, which means we missed a wakeup.
                 */
                if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
                        break;
                TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
        }
        gtaskqueue_run_locked(tq);
        /*
         * This thread is on its way out, so just drop the lock temporarily
         * in order to call the shutdown callback.  This allows the callback
         * to look at the taskqueue, even just before it dies.
         */
        TQ_UNLOCK(tq);
        gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
        TQ_LOCK(tq);

        /* rendezvous with thread that asked us to terminate */
        tq->tq_tcount--;
        wakeup_one(tq->tq_threads);
        TQ_UNLOCK(tq);
        kthread_exit();
}

static void
gtaskqueue_thread_enqueue(void *context)
{
        struct gtaskqueue **tqp, *tq;

        tqp = context;
        tq = *tqp;
        wakeup_one(tq);
}

static struct gtaskqueue *
gtaskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
        return (_gtaskqueue_create(name, mflags, enqueue, context,
            MTX_SPIN, "fast_taskqueue"));
}

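/*
 * Task groups: a taskqgroup is a set of per-CPU gtaskqueues.  Each
 * grouptask attached to the group is assigned to one member queue, and the
 * queue's thread (and optionally the task's interrupt) is pinned to that
 * queue's CPU.
 */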
struct taskqgroup_cpu {
        LIST_HEAD(, grouptask)  tgc_tasks;
        struct gtaskqueue       *tgc_taskq;
        int                     tgc_cnt;
        int                     tgc_cpu;
};

struct taskqgroup {
        struct taskqgroup_cpu tqg_queue[MAXCPU];
        struct mtx              tqg_lock;
        char                    *tqg_name;
        int                     tqg_adjusting;
        int                     tqg_stride;
        int                     tqg_cnt;
};

struct taskq_bind_task {
        struct gtask    bt_task;
        int             bt_cpuid;
};

static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx)
{
        struct taskqgroup_cpu *qcpu;

        qcpu = &qgroup->tqg_queue[idx];
        LIST_INIT(&qcpu->tgc_tasks);
        qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK,
            gtaskqueue_thread_enqueue, &qcpu->tgc_taskq);
        gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
            "%s_%d", qgroup->tqg_name, idx);
        qcpu->tgc_cpu = idx * qgroup->tqg_stride;
}

static void
taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx)
{

        gtaskqueue_free(qgroup->tqg_queue[idx].tgc_taskq);
}

/*
 * Find the taskq with the fewest tasks that does not already service
 * this uniq identifier.
 */
static int
taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
{
        struct grouptask *n;
        int i, idx, mincnt;
        int strict;

        mtx_assert(&qgroup->tqg_lock, MA_OWNED);
        if (qgroup->tqg_cnt == 0)
                return (0);
        idx = -1;
        mincnt = INT_MAX;
        /*
         * Two passes: first scan for a queue with the fewest tasks that
         * does not already service this uniq id.  If that fails, simply
         * find the queue with the fewest total tasks.
         */
        for (strict = 1; mincnt == INT_MAX; strict = 0) {
                for (i = 0; i < qgroup->tqg_cnt; i++) {
                        if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
                                continue;
                        if (strict) {
                                LIST_FOREACH(n,
                                    &qgroup->tqg_queue[i].tgc_tasks, gt_list)
                                        if (n->gt_uniq == uniq)
                                                break;
                                if (n != NULL)
                                        continue;
                        }
                        mincnt = qgroup->tqg_queue[i].tgc_cnt;
                        idx = i;
                }
        }
        if (idx == -1)
                panic("taskqgroup_find: Failed to pick a qid.");

        return (idx);
}

void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int irq, char *name)
{
        cpuset_t mask;
        int qid;

        gtask->gt_uniq = uniq;
        gtask->gt_name = name;
        gtask->gt_irq = irq;
        gtask->gt_cpu = -1;
        mtx_lock(&qgroup->tqg_lock);
        qid = taskqgroup_find(qgroup, uniq);
        qgroup->tqg_queue[qid].tgc_cnt++;
        LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
        gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
        if (irq != -1 && smp_started) {
                CPU_ZERO(&mask);
                CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
                mtx_unlock(&qgroup->tqg_lock);
                intr_setaffinity(irq, &mask);
        } else
                mtx_unlock(&qgroup->tqg_lock);
}

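/*
 * Typical driver usage, as an illustrative sketch only (the GROUPTASK_INIT()
 * macro and the gt_task member are assumed to be provided by
 * <sys/gtaskqueue.h>; my_handler, sc, and qgroup are placeholder names):
 *
 *      static struct grouptask gt;
 *
 *      GROUPTASK_INIT(&gt, 0, my_handler, sc);
 *      taskqgroup_attach(qgroup, &gt, sc, -1, "my_handler");
 *      ...
 *      grouptaskqueue_enqueue(gt.gt_taskqueue, &gt.gt_task);
 */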
int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int cpu, int irq, char *name)
{
        cpuset_t mask;
        int i, qid;

        qid = -1;
        gtask->gt_uniq = uniq;
        gtask->gt_name = name;
        gtask->gt_irq = irq;
        gtask->gt_cpu = cpu;
        mtx_lock(&qgroup->tqg_lock);
        if (smp_started) {
                for (i = 0; i < qgroup->tqg_cnt; i++)
                        if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
                                qid = i;
                                break;
                        }
                if (qid == -1) {
                        mtx_unlock(&qgroup->tqg_lock);
                        return (EINVAL);
                }
        } else
                qid = 0;
        qgroup->tqg_queue[qid].tgc_cnt++;
        LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
        gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
        if (irq != -1 && smp_started) {
                CPU_ZERO(&mask);
                CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
                mtx_unlock(&qgroup->tqg_lock);
                intr_setaffinity(irq, &mask);
        } else
                mtx_unlock(&qgroup->tqg_lock);
        return (0);
}

void
taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
        int i;

        mtx_lock(&qgroup->tqg_lock);
        for (i = 0; i < qgroup->tqg_cnt; i++)
                if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
                        break;
        if (i == qgroup->tqg_cnt)
                panic("taskqgroup_detach: task not in group");
        qgroup->tqg_queue[i].tgc_cnt--;
        LIST_REMOVE(gtask, gt_list);
        mtx_unlock(&qgroup->tqg_lock);
        gtask->gt_taskqueue = NULL;
}

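/*
 * One-shot task run on each group queue: it pins the executing thread to
 * the queue's assigned CPU and then frees its own bind-task record.
 */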
static void
taskqgroup_binder(void *ctx)
{
        struct taskq_bind_task *gtask = (struct taskq_bind_task *)ctx;
        cpuset_t mask;
        int error;

        CPU_ZERO(&mask);
        CPU_SET(gtask->bt_cpuid, &mask);
        error = cpuset_setthread(curthread->td_tid, &mask);
        thread_lock(curthread);
        sched_bind(curthread, gtask->bt_cpuid);
        thread_unlock(curthread);

        if (error)
                printf("taskqgroup_binder: setaffinity failed: %d\n",
                    error);
        free(gtask, M_DEVBUF);
}

static void
taskqgroup_bind(struct taskqgroup *qgroup)
{
        struct taskq_bind_task *gtask;
        int i;

        /*
         * Bind taskqueue threads to specific CPUs, if they have been assigned
         * one.
         */
        for (i = 0; i < qgroup->tqg_cnt; i++) {
                gtask = malloc(sizeof(*gtask), M_DEVBUF, M_NOWAIT);
                if (gtask == NULL)
                        continue;
                GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
                gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
                grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
                    &gtask->bt_task);
        }
}

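/*
 * Resize the group to cnt queues spaced stride CPUs apart: create any new
 * per-CPU queues, migrate every attached grouptask onto the new layout via
 * taskqgroup_find(), refresh CPU and IRQ affinities, and finally retire
 * queues that are no longer needed.
 */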
static int
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
        LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
        cpuset_t mask;
        struct grouptask *gtask;
        int i, old_cnt, qid;

        mtx_assert(&qgroup->tqg_lock, MA_OWNED);

        if (cnt < 1 || cnt * stride > mp_ncpus || !smp_started) {
                printf("taskqgroup_adjust failed cnt: %d stride: %d mp_ncpus: %d smp_started: %d\n",
                    cnt, stride, mp_ncpus, smp_started);
                return (EINVAL);
        }
        if (qgroup->tqg_adjusting) {
                printf("taskqgroup_adjust failed: adjusting\n");
                return (EBUSY);
        }
        qgroup->tqg_adjusting = 1;
        old_cnt = qgroup->tqg_cnt;
        mtx_unlock(&qgroup->tqg_lock);
        /*
         * Set up queue for tasks added before boot.
         */
        if (old_cnt == 0) {
                LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks,
                    grouptask, gt_list);
                qgroup->tqg_queue[0].tgc_cnt = 0;
        }

        /*
         * If new taskq threads have been added.
         */
        for (i = old_cnt; i < cnt; i++)
                taskqgroup_cpu_create(qgroup, i);
        mtx_lock(&qgroup->tqg_lock);
        qgroup->tqg_cnt = cnt;
        qgroup->tqg_stride = stride;

        /*
         * Adjust drivers to use new taskqs.
         */
        for (i = 0; i < old_cnt; i++) {
                while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks)) != NULL) {
                        LIST_REMOVE(gtask, gt_list);
                        qgroup->tqg_queue[i].tgc_cnt--;
                        LIST_INSERT_HEAD(&gtask_head, gtask, gt_list);
                }
        }

        while ((gtask = LIST_FIRST(&gtask_head)) != NULL) {
                LIST_REMOVE(gtask, gt_list);
                if (gtask->gt_cpu == -1)
                        qid = taskqgroup_find(qgroup, gtask->gt_uniq);
                else {
                        for (i = 0; i < qgroup->tqg_cnt; i++)
                                if (qgroup->tqg_queue[i].tgc_cpu == gtask->gt_cpu) {
                                        qid = i;
                                        break;
                                }
                }
                qgroup->tqg_queue[qid].tgc_cnt++;
                LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
                    gt_list);
                gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
        }
        /*
         * Set new CPU and IRQ affinity
         */
        for (i = 0; i < cnt; i++) {
                qgroup->tqg_queue[i].tgc_cpu = i * qgroup->tqg_stride;
                CPU_ZERO(&mask);
                CPU_SET(qgroup->tqg_queue[i].tgc_cpu, &mask);
                LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list) {
                        if (gtask->gt_irq == -1)
                                continue;
                        intr_setaffinity(gtask->gt_irq, &mask);
                }
        }
        mtx_unlock(&qgroup->tqg_lock);

        /*
         * If taskq thread count has been reduced.
         */
        for (i = cnt; i < old_cnt; i++)
                taskqgroup_cpu_remove(qgroup, i);

        mtx_lock(&qgroup->tqg_lock);
        qgroup->tqg_adjusting = 0;

        taskqgroup_bind(qgroup);

        return (0);
}

int
taskqgroup_adjust(struct taskqgroup *qgroup, int cpu, int stride)
{
        int error;

        mtx_lock(&qgroup->tqg_lock);
        error = _taskqgroup_adjust(qgroup, cpu, stride);
        mtx_unlock(&qgroup->tqg_lock);

        return (error);
}

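/*
 * Illustrative boot-time sequence, as a sketch (consumers normally wrap
 * these calls in SYSINITs, e.g. via a TASKQGROUP_DEFINE()-style macro
 * assumed to live in <sys/gtaskqueue.h>; "if_io" is a placeholder name):
 *
 *      struct taskqgroup *qgroup = taskqgroup_create("if_io");
 *      ...
 *      // Once SMP is up, give the group one queue per CPU:
 *      taskqgroup_adjust(qgroup, mp_ncpus, 1);
 */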
struct taskqgroup *
taskqgroup_create(char *name)
{
        struct taskqgroup *qgroup;

        qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO);
        mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
        qgroup->tqg_name = name;
        LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);

        return (qgroup);
}

void
taskqgroup_destroy(struct taskqgroup *qgroup)
{

}