/*-
 * Copyright (c) 2000 Doug Rabson
 * Copyright (c) 2014 Jeff Roberson
 * Copyright (c) 2016 Matthew Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/gtaskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_GTASKQUEUE, "taskqueue", "Task Queues");
static void     gtaskqueue_thread_enqueue(void *);
static void     gtaskqueue_thread_loop(void *arg);

TASKQGROUP_DEFINE(softirq, mp_ncpus, 1);

struct gtaskqueue_busy {
        struct gtask    *tb_running;
        TAILQ_ENTRY(gtaskqueue_busy) tb_link;
};

static struct gtask * const TB_DRAIN_WAITER = (struct gtask *)0x1;

struct gtaskqueue {
        STAILQ_HEAD(, gtask)    tq_queue;
        gtaskqueue_enqueue_fn   tq_enqueue;
        void                    *tq_context;
        char                    *tq_name;
        TAILQ_HEAD(, gtaskqueue_busy) tq_active;
        struct mtx              tq_mutex;
        struct thread           **tq_threads;
        int                     tq_tcount;
        int                     tq_spin;
        int                     tq_flags;
        int                     tq_callouts;
        taskqueue_callback_fn   tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
        void                    *tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define TQ_FLAGS_ACTIVE         (1 << 0)
#define TQ_FLAGS_BLOCKED        (1 << 1)
#define TQ_FLAGS_UNLOCKED_ENQUEUE (1 << 2)

#define DT_CALLOUT_ARMED        (1 << 0)

#define TQ_LOCK(tq)                                             \
        do {                                                    \
                if ((tq)->tq_spin)                              \
                        mtx_lock_spin(&(tq)->tq_mutex);         \
                else                                            \
                        mtx_lock(&(tq)->tq_mutex);              \
        } while (0)
#define TQ_ASSERT_LOCKED(tq)    mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define TQ_UNLOCK(tq)                                           \
        do {                                                    \
                if ((tq)->tq_spin)                              \
                        mtx_unlock_spin(&(tq)->tq_mutex);       \
                else                                            \
                        mtx_unlock(&(tq)->tq_mutex);            \
        } while (0)
#define TQ_ASSERT_UNLOCKED(tq)  mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

static void
gtask_dump(struct gtask *gtask)
{

        printf("gtask: %p ta_flags=%x ta_priority=%d ta_func=%p ta_context=%p\n",
            gtask, gtask->ta_flags, gtask->ta_priority, gtask->ta_func,
            gtask->ta_context);
}

static __inline int
TQ_SLEEP(struct gtaskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
    int t)
{

        if (tq->tq_spin)
                return (msleep_spin(p, m, wm, t));
        return (msleep(p, m, pri, wm, t));
}

static struct gtaskqueue *
_gtaskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
        struct gtaskqueue *queue;
        char *tq_name;

        tq_name = malloc(TASKQUEUE_NAMELEN, M_GTASKQUEUE, mflags | M_ZERO);
        if (tq_name == NULL)
                return (NULL);

        snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

        queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO);
        if (queue == NULL) {
                free(tq_name, M_GTASKQUEUE);
                return (NULL);
        }

        STAILQ_INIT(&queue->tq_queue);
        TAILQ_INIT(&queue->tq_active);
        queue->tq_enqueue = enqueue;
        queue->tq_context = context;
        queue->tq_name = tq_name;
        queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
        queue->tq_flags |= TQ_FLAGS_ACTIVE;
        if (enqueue == gtaskqueue_thread_enqueue)
                queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
        mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

        return (queue);
}

/*
 * Signal a taskqueue thread to terminate.
 */
static void
gtaskqueue_terminate(struct thread **pp, struct gtaskqueue *tq)
{

        while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
                wakeup(tq);
                TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
        }
}

static void
gtaskqueue_free(struct gtaskqueue *queue)
{

        TQ_LOCK(queue);
        queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
        gtaskqueue_terminate(queue->tq_threads, queue);
        KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
        KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
        mtx_destroy(&queue->tq_mutex);
        free(queue->tq_threads, M_GTASKQUEUE);
        free(queue->tq_name, M_GTASKQUEUE);
        free(queue, M_GTASKQUEUE);
}

int
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
{

        if (queue == NULL) {
                gtask_dump(gtask);
                panic("queue == NULL");
        }
        TQ_LOCK(queue);
        if (gtask->ta_flags & TASK_ENQUEUED) {
                TQ_UNLOCK(queue);
                return (0);
        }
        STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
        gtask->ta_flags |= TASK_ENQUEUED;
        TQ_UNLOCK(queue);
        if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
                queue->tq_enqueue(queue->tq_context);
        return (0);
}

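/*
 * Example (hypothetical, not part of this file): a driver-side sketch of
 * initializing a gtask and enqueueing it.  GTASK_INIT() follows the
 * (gtask, flags, priority, func, context) form used with the barrier task
 * in gtaskqueue_drain_tq_queue() below; "my_softc", "sc_task", "sc_tq" and
 * "my_task_fn" are assumed names.
 *
 *      static void
 *      my_task_fn(void *context)
 *      {
 *              struct my_softc *sc = context;
 *              // ... deferred work runs in taskqueue thread context ...
 *      }
 *
 *      GTASK_INIT(&sc->sc_task, 0, 0, my_task_fn, sc);
 *      grouptaskqueue_enqueue(sc->sc_tq, &sc->sc_task);
 *
 * Enqueueing an already-enqueued gtask is a no-op (TASK_ENQUEUED is checked
 * above), so it is safe to call this from an interrupt handler that may fire
 * again before the task runs.
 */
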
static void
gtaskqueue_task_nop_fn(void *context)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static void
gtaskqueue_drain_tq_queue(struct gtaskqueue *queue)
{
        struct gtask t_barrier;

        if (STAILQ_EMPTY(&queue->tq_queue))
                return;

        /*
         * Enqueue our barrier after all current tasks, but with
         * the highest priority so that newly queued tasks cannot
         * pass it.  Because of the high priority, we can not use
         * taskqueue_enqueue_locked directly (which drops the lock
         * anyway) so just insert it at tail while we have the
         * queue lock.
         */
        GTASK_INIT(&t_barrier, 0, USHRT_MAX, gtaskqueue_task_nop_fn, &t_barrier);
        STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
        t_barrier.ta_flags |= TASK_ENQUEUED;

        /*
         * Once the barrier has executed, all previously queued tasks
         * have completed or are currently executing.
         */
        while (t_barrier.ta_flags & TASK_ENQUEUED)
                TQ_SLEEP(queue, &t_barrier, &queue->tq_mutex, PWAIT, "-", 0);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static void
gtaskqueue_drain_tq_active(struct gtaskqueue *queue)
{
        struct gtaskqueue_busy tb_marker, *tb_first;

        if (TAILQ_EMPTY(&queue->tq_active))
                return;

        /* Block taskq_terminate(). */
        queue->tq_callouts++;

        /*
         * Wait for all currently executing taskqueue threads
         * to go idle.
         */
        tb_marker.tb_running = TB_DRAIN_WAITER;
        TAILQ_INSERT_TAIL(&queue->tq_active, &tb_marker, tb_link);
        while (TAILQ_FIRST(&queue->tq_active) != &tb_marker)
                TQ_SLEEP(queue, &tb_marker, &queue->tq_mutex, PWAIT, "-", 0);
        TAILQ_REMOVE(&queue->tq_active, &tb_marker, tb_link);

        /*
         * Wakeup any other drain waiter that happened to queue up
         * without any intervening active thread.
         */
        tb_first = TAILQ_FIRST(&queue->tq_active);
        if (tb_first != NULL && tb_first->tb_running == TB_DRAIN_WAITER)
                wakeup(tb_first);

        /* Release taskqueue_terminate(). */
        queue->tq_callouts--;
        if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
                wakeup_one(queue->tq_threads);
}

void
gtaskqueue_block(struct gtaskqueue *queue)
{

        TQ_LOCK(queue);
        queue->tq_flags |= TQ_FLAGS_BLOCKED;
        TQ_UNLOCK(queue);
}

void
gtaskqueue_unblock(struct gtaskqueue *queue)
{

        TQ_LOCK(queue);
        queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
        if (!STAILQ_EMPTY(&queue->tq_queue))
                queue->tq_enqueue(queue->tq_context);
        TQ_UNLOCK(queue);
}

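/*
 * Example (hypothetical): blocking defers dispatch but not queueing, so a
 * driver can quiesce its taskqueue across a reconfiguration without losing
 * work.  Tasks enqueued while blocked are dispatched by the unblock call.
 * "sc" and its fields are assumed names.
 *
 *      gtaskqueue_block(sc->sc_tq);
 *      // ... touch state that the task functions also read ...
 *      grouptaskqueue_enqueue(sc->sc_tq, &sc->sc_task);  // queued, not run
 *      gtaskqueue_unblock(sc->sc_tq);                    // runs it now
 *
 * Note that blocking does not wait for a task that is already executing;
 * use the drain primitives below for that.
 */
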
static void
gtaskqueue_run_locked(struct gtaskqueue *queue)
{
        struct gtaskqueue_busy tb;
        struct gtaskqueue_busy *tb_first;
        struct gtask *gtask;

        KASSERT(queue != NULL, ("tq is NULL"));
        TQ_ASSERT_LOCKED(queue);
        tb.tb_running = NULL;

        while (STAILQ_FIRST(&queue->tq_queue)) {
                TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);

                /*
                 * Carefully remove the first task from the queue and
                 * clear its TASK_ENQUEUED flag
                 */
                gtask = STAILQ_FIRST(&queue->tq_queue);
                KASSERT(gtask != NULL, ("task is NULL"));
                STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
                gtask->ta_flags &= ~TASK_ENQUEUED;
                tb.tb_running = gtask;
                TQ_UNLOCK(queue);

                KASSERT(gtask->ta_func != NULL, ("task->ta_func is NULL"));
                gtask->ta_func(gtask->ta_context);

                TQ_LOCK(queue);
                tb.tb_running = NULL;
                wakeup(gtask);

                TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
                tb_first = TAILQ_FIRST(&queue->tq_active);
                if (tb_first != NULL &&
                    tb_first->tb_running == TB_DRAIN_WAITER)
                        wakeup(tb_first);
        }
}

static int
task_is_running(struct gtaskqueue *queue, struct gtask *gtask)
{
        struct gtaskqueue_busy *tb;

        TQ_ASSERT_LOCKED(queue);
        TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
                if (tb->tb_running == gtask)
                        return (1);
        }
        return (0);
}

static int
gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask)
{

        if (gtask->ta_flags & TASK_ENQUEUED)
                STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link);
        gtask->ta_flags &= ~TASK_ENQUEUED;
        return (task_is_running(queue, gtask) ? EBUSY : 0);
}

int
gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
{
        int error;

        TQ_LOCK(queue);
        error = gtaskqueue_cancel_locked(queue, gtask);
        TQ_UNLOCK(queue);

        return (error);
}

void
gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
{

        if (!queue->tq_spin)
                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

        TQ_LOCK(queue);
        while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
                TQ_SLEEP(queue, gtask, &queue->tq_mutex, PWAIT, "-", 0);
        TQ_UNLOCK(queue);
}

void
gtaskqueue_drain_all(struct gtaskqueue *queue)
{

        if (!queue->tq_spin)
                WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

        TQ_LOCK(queue);
        gtaskqueue_drain_tq_queue(queue);
        gtaskqueue_drain_tq_active(queue);
        TQ_UNLOCK(queue);
}

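/*
 * Example (hypothetical): a teardown sequence built from the primitives
 * above.  Cancel removes a task if it is still queued and reports EBUSY
 * while it runs; drain sleeps until it is neither queued nor running.
 * "sc" and its fields are assumed names.
 *
 *      if (gtaskqueue_cancel(sc->sc_tq, &sc->sc_task) != 0)
 *              gtaskqueue_drain(sc->sc_tq, &sc->sc_task);
 *      // or, to quiesce everything on the queue:
 *      gtaskqueue_drain_all(sc->sc_tq);
 *
 * Neither call prevents re-enqueue, so the caller must first stop the
 * source of new enqueues (e.g. disable the interrupt).
 */
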
static int
_gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, va_list ap)
{
        char ktname[MAXCOMLEN + 1];
        struct thread *td;
        struct gtaskqueue *tq;
        int i, error;

        if (count <= 0)
                return (EINVAL);

        vsnprintf(ktname, sizeof(ktname), name, ap);
        tq = *tqp;

        tq->tq_threads = malloc(sizeof(struct thread *) * count, M_GTASKQUEUE,
            M_NOWAIT | M_ZERO);
        if (tq->tq_threads == NULL) {
                printf("%s: no memory for %s threads\n", __func__, ktname);
                return (ENOMEM);
        }

        for (i = 0; i < count; i++) {
                if (count == 1)
                        error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
                            &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
                else
                        error = kthread_add(gtaskqueue_thread_loop, tqp, NULL,
                            &tq->tq_threads[i], RFSTOPPED, 0,
                            "%s_%d", ktname, i);
                if (error) {
                        /* should be ok to continue, taskqueue_free will dtrt */
                        printf("%s: kthread_add(%s): error %d", __func__,
                            ktname, error);
                        tq->tq_threads[i] = NULL;       /* paranoid */
                } else
                        tq->tq_tcount++;
        }
        for (i = 0; i < count; i++) {
                if (tq->tq_threads[i] == NULL)
                        continue;
                td = tq->tq_threads[i];
                if (mask) {
                        error = cpuset_setthread(td->td_tid, mask);
                        /*
                         * Failing to pin is rarely an actual fatal error;
                         * it'll just affect performance.
                         */
                        if (error)
                                printf("%s: curthread=%llu: can't pin; "
                                    "error=%d\n", __func__,
                                    (unsigned long long)td->td_tid, error);
                }
                thread_lock(td);
                sched_prio(td, pri);
                sched_add(td, SRQ_BORING);
                thread_unlock(td);
        }

        return (0);
}

int
gtaskqueue_start_threads(struct gtaskqueue **tqp, int count, int pri,
    const char *name, ...)
{
        va_list ap;
        int error;

        va_start(ap, name);
        error = _gtaskqueue_start_threads(tqp, count, pri, NULL, name, ap);
        va_end(ap);
        return (error);
}

static inline void
gtaskqueue_run_callback(struct gtaskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
        taskqueue_callback_fn tq_callback;

        TQ_ASSERT_UNLOCKED(tq);
        tq_callback = tq->tq_callbacks[cb_type];
        if (tq_callback != NULL)
                tq_callback(tq->tq_cb_contexts[cb_type]);
}

static void
gtaskqueue_thread_loop(void *arg)
{
        struct gtaskqueue **tqp, *tq;

        tqp = arg;
        tq = *tqp;
        gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
        TQ_LOCK(tq);
        while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
                gtaskqueue_run_locked(tq);
                /*
                 * Because taskqueue_run() can drop tq_mutex, we need to
                 * check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
                 * meantime, which means we missed a wakeup.
                 */
                if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
                        break;
                TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
        }
        gtaskqueue_run_locked(tq);
        /*
         * This thread is on its way out, so just drop the lock temporarily
         * in order to call the shutdown callback.  This allows the callback
         * to look at the taskqueue, even just before it dies.
         */
        TQ_UNLOCK(tq);
        gtaskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
        TQ_LOCK(tq);

        /* rendezvous with thread that asked us to terminate */
        tq->tq_tcount--;
        wakeup_one(tq->tq_threads);
        TQ_UNLOCK(tq);
        kthread_exit();
}

static void
gtaskqueue_thread_enqueue(void *context)
{
        struct gtaskqueue **tqp, *tq;

        tqp = context;
        tq = *tqp;
        wakeup_one(tq);
}

static struct gtaskqueue *
gtaskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{
        return _gtaskqueue_create(name, mflags, enqueue, context,
            MTX_SPIN, "fast_taskqueue");
}

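/*
 * Example (hypothetical): how the pieces above fit together.  A "fast"
 * queue uses a spin mutex internally, so enqueue is safe from interrupt
 * context.  The queue variable is passed by reference because
 * gtaskqueue_thread_enqueue() dereferences it at wakeup time; "sc_tq" is
 * an assumed field name.
 *
 *      sc->sc_tq = gtaskqueue_create_fast("my_tq", M_WAITOK,
 *          gtaskqueue_thread_enqueue, &sc->sc_tq);
 *      gtaskqueue_start_threads(&sc->sc_tq, 1, PI_SOFT, "my_tq thread");
 */
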
struct taskqgroup_cpu {
        LIST_HEAD(, grouptask)  tgc_tasks;
        struct gtaskqueue       *tgc_taskq;
        int                     tgc_cnt;
        int                     tgc_cpu;
};

struct taskqgroup {
        struct taskqgroup_cpu tqg_queue[MAXCPU];
        struct mtx      tqg_lock;
        char *          tqg_name;
        int             tqg_adjusting;
        int             tqg_stride;
        int             tqg_cnt;
};

struct taskq_bind_task {
        struct gtask bt_task;
        int     bt_cpuid;
};

static void
taskqgroup_cpu_create(struct taskqgroup *qgroup, int idx, int cpu)
{
        struct taskqgroup_cpu *qcpu;

        qcpu = &qgroup->tqg_queue[idx];
        LIST_INIT(&qcpu->tgc_tasks);
        qcpu->tgc_taskq = gtaskqueue_create_fast(NULL, M_WAITOK,
            gtaskqueue_thread_enqueue, &qcpu->tgc_taskq);
        gtaskqueue_start_threads(&qcpu->tgc_taskq, 1, PI_SOFT,
            "%s_%d", qgroup->tqg_name, idx);
        qcpu->tgc_cpu = cpu;
}

static void
taskqgroup_cpu_remove(struct taskqgroup *qgroup, int idx)
{

        gtaskqueue_free(qgroup->tqg_queue[idx].tgc_taskq);
}

/*
 * Find the taskq with least # of tasks that doesn't currently have any
 * other queues from the uniq identifier.
 */
static int
taskqgroup_find(struct taskqgroup *qgroup, void *uniq)
{
        struct grouptask *n;
        int i, idx, mincnt;
        int strict;

        mtx_assert(&qgroup->tqg_lock, MA_OWNED);
        if (qgroup->tqg_cnt == 0)
                return (0);
        idx = -1;
        mincnt = INT_MAX;
        /*
         * Two passes: first scan for a queue with the least tasks that
         * does not already service this uniq id.  If that fails simply
         * find the queue with the least total tasks.
         */
        for (strict = 1; mincnt == INT_MAX; strict = 0) {
                for (i = 0; i < qgroup->tqg_cnt; i++) {
                        if (qgroup->tqg_queue[i].tgc_cnt > mincnt)
                                continue;
                        if (strict) {
                                LIST_FOREACH(n,
                                    &qgroup->tqg_queue[i].tgc_tasks, gt_list)
                                        if (n->gt_uniq == uniq)
                                                break;
                                if (n != NULL)
                                        continue;
                        }
                        mincnt = qgroup->tqg_queue[i].tgc_cnt;
                        idx = i;
                }
        }
        if (idx == -1)
                panic("taskqgroup_find: Failed to pick a qid.");

        return (idx);
}

/*
 * smp_started is unusable since it is not set for UP kernels or even for
 * SMP kernels when there is 1 CPU.  This is usually handled by adding a
 * (mp_ncpus == 1) test, but that would be broken here since we need to
 * synchronize with the SI_SUB_SMP ordering.  Even in the pure SMP case
 * smp_started only gives a fuzzy ordering relative to SI_SUB_SMP.
 *
 * So maintain our own flag.  It must be set after all CPUs are started
 * and before SI_SUB_SMP:SI_ORDER_ANY so that the SYSINIT for delayed
 * adjustment is properly delayed.  SI_ORDER_FOURTH is clearly before
 * SI_ORDER_ANY and unclearly after the CPUs are started.  It would be
 * simpler for adjustment to pass a flag indicating if it is delayed.
 */
static int tqg_smp_started;

static void
tqg_record_smp_started(void *arg)
{

        tqg_smp_started = 1;
}

SYSINIT(tqg_record_smp_started, SI_SUB_SMP, SI_ORDER_FOURTH,
        tqg_record_smp_started, NULL);

void
taskqgroup_attach(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int irq, char *name)
{
        cpuset_t mask;
        int qid;

        gtask->gt_uniq = uniq;
        gtask->gt_name = name;
        gtask->gt_irq = irq;
        gtask->gt_cpu = -1;
        mtx_lock(&qgroup->tqg_lock);
        qid = taskqgroup_find(qgroup, uniq);
        qgroup->tqg_queue[qid].tgc_cnt++;
        LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
        gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
        if (irq != -1 && tqg_smp_started) {
                gtask->gt_cpu = qgroup->tqg_queue[qid].tgc_cpu;
                CPU_ZERO(&mask);
                CPU_SET(qgroup->tqg_queue[qid].tgc_cpu, &mask);
                mtx_unlock(&qgroup->tqg_lock);
                intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
        } else
                mtx_unlock(&qgroup->tqg_lock);
}

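/*
 * Example (hypothetical): attaching per-device work to the softirq group
 * defined at the top of this file.  TASKQGROUP_DEFINE(softirq, ...) is
 * assumed to provide "qgroup_softirq", and GROUPTASK_INIT() /
 * GROUPTASK_ENQUEUE() are assumed from <sys/gtaskqueue.h>; "sc" is an
 * assumed softc.
 *
 *      GROUPTASK_INIT(&sc->sc_gtask, 0, my_task_fn, sc);
 *      taskqgroup_attach(qgroup_softirq, &sc->sc_gtask, sc,
 *          sc->sc_irq, "my_gtask");
 *      ...
 *      GROUPTASK_ENQUEUE(&sc->sc_gtask);       // from the interrupt handler
 *      ...
 *      taskqgroup_detach(qgroup_softirq, &sc->sc_gtask);  // on detach
 *
 * Passing a valid irq lets the group steer the interrupt to the CPU that
 * runs the chosen per-CPU queue.
 */
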
static void
taskqgroup_attach_deferred(struct taskqgroup *qgroup, struct grouptask *gtask)
{
        cpuset_t mask;
        int qid, cpu;

        mtx_lock(&qgroup->tqg_lock);
        qid = taskqgroup_find(qgroup, gtask->gt_uniq);
        cpu = qgroup->tqg_queue[qid].tgc_cpu;
        if (gtask->gt_irq != -1) {
                mtx_unlock(&qgroup->tqg_lock);

                CPU_ZERO(&mask);
                CPU_SET(cpu, &mask);
                intr_setaffinity(gtask->gt_irq, CPU_WHICH_IRQ, &mask);

                mtx_lock(&qgroup->tqg_lock);
        }
        qgroup->tqg_queue[qid].tgc_cnt++;
        LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask,
            gt_list);
        MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
        gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
        mtx_unlock(&qgroup->tqg_lock);
}

int
taskqgroup_attach_cpu(struct taskqgroup *qgroup, struct grouptask *gtask,
    void *uniq, int cpu, int irq, char *name)
{
        cpuset_t mask;
        int i, qid;

        qid = -1;
        gtask->gt_uniq = uniq;
        gtask->gt_name = name;
        gtask->gt_irq = irq;
        gtask->gt_cpu = cpu;
        mtx_lock(&qgroup->tqg_lock);
        if (tqg_smp_started) {
                for (i = 0; i < qgroup->tqg_cnt; i++)
                        if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
                                qid = i;
                                break;
                        }
                if (qid == -1) {
                        mtx_unlock(&qgroup->tqg_lock);
                        printf("%s: qid not found for cpu=%d\n", __func__, cpu);
                        return (EINVAL);
                }
        } else
                qid = 0;
        qgroup->tqg_queue[qid].tgc_cnt++;
        LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
        gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
        cpu = qgroup->tqg_queue[qid].tgc_cpu;
        mtx_unlock(&qgroup->tqg_lock);

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        if (irq != -1 && tqg_smp_started)
                intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
        return (0);
}

static int
taskqgroup_attach_cpu_deferred(struct taskqgroup *qgroup,
    struct grouptask *gtask)
{
        cpuset_t mask;
        int i, qid, irq, cpu;

        qid = -1;
        irq = gtask->gt_irq;
        cpu = gtask->gt_cpu;
        MPASS(tqg_smp_started);
        mtx_lock(&qgroup->tqg_lock);
        for (i = 0; i < qgroup->tqg_cnt; i++)
                if (qgroup->tqg_queue[i].tgc_cpu == cpu) {
                        qid = i;
                        break;
                }
        if (qid == -1) {
                mtx_unlock(&qgroup->tqg_lock);
                printf("%s: qid not found for cpu=%d\n", __func__, cpu);
                return (EINVAL);
        }
        qgroup->tqg_queue[qid].tgc_cnt++;
        LIST_INSERT_HEAD(&qgroup->tqg_queue[qid].tgc_tasks, gtask, gt_list);
        MPASS(qgroup->tqg_queue[qid].tgc_taskq != NULL);
        gtask->gt_taskqueue = qgroup->tqg_queue[qid].tgc_taskq;
        mtx_unlock(&qgroup->tqg_lock);

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);

        if (irq != -1)
                intr_setaffinity(irq, CPU_WHICH_IRQ, &mask);
        return (0);
}

void
taskqgroup_detach(struct taskqgroup *qgroup, struct grouptask *gtask)
{
        int i;

        mtx_lock(&qgroup->tqg_lock);
        for (i = 0; i < qgroup->tqg_cnt; i++)
                if (qgroup->tqg_queue[i].tgc_taskq == gtask->gt_taskqueue)
                        break;
        if (i == qgroup->tqg_cnt)
                panic("taskqgroup_detach: task not in group");
        qgroup->tqg_queue[i].tgc_cnt--;
        LIST_REMOVE(gtask, gt_list);
        mtx_unlock(&qgroup->tqg_lock);
        gtask->gt_taskqueue = NULL;
}

static void
taskqgroup_binder(void *ctx)
{
        struct taskq_bind_task *gtask = (struct taskq_bind_task *)ctx;
        cpuset_t mask;
        int error;

        CPU_ZERO(&mask);
        CPU_SET(gtask->bt_cpuid, &mask);
        error = cpuset_setthread(curthread->td_tid, &mask);
        thread_lock(curthread);
        sched_bind(curthread, gtask->bt_cpuid);
        thread_unlock(curthread);

        if (error)
                printf("taskqgroup_binder: setaffinity failed: %d\n",
                    error);
        free(gtask, M_DEVBUF);
}

static void
taskqgroup_bind(struct taskqgroup *qgroup)
{
        struct taskq_bind_task *gtask;
        int i;

        /*
         * Bind taskqueue threads to specific CPUs, if they have been assigned
         * one.
         */
        if (qgroup->tqg_cnt == 1)
                return;

        for (i = 0; i < qgroup->tqg_cnt; i++) {
                gtask = malloc(sizeof(*gtask), M_DEVBUF, M_WAITOK);
                GTASK_INIT(&gtask->bt_task, 0, 0, taskqgroup_binder, gtask);
                gtask->bt_cpuid = qgroup->tqg_queue[i].tgc_cpu;
                grouptaskqueue_enqueue(qgroup->tqg_queue[i].tgc_taskq,
                    &gtask->bt_task);
        }
}

static int
_taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
        LIST_HEAD(, grouptask) gtask_head = LIST_HEAD_INITIALIZER(NULL);
        struct grouptask *gtask;
        int i, k, old_cnt, old_cpu, cpu;

        mtx_assert(&qgroup->tqg_lock, MA_OWNED);

        if (cnt < 1 || cnt * stride > mp_ncpus || !tqg_smp_started) {
                printf("%s: failed cnt: %d stride: %d "
                    "mp_ncpus: %d tqg_smp_started: %d\n",
                    __func__, cnt, stride, mp_ncpus, tqg_smp_started);
                return (EINVAL);
        }
        if (qgroup->tqg_adjusting) {
                printf("taskqgroup_adjust failed: adjusting\n");
                return (EBUSY);
        }
        qgroup->tqg_adjusting = 1;
        old_cnt = qgroup->tqg_cnt;
        old_cpu = 0;
        if (old_cnt < cnt)
                old_cpu = qgroup->tqg_queue[old_cnt].tgc_cpu;
        mtx_unlock(&qgroup->tqg_lock);
        /*
         * Set up queue for tasks added before boot.
         */
        if (old_cnt == 0) {
                LIST_SWAP(&gtask_head, &qgroup->tqg_queue[0].tgc_tasks,
                    grouptask, gt_list);
                qgroup->tqg_queue[0].tgc_cnt = 0;
        }

        /*
         * If new taskq threads have been added.
         */
        cpu = old_cpu;
        for (i = old_cnt; i < cnt; i++) {
                taskqgroup_cpu_create(qgroup, i, cpu);

                for (k = 0; k < stride; k++)
                        cpu = CPU_NEXT(cpu);
        }
        mtx_lock(&qgroup->tqg_lock);
        qgroup->tqg_cnt = cnt;
        qgroup->tqg_stride = stride;

        /*
         * Adjust drivers to use new taskqs.
         */
        for (i = 0; i < old_cnt; i++) {
                while ((gtask = LIST_FIRST(&qgroup->tqg_queue[i].tgc_tasks)) != NULL) {
                        LIST_REMOVE(gtask, gt_list);
                        qgroup->tqg_queue[i].tgc_cnt--;
                        LIST_INSERT_HEAD(&gtask_head, gtask, gt_list);
                }
        }
        mtx_unlock(&qgroup->tqg_lock);

        while ((gtask = LIST_FIRST(&gtask_head)) != NULL) {
                LIST_REMOVE(gtask, gt_list);
                if (gtask->gt_cpu == -1)
                        taskqgroup_attach_deferred(qgroup, gtask);
                else if (taskqgroup_attach_cpu_deferred(qgroup, gtask))
                        taskqgroup_attach_deferred(qgroup, gtask);
        }

        mtx_lock(&qgroup->tqg_lock);
        for (i = 0; i < qgroup->tqg_cnt; i++) {
                MPASS(qgroup->tqg_queue[i].tgc_taskq != NULL);
                LIST_FOREACH(gtask, &qgroup->tqg_queue[i].tgc_tasks, gt_list)
                        MPASS(gtask->gt_taskqueue != NULL);
        }
        mtx_unlock(&qgroup->tqg_lock);

        /*
         * If taskq thread count has been reduced.
         */
        for (i = cnt; i < old_cnt; i++)
                taskqgroup_cpu_remove(qgroup, i);

        taskqgroup_bind(qgroup);

        mtx_lock(&qgroup->tqg_lock);
        qgroup->tqg_adjusting = 0;

        return (0);
}

int
taskqgroup_adjust(struct taskqgroup *qgroup, int cnt, int stride)
{
        int error;

        mtx_lock(&qgroup->tqg_lock);
        error = _taskqgroup_adjust(qgroup, cnt, stride);
        mtx_unlock(&qgroup->tqg_lock);

        return (error);
}

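/*
 * Example (hypothetical): re-spreading a group across CPUs once SMP is up.
 * A stride of 1 places queues on consecutive CPUs; a stride of 2 skips
 * alternate CPUs (e.g. to avoid hyperthread siblings).  Tasks attached
 * before boot are migrated to the new queues by _taskqgroup_adjust().
 *
 *      taskqgroup_adjust(qgroup_softirq, mp_ncpus, 1);
 *
 * TASKQGROUP_DEFINE() is assumed to arrange an equivalent delayed call via
 * SYSINIT after tqg_smp_started is set.
 */
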
struct taskqgroup *
taskqgroup_create(char *name)
{
        struct taskqgroup *qgroup;

        qgroup = malloc(sizeof(*qgroup), M_GTASKQUEUE, M_WAITOK | M_ZERO);
        mtx_init(&qgroup->tqg_lock, "taskqgroup", NULL, MTX_DEF);
        qgroup->tqg_name = name;
        LIST_INIT(&qgroup->tqg_queue[0].tgc_tasks);

        return (qgroup);
}

void
taskqgroup_destroy(struct taskqgroup *qgroup)
{
}