/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cpuset.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/epoch.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/taskqueue.h>
#include <sys/unistd.h>
#include <machine/stdarg.h>

static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
static void	*taskqueue_giant_ih;
static void	*taskqueue_ih;
static void	 taskqueue_fast_enqueue(void *);
static void	 taskqueue_swi_enqueue(void *);
static void	 taskqueue_swi_giant_enqueue(void *);

struct taskqueue_busy {
	struct task		*tb_running;
	u_int			 tb_seq;
	bool			 tb_canceling;
	LIST_ENTRY(taskqueue_busy) tb_link;
};

struct taskqueue {
	STAILQ_HEAD(, task)	tq_queue;
	LIST_HEAD(, taskqueue_busy) tq_active;
	struct task		*tq_hint;
	u_int			tq_seq;
	int			tq_callouts;
	struct mtx_padalign	tq_mutex;
	taskqueue_enqueue_fn	tq_enqueue;
	void			*tq_context;
	char			*tq_name;
	struct thread		**tq_threads;
	int			tq_tcount;
	int			tq_spin;
	int			tq_flags;
	taskqueue_callback_fn	tq_callbacks[TASKQUEUE_NUM_CALLBACKS];
	void			*tq_cb_contexts[TASKQUEUE_NUM_CALLBACKS];
};

#define	TQ_FLAGS_ACTIVE		(1 << 0)
#define	TQ_FLAGS_BLOCKED	(1 << 1)
#define	TQ_FLAGS_UNLOCKED_ENQUEUE	(1 << 2)

#define	DT_CALLOUT_ARMED	(1 << 0)
#define	DT_DRAIN_IN_PROGRESS	(1 << 1)

#define	TQ_LOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_lock_spin(&(tq)->tq_mutex);		\
		else						\
			mtx_lock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_LOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_OWNED)

#define	TQ_UNLOCK(tq)						\
	do {							\
		if ((tq)->tq_spin)				\
			mtx_unlock_spin(&(tq)->tq_mutex);	\
		else						\
			mtx_unlock(&(tq)->tq_mutex);		\
	} while (0)
#define	TQ_ASSERT_UNLOCKED(tq)	mtx_assert(&(tq)->tq_mutex, MA_NOTOWNED)

void
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
    int priority, task_fn_t func, void *context)
{

	TASK_INIT(&timeout_task->t, priority, func, context);
	callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
	    CALLOUT_RETURNUNLOCKED);
	timeout_task->q = queue;
	timeout_task->f = 0;
}

static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, const char *wm)
{

	if (tq->tq_spin)
		return (msleep_spin(p, (struct mtx *)&tq->tq_mutex, wm, 0));
	return (msleep(p, &tq->tq_mutex, 0, wm, 0));
}

static struct taskqueue_busy *
task_get_busy(struct taskqueue *queue, struct task *task)
{
	struct taskqueue_busy *tb;

	TQ_ASSERT_LOCKED(queue);
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if (tb->tb_running == task)
			return (tb);
	}
	return (NULL);
}

static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context,
    int mtxflags, const char *mtxname __unused)
{
	struct taskqueue *queue;
	char *tq_name;

	tq_name = malloc(TASKQUEUE_NAMELEN, M_TASKQUEUE, mflags | M_ZERO);
	if (tq_name == NULL)
		return (NULL);

	queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
	if (queue == NULL) {
		free(tq_name, M_TASKQUEUE);
		return (NULL);
	}

	snprintf(tq_name, TASKQUEUE_NAMELEN, "%s", (name) ? name : "taskqueue");

	STAILQ_INIT(&queue->tq_queue);
	LIST_INIT(&queue->tq_active);
	queue->tq_enqueue = enqueue;
	queue->tq_context = context;
	queue->tq_name = tq_name;
	queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
	queue->tq_flags |= TQ_FLAGS_ACTIVE;
	if (enqueue == taskqueue_fast_enqueue ||
	    enqueue == taskqueue_swi_enqueue ||
	    enqueue == taskqueue_swi_giant_enqueue ||
	    enqueue == taskqueue_thread_enqueue)
		queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
	mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);

	return (queue);
}

struct taskqueue *
taskqueue_create(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_DEF, name);
}

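/*
 * Illustrative usage sketch (cf. taskqueue(9)); the "my_*" names are
 * hypothetical and not part of this file:
 *
 *	static struct taskqueue *my_tq;
 *	static struct task my_task;
 *
 *	static void
 *	my_task_fn(void *context, int pending)
 *	{
 *		... "pending" counts enqueues coalesced since the last run ...
 *	}
 *
 *	my_tq = taskqueue_create("my_tq", M_WAITOK,
 *	    taskqueue_thread_enqueue, &my_tq);
 *	taskqueue_start_threads(&my_tq, 1, PWAIT, "my_tq");
 *	TASK_INIT(&my_task, 0, my_task_fn, NULL);
 *	taskqueue_enqueue(my_tq, &my_task);
 */
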
void
taskqueue_set_callback(struct taskqueue *queue,
    enum taskqueue_callback_type cb_type, taskqueue_callback_fn callback,
    void *context)
{

	KASSERT(((cb_type >= TASKQUEUE_CALLBACK_TYPE_MIN) &&
	    (cb_type <= TASKQUEUE_CALLBACK_TYPE_MAX)),
	    ("Callback type %d not valid, must be %d-%d", cb_type,
	    TASKQUEUE_CALLBACK_TYPE_MIN, TASKQUEUE_CALLBACK_TYPE_MAX));
	KASSERT((queue->tq_callbacks[cb_type] == NULL),
	    ("Re-initialization of taskqueue callback?"));

	queue->tq_callbacks[cb_type] = callback;
	queue->tq_cb_contexts[cb_type] = context;
}

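/*
 * Illustrative sketch: hook per-thread setup from, e.g., a driver attach
 * routine ("my_thread_init" and "my_softc" are hypothetical names):
 *
 *	taskqueue_set_callback(my_tq, TASKQUEUE_CALLBACK_TYPE_INIT,
 *	    my_thread_init, my_softc);
 */
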
/*
 * Signal a taskqueue thread to terminate.
 */
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{

	while (tq->tq_tcount > 0 || tq->tq_callouts > 0) {
		wakeup(tq);
		TQ_SLEEP(tq, pp, "tq_destroy");
	}
}

void
taskqueue_free(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
	taskqueue_terminate(queue->tq_threads, queue);
	KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
	KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
	mtx_destroy(&queue->tq_mutex);
	free(queue->tq_threads, M_TASKQUEUE);
	free(queue->tq_name, M_TASKQUEUE);
	free(queue, M_TASKQUEUE);
}

static int
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task, int flags)
{
	struct task *ins;
	struct task *prev;
	struct taskqueue_busy *tb;

	KASSERT(task->ta_func != NULL, ("enqueueing task with NULL func"));

	/*
	 * Ignore canceling task if requested.
	 */
	if (__predict_false((flags & TASKQUEUE_FAIL_IF_CANCELING) != 0)) {
		tb = task_get_busy(queue, task);
		if (tb != NULL && tb->tb_canceling) {
			TQ_UNLOCK(queue);
			return (ECANCELED);
		}
	}

	/*
	 * Count multiple enqueues.
	 */
	if (task->ta_pending) {
		if (__predict_false((flags & TASKQUEUE_FAIL_IF_PENDING) != 0)) {
			TQ_UNLOCK(queue);
			return (EEXIST);
		}
		if (task->ta_pending < USHRT_MAX)
			task->ta_pending++;
		TQ_UNLOCK(queue);
		return (0);
	}

	/*
	 * Optimise cases when all tasks use a small set of priorities.
	 * In case of only one priority we always insert at the end.
	 * In case of two, tq_hint typically gives the insertion point.
	 * In case of more than two, tq_hint should halve the search.
	 */
	prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
	if (!prev || prev->ta_priority >= task->ta_priority) {
		STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
	} else {
		prev = queue->tq_hint;
		if (prev && prev->ta_priority >= task->ta_priority) {
			ins = STAILQ_NEXT(prev, ta_link);
		} else {
			prev = NULL;
			ins = STAILQ_FIRST(&queue->tq_queue);
		}
		for (; ins != NULL; prev = ins, ins = STAILQ_NEXT(ins, ta_link))
			if (ins->ta_priority < task->ta_priority)
				break;

		if (prev != NULL) {
			STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
			queue->tq_hint = task;
		} else
			STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
	}

	task->ta_pending = 1;
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
		TQ_UNLOCK(queue);
	if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
		queue->tq_enqueue(queue->tq_context);
	if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
		TQ_UNLOCK(queue);

	/* Return with lock released. */
	return (0);
}

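/*
 * Worked example for the insertion logic above: enqueueing tasks with
 * priorities 4, 1, 4, 2 (in that order) leaves the queue as 4, 4, 2, 1.
 * Higher priorities run first, and tasks of equal priority keep their
 * FIFO order relative to each other.
 */
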
int
taskqueue_enqueue_flags(struct taskqueue *queue, struct task *task, int flags)
{
	int res;

	TQ_LOCK(queue);
	res = taskqueue_enqueue_locked(queue, task, flags);
	/* The lock is released inside. */

	return (res);
}

int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{

	return (taskqueue_enqueue_flags(queue, task, 0));
}

static void
taskqueue_timeout_func(void *arg)
{
	struct taskqueue *queue;
	struct timeout_task *timeout_task;

	timeout_task = arg;
	queue = timeout_task->q;
	KASSERT((timeout_task->f & DT_CALLOUT_ARMED) != 0, ("Stray timeout"));
	timeout_task->f &= ~DT_CALLOUT_ARMED;
	queue->tq_callouts--;
	taskqueue_enqueue_locked(timeout_task->q, &timeout_task->t, 0);
	/* The lock is released inside. */
}

int
taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
    struct timeout_task *timeout_task, sbintime_t sbt, sbintime_t pr, int flags)
{
	int res;

	TQ_LOCK(queue);
	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
	    ("Migrated queue"));
	timeout_task->q = queue;
	res = timeout_task->t.ta_pending;
	if (timeout_task->f & DT_DRAIN_IN_PROGRESS) {
		/* Do nothing */
		TQ_UNLOCK(queue);
		res = -1;
	} else if (sbt == 0) {
		taskqueue_enqueue_locked(queue, &timeout_task->t, 0);
		/* The lock is released inside. */
	} else {
		if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
			res++;
		} else {
			queue->tq_callouts++;
			timeout_task->f |= DT_CALLOUT_ARMED;
			if (sbt < 0)
				sbt = -sbt; /* Ignore overflow. */
		}
		if (sbt > 0) {
			if (queue->tq_spin)
				flags |= C_DIRECT_EXEC;
			callout_reset_sbt(&timeout_task->c, sbt, pr,
			    taskqueue_timeout_func, timeout_task, flags);
		}
		TQ_UNLOCK(queue);
	}
	return (res);
}

int
taskqueue_enqueue_timeout(struct taskqueue *queue,
    struct timeout_task *ttask, int ticks)
{

	return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt,
	    0, C_HARDCLOCK));
}

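/*
 * Illustrative sketch of a deferred task ("my_tq", "my_ttask" and
 * "my_task_fn" are hypothetical); hz ticks is about one second:
 *
 *	static struct timeout_task my_ttask;
 *
 *	TIMEOUT_TASK_INIT(my_tq, &my_ttask, 0, my_task_fn, NULL);
 *	taskqueue_enqueue_timeout(my_tq, &my_ttask, hz);
 */
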
static void
taskqueue_task_nop_fn(void *context, int pending)
{
}

/*
 * Block until all currently queued tasks in this taskqueue
 * have begun execution.  Tasks queued during execution of
 * this function are ignored.
 */
static int
taskqueue_drain_tq_queue(struct taskqueue *queue)
{
	struct task t_barrier;

	if (STAILQ_EMPTY(&queue->tq_queue))
		return (0);

	/*
	 * Enqueue our barrier after all current tasks, but with
	 * the highest priority so that newly queued tasks cannot
	 * pass it.  Because of the high priority, we can not use
	 * taskqueue_enqueue_locked directly (which drops the lock
	 * anyway) so just insert it at tail while we have the
	 * queue lock.
	 */
	TASK_INIT(&t_barrier, UCHAR_MAX, taskqueue_task_nop_fn, &t_barrier);
	STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
	queue->tq_hint = &t_barrier;
	t_barrier.ta_pending = 1;

	/*
	 * Once the barrier has executed, all previously queued tasks
	 * have completed or are currently executing.
	 */
	while (t_barrier.ta_pending != 0)
		TQ_SLEEP(queue, &t_barrier, "tq_qdrain");
	return (1);
}

/*
 * Block until all currently executing tasks for this taskqueue
 * complete.  Tasks that begin execution during the execution
 * of this function are ignored.
 */
static int
taskqueue_drain_tq_active(struct taskqueue *queue)
{
	struct taskqueue_busy *tb;
	u_int seq;

	if (LIST_EMPTY(&queue->tq_active))
		return (0);

	/* Block taskqueue_terminate(). */
	queue->tq_callouts++;

	/*
	 * Wait for any active task with sequence from the past.  The
	 * signed difference copes with tq_seq wraparound.
	 */
	seq = queue->tq_seq;
restart:
	LIST_FOREACH(tb, &queue->tq_active, tb_link) {
		if ((int)(tb->tb_seq - seq) <= 0) {
			TQ_SLEEP(queue, tb->tb_running, "tq_adrain");
			goto restart;
		}
	}

	/* Release taskqueue_terminate(). */
	queue->tq_callouts--;
	if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
		wakeup_one(queue->tq_threads);
	return (1);
}

void
taskqueue_block(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags |= TQ_FLAGS_BLOCKED;
	TQ_UNLOCK(queue);
}

void
taskqueue_unblock(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
	if (!STAILQ_EMPTY(&queue->tq_queue))
		queue->tq_enqueue(queue->tq_context);
	TQ_UNLOCK(queue);
}

static void
taskqueue_run_locked(struct taskqueue *queue)
{
	struct epoch_tracker et;
	struct taskqueue_busy tb;
	struct task *task;
	bool in_net_epoch;
	int pending;

	KASSERT(queue != NULL, ("tq is NULL"));
	TQ_ASSERT_LOCKED(queue);
	tb.tb_running = NULL;
	LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
	in_net_epoch = false;

	while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
		pending = task->ta_pending;
		task->ta_pending = 0;
		tb.tb_running = task;
		tb.tb_seq = ++queue->tq_seq;
		tb.tb_canceling = false;
		TQ_UNLOCK(queue);

		KASSERT(task->ta_func != NULL, ("task->ta_func is NULL"));
		if (!in_net_epoch && TASK_IS_NET(task)) {
			in_net_epoch = true;
			NET_EPOCH_ENTER(et);
		} else if (in_net_epoch && !TASK_IS_NET(task)) {
			NET_EPOCH_EXIT(et);
			in_net_epoch = false;
		}
		task->ta_func(task->ta_context, pending);

		TQ_LOCK(queue);
		wakeup(task);
	}
	if (in_net_epoch)
		NET_EPOCH_EXIT(et);
	LIST_REMOVE(&tb, tb_link);
}

void
taskqueue_run(struct taskqueue *queue)
{

	TQ_LOCK(queue);
	taskqueue_run_locked(queue);
	TQ_UNLOCK(queue);
}

/*
 * Only use this function in single-threaded contexts.  It returns
 * non-zero if the given task is either pending or running.  Else the
 * task is idle and can be queued again or freed.
 */
int
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
{
	int retval;

	TQ_LOCK(queue);
	retval = task->ta_pending > 0 || task_get_busy(queue, task) != NULL;
	TQ_UNLOCK(queue);

	return (retval);
}

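/*
 * Illustrative single-threaded polling sketch ("my_tq" and "my_task" are
 * hypothetical); pause(9) yields while the task is pending or running:
 *
 *	while (taskqueue_poll_is_busy(my_tq, &my_task))
 *		pause("tqpoll", 1);
 */
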
static int
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
    u_int *pendp)
{
	struct taskqueue_busy *tb;
	int retval = 0;

	if (task->ta_pending > 0) {
		STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
		if (queue->tq_hint == task)
			queue->tq_hint = NULL;
	}
	if (pendp != NULL)
		*pendp = task->ta_pending;
	task->ta_pending = 0;
	tb = task_get_busy(queue, task);
	if (tb != NULL) {
		tb->tb_canceling = true;
		retval = EBUSY;
	}

	return (retval);
}

int
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
{
	int error;

	TQ_LOCK(queue);
	error = taskqueue_cancel_locked(queue, task, pendp);
	TQ_UNLOCK(queue);

	return (error);
}

int
taskqueue_cancel_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task, u_int *pendp)
{
	u_int pending, pending1;
	int error;

	TQ_LOCK(queue);
	pending = !!(callout_stop(&timeout_task->c) > 0);
	error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
	if ((timeout_task->f & DT_CALLOUT_ARMED) != 0) {
		timeout_task->f &= ~DT_CALLOUT_ARMED;
		queue->tq_callouts--;
	}
	TQ_UNLOCK(queue);

	if (pendp != NULL)
		*pendp = pending + pending1;
	return (error);
}

void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	while (task->ta_pending != 0 || task_get_busy(queue, task) != NULL)
		TQ_SLEEP(queue, task, "tq_drain");
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_all(struct taskqueue *queue)
{

	if (!queue->tq_spin)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);

	TQ_LOCK(queue);
	(void)taskqueue_drain_tq_queue(queue);
	(void)taskqueue_drain_tq_active(queue);
	TQ_UNLOCK(queue);
}

void
taskqueue_drain_timeout(struct taskqueue *queue,
    struct timeout_task *timeout_task)
{

	/*
	 * Set flag to prevent timer from re-starting during drain:
	 */
	TQ_LOCK(queue);
	KASSERT((timeout_task->f & DT_DRAIN_IN_PROGRESS) == 0,
	    ("Drain already in progress"));
	timeout_task->f |= DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);

	callout_drain(&timeout_task->c);
	taskqueue_drain(queue, &timeout_task->t);

	/*
	 * Clear flag to allow timer to re-start:
	 */
	TQ_LOCK(queue);
	timeout_task->f &= ~DT_DRAIN_IN_PROGRESS;
	TQ_UNLOCK(queue);
}

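/*
 * Illustrative detach-path ordering sketch (hypothetical names): stop
 * the timer and drain all tasks before freeing the queue:
 *
 *	taskqueue_drain_timeout(my_tq, &my_ttask);
 *	taskqueue_drain(my_tq, &my_task);
 *	taskqueue_free(my_tq);
 */
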
void
taskqueue_quiesce(struct taskqueue *queue)
{
	int ret;

	TQ_LOCK(queue);
	/* Loop until a pass finds neither queued nor running tasks. */
	do {
		ret = taskqueue_drain_tq_queue(queue);
		if (ret == 0)
			ret = taskqueue_drain_tq_active(queue);
	} while (ret != 0);
	TQ_UNLOCK(queue);
}

static void
taskqueue_swi_enqueue(void *context)
{

	swi_sched(taskqueue_ih, 0);
}

static void
taskqueue_swi_run(void *dummy)
{

	taskqueue_run(taskqueue_swi);
}

static void
taskqueue_swi_giant_enqueue(void *context)
{

	swi_sched(taskqueue_giant_ih, 0);
}

static void
taskqueue_swi_giant_run(void *dummy)
{

	taskqueue_run(taskqueue_swi_giant);
}

static int
_taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, struct proc *p, const char *name, va_list ap)
{
	char ktname[MAXCOMLEN + 1];
	struct thread *td;
	struct taskqueue *tq;
	int i, error;

	if (count <= 0)
		return (EINVAL);

	vsnprintf(ktname, sizeof(ktname), name, ap);
	tq = *tqp;

	tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
	    M_NOWAIT | M_ZERO);
	if (tq->tq_threads == NULL) {
		printf("%s: no memory for %s threads\n", __func__, ktname);
		return (ENOMEM);
	}

	for (i = 0; i < count; i++) {
		if (count == 1)
			error = kthread_add(taskqueue_thread_loop, tqp, p,
			    &tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
		else
			error = kthread_add(taskqueue_thread_loop, tqp, p,
			    &tq->tq_threads[i], RFSTOPPED, 0,
			    "%s_%d", ktname, i);
		if (error) {
			/* should be ok to continue, taskqueue_free will dtrt */
			printf("%s: kthread_add(%s): error %d\n", __func__,
			    ktname, error);
			tq->tq_threads[i] = NULL;	/* paranoid */
		} else
			tq->tq_tcount++;
	}
	if (tq->tq_tcount == 0) {
		free(tq->tq_threads, M_TASKQUEUE);
		tq->tq_threads = NULL;
		return (ENOMEM);
	}
	for (i = 0; i < count; i++) {
		if (tq->tq_threads[i] == NULL)
			continue;
		td = tq->tq_threads[i];
		if (mask) {
			error = cpuset_setthread(td->td_tid, mask);
			/*
			 * Failing to pin is rarely an actual fatal error;
			 * it'll just affect performance.
			 */
			if (error)
				printf("%s: curthread=%llu: can't pin; "
				    "error=%d\n", __func__,
				    (unsigned long long)td->td_tid, error);
		}
		thread_lock(td);
		sched_prio(td, pri);
		sched_add(td, SRQ_BORING);
	}

	return (0);
}

int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
    const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, NULL, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_in_proc(struct taskqueue **tqp, int count, int pri,
    struct proc *proc, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, NULL, proc, name, ap);
	va_end(ap);
	return (error);
}

int
taskqueue_start_threads_cpuset(struct taskqueue **tqp, int count, int pri,
    cpuset_t *mask, const char *name, ...)
{
	va_list ap;
	int error;

	va_start(ap, name);
	error = _taskqueue_start_threads(tqp, count, pri, mask, NULL, name, ap);
	va_end(ap);
	return (error);
}

static void
taskqueue_run_callback(struct taskqueue *tq,
    enum taskqueue_callback_type cb_type)
{
	taskqueue_callback_fn tq_callback;

	TQ_ASSERT_UNLOCKED(tq);
	tq_callback = tq->tq_callbacks[cb_type];
	if (tq_callback != NULL)
		tq_callback(tq->tq_cb_contexts[cb_type]);
}

void
taskqueue_thread_loop(void *arg)
{
	struct taskqueue **tqp, *tq;

	tqp = arg;
	tq = *tqp;
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_INIT);
	TQ_LOCK(tq);
	while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
		taskqueue_run_locked(tq);
		/*
		 * Because taskqueue_run_locked() can drop tq_mutex, we need
		 * to check if the TQ_FLAGS_ACTIVE flag wasn't removed in the
		 * meantime, which means we missed a wakeup.
		 */
		if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
			break;
		TQ_SLEEP(tq, tq, "-");
	}
	taskqueue_run_locked(tq);
	/*
	 * This thread is on its way out, so just drop the lock temporarily
	 * in order to call the shutdown callback.  This allows the callback
	 * to look at the taskqueue, even just before it dies.
	 */
	TQ_UNLOCK(tq);
	taskqueue_run_callback(tq, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN);
	TQ_LOCK(tq);

	/* rendezvous with thread that asked us to terminate */
	tq->tq_tcount--;
	wakeup_one(tq->tq_threads);
	TQ_UNLOCK(tq);
	kthread_exit();
}

void
taskqueue_thread_enqueue(void *context)
{
	struct taskqueue **tqp, *tq;

	tqp = context;
	tq = *tqp;
	wakeup_any(tq);
}

TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
    swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
    INTR_MPSAFE, &taskqueue_ih));

TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
    swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
    NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));

TASKQUEUE_DEFINE_THREAD(thread);

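/*
 * TASKQUEUE_DEFINE_THREAD(thread) above instantiates the global
 * "taskqueue_thread" queue serviced by a dedicated kernel thread.
 * Illustrative use (task names hypothetical):
 *
 *	TASK_INIT(&my_task, 0, my_task_fn, NULL);
 *	taskqueue_enqueue(taskqueue_thread, &my_task);
 */
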
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
    taskqueue_enqueue_fn enqueue, void *context)
{

	return _taskqueue_create(name, mflags, enqueue, context,
	    MTX_SPIN, "fast_taskqueue");
}

static void	*taskqueue_fast_ih;

static void
taskqueue_fast_enqueue(void *context)
{

	swi_sched(taskqueue_fast_ih, 0);
}

static void
taskqueue_fast_run(void *dummy)
{

	taskqueue_run(taskqueue_fast);
}

TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
    swi_add(NULL, "fast taskq", taskqueue_fast_run, NULL,
    SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));

int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
	int i, j, ret = 0;

	for (i = 0, j = 0; ; i++) {
		if (queue->tq_threads[i] == NULL)
			continue;
		if (queue->tq_threads[i] == td) {
			ret = 1;
			break;
		}
		if (++j >= queue->tq_tcount)
			break;
	}
	return (ret);
}