2 * Copyright (C) 2004-2012 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1998-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
21 * \author Principal Author: Bob Halley
25 * XXXRTH Need to document the states a task can be in, and the rules
26 * for changing states.
31 #include <isc/condition.h>
32 #include <isc/event.h>
33 #include <isc/magic.h>
36 #include <isc/platform.h>
37 #include <isc/string.h>
39 #include <isc/thread.h>
44 #include <openssl/err.h>
48 * For BIND9 internal applications:
49 * when built with threads we use multiple worker threads shared by the whole
51 * when built without threads we share a single global task manager and use
52 * an integrated event loop for socket, timer, and other generic task events.
53 * For generic library:
54 * we don't use either of them: an application can have multiple task managers
55 * whether or not it's threaded, and if the application is threaded each thread
56 * is expected to have a separate manager; no "worker threads" are shared by
57 * the application threads.
60 #ifdef ISC_PLATFORM_USETHREADS
61 #define USE_WORKER_THREADS
63 #define USE_SHARED_MANAGER
64 #endif /* ISC_PLATFORM_USETHREADS */
70 #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
71 task, isc_thread_self(), (m))
72 #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
73 (t), isc_thread_self(), (m))
74 #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
75 isc_thread_self(), (m))
79 #define XTHREADTRACE(m)
87 task_state_idle, task_state_ready, task_state_running,
91 #if defined(HAVE_LIBXML2) && defined(BIND9)
92 static const char *statenames[] = {
93 "idle", "ready", "running", "done",
97 #define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
98 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
100 typedef struct isc__task isc__task_t;
101 typedef struct isc__taskmgr isc__taskmgr_t;
106 isc__taskmgr_t * manager;
108 /* Locked by task lock. */
110 unsigned int references;
111 isc_eventlist_t events;
112 isc_eventlist_t on_shutdown;
113 unsigned int quantum;
118 /* Locked by task manager lock. */
119 LINK(isc__task_t) link;
120 LINK(isc__task_t) ready_link;
121 LINK(isc__task_t) ready_priority_link;
124 #define TASK_F_SHUTTINGDOWN 0x01
125 #define TASK_F_PRIVILEGED 0x02
127 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
130 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
131 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
133 typedef ISC_LIST(isc__task_t) isc__tasklist_t;
135 struct isc__taskmgr {
137 isc_taskmgr_t common;
140 #ifdef ISC_PLATFORM_USETHREADS
141 unsigned int workers;
142 isc_thread_t * threads;
143 #endif /* ISC_PLATFORM_USETHREADS */
144 /* Locked by task manager lock. */
145 unsigned int default_quantum;
146 LIST(isc__task_t) tasks;
147 isc__tasklist_t ready_tasks;
148 isc__tasklist_t ready_priority_tasks;
149 isc_taskmgrmode_t mode;
150 #ifdef ISC_PLATFORM_USETHREADS
151 isc_condition_t work_available;
152 isc_condition_t exclusive_granted;
153 isc_condition_t paused;
154 #endif /* ISC_PLATFORM_USETHREADS */
155 unsigned int tasks_running;
156 isc_boolean_t pause_requested;
157 isc_boolean_t exclusive_requested;
158 isc_boolean_t exiting;
160 #ifdef USE_SHARED_MANAGER
162 #endif /* ISC_PLATFORM_USETHREADS */
165 #define DEFAULT_TASKMGR_QUANTUM 10
166 #define DEFAULT_DEFAULT_QUANTUM 5
167 #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
169 #ifdef USE_SHARED_MANAGER
170 static isc__taskmgr_t *taskmgr = NULL;
171 #endif /* USE_SHARED_MANAGER */
174 * The following can be either static or public, depending on build environment.
178 #define ISC_TASKFUNC_SCOPE
180 #define ISC_TASKFUNC_SCOPE static
183 ISC_TASKFUNC_SCOPE isc_result_t
184 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
186 ISC_TASKFUNC_SCOPE void
187 isc__task_attach(isc_task_t *source0, isc_task_t **targetp);
188 ISC_TASKFUNC_SCOPE void
189 isc__task_detach(isc_task_t **taskp);
190 ISC_TASKFUNC_SCOPE void
191 isc__task_send(isc_task_t *task0, isc_event_t **eventp);
192 ISC_TASKFUNC_SCOPE void
193 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp);
194 ISC_TASKFUNC_SCOPE unsigned int
195 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
196 isc_eventtype_t last, void *tag);
197 ISC_TASKFUNC_SCOPE unsigned int
198 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
200 ISC_TASKFUNC_SCOPE isc_boolean_t
201 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event);
202 ISC_TASKFUNC_SCOPE unsigned int
203 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
204 isc_eventtype_t last, void *tag,
205 isc_eventlist_t *events);
206 ISC_TASKFUNC_SCOPE unsigned int
207 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
208 void *tag, isc_eventlist_t *events);
209 ISC_TASKFUNC_SCOPE isc_result_t
210 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
212 ISC_TASKFUNC_SCOPE void
213 isc__task_shutdown(isc_task_t *task0);
214 ISC_TASKFUNC_SCOPE void
215 isc__task_destroy(isc_task_t **taskp);
216 ISC_TASKFUNC_SCOPE void
217 isc__task_setname(isc_task_t *task0, const char *name, void *tag);
218 ISC_TASKFUNC_SCOPE const char *
219 isc__task_getname(isc_task_t *task0);
220 ISC_TASKFUNC_SCOPE void *
221 isc__task_gettag(isc_task_t *task0);
222 ISC_TASKFUNC_SCOPE void
223 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t);
224 ISC_TASKFUNC_SCOPE isc_result_t
225 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
226 unsigned int default_quantum, isc_taskmgr_t **managerp);
227 ISC_TASKFUNC_SCOPE void
228 isc__taskmgr_destroy(isc_taskmgr_t **managerp);
229 ISC_TASKFUNC_SCOPE void
230 isc__taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0);
231 ISC_TASKFUNC_SCOPE isc_result_t
232 isc__taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp);
233 ISC_TASKFUNC_SCOPE isc_result_t
234 isc__task_beginexclusive(isc_task_t *task);
235 ISC_TASKFUNC_SCOPE void
236 isc__task_endexclusive(isc_task_t *task0);
237 ISC_TASKFUNC_SCOPE void
238 isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv);
239 ISC_TASKFUNC_SCOPE isc_boolean_t
240 isc__task_privilege(isc_task_t *task0);
241 ISC_TASKFUNC_SCOPE void
242 isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode);
243 ISC_TASKFUNC_SCOPE isc_taskmgrmode_t
244 isc__taskmgr_mode(isc_taskmgr_t *manager0);
246 static inline isc_boolean_t
247 empty_readyq(isc__taskmgr_t *manager);
249 static inline isc__task_t *
250 pop_readyq(isc__taskmgr_t *manager);
253 push_readyq(isc__taskmgr_t *manager, isc__task_t *task);
255 static struct isc__taskmethods {
256 isc_taskmethods_t methods;
259 * The following are defined just for avoiding unused static functions.
262 void *purgeevent, *unsendrange, *getname, *gettag, *getcurrenttime;
270 isc__task_sendanddetach,
272 isc__task_onshutdown,
276 isc__task_purgerange,
277 isc__task_beginexclusive,
278 isc__task_endexclusive,
279 isc__task_setprivilege,
284 (void *)isc__task_purgeevent, (void *)isc__task_unsendrange,
285 (void *)isc__task_getname, (void *)isc__task_gettag,
286 (void *)isc__task_getcurrenttime
290 static isc_taskmgrmethods_t taskmgrmethods = {
291 isc__taskmgr_destroy,
292 isc__taskmgr_setmode,
295 isc__taskmgr_setexcltask,
296 isc__taskmgr_excltask
304 task_finished(isc__task_t *task) {
305 isc__taskmgr_t *manager = task->manager;
307 REQUIRE(EMPTY(task->events));
308 REQUIRE(EMPTY(task->on_shutdown));
309 REQUIRE(task->references == 0);
310 REQUIRE(task->state == task_state_done);
312 XTRACE("task_finished");
314 LOCK(&manager->lock);
315 UNLINK(manager->tasks, task, link);
316 #ifdef USE_WORKER_THREADS
317 if (FINISHED(manager)) {
319 * All tasks have completed and the
320 * task manager is exiting. Wake up
321 * any idle worker threads so they
324 BROADCAST(&manager->work_available);
326 #endif /* USE_WORKER_THREADS */
327 UNLOCK(&manager->lock);
329 DESTROYLOCK(&task->lock);
330 task->common.impmagic = 0;
331 task->common.magic = 0;
332 isc_mem_put(manager->mctx, task, sizeof(*task));
335 ISC_TASKFUNC_SCOPE isc_result_t
336 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
339 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
341 isc_boolean_t exiting;
344 REQUIRE(VALID_MANAGER(manager));
345 REQUIRE(taskp != NULL && *taskp == NULL);
347 task = isc_mem_get(manager->mctx, sizeof(*task));
349 return (ISC_R_NOMEMORY);
350 XTRACE("isc_task_create");
351 task->manager = manager;
352 result = isc_mutex_init(&task->lock);
353 if (result != ISC_R_SUCCESS) {
354 isc_mem_put(manager->mctx, task, sizeof(*task));
357 task->state = task_state_idle;
358 task->references = 1;
359 INIT_LIST(task->events);
360 INIT_LIST(task->on_shutdown);
361 task->quantum = quantum;
364 memset(task->name, 0, sizeof(task->name));
366 INIT_LINK(task, link);
367 INIT_LINK(task, ready_link);
368 INIT_LINK(task, ready_priority_link);
371 LOCK(&manager->lock);
372 if (!manager->exiting) {
373 if (task->quantum == 0)
374 task->quantum = manager->default_quantum;
375 APPEND(manager->tasks, task, link);
378 UNLOCK(&manager->lock);
381 DESTROYLOCK(&task->lock);
382 isc_mem_put(manager->mctx, task, sizeof(*task));
383 return (ISC_R_SHUTTINGDOWN);
386 task->common.methods = (isc_taskmethods_t *)&taskmethods;
387 task->common.magic = ISCAPI_TASK_MAGIC;
388 task->common.impmagic = TASK_MAGIC;
389 *taskp = (isc_task_t *)task;
391 return (ISC_R_SUCCESS);
394 ISC_TASKFUNC_SCOPE void
395 isc__task_attach(isc_task_t *source0, isc_task_t **targetp) {
396 isc__task_t *source = (isc__task_t *)source0;
399 * Attach *targetp to source.
402 REQUIRE(VALID_TASK(source));
403 REQUIRE(targetp != NULL && *targetp == NULL);
405 XTTRACE(source, "isc_task_attach");
408 source->references++;
409 UNLOCK(&source->lock);
411 *targetp = (isc_task_t *)source;
414 static inline isc_boolean_t
415 task_shutdown(isc__task_t *task) {
416 isc_boolean_t was_idle = ISC_FALSE;
417 isc_event_t *event, *prev;
420 * Caller must be holding the task's lock.
423 XTRACE("task_shutdown");
425 if (! TASK_SHUTTINGDOWN(task)) {
426 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
427 ISC_MSG_SHUTTINGDOWN, "shutting down"));
428 task->flags |= TASK_F_SHUTTINGDOWN;
429 if (task->state == task_state_idle) {
430 INSIST(EMPTY(task->events));
431 task->state = task_state_ready;
434 INSIST(task->state == task_state_ready ||
435 task->state == task_state_running);
438 * Note that we post shutdown events LIFO.
440 for (event = TAIL(task->on_shutdown);
443 prev = PREV(event, ev_link);
444 DEQUEUE(task->on_shutdown, event, ev_link);
445 ENQUEUE(task->events, event, ev_link);
453 * Moves a task onto the appropriate run queue.
455 * Caller must NOT hold manager lock.
458 task_ready(isc__task_t *task) {
459 isc__taskmgr_t *manager = task->manager;
460 #ifdef USE_WORKER_THREADS
461 isc_boolean_t has_privilege = isc__task_privilege((isc_task_t *) task);
462 #endif /* USE_WORKER_THREADS */
464 REQUIRE(VALID_MANAGER(manager));
465 REQUIRE(task->state == task_state_ready);
467 XTRACE("task_ready");
469 LOCK(&manager->lock);
470 push_readyq(manager, task);
471 #ifdef USE_WORKER_THREADS
472 if (manager->mode == isc_taskmgrmode_normal || has_privilege)
473 SIGNAL(&manager->work_available);
474 #endif /* USE_WORKER_THREADS */
475 UNLOCK(&manager->lock);
478 static inline isc_boolean_t
479 task_detach(isc__task_t *task) {
482 * Caller must be holding the task lock.
485 REQUIRE(task->references > 0);
490 if (task->references == 0 && task->state == task_state_idle) {
491 INSIST(EMPTY(task->events));
493 * There are no references to this task, and no
494 * pending events. We could try to optimize and
495 * either initiate shutdown or clean up the task,
496 * depending on its state, but it's easier to just
497 * make the task ready and allow run() or the event
498 * loop to deal with shutting down and termination.
500 task->state = task_state_ready;
507 ISC_TASKFUNC_SCOPE void
508 isc__task_detach(isc_task_t **taskp) {
510 isc_boolean_t was_idle;
513 * Detach *taskp from its task.
516 REQUIRE(taskp != NULL);
517 task = (isc__task_t *)*taskp;
518 REQUIRE(VALID_TASK(task));
520 XTRACE("isc_task_detach");
523 was_idle = task_detach(task);
532 static inline isc_boolean_t
533 task_send(isc__task_t *task, isc_event_t **eventp) {
534 isc_boolean_t was_idle = ISC_FALSE;
538 * Caller must be holding the task lock.
541 REQUIRE(eventp != NULL);
543 REQUIRE(event != NULL);
544 REQUIRE(event->ev_type > 0);
545 REQUIRE(task->state != task_state_done);
549 if (task->state == task_state_idle) {
551 INSIST(EMPTY(task->events));
552 task->state = task_state_ready;
554 INSIST(task->state == task_state_ready ||
555 task->state == task_state_running);
556 ENQUEUE(task->events, event, ev_link);
562 ISC_TASKFUNC_SCOPE void
563 isc__task_send(isc_task_t *task0, isc_event_t **eventp) {
564 isc__task_t *task = (isc__task_t *)task0;
565 isc_boolean_t was_idle;
568 * Send '*event' to 'task'.
571 REQUIRE(VALID_TASK(task));
573 XTRACE("isc_task_send");
576 * We're trying hard to hold locks for as short a time as possible.
577 * We're also trying to hold as few locks as possible. This is why
578 * some processing is deferred until after the lock is released.
581 was_idle = task_send(task, eventp);
586 * We need to add this task to the ready queue.
588 * We've waited until now to do it because making a task
589 * ready requires locking the manager. If we tried to do
590 * this while holding the task lock, we could deadlock.
592 * We've changed the state to ready, so no one else will
593 * be trying to add this task to the ready queue. The
594 * only way to leave the ready state is by executing the
595 * task. It thus doesn't matter if events are added,
596 * removed, or a shutdown is started in the interval
597 * between the time we released the task lock, and the time
598 * we add the task to the ready queue.
604 ISC_TASKFUNC_SCOPE void
605 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
606 isc_boolean_t idle1, idle2;
610 * Send '*event' to '*taskp' and then detach '*taskp' from its
614 REQUIRE(taskp != NULL);
615 task = (isc__task_t *)*taskp;
616 REQUIRE(VALID_TASK(task));
618 XTRACE("isc_task_sendanddetach");
621 idle1 = task_send(task, eventp);
622 idle2 = task_detach(task);
626 * If idle1, then idle2 shouldn't be true as well since we're holding
627 * the task lock, and thus the task cannot switch from ready back to
630 INSIST(!(idle1 && idle2));
638 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
641 dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
642 isc_eventtype_t last, void *tag,
643 isc_eventlist_t *events, isc_boolean_t purging)
645 isc_event_t *event, *next_event;
646 unsigned int count = 0;
648 REQUIRE(VALID_TASK(task));
649 REQUIRE(last >= first);
651 XTRACE("dequeue_events");
654 * Events matching 'sender', whose type is >= first and <= last, and
655 * whose tag is 'tag' will be dequeued. If 'purging', matching events
656 * which are marked as unpurgable will not be dequeued.
658 * sender == NULL means "any sender", and tag == NULL means "any tag".
663 for (event = HEAD(task->events); event != NULL; event = next_event) {
664 next_event = NEXT(event, ev_link);
665 if (event->ev_type >= first && event->ev_type <= last &&
666 (sender == NULL || event->ev_sender == sender) &&
667 (tag == NULL || event->ev_tag == tag) &&
668 (!purging || PURGE_OK(event))) {
669 DEQUEUE(task->events, event, ev_link);
670 ENQUEUE(*events, event, ev_link);
680 ISC_TASKFUNC_SCOPE unsigned int
681 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
682 isc_eventtype_t last, void *tag)
684 isc__task_t *task = (isc__task_t *)task0;
686 isc_eventlist_t events;
687 isc_event_t *event, *next_event;
690 * Purge events from a task's event queue.
693 XTRACE("isc_task_purgerange");
695 ISC_LIST_INIT(events);
697 count = dequeue_events(task, sender, first, last, tag, &events,
700 for (event = HEAD(events); event != NULL; event = next_event) {
701 next_event = NEXT(event, ev_link);
702 isc_event_free(&event);
706 * Note that purging never changes the state of the task.
712 ISC_TASKFUNC_SCOPE unsigned int
713 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
717 * Purge events from a task's event queue.
720 XTRACE("isc_task_purge");
722 return (isc__task_purgerange(task, sender, type, type, tag));
725 ISC_TASKFUNC_SCOPE isc_boolean_t
726 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event) {
727 isc__task_t *task = (isc__task_t *)task0;
728 isc_event_t *curr_event, *next_event;
731 * Purge 'event' from a task's event queue.
733 * XXXRTH: WARNING: This method may be removed before beta.
736 REQUIRE(VALID_TASK(task));
739 * If 'event' is on the task's event queue, it will be purged,
740 * unless it is marked as unpurgeable. 'event' does not have to be
741 * on the task's event queue; in fact, it can even be an invalid
742 * pointer. Purging only occurs if the event is actually on the task's
745 * Purging never changes the state of the task.
749 for (curr_event = HEAD(task->events);
751 curr_event = next_event) {
752 next_event = NEXT(curr_event, ev_link);
753 if (curr_event == event && PURGE_OK(event)) {
754 DEQUEUE(task->events, curr_event, ev_link);
760 if (curr_event == NULL)
763 isc_event_free(&curr_event);
768 ISC_TASKFUNC_SCOPE unsigned int
769 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
770 isc_eventtype_t last, void *tag,
771 isc_eventlist_t *events)
774 * Remove events from a task's event queue.
777 XTRACE("isc_task_unsendrange");
779 return (dequeue_events((isc__task_t *)task, sender, first,
780 last, tag, events, ISC_FALSE));
783 ISC_TASKFUNC_SCOPE unsigned int
784 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
785 void *tag, isc_eventlist_t *events)
788 * Remove events from a task's event queue.
791 XTRACE("isc_task_unsend");
793 return (dequeue_events((isc__task_t *)task, sender, type,
794 type, tag, events, ISC_FALSE));
797 ISC_TASKFUNC_SCOPE isc_result_t
798 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
801 isc__task_t *task = (isc__task_t *)task0;
802 isc_boolean_t disallowed = ISC_FALSE;
803 isc_result_t result = ISC_R_SUCCESS;
807 * Send a shutdown event with action 'action' and argument 'arg' when
808 * 'task' is shutdown.
811 REQUIRE(VALID_TASK(task));
812 REQUIRE(action != NULL);
814 event = isc_event_allocate(task->manager->mctx,
816 ISC_TASKEVENT_SHUTDOWN,
821 return (ISC_R_NOMEMORY);
824 if (TASK_SHUTTINGDOWN(task)) {
825 disallowed = ISC_TRUE;
826 result = ISC_R_SHUTTINGDOWN;
828 ENQUEUE(task->on_shutdown, event, ev_link);
832 isc_mem_put(task->manager->mctx, event, sizeof(*event));
837 ISC_TASKFUNC_SCOPE void
838 isc__task_shutdown(isc_task_t *task0) {
839 isc__task_t *task = (isc__task_t *)task0;
840 isc_boolean_t was_idle;
846 REQUIRE(VALID_TASK(task));
849 was_idle = task_shutdown(task);
856 ISC_TASKFUNC_SCOPE void
857 isc__task_destroy(isc_task_t **taskp) {
863 REQUIRE(taskp != NULL);
865 isc_task_shutdown(*taskp);
866 isc_task_detach(taskp);
869 ISC_TASKFUNC_SCOPE void
870 isc__task_setname(isc_task_t *task0, const char *name, void *tag) {
871 isc__task_t *task = (isc__task_t *)task0;
877 REQUIRE(VALID_TASK(task));
880 memset(task->name, 0, sizeof(task->name));
881 strncpy(task->name, name, sizeof(task->name) - 1);
886 ISC_TASKFUNC_SCOPE const char *
887 isc__task_getname(isc_task_t *task0) {
888 isc__task_t *task = (isc__task_t *)task0;
890 REQUIRE(VALID_TASK(task));
895 ISC_TASKFUNC_SCOPE void *
896 isc__task_gettag(isc_task_t *task0) {
897 isc__task_t *task = (isc__task_t *)task0;
899 REQUIRE(VALID_TASK(task));
904 ISC_TASKFUNC_SCOPE void
905 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
906 isc__task_t *task = (isc__task_t *)task0;
908 REQUIRE(VALID_TASK(task));
921 * Return ISC_TRUE if the current ready list for the manager, which is
922 * either ready_tasks or the ready_priority_tasks, depending on whether
923 * the manager is currently in normal or privileged execution mode.
925 * Caller must hold the task manager lock.
927 static inline isc_boolean_t
928 empty_readyq(isc__taskmgr_t *manager) {
929 isc__tasklist_t queue;
931 if (manager->mode == isc_taskmgrmode_normal)
932 queue = manager->ready_tasks;
934 queue = manager->ready_priority_tasks;
936 return (ISC_TF(EMPTY(queue)));
940 * Dequeue and return a pointer to the first task on the current ready
941 * list for the manager.
942 * If the task is privileged, dequeue it from the other ready list
945 * Caller must hold the task manager lock.
947 static inline isc__task_t *
948 pop_readyq(isc__taskmgr_t *manager) {
951 if (manager->mode == isc_taskmgrmode_normal)
952 task = HEAD(manager->ready_tasks);
954 task = HEAD(manager->ready_priority_tasks);
957 DEQUEUE(manager->ready_tasks, task, ready_link);
958 if (ISC_LINK_LINKED(task, ready_priority_link))
959 DEQUEUE(manager->ready_priority_tasks, task,
960 ready_priority_link);
967 * Push 'task' onto the ready_tasks queue. If 'task' has the privilege
968 * flag set, then also push it onto the ready_priority_tasks queue.
970 * Caller must hold the task manager lock.
973 push_readyq(isc__taskmgr_t *manager, isc__task_t *task) {
974 ENQUEUE(manager->ready_tasks, task, ready_link);
975 if ((task->flags & TASK_F_PRIVILEGED) != 0)
976 ENQUEUE(manager->ready_priority_tasks, task,
977 ready_priority_link);
981 dispatch(isc__taskmgr_t *manager) {
983 #ifndef USE_WORKER_THREADS
984 unsigned int total_dispatch_count = 0;
985 isc__tasklist_t new_ready_tasks;
986 isc__tasklist_t new_priority_tasks;
987 #endif /* USE_WORKER_THREADS */
989 REQUIRE(VALID_MANAGER(manager));
992 * Again we're trying to hold the lock for as short a time as possible
993 * and to do as little locking and unlocking as possible.
995 * In both while loops, the appropriate lock must be held before the
996 * while body starts. Code which acquired the lock at the top of
997 * the loop would be more readable, but would result in a lot of
998 * extra locking. Compare:
1005 * while (expression) {
1010 * Unlocked part here...
1017 * Note how if the loop continues we unlock and then immediately lock.
1018 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
1019 * unlocks. Also note that the lock is not held when the while
1020 * condition is tested, which may or may not be important, depending
1021 * on the expression.
1026 * while (expression) {
1030 * Unlocked part here...
1037 * For N iterations of the loop, this code does N+1 locks and N+1
1038 * unlocks. The while expression is always protected by the lock.
1041 #ifndef USE_WORKER_THREADS
1042 ISC_LIST_INIT(new_ready_tasks);
1043 ISC_LIST_INIT(new_priority_tasks);
1045 LOCK(&manager->lock);
1047 while (!FINISHED(manager)) {
1048 #ifdef USE_WORKER_THREADS
1050 * For reasons similar to those given in the comment in
1051 * isc_task_send() above, it is safe for us to dequeue
1052 * the task while only holding the manager lock, and then
1053 * change the task to running state while only holding the
1056 * If a pause has been requested, don't do any work
1057 * until it's been released.
1059 while ((empty_readyq(manager) || manager->pause_requested ||
1060 manager->exclusive_requested) && !FINISHED(manager))
1062 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
1064 ISC_MSG_WAIT, "wait"));
1065 WAIT(&manager->work_available, &manager->lock);
1066 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
1068 ISC_MSG_AWAKE, "awake"));
1070 #else /* USE_WORKER_THREADS */
1071 if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
1072 empty_readyq(manager))
1074 #endif /* USE_WORKER_THREADS */
1075 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
1076 ISC_MSG_WORKING, "working"));
1078 task = pop_readyq(manager);
1080 unsigned int dispatch_count = 0;
1081 isc_boolean_t done = ISC_FALSE;
1082 isc_boolean_t requeue = ISC_FALSE;
1083 isc_boolean_t finished = ISC_FALSE;
1086 INSIST(VALID_TASK(task));
1089 * Note we only unlock the manager lock if we actually
1090 * have a task to do. We must reacquire the manager
1091 * lock before exiting the 'if (task != NULL)' block.
1093 manager->tasks_running++;
1094 UNLOCK(&manager->lock);
1097 INSIST(task->state == task_state_ready);
1098 task->state = task_state_running;
1099 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1100 ISC_MSG_RUNNING, "running"));
1101 isc_stdtime_get(&task->now);
1103 if (!EMPTY(task->events)) {
1104 event = HEAD(task->events);
1105 DEQUEUE(task->events, event, ev_link);
1108 * Execute the event action.
1110 XTRACE(isc_msgcat_get(isc_msgcat,
1114 if (event->ev_action != NULL) {
1115 UNLOCK(&task->lock);
1122 #ifndef USE_WORKER_THREADS
1123 total_dispatch_count++;
1124 #endif /* USE_WORKER_THREADS */
1127 if (task->references == 0 &&
1128 EMPTY(task->events) &&
1129 !TASK_SHUTTINGDOWN(task)) {
1130 isc_boolean_t was_idle;
1133 * There are no references and no
1134 * pending events for this task,
1135 * which means it will not become
1136 * runnable again via an external
1137 * action (such as sending an event
1140 * We initiate shutdown to prevent
1141 * it from becoming a zombie.
1143 * We do this here instead of in
1144 * the "if EMPTY(task->events)" block
1147 * If we post no shutdown events,
1148 * we want the task to finish.
1150 * If we did post shutdown events,
1151 * will still want the task's
1152 * quantum to be applied.
1154 was_idle = task_shutdown(task);
1158 if (EMPTY(task->events)) {
1160 * Nothing else to do for this task
1163 XTRACE(isc_msgcat_get(isc_msgcat,
1167 if (task->references == 0 &&
1168 TASK_SHUTTINGDOWN(task)) {
1172 XTRACE(isc_msgcat_get(
1177 finished = ISC_TRUE;
1178 task->state = task_state_done;
1180 task->state = task_state_idle;
1182 } else if (dispatch_count >= task->quantum) {
1184 * Our quantum has expired, but
1185 * there is more work to be done.
1186 * We'll requeue it to the ready
1189 * We don't check quantum until
1190 * dispatching at least one event,
1191 * so the minimum quantum is one.
1193 XTRACE(isc_msgcat_get(isc_msgcat,
1197 task->state = task_state_ready;
1202 UNLOCK(&task->lock);
1205 task_finished(task);
1207 LOCK(&manager->lock);
1208 manager->tasks_running--;
1209 #ifdef USE_WORKER_THREADS
1210 if (manager->exclusive_requested &&
1211 manager->tasks_running == 1) {
1212 SIGNAL(&manager->exclusive_granted);
1213 } else if (manager->pause_requested &&
1214 manager->tasks_running == 0) {
1215 SIGNAL(&manager->paused);
1217 #endif /* USE_WORKER_THREADS */
1220 * We know we're awake, so we don't have
1221 * to wakeup any sleeping threads if the
1222 * ready queue is empty before we requeue.
1224 * A possible optimization if the queue is
1225 * empty is to 'goto' the 'if (task != NULL)'
1226 * block, avoiding the ENQUEUE of the task
1227 * and the subsequent immediate DEQUEUE
1228 * (since it is the only executable task).
1229 * We don't do this because then we'd be
1230 * skipping the exit_requested check. The
1231 * cost of ENQUEUE is low anyway, especially
1232 * when you consider that we'd have to do
1233 * an extra EMPTY check to see if we could
1234 * do the optimization. If the ready queue
1235 * were usually nonempty, the 'optimization'
1236 * might even hurt rather than help.
1238 #ifdef USE_WORKER_THREADS
1239 push_readyq(manager, task);
1241 ENQUEUE(new_ready_tasks, task, ready_link);
1242 if ((task->flags & TASK_F_PRIVILEGED) != 0)
1243 ENQUEUE(new_priority_tasks, task,
1244 ready_priority_link);
1249 #ifdef USE_WORKER_THREADS
1251 * If we are in privileged execution mode and there are no
1252 * tasks remaining on the current ready queue, then
1253 * we're stuck. Automatically drop privileges at that
1254 * point and continue with the regular ready queue.
1256 if (manager->tasks_running == 0 && empty_readyq(manager)) {
1257 manager->mode = isc_taskmgrmode_normal;
1258 if (!empty_readyq(manager))
1259 BROADCAST(&manager->work_available);
1264 #ifndef USE_WORKER_THREADS
1265 ISC_LIST_APPENDLIST(manager->ready_tasks, new_ready_tasks, ready_link);
1266 ISC_LIST_APPENDLIST(manager->ready_priority_tasks, new_priority_tasks,
1267 ready_priority_link);
1268 if (empty_readyq(manager))
1269 manager->mode = isc_taskmgrmode_normal;
1272 UNLOCK(&manager->lock);
#ifdef USE_WORKER_THREADS
/*
 * Worker thread entry point: run the dispatcher until the manager is
 * finished, then clean up per-thread OpenSSL state if necessary.
 */
static isc_threadresult_t
#ifdef _WIN32
WINAPI
#endif
run(void *uap) {
	isc__taskmgr_t *manager = uap;

	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				    ISC_MSG_STARTING, "starting"));

	dispatch(manager);

	XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
				    ISC_MSG_EXITING, "exiting"));

#ifdef OPENSSL_LEAKS
	/* Release this thread's OpenSSL error-state to avoid a leak. */
	ERR_remove_state(0);
#endif

	return ((isc_threadresult_t)0);
}
#endif /* USE_WORKER_THREADS */
1300 manager_free(isc__taskmgr_t *manager) {
1303 #ifdef USE_WORKER_THREADS
1304 (void)isc_condition_destroy(&manager->exclusive_granted);
1305 (void)isc_condition_destroy(&manager->work_available);
1306 (void)isc_condition_destroy(&manager->paused);
1307 isc_mem_free(manager->mctx, manager->threads);
1308 #endif /* USE_WORKER_THREADS */
1309 DESTROYLOCK(&manager->lock);
1310 manager->common.impmagic = 0;
1311 manager->common.magic = 0;
1312 mctx = manager->mctx;
1313 isc_mem_put(mctx, manager, sizeof(*manager));
1314 isc_mem_detach(&mctx);
1316 #ifdef USE_SHARED_MANAGER
1318 #endif /* USE_SHARED_MANAGER */
1321 ISC_TASKFUNC_SCOPE isc_result_t
1322 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1323 unsigned int default_quantum, isc_taskmgr_t **managerp)
1325 isc_result_t result;
1326 unsigned int i, started = 0;
1327 isc__taskmgr_t *manager;
1330 * Create a new task manager.
1333 REQUIRE(workers > 0);
1334 REQUIRE(managerp != NULL && *managerp == NULL);
1336 #ifndef USE_WORKER_THREADS
1341 #ifdef USE_SHARED_MANAGER
1342 if (taskmgr != NULL) {
1343 if (taskmgr->refs == 0)
1344 return (ISC_R_SHUTTINGDOWN);
1346 *managerp = (isc_taskmgr_t *)taskmgr;
1347 return (ISC_R_SUCCESS);
1349 #endif /* USE_SHARED_MANAGER */
1351 manager = isc_mem_get(mctx, sizeof(*manager));
1352 if (manager == NULL)
1353 return (ISC_R_NOMEMORY);
1354 manager->common.methods = &taskmgrmethods;
1355 manager->common.impmagic = TASK_MANAGER_MAGIC;
1356 manager->common.magic = ISCAPI_TASKMGR_MAGIC;
1357 manager->mode = isc_taskmgrmode_normal;
1358 manager->mctx = NULL;
1359 result = isc_mutex_init(&manager->lock);
1360 if (result != ISC_R_SUCCESS)
1363 #ifdef USE_WORKER_THREADS
1364 manager->workers = 0;
1365 manager->threads = isc_mem_allocate(mctx,
1366 workers * sizeof(isc_thread_t));
1367 if (manager->threads == NULL) {
1368 result = ISC_R_NOMEMORY;
1371 if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1372 UNEXPECTED_ERROR(__FILE__, __LINE__,
1373 "isc_condition_init() %s",
1374 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1375 ISC_MSG_FAILED, "failed"));
1376 result = ISC_R_UNEXPECTED;
1377 goto cleanup_threads;
1379 if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1380 UNEXPECTED_ERROR(__FILE__, __LINE__,
1381 "isc_condition_init() %s",
1382 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1383 ISC_MSG_FAILED, "failed"));
1384 result = ISC_R_UNEXPECTED;
1385 goto cleanup_workavailable;
1387 if (isc_condition_init(&manager->paused) != ISC_R_SUCCESS) {
1388 UNEXPECTED_ERROR(__FILE__, __LINE__,
1389 "isc_condition_init() %s",
1390 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1391 ISC_MSG_FAILED, "failed"));
1392 result = ISC_R_UNEXPECTED;
1393 goto cleanup_exclusivegranted;
1395 #endif /* USE_WORKER_THREADS */
1396 if (default_quantum == 0)
1397 default_quantum = DEFAULT_DEFAULT_QUANTUM;
1398 manager->default_quantum = default_quantum;
1399 INIT_LIST(manager->tasks);
1400 INIT_LIST(manager->ready_tasks);
1401 INIT_LIST(manager->ready_priority_tasks);
1402 manager->tasks_running = 0;
1403 manager->exclusive_requested = ISC_FALSE;
1404 manager->pause_requested = ISC_FALSE;
1405 manager->exiting = ISC_FALSE;
1406 manager->excl = NULL;
1408 isc_mem_attach(mctx, &manager->mctx);
1410 #ifdef USE_WORKER_THREADS
1411 LOCK(&manager->lock);
1415 for (i = 0; i < workers; i++) {
1416 if (isc_thread_create(run, manager,
1417 &manager->threads[manager->workers]) ==
1423 UNLOCK(&manager->lock);
1426 manager_free(manager);
1427 return (ISC_R_NOTHREADS);
1429 isc_thread_setconcurrency(workers);
1430 #endif /* USE_WORKER_THREADS */
1431 #ifdef USE_SHARED_MANAGER
1434 #endif /* USE_SHARED_MANAGER */
1436 *managerp = (isc_taskmgr_t *)manager;
1438 return (ISC_R_SUCCESS);
1440 #ifdef USE_WORKER_THREADS
1441 cleanup_exclusivegranted:
1442 (void)isc_condition_destroy(&manager->exclusive_granted);
1443 cleanup_workavailable:
1444 (void)isc_condition_destroy(&manager->work_available);
1446 isc_mem_free(mctx, manager->threads);
1448 DESTROYLOCK(&manager->lock);
1451 isc_mem_put(mctx, manager, sizeof(*manager));
/*
 * Destroy the task manager referenced by '*managerp'.
 *
 * Posts shutdown events to every task, then (threaded build) wakes and
 * joins all worker threads, or (non-threaded build) drains the ready
 * queue by dispatching until empty.  Finally frees the manager.
 *
 * NOTE(review): this listing is elided — blank lines, some braces, the
 * shared-manager refcount decrement/early-return body, and local variable
 * declarations (task, i) are missing from this view.
 */
1455 ISC_TASKFUNC_SCOPE void
1456 isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
1457 isc__taskmgr_t *manager;
1462 * Destroy '*managerp'.
1465 REQUIRE(managerp != NULL);
1466 manager = (isc__taskmgr_t *)*managerp;
1467 REQUIRE(VALID_MANAGER(manager));
1469 #ifndef USE_WORKER_THREADS
1471 #endif /* USE_WORKER_THREADS */
1473 #ifdef USE_SHARED_MANAGER
/* Shared manager: if other references remain, just drop ours and return.
 * NOTE(review): the body of this branch is elided here. */
1475 if (manager->refs > 0) {
1481 XTHREADTRACE("isc_taskmgr_destroy");
1483 * Only one non-worker thread may ever call this routine.
1484 * If a worker thread wants to initiate shutdown of the
1485 * task manager, it should ask some non-worker thread to call
1486 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1487 * that the startup thread is sleeping on.
1491 * Detach the exclusive task before acquiring the manager lock
1493 if (manager->excl != NULL)
1494 isc__task_detach((isc_task_t **) &manager->excl);
1497 * Unlike elsewhere, we're going to hold this lock a long time.
1498 * We need to do so, because otherwise the list of tasks could
1499 * change while we were traversing it.
1501 * This is also the only function where we will hold both the
1502 * task manager lock and a task lock at the same time.
1505 LOCK(&manager->lock);
1508 * Make sure we only get called once.
1510 INSIST(!manager->exiting);
1511 manager->exiting = ISC_TRUE;
1514 * If privileged mode was on, turn it off.
1516 manager->mode = isc_taskmgrmode_normal;
1519 * Post shutdown event(s) to every task (if they haven't already been
1522 for (task = HEAD(manager->tasks);
1524 task = NEXT(task, link)) {
/* task_shutdown() reports whether the task became ready to run;
 * ready tasks are pushed so workers/dispatch can drain them. */
1526 if (task_shutdown(task))
1527 push_readyq(manager, task);
1528 UNLOCK(&task->lock);
1530 #ifdef USE_WORKER_THREADS
1532 * Wake up any sleeping workers. This ensures we get work done if
1533 * there's work left to do, and if there are already no tasks left
1534 * it will cause the workers to see manager->exiting.
1536 BROADCAST(&manager->work_available);
1537 UNLOCK(&manager->lock);
1540 * Wait for all the worker threads to exit.
1542 for (i = 0; i < manager->workers; i++)
1543 (void)isc_thread_join(manager->threads[i], NULL);
1544 #else /* USE_WORKER_THREADS */
1546 * Dispatch the shutdown events.
1548 UNLOCK(&manager->lock);
1549 while (isc__taskmgr_ready((isc_taskmgr_t *)manager))
1550 (void)isc__taskmgr_dispatch((isc_taskmgr_t *)manager);
/* Debugging aid: if tasks survived the drain, dump live allocations
 * before the INSIST below aborts. */
1552 if (!ISC_LIST_EMPTY(manager->tasks))
1553 isc_mem_printallactive(stderr);
1555 INSIST(ISC_LIST_EMPTY(manager->tasks));
1556 #ifdef USE_SHARED_MANAGER
1559 #endif /* USE_WORKER_THREADS */
1561 manager_free(manager);
/*
 * Set the manager's run mode (normal vs. privileged) under the manager
 * lock.  NOTE(review): closing brace elided from this listing.
 */
1566 ISC_TASKFUNC_SCOPE void
1567 isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode) {
1568 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1570 LOCK(&manager->lock);
1571 manager->mode = mode;
1572 UNLOCK(&manager->lock);
/*
 * Return the manager's current run mode, read under the manager lock.
 * NOTE(review): the trailing "return (mode);" and closing brace are
 * elided from this listing.
 */
1575 ISC_TASKFUNC_SCOPE isc_taskmgrmode_t
1576 isc__taskmgr_mode(isc_taskmgr_t *manager0) {
1577 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1578 isc_taskmgrmode_t mode;
1579 LOCK(&manager->lock);
1580 mode = manager->mode;
1581 UNLOCK(&manager->lock);
1585 #ifndef USE_WORKER_THREADS
/*
 * Non-threaded build: report whether the manager has any task ready to
 * run (i.e. the ready queue is non-empty), for use by the integrated
 * event loop.  NOTE(review): the return type line, the shared-manager
 * fallback assignments, the "return (ISC_FALSE)" branches, and the
 * final return of is_ready are elided from this listing.
 */
1587 isc__taskmgr_ready(isc_taskmgr_t *manager0) {
1588 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1589 isc_boolean_t is_ready;
1591 #ifdef USE_SHARED_MANAGER
/* NULL means "use the global shared manager" in this build. */
1592 if (manager == NULL)
1595 if (manager == NULL)
1598 LOCK(&manager->lock);
1599 is_ready = !empty_readyq(manager);
1600 UNLOCK(&manager->lock);
/*
 * Non-threaded build: run one batch of ready tasks for the given manager
 * (NULL selects the shared manager in that build).  Returns
 * ISC_R_NOTFOUND when no manager is available, ISC_R_SUCCESS otherwise.
 * NOTE(review): the return-type line, the shared-manager fallback
 * assignment, and the dispatch() call itself are elided from this
 * listing.
 */
1606 isc__taskmgr_dispatch(isc_taskmgr_t *manager0) {
1607 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1609 #ifdef USE_SHARED_MANAGER
1610 if (manager == NULL)
1613 if (manager == NULL)
1614 return (ISC_R_NOTFOUND);
1618 return (ISC_R_SUCCESS);
/*
 * Pause the manager: wait (on the 'paused' condition) until no tasks are
 * running, then mark the manager paused so workers will not pick up new
 * work until isc__taskmgr_resume() is called.
 * NOTE(review): the while-loop's closing brace and the function's closing
 * brace are elided from this listing.
 */
1622 ISC_TASKFUNC_SCOPE void
1623 isc__taskmgr_pause(isc_taskmgr_t *manager0) {
1624 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1625 LOCK(&manager->lock);
1626 while (manager->tasks_running > 0) {
1627 WAIT(&manager->paused, &manager->lock);
1629 manager->pause_requested = ISC_TRUE;
1630 UNLOCK(&manager->lock);
/*
 * Resume a paused manager: clear pause_requested and wake all workers
 * blocked on work_available.  A no-op if the manager was not paused.
 */
1633 ISC_TASKFUNC_SCOPE void
1634 isc__taskmgr_resume(isc_taskmgr_t *manager0) {
1635 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1637 LOCK(&manager->lock);
1638 if (manager->pause_requested) {
1639 manager->pause_requested = ISC_FALSE;
1640 BROADCAST(&manager->work_available);
1642 UNLOCK(&manager->lock);
1644 #endif /* USE_WORKER_THREADS */
/*
 * Designate 'task0' as the manager's exclusive task (the task allowed to
 * call isc__task_beginexclusive()).  Replaces and detaches any previous
 * exclusive task, then attaches the new one.
 * NOTE(review): this listing shows no locking around mgr->excl — the
 * update appears unsynchronized here; confirm against the full source.
 */
1646 ISC_TASKFUNC_SCOPE void
1647 isc__taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0) {
1648 isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
1649 isc__task_t *task = (isc__task_t *) task0;
1651 REQUIRE(VALID_MANAGER(mgr));
1652 REQUIRE(VALID_TASK(task));
1653 if (mgr->excl != NULL)
1654 isc__task_detach((isc_task_t **) &mgr->excl);
1655 isc__task_attach(task0, (isc_task_t **) &mgr->excl);
/*
 * Return (via '*taskp', with a new attach reference) the manager's
 * exclusive task.  Returns ISC_R_NOTFOUND if no exclusive task has been
 * set, ISC_R_SUCCESS otherwise.
 */
1658 ISC_TASKFUNC_SCOPE isc_result_t
1659 isc__taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp) {
1660 isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
1662 REQUIRE(VALID_MANAGER(mgr));
1663 REQUIRE(taskp != NULL && *taskp == NULL);
1665 if (mgr->excl == NULL)
1666 return (ISC_R_NOTFOUND);
1668 isc__task_attach((isc_task_t *) mgr->excl, taskp);
1669 return (ISC_R_SUCCESS);
/*
 * Request exclusive execution for the calling task (threaded build).
 * Fails with ISC_R_LOCKBUSY if another task already holds exclusivity;
 * otherwise sets exclusive_requested and waits until this task is the
 * only one running (tasks_running drops to 1).
 * NOTE(review): the non-threaded (#else) branch, the UNUSED(task0) path,
 * and the function's closing brace are elided from this listing.
 */
1672 ISC_TASKFUNC_SCOPE isc_result_t
1673 isc__task_beginexclusive(isc_task_t *task0) {
1674 #ifdef USE_WORKER_THREADS
1675 isc__task_t *task = (isc__task_t *)task0;
1676 isc__taskmgr_t *manager = task->manager;
/* Only a currently-running task may request exclusivity. */
1678 REQUIRE(task->state == task_state_running);
1679 /* XXX: Require task == manager->excl? */
1681 LOCK(&manager->lock);
1682 if (manager->exclusive_requested) {
1683 UNLOCK(&manager->lock);
1684 return (ISC_R_LOCKBUSY);
1686 manager->exclusive_requested = ISC_TRUE;
/* > 1, not > 0: the caller itself counts as a running task. */
1687 while (manager->tasks_running > 1) {
1688 WAIT(&manager->exclusive_granted, &manager->lock);
1690 UNLOCK(&manager->lock);
1694 return (ISC_R_SUCCESS);
/*
 * Release exclusive execution previously granted by
 * isc__task_beginexclusive(): clear exclusive_requested and wake any
 * workers waiting for work.  REQUIREs that exclusivity was in effect.
 * NOTE(review): the non-threaded (#else) branch and the closing brace
 * are elided from this listing.
 */
1697 ISC_TASKFUNC_SCOPE void
1698 isc__task_endexclusive(isc_task_t *task0) {
1699 #ifdef USE_WORKER_THREADS
1700 isc__task_t *task = (isc__task_t *)task0;
1701 isc__taskmgr_t *manager = task->manager;
1703 REQUIRE(task->state == task_state_running);
1704 LOCK(&manager->lock);
1705 REQUIRE(manager->exclusive_requested);
1706 manager->exclusive_requested = ISC_FALSE;
1707 BROADCAST(&manager->work_available);
1708 UNLOCK(&manager->lock);
/*
 * Set or clear the TASK_F_PRIVILEGED flag on a task, and keep the
 * manager's ready_priority_tasks queue consistent: a newly-privileged
 * task already on the ready queue is added to the priority queue; a
 * newly-unprivileged task is removed from it.
 * NOTE(review): the LOCK(&task->lock) before reading task->flags, the
 * early "return" when priv == oldpriv, and the closing brace are elided
 * from this listing.
 */
1714 ISC_TASKFUNC_SCOPE void
1715 isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv) {
1716 isc__task_t *task = (isc__task_t *)task0;
1717 isc__taskmgr_t *manager = task->manager;
1718 isc_boolean_t oldpriv;
1721 oldpriv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
1723 task->flags |= TASK_F_PRIVILEGED;
1725 task->flags &= ~TASK_F_PRIVILEGED;
1726 UNLOCK(&task->lock);
/* No change in privilege: nothing to do on the manager queues. */
1728 if (priv == oldpriv)
1731 LOCK(&manager->lock);
1732 if (priv && ISC_LINK_LINKED(task, ready_link))
1733 ENQUEUE(manager->ready_priority_tasks, task,
1734 ready_priority_link);
1735 else if (!priv && ISC_LINK_LINKED(task, ready_priority_link))
1736 DEQUEUE(manager->ready_priority_tasks, task,
1737 ready_priority_link);
1738 UNLOCK(&manager->lock);
/*
 * Return whether the task currently has the TASK_F_PRIVILEGED flag set,
 * read under the task lock.  NOTE(review): the LOCK(&task->lock), the
 * local 'priv' declaration, the final "return (priv);", and the closing
 * brace are elided from this listing.
 */
1741 ISC_TASKFUNC_SCOPE isc_boolean_t
1742 isc__task_privilege(isc_task_t *task0) {
1743 isc__task_t *task = (isc__task_t *)task0;
1747 priv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
1748 UNLOCK(&task->lock);
1752 #ifdef USE_SOCKETIMPREGISTER
/*
 * Register this task implementation's manager constructor with the
 * generic isc_task API.  NOTE(review): the isc_result_t return-type line
 * and closing brace are elided from this listing.
 */
1754 isc__task_register() {
1755 return (isc_task_register(isc__taskmgr_create));
/*
 * Return whether the task is shutting down (TASK_SHUTTINGDOWN).
 * NOTE(review): the isc_boolean_t return-type line and closing brace are
 * elided from this listing.
 */
1760 isc_task_exiting(isc_task_t *t) {
1761 isc__task_t *task = (isc__task_t *)t;
1763 REQUIRE(VALID_TASK(task));
1764 return (TASK_SHUTTINGDOWN(task));
1768 #if defined(HAVE_LIBXML2) && defined(BIND9)
/* Jump to the error label on any negative libxml2 writer return code. */
1769 #define TRY0(a) do { xmlrc = (a); if (xmlrc < 0) goto error; } while(0)
/*
 * Render task-manager statistics as XML: the thread model (threaded
 * worker count or non-threaded reference count), default quantum, number
 * of running tasks, and one <task> element per task (name, references,
 * id, state, quantum).  Tasks are walked under lock; on a writer error
 * the error path unlocks the task still held.
 * NOTE(review): this listing is elided — the return-type line, 'xmlrc'
 * declaration, LOCK(&mgr->lock)/LOCK(&task->lock) calls, several format
 * arguments (task->name, task->references, task->quantum), the final
 * UNLOCK/return, and the error label line are missing from this view.
 */
1771 isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
1772 isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
1773 isc__task_t *task = NULL;
1779 * Write out the thread-model, and some details about each depending
1780 * on which type is enabled.
1782 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model"));
1783 #ifdef ISC_PLATFORM_USETHREADS
1784 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
1785 TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded"));
1786 TRY0(xmlTextWriterEndElement(writer)); /* type */
1788 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads"));
1789 TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->workers));
1790 TRY0(xmlTextWriterEndElement(writer)); /* worker-threads */
1791 #else /* ISC_PLATFORM_USETHREADS */
1792 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
1793 TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded"));
1794 TRY0(xmlTextWriterEndElement(writer)); /* type */
1796 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "references"));
1797 TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->refs));
1798 TRY0(xmlTextWriterEndElement(writer)); /* references */
1799 #endif /* ISC_PLATFORM_USETHREADS */
1801 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum"));
1802 TRY0(xmlTextWriterWriteFormatString(writer, "%d",
1803 mgr->default_quantum));
1804 TRY0(xmlTextWriterEndElement(writer)); /* default-quantum */
1806 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running"));
1807 TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running));
1808 TRY0(xmlTextWriterEndElement(writer)); /* tasks-running */
1810 TRY0(xmlTextWriterEndElement(writer)); /* thread-model */
1812 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks"));
1813 task = ISC_LIST_HEAD(mgr->tasks);
1814 while (task != NULL) {
1816 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "task"));
/* <name> is emitted only for tasks that were given a name. */
1818 if (task->name[0] != 0) {
1819 TRY0(xmlTextWriterStartElement(writer,
1820 ISC_XMLCHAR "name"));
1821 TRY0(xmlTextWriterWriteFormatString(writer, "%s",
1823 TRY0(xmlTextWriterEndElement(writer)); /* name */
1826 TRY0(xmlTextWriterStartElement(writer,
1827 ISC_XMLCHAR "references"));
1828 TRY0(xmlTextWriterWriteFormatString(writer, "%d",
1830 TRY0(xmlTextWriterEndElement(writer)); /* references */
/* The task's address doubles as its unique id. */
1832 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "id"));
1833 TRY0(xmlTextWriterWriteFormatString(writer, "%p", task));
1834 TRY0(xmlTextWriterEndElement(writer)); /* id */
1836 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "state"));
1837 TRY0(xmlTextWriterWriteFormatString(writer, "%s",
1838 statenames[task->state]));
1839 TRY0(xmlTextWriterEndElement(writer)); /* state */
1841 TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum"));
1842 TRY0(xmlTextWriterWriteFormatString(writer, "%d",
1844 TRY0(xmlTextWriterEndElement(writer)); /* quantum */
1846 TRY0(xmlTextWriterEndElement(writer));
1848 UNLOCK(&task->lock);
1849 task = ISC_LIST_NEXT(task, link);
1851 TRY0(xmlTextWriterEndElement(writer)); /* tasks */
/* Error path: release the task lock still held when TRY0 jumped here. */
1855 UNLOCK(&task->lock);
1860 #endif /* HAVE_LIBXML2 && BIND9 */