2 * Copyright (C) 2004-2012, 2014, 2015 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1998-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
19 * \author Principal Author: Bob Halley
23 * XXXRTH Need to document the states a task can be in, and the rules
24 * for changing states.
29 #include <isc/condition.h>
30 #include <isc/event.h>
31 #include <isc/magic.h>
34 #include <isc/platform.h>
35 #include <isc/print.h>
36 #include <isc/string.h>
38 #include <isc/thread.h>
43 #include <openssl/err.h>
47 * For BIND9 internal applications:
48 * when built with threads we use multiple worker threads shared by the whole
50 * when built without threads we share a single global task manager and use
51 * an integrated event loop for socket, timer, and other generic task events.
52 * For generic library:
53 * we don't use either of them: an application can have multiple task managers
54 * whether or not it's threaded, and if the application is threaded each thread
55 * is expected to have a separate manager; no "worker threads" are shared by
56 * the application threads.
59 #ifdef ISC_PLATFORM_USETHREADS
60 #define USE_WORKER_THREADS
62 #define USE_SHARED_MANAGER
63 #endif /* ISC_PLATFORM_USETHREADS */
69 #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
70 task, isc_thread_self(), (m))
71 #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
72 (t), isc_thread_self(), (m))
73 #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
74 isc_thread_self(), (m))
78 #define XTHREADTRACE(m)
86 task_state_idle, task_state_ready, task_state_running,
90 #if defined(HAVE_LIBXML2) && defined(BIND9)
91 static const char *statenames[] = {
92 "idle", "ready", "running", "done",
96 #define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
97 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
99 typedef struct isc__task isc__task_t;
100 typedef struct isc__taskmgr isc__taskmgr_t;
105 isc__taskmgr_t * manager;
107 /* Locked by task lock. */
109 unsigned int references;
110 isc_eventlist_t events;
111 isc_eventlist_t on_shutdown;
112 unsigned int quantum;
117 /* Locked by task manager lock. */
118 LINK(isc__task_t) link;
119 LINK(isc__task_t) ready_link;
120 LINK(isc__task_t) ready_priority_link;
123 #define TASK_F_SHUTTINGDOWN 0x01
124 #define TASK_F_PRIVILEGED 0x02
126 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
129 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
130 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
132 typedef ISC_LIST(isc__task_t) isc__tasklist_t;
134 struct isc__taskmgr {
136 isc_taskmgr_t common;
139 #ifdef ISC_PLATFORM_USETHREADS
140 unsigned int workers;
141 isc_thread_t * threads;
142 #endif /* ISC_PLATFORM_USETHREADS */
143 /* Locked by task manager lock. */
144 unsigned int default_quantum;
145 LIST(isc__task_t) tasks;
146 isc__tasklist_t ready_tasks;
147 isc__tasklist_t ready_priority_tasks;
148 isc_taskmgrmode_t mode;
149 #ifdef ISC_PLATFORM_USETHREADS
150 isc_condition_t work_available;
151 isc_condition_t exclusive_granted;
152 isc_condition_t paused;
153 #endif /* ISC_PLATFORM_USETHREADS */
154 unsigned int tasks_running;
155 isc_boolean_t pause_requested;
156 isc_boolean_t exclusive_requested;
157 isc_boolean_t exiting;
160 * Multiple threads can read/write 'excl' at the same time, so we need
161 * to protect the access. We can't use 'lock' since isc_task_detach()
162 * will try to acquire it.
164 isc_mutex_t excl_lock;
166 #ifdef USE_SHARED_MANAGER
168 #endif /* ISC_PLATFORM_USETHREADS */
171 #define DEFAULT_TASKMGR_QUANTUM 10
172 #define DEFAULT_DEFAULT_QUANTUM 5
173 #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
175 #ifdef USE_SHARED_MANAGER
176 static isc__taskmgr_t *taskmgr = NULL;
177 #endif /* USE_SHARED_MANAGER */
180 * The following can be either static or public, depending on build environment.
184 #define ISC_TASKFUNC_SCOPE
186 #define ISC_TASKFUNC_SCOPE static
189 ISC_TASKFUNC_SCOPE isc_result_t
190 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
192 ISC_TASKFUNC_SCOPE void
193 isc__task_attach(isc_task_t *source0, isc_task_t **targetp);
194 ISC_TASKFUNC_SCOPE void
195 isc__task_detach(isc_task_t **taskp);
196 ISC_TASKFUNC_SCOPE void
197 isc__task_send(isc_task_t *task0, isc_event_t **eventp);
198 ISC_TASKFUNC_SCOPE void
199 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp);
200 ISC_TASKFUNC_SCOPE unsigned int
201 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
202 isc_eventtype_t last, void *tag);
203 ISC_TASKFUNC_SCOPE unsigned int
204 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
206 ISC_TASKFUNC_SCOPE isc_boolean_t
207 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event);
208 ISC_TASKFUNC_SCOPE unsigned int
209 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
210 isc_eventtype_t last, void *tag,
211 isc_eventlist_t *events);
212 ISC_TASKFUNC_SCOPE unsigned int
213 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
214 void *tag, isc_eventlist_t *events);
215 ISC_TASKFUNC_SCOPE isc_result_t
216 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
218 ISC_TASKFUNC_SCOPE void
219 isc__task_shutdown(isc_task_t *task0);
220 ISC_TASKFUNC_SCOPE void
221 isc__task_destroy(isc_task_t **taskp);
222 ISC_TASKFUNC_SCOPE void
223 isc__task_setname(isc_task_t *task0, const char *name, void *tag);
224 ISC_TASKFUNC_SCOPE const char *
225 isc__task_getname(isc_task_t *task0);
226 ISC_TASKFUNC_SCOPE void *
227 isc__task_gettag(isc_task_t *task0);
228 ISC_TASKFUNC_SCOPE void
229 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t);
230 ISC_TASKFUNC_SCOPE isc_result_t
231 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
232 unsigned int default_quantum, isc_taskmgr_t **managerp);
233 ISC_TASKFUNC_SCOPE void
234 isc__taskmgr_destroy(isc_taskmgr_t **managerp);
235 ISC_TASKFUNC_SCOPE void
236 isc__taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0);
237 ISC_TASKFUNC_SCOPE isc_result_t
238 isc__taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp);
239 ISC_TASKFUNC_SCOPE isc_result_t
240 isc__task_beginexclusive(isc_task_t *task);
241 ISC_TASKFUNC_SCOPE void
242 isc__task_endexclusive(isc_task_t *task0);
243 ISC_TASKFUNC_SCOPE void
244 isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv);
245 ISC_TASKFUNC_SCOPE isc_boolean_t
246 isc__task_privilege(isc_task_t *task0);
247 ISC_TASKFUNC_SCOPE void
248 isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode);
249 ISC_TASKFUNC_SCOPE isc_taskmgrmode_t
250 isc__taskmgr_mode(isc_taskmgr_t *manager0);
252 static inline isc_boolean_t
253 empty_readyq(isc__taskmgr_t *manager);
255 static inline isc__task_t *
256 pop_readyq(isc__taskmgr_t *manager);
259 push_readyq(isc__taskmgr_t *manager, isc__task_t *task);
261 static struct isc__taskmethods {
262 isc_taskmethods_t methods;
265 * The following are defined just for avoiding unused static functions.
268 void *purgeevent, *unsendrange, *getname, *gettag, *getcurrenttime;
276 isc__task_sendanddetach,
278 isc__task_onshutdown,
282 isc__task_purgerange,
283 isc__task_beginexclusive,
284 isc__task_endexclusive,
285 isc__task_setprivilege,
290 (void *)isc__task_purgeevent, (void *)isc__task_unsendrange,
291 (void *)isc__task_getname, (void *)isc__task_gettag,
292 (void *)isc__task_getcurrenttime
296 static isc_taskmgrmethods_t taskmgrmethods = {
297 isc__taskmgr_destroy,
298 isc__taskmgr_setmode,
301 isc__taskmgr_setexcltask,
302 isc__taskmgr_excltask
/*
 * Final teardown of a task: unlink it from its manager and free it.
 * Preconditions (enforced below): no pending events, no shutdown events,
 * no references, and state == done.
 * NOTE(review): this extract is fragmented — interior lines (comment
 * delimiters, closing braces) are missing; recover from upstream task.c.
 */
310 task_finished(isc__task_t *task) {
311 isc__taskmgr_t *manager = task->manager;
313 REQUIRE(EMPTY(task->events));
314 REQUIRE(EMPTY(task->on_shutdown));
315 REQUIRE(task->references == 0);
316 REQUIRE(task->state == task_state_done);
318 XTRACE("task_finished");
/* Unlink from the manager's task list under the manager lock. */
320 LOCK(&manager->lock);
321 UNLINK(manager->tasks, task, link);
322 #ifdef USE_WORKER_THREADS
323 if (FINISHED(manager)) {
325 * All tasks have completed and the
326 * task manager is exiting. Wake up
327 * any idle worker threads so they
330 BROADCAST(&manager->work_available);
332 #endif /* USE_WORKER_THREADS */
333 UNLOCK(&manager->lock);
/* Destroy the task's own lock and return its memory to the mctx. */
335 DESTROYLOCK(&task->lock);
336 task->common.impmagic = 0;
337 task->common.magic = 0;
338 isc_mem_put(manager->mctx, task, sizeof(*task));
/*
 * Create a task attached to 'manager0' with the given event quantum
 * (0 means "use the manager's default_quantum").  On success stores the
 * new task in *taskp and returns ISC_R_SUCCESS; fails with
 * ISC_R_NOMEMORY or, if the manager is exiting, ISC_R_SHUTTINGDOWN.
 * NOTE(review): fragmented extract — allocation-failure and exiting
 * branches are missing lines; confirm against upstream task.c.
 */
341 ISC_TASKFUNC_SCOPE isc_result_t
342 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
345 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
347 isc_boolean_t exiting;
350 REQUIRE(VALID_MANAGER(manager));
351 REQUIRE(taskp != NULL && *taskp == NULL);
353 task = isc_mem_get(manager->mctx, sizeof(*task));
355 return (ISC_R_NOMEMORY);
356 XTRACE("isc_task_create");
357 task->manager = manager;
358 result = isc_mutex_init(&task->lock);
359 if (result != ISC_R_SUCCESS) {
/* Mutex init failed: release the task memory before returning. */
360 isc_mem_put(manager->mctx, task, sizeof(*task));
/* New task starts idle with a single reference held by the caller. */
363 task->state = task_state_idle;
364 task->references = 1;
365 INIT_LIST(task->events);
366 INIT_LIST(task->on_shutdown);
367 task->quantum = quantum;
370 memset(task->name, 0, sizeof(task->name));
372 INIT_LINK(task, link);
373 INIT_LINK(task, ready_link);
374 INIT_LINK(task, ready_priority_link);
/* Register with the manager unless it is already shutting down. */
377 LOCK(&manager->lock);
378 if (!manager->exiting) {
379 if (task->quantum == 0)
380 task->quantum = manager->default_quantum;
381 APPEND(manager->tasks, task, link);
384 UNLOCK(&manager->lock);
/* Manager was exiting: undo the partial construction. */
387 DESTROYLOCK(&task->lock);
388 isc_mem_put(manager->mctx, task, sizeof(*task));
389 return (ISC_R_SHUTTINGDOWN);
392 task->common.methods = (isc_taskmethods_t *)&taskmethods;
393 task->common.magic = ISCAPI_TASK_MAGIC;
394 task->common.impmagic = TASK_MAGIC;
395 *taskp = (isc_task_t *)task;
397 return (ISC_R_SUCCESS);
/*
 * Attach: take an additional reference on 'source0' and store it in
 * *targetp.  The reference count is bumped under the task lock
 * (the LOCK line is missing from this fragmented extract; the matching
 * UNLOCK below implies it — confirm against upstream task.c).
 */
400 ISC_TASKFUNC_SCOPE void
401 isc__task_attach(isc_task_t *source0, isc_task_t **targetp) {
402 isc__task_t *source = (isc__task_t *)source0;
405 * Attach *targetp to source.
408 REQUIRE(VALID_TASK(source));
409 REQUIRE(targetp != NULL && *targetp == NULL);
411 XTTRACE(source, "isc_task_attach");
414 source->references++;
415 UNLOCK(&source->lock);
417 *targetp = (isc_task_t *)source;
/*
 * Mark 'task' as shutting down and move its on_shutdown events onto the
 * regular event queue (LIFO order).  Returns whether the task was idle
 * and thus now needs to be pushed onto the ready queue by the caller.
 * Caller must hold the task's lock.
 * NOTE(review): fragmented extract — the loop tail and return statement
 * are missing lines here.
 */
420 static inline isc_boolean_t
421 task_shutdown(isc__task_t *task) {
422 isc_boolean_t was_idle = ISC_FALSE;
423 isc_event_t *event, *prev;
426 * Caller must be holding the task's lock.
429 XTRACE("task_shutdown");
431 if (! TASK_SHUTTINGDOWN(task)) {
432 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
433 ISC_MSG_SHUTTINGDOWN, "shutting down"));
434 task->flags |= TASK_F_SHUTTINGDOWN;
/* An idle task becomes ready so the shutdown events get dispatched. */
435 if (task->state == task_state_idle) {
436 INSIST(EMPTY(task->events));
437 task->state = task_state_ready;
440 INSIST(task->state == task_state_ready ||
441 task->state == task_state_running);
444 * Note that we post shutdown events LIFO.
/* Walk on_shutdown from the tail, moving each event to the run queue. */
446 for (event = TAIL(task->on_shutdown);
449 prev = PREV(event, ev_link);
450 DEQUEUE(task->on_shutdown, event, ev_link);
451 ENQUEUE(task->events, event, ev_link);
/*
 * Push a ready task onto the manager's run queue and, in threaded
 * builds, wake one worker when the manager is in normal mode or the
 * task holds the privilege flag.  Caller must NOT hold the manager
 * lock (it is taken here); the task must already be in ready state.
 */
459 * Moves a task onto the appropriate run queue.
461 * Caller must NOT hold manager lock.
464 task_ready(isc__task_t *task) {
465 isc__taskmgr_t *manager = task->manager;
466 #ifdef USE_WORKER_THREADS
467 isc_boolean_t has_privilege = isc__task_privilege((isc_task_t *) task);
468 #endif /* USE_WORKER_THREADS */
470 REQUIRE(VALID_MANAGER(manager));
471 REQUIRE(task->state == task_state_ready);
473 XTRACE("task_ready");
475 LOCK(&manager->lock);
476 push_readyq(manager, task);
477 #ifdef USE_WORKER_THREADS
478 if (manager->mode == isc_taskmgrmode_normal || has_privilege)
479 SIGNAL(&manager->work_available);
480 #endif /* USE_WORKER_THREADS */
481 UNLOCK(&manager->lock);
/*
 * Drop one reference from 'task'.  Returns whether the now-unreferenced,
 * event-less idle task was switched to ready (so the caller must queue
 * it for the run loop to finish shutdown/termination).
 * Caller must hold the task lock.
 * NOTE(review): fragmented extract — the actual decrement statement and
 * return are on lines missing from this view.
 */
484 static inline isc_boolean_t
485 task_detach(isc__task_t *task) {
488 * Caller must be holding the task lock.
491 REQUIRE(task->references > 0);
496 if (task->references == 0 && task->state == task_state_idle) {
497 INSIST(EMPTY(task->events));
499 * There are no references to this task, and no
500 * pending events. We could try to optimize and
501 * either initiate shutdown or clean up the task,
502 * depending on its state, but it's easier to just
503 * make the task ready and allow run() or the event
504 * loop to deal with shutting down and termination.
506 task->state = task_state_ready;
/*
 * Public detach wrapper: drops the caller's reference via task_detach()
 * and (on lines missing from this extract) queues the task with
 * task_ready() when it went idle, then NULLs *taskp.
 */
513 ISC_TASKFUNC_SCOPE void
514 isc__task_detach(isc_task_t **taskp) {
516 isc_boolean_t was_idle;
519 * Detach *taskp from its task.
522 REQUIRE(taskp != NULL);
523 task = (isc__task_t *)*taskp;
524 REQUIRE(VALID_TASK(task));
526 XTRACE("isc_task_detach");
529 was_idle = task_detach(task);
/*
 * Enqueue *eventp on 'task''s event list, consuming the caller's event
 * pointer.  Returns whether the task was idle (caller must then make it
 * ready).  Caller must hold the task lock; sending to a done task is a
 * contract violation.
 * NOTE(review): fragmented extract — the 'event = *eventp' assignment
 * and '*eventp = NULL' / return lines are missing from this view.
 */
538 static inline isc_boolean_t
539 task_send(isc__task_t *task, isc_event_t **eventp) {
540 isc_boolean_t was_idle = ISC_FALSE;
544 * Caller must be holding the task lock.
547 REQUIRE(eventp != NULL);
549 REQUIRE(event != NULL);
550 REQUIRE(event->ev_type > 0);
551 REQUIRE(task->state != task_state_done);
/* First event into an idle task flips it to ready. */
555 if (task->state == task_state_idle) {
557 INSIST(EMPTY(task->events));
558 task->state = task_state_ready;
560 INSIST(task->state == task_state_ready ||
561 task->state == task_state_running);
562 ENQUEUE(task->events, event, ev_link);
/*
 * Public send: deliver '*eventp' to 'task'.  Locking is kept minimal —
 * the enqueue happens under the task lock only; if the task was idle it
 * is pushed onto the manager's ready queue afterwards (the LOCK/UNLOCK
 * and task_ready() call lines are missing from this fragmented extract).
 */
568 ISC_TASKFUNC_SCOPE void
569 isc__task_send(isc_task_t *task0, isc_event_t **eventp) {
570 isc__task_t *task = (isc__task_t *)task0;
571 isc_boolean_t was_idle;
574 * Send '*event' to 'task'.
577 REQUIRE(VALID_TASK(task));
579 XTRACE("isc_task_send");
582 * We're trying hard to hold locks for as short a time as possible.
583 * We're also trying to hold as few locks as possible. This is why
584 * some processing is deferred until after the lock is released.
587 was_idle = task_send(task, eventp);
592 * We need to add this task to the ready queue.
594 * We've waited until now to do it because making a task
595 * ready requires locking the manager. If we tried to do
596 * this while holding the task lock, we could deadlock.
598 * We've changed the state to ready, so no one else will
599 * be trying to add this task to the ready queue. The
600 * only way to leave the ready state is by executing the
601 * task. It thus doesn't matter if events are added,
602 * removed, or a shutdown is started in the interval
603 * between the time we released the task lock, and the time
604 * we add the task to the ready queue.
/*
 * Combined send + detach under a single task-lock acquisition:
 * delivers '*eventp' and drops the caller's reference.  At most one of
 * the two operations can report the task as newly-idle, which the
 * INSIST below checks.  Tail (ready-queue push, *taskp = NULL) is on
 * lines missing from this fragmented extract.
 */
610 ISC_TASKFUNC_SCOPE void
611 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
612 isc_boolean_t idle1, idle2;
616 * Send '*event' to '*taskp' and then detach '*taskp' from its
620 REQUIRE(taskp != NULL);
621 task = (isc__task_t *)*taskp;
622 REQUIRE(VALID_TASK(task));
624 XTRACE("isc_task_sendanddetach");
627 idle1 = task_send(task, eventp);
628 idle2 = task_detach(task);
632 * If idle1, then idle2 shouldn't be true as well since we're holding
633 * the task lock, and thus the task cannot switch from ready back to
636 INSIST(!(idle1 && idle2));
/*
 * PURGE_OK: an event may be purged unless it carries the NOPURGE
 * attribute.
 *
 * dequeue_events: move every event on 'task''s queue matching
 * (sender, [first..last], tag) onto the caller-supplied 'events' list,
 * honouring NOPURGE when 'purging' is set; returns the number moved
 * (the count increment and return are on lines missing from this
 * fragmented extract, as is the task LOCK/UNLOCK pair).
 */
644 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
647 dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
648 isc_eventtype_t last, void *tag,
649 isc_eventlist_t *events, isc_boolean_t purging)
651 isc_event_t *event, *next_event;
652 unsigned int count = 0;
654 REQUIRE(VALID_TASK(task));
655 REQUIRE(last >= first);
657 XTRACE("dequeue_events");
660 * Events matching 'sender', whose type is >= first and <= last, and
661 * whose tag is 'tag' will be dequeued. If 'purging', matching events
662 * which are marked as unpurgable will not be dequeued.
664 * sender == NULL means "any sender", and tag == NULL means "any tag".
/* Capture next before unlinking, since DEQUEUE breaks ev_link. */
669 for (event = HEAD(task->events); event != NULL; event = next_event) {
670 next_event = NEXT(event, ev_link);
671 if (event->ev_type >= first && event->ev_type <= last &&
672 (sender == NULL || event->ev_sender == sender) &&
673 (tag == NULL || event->ev_tag == tag) &&
674 (!purging || PURGE_OK(event)) ) {
675 DEQUEUE(task->events, event, ev_link);
676 ENQUEUE(*events, event, ev_link);
/*
 * purgerange: dequeue matching events (purging mode) and free each one;
 * returns the number purged.  Purging never changes the task's state.
 * purge: convenience wrapper purging a single event type.
 * NOTE(review): fragmented extract — the ISC_TRUE purging argument and
 * return statement lines are missing from this view.
 */
686 ISC_TASKFUNC_SCOPE unsigned int
687 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
688 isc_eventtype_t last, void *tag)
690 isc__task_t *task = (isc__task_t *)task0;
692 isc_eventlist_t events;
693 isc_event_t *event, *next_event;
696 * Purge events from a task's event queue.
699 XTRACE("isc_task_purgerange");
701 ISC_LIST_INIT(events);
703 count = dequeue_events(task, sender, first, last, tag, &events,
/* Free the dequeued events; grab next before isc_event_free unlinks. */
706 for (event = HEAD(events); event != NULL; event = next_event) {
707 next_event = NEXT(event, ev_link);
708 isc_event_free(&event);
712 * Note that purging never changes the state of the task.
718 ISC_TASKFUNC_SCOPE unsigned int
719 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
723 * Purge events from a task's event queue.
726 XTRACE("isc_task_purge");
/* Single-type purge delegates to the range variant with first == last. */
728 return (isc__task_purgerange(task, sender, type, type, tag));
/*
 * Purge one specific event pointer from the task's queue, if present and
 * not marked NOPURGE; the event is freed on success.  'event' may even
 * be an invalid pointer — it is only compared, never dereferenced unless
 * found on the queue.  Returns (on lines missing from this fragmented
 * extract) whether the event was found and purged.
 */
731 ISC_TASKFUNC_SCOPE isc_boolean_t
732 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event) {
733 isc__task_t *task = (isc__task_t *)task0;
734 isc_event_t *curr_event, *next_event;
737 * Purge 'event' from a task's event queue.
739 * XXXRTH: WARNING: This method may be removed before beta.
742 REQUIRE(VALID_TASK(task));
745 * If 'event' is on the task's event queue, it will be purged,
746 * unless it is marked as unpurgeable. 'event' does not have to be
747 * on the task's event queue; in fact, it can even be an invalid
748 * pointer. Purging only occurs if the event is actually on the task's
751 * Purging never changes the state of the task.
755 for (curr_event = HEAD(task->events);
757 curr_event = next_event) {
758 next_event = NEXT(curr_event, ev_link);
759 if (curr_event == event && PURGE_OK(event)) {
760 DEQUEUE(task->events, curr_event, ev_link);
766 if (curr_event == NULL)
769 isc_event_free(&curr_event);
/*
 * unsendrange / unsend: remove matching events from the task's queue
 * WITHOUT freeing them — they are handed back to the caller on 'events'.
 * Both delegate to dequeue_events() with purging == ISC_FALSE, so
 * NOPURGE events are removed too.  Return value is the count removed.
 */
774 ISC_TASKFUNC_SCOPE unsigned int
775 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
776 isc_eventtype_t last, void *tag,
777 isc_eventlist_t *events)
780 * Remove events from a task's event queue.
783 XTRACE("isc_task_unsendrange");
785 return (dequeue_events((isc__task_t *)task, sender, first,
786 last, tag, events, ISC_FALSE));
789 ISC_TASKFUNC_SCOPE unsigned int
790 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
791 void *tag, isc_eventlist_t *events)
794 * Remove events from a task's event queue.
797 XTRACE("isc_task_unsend");
/* Single-type unsend: range variant with first == last == type. */
799 return (dequeue_events((isc__task_t *)task, sender, type,
800 type, tag, events, ISC_FALSE));
/*
 * Register a shutdown callback: allocate an ISC_TASKEVENT_SHUTDOWN event
 * for (action, arg) and queue it on the task's on_shutdown list.  If the
 * task is already shutting down, registration is refused — the event is
 * freed and ISC_R_SHUTTINGDOWN returned.  Returns ISC_R_NOMEMORY if the
 * event cannot be allocated.  The task LOCK/UNLOCK lines are missing
 * from this fragmented extract.
 */
803 ISC_TASKFUNC_SCOPE isc_result_t
804 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
807 isc__task_t *task = (isc__task_t *)task0;
808 isc_boolean_t disallowed = ISC_FALSE;
809 isc_result_t result = ISC_R_SUCCESS;
813 * Send a shutdown event with action 'action' and argument 'arg' when
814 * 'task' is shutdown.
817 REQUIRE(VALID_TASK(task));
818 REQUIRE(action != NULL);
820 event = isc_event_allocate(task->manager->mctx,
822 ISC_TASKEVENT_SHUTDOWN,
827 return (ISC_R_NOMEMORY);
830 if (TASK_SHUTTINGDOWN(task)) {
831 disallowed = ISC_TRUE;
832 result = ISC_R_SHUTTINGDOWN;
834 ENQUEUE(task->on_shutdown, event, ev_link);
/* Registration was disallowed: give the unused event back to the mctx. */
838 isc_mem_put(task->manager->mctx, event, sizeof(*event));
/*
 * shutdown: public entry that runs task_shutdown() under the task lock
 * (the LOCK/UNLOCK and subsequent task_ready() call are on lines missing
 * from this fragmented extract).
 * destroy: shutdown + detach in one call; *taskp is NULLed by the detach.
 */
843 ISC_TASKFUNC_SCOPE void
844 isc__task_shutdown(isc_task_t *task0) {
845 isc__task_t *task = (isc__task_t *)task0;
846 isc_boolean_t was_idle;
852 REQUIRE(VALID_TASK(task));
855 was_idle = task_shutdown(task);
862 ISC_TASKFUNC_SCOPE void
863 isc__task_destroy(isc_task_t **taskp) {
869 REQUIRE(taskp != NULL);
871 isc_task_shutdown(*taskp);
872 isc_task_detach(taskp);
/*
 * Accessors: setname stores a NUL-padded copy of 'name' (truncated to
 * the name buffer; strncpy with size-1 plus the preceding memset
 * guarantees termination) and — on a line missing from this extract —
 * presumably sets task->tag.  getname/gettag/getcurrenttime return the
 * corresponding fields; their return lines are also missing from this
 * fragmented extract.
 */
875 ISC_TASKFUNC_SCOPE void
876 isc__task_setname(isc_task_t *task0, const char *name, void *tag) {
877 isc__task_t *task = (isc__task_t *)task0;
883 REQUIRE(VALID_TASK(task));
886 memset(task->name, 0, sizeof(task->name));
887 strncpy(task->name, name, sizeof(task->name) - 1);
892 ISC_TASKFUNC_SCOPE const char *
893 isc__task_getname(isc_task_t *task0) {
894 isc__task_t *task = (isc__task_t *)task0;
896 REQUIRE(VALID_TASK(task));
901 ISC_TASKFUNC_SCOPE void *
902 isc__task_gettag(isc_task_t *task0) {
903 isc__task_t *task = (isc__task_t *)task0;
905 REQUIRE(VALID_TASK(task));
910 ISC_TASKFUNC_SCOPE void
911 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
912 isc__task_t *task = (isc__task_t *)task0;
914 REQUIRE(VALID_TASK(task));
/*
 * True when the currently-active ready list is empty: ready_tasks in
 * normal mode, ready_priority_tasks in privileged mode.  Note 'queue'
 * is a by-value copy of the list head, which is sufficient for EMPTY().
 */
927 * Return ISC_TRUE if the current ready list for the manager, which is
928 * either ready_tasks or the ready_priority_tasks, depending on whether
929 * the manager is currently in normal or privileged execution mode.
931 * Caller must hold the task manager lock.
933 static inline isc_boolean_t
934 empty_readyq(isc__taskmgr_t *manager) {
935 isc__tasklist_t queue;
937 if (manager->mode == isc_taskmgrmode_normal)
938 queue = manager->ready_tasks;
940 queue = manager->ready_priority_tasks;
942 return (ISC_TF(EMPTY(queue)));
/*
 * Pop the head of the mode-appropriate ready list.  A task may sit on
 * both lists (privileged tasks are enqueued on both by push_readyq), so
 * it is unlinked from ready_tasks and, if still linked, from
 * ready_priority_tasks as well.  The NULL check and return statement are
 * on lines missing from this fragmented extract.
 */
946 * Dequeue and return a pointer to the first task on the current ready
947 * list for the manager.
948 * If the task is privileged, dequeue it from the other ready list
951 * Caller must hold the task manager lock.
953 static inline isc__task_t *
954 pop_readyq(isc__taskmgr_t *manager) {
957 if (manager->mode == isc_taskmgrmode_normal)
958 task = HEAD(manager->ready_tasks);
960 task = HEAD(manager->ready_priority_tasks);
963 DEQUEUE(manager->ready_tasks, task, ready_link);
964 if (ISC_LINK_LINKED(task, ready_priority_link))
965 DEQUEUE(manager->ready_priority_tasks, task,
966 ready_priority_link);
/*
 * Enqueue 'task' at the tail of ready_tasks; privileged tasks are
 * additionally enqueued on ready_priority_tasks so privileged-mode
 * dispatch can find them.  Caller must hold the task manager lock.
 */
973 * Push 'task' onto the ready_tasks queue. If 'task' has the privilege
974 * flag set, then also push it onto the ready_priority_tasks queue.
976 * Caller must hold the task manager lock.
979 push_readyq(isc__taskmgr_t *manager, isc__task_t *task) {
980 ENQUEUE(manager->ready_tasks, task, ready_link);
981 if ((task->flags & TASK_F_PRIVILEGED) != 0)
982 ENQUEUE(manager->ready_priority_tasks, task,
983 ready_priority_link);
/*
 * Core event loop shared by both build flavours.
 * Threaded (USE_WORKER_THREADS): each worker blocks on work_available,
 * pops a ready task, runs up to 'quantum' events under the task lock
 * (dropping it around each action), then requeues/idles/finishes the
 * task and signals exclusive/pause waiters as appropriate.
 * Non-threaded: one pass dispatches at most DEFAULT_TASKMGR_QUANTUM
 * events, accumulating requeued tasks on local lists that are appended
 * back to the manager's queues at the end.
 * NOTE(review): this extract is fragmented — many interior lines
 * (closing braces, else branches, the event-action invocation itself)
 * are missing; do not treat this listing as complete control flow.
 */
987 dispatch(isc__taskmgr_t *manager) {
989 #ifndef USE_WORKER_THREADS
990 unsigned int total_dispatch_count = 0;
991 isc__tasklist_t new_ready_tasks;
992 isc__tasklist_t new_priority_tasks;
993 #endif /* USE_WORKER_THREADS */
995 REQUIRE(VALID_MANAGER(manager));
998 * Again we're trying to hold the lock for as short a time as possible
999 * and to do as little locking and unlocking as possible.
1001 * In both while loops, the appropriate lock must be held before the
1002 * while body starts. Code which acquired the lock at the top of
1003 * the loop would be more readable, but would result in a lot of
1004 * extra locking. Compare:
1011 * while (expression) {
1016 * Unlocked part here...
1023 * Note how if the loop continues we unlock and then immediately lock.
1024 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
1025 * unlocks. Also note that the lock is not held when the while
1026 * condition is tested, which may or may not be important, depending
1027 * on the expression.
1032 * while (expression) {
1036 * Unlocked part here...
1043 * For N iterations of the loop, this code does N+1 locks and N+1
1044 * unlocks. The while expression is always protected by the lock.
1047 #ifndef USE_WORKER_THREADS
1048 ISC_LIST_INIT(new_ready_tasks);
1049 ISC_LIST_INIT(new_priority_tasks);
1051 LOCK(&manager->lock);
/* Main loop: runs until the manager is exiting with no tasks left. */
1053 while (!FINISHED(manager)) {
1054 #ifdef USE_WORKER_THREADS
1056 * For reasons similar to those given in the comment in
1057 * isc_task_send() above, it is safe for us to dequeue
1058 * the task while only holding the manager lock, and then
1059 * change the task to running state while only holding the
1062 * If a pause has been requested, don't do any work
1063 * until it's been released.
1065 while ((empty_readyq(manager) || manager->pause_requested ||
1066 manager->exclusive_requested) && !FINISHED(manager))
1068 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
1070 ISC_MSG_WAIT, "wait"));
1071 WAIT(&manager->work_available, &manager->lock);
1072 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
1074 ISC_MSG_AWAKE, "awake"));
1076 #else /* USE_WORKER_THREADS */
/* Non-threaded pass budget: stop after the manager-wide quantum. */
1077 if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
1078 empty_readyq(manager))
1080 #endif /* USE_WORKER_THREADS */
1081 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
1082 ISC_MSG_WORKING, "working"));
1084 task = pop_readyq(manager);
1086 unsigned int dispatch_count = 0;
1087 isc_boolean_t done = ISC_FALSE;
1088 isc_boolean_t requeue = ISC_FALSE;
1089 isc_boolean_t finished = ISC_FALSE;
1092 INSIST(VALID_TASK(task));
1095 * Note we only unlock the manager lock if we actually
1096 * have a task to do. We must reacquire the manager
1097 * lock before exiting the 'if (task != NULL)' block.
1099 manager->tasks_running++;
1100 UNLOCK(&manager->lock);
1103 INSIST(task->state == task_state_ready);
1104 task->state = task_state_running;
1105 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1106 ISC_MSG_RUNNING, "running"));
/* Stamp the task with the current time for isc_task_getcurrenttime(). */
1107 isc_stdtime_get(&task->now);
1109 if (!EMPTY(task->events)) {
1110 event = HEAD(task->events);
1111 DEQUEUE(task->events, event, ev_link);
1114 * Execute the event action.
1116 XTRACE(isc_msgcat_get(isc_msgcat,
/* Task lock is dropped around the user action callback. */
1120 if (event->ev_action != NULL) {
1121 UNLOCK(&task->lock);
1128 #ifndef USE_WORKER_THREADS
1129 total_dispatch_count++;
1130 #endif /* USE_WORKER_THREADS */
1133 if (task->references == 0 &&
1134 EMPTY(task->events) &&
1135 !TASK_SHUTTINGDOWN(task)) {
1136 isc_boolean_t was_idle;
1139 * There are no references and no
1140 * pending events for this task,
1141 * which means it will not become
1142 * runnable again via an external
1143 * action (such as sending an event
1146 * We initiate shutdown to prevent
1147 * it from becoming a zombie.
1149 * We do this here instead of in
1150 * the "if EMPTY(task->events)" block
1153 * If we post no shutdown events,
1154 * we want the task to finish.
1156 * If we did post shutdown events,
1157 * will still want the task's
1158 * quantum to be applied.
1160 was_idle = task_shutdown(task);
1164 if (EMPTY(task->events)) {
1166 * Nothing else to do for this task
1169 XTRACE(isc_msgcat_get(isc_msgcat,
1173 if (task->references == 0 &&
1174 TASK_SHUTTINGDOWN(task)) {
1178 XTRACE(isc_msgcat_get(
1183 finished = ISC_TRUE;
1184 task->state = task_state_done;
1186 task->state = task_state_idle;
1188 } else if (dispatch_count >= task->quantum) {
1190 * Our quantum has expired, but
1191 * there is more work to be done.
1192 * We'll requeue it to the ready
1195 * We don't check quantum until
1196 * dispatching at least one event,
1197 * so the minimum quantum is one.
1199 XTRACE(isc_msgcat_get(isc_msgcat,
1203 task->state = task_state_ready;
1208 UNLOCK(&task->lock);
1211 task_finished(task);
1213 LOCK(&manager->lock);
1214 manager->tasks_running--;
1215 #ifdef USE_WORKER_THREADS
/* Wake a beginexclusive/pause waiter once we are the last runner. */
1216 if (manager->exclusive_requested &&
1217 manager->tasks_running == 1) {
1218 SIGNAL(&manager->exclusive_granted);
1219 } else if (manager->pause_requested &&
1220 manager->tasks_running == 0) {
1221 SIGNAL(&manager->paused);
1223 #endif /* USE_WORKER_THREADS */
1226 * We know we're awake, so we don't have
1227 * to wakeup any sleeping threads if the
1228 * ready queue is empty before we requeue.
1230 * A possible optimization if the queue is
1231 * empty is to 'goto' the 'if (task != NULL)'
1232 * block, avoiding the ENQUEUE of the task
1233 * and the subsequent immediate DEQUEUE
1234 * (since it is the only executable task).
1235 * We don't do this because then we'd be
1236 * skipping the exit_requested check. The
1237 * cost of ENQUEUE is low anyway, especially
1238 * when you consider that we'd have to do
1239 * an extra EMPTY check to see if we could
1240 * do the optimization. If the ready queue
1241 * were usually nonempty, the 'optimization'
1242 * might even hurt rather than help.
1244 #ifdef USE_WORKER_THREADS
1245 push_readyq(manager, task);
1247 ENQUEUE(new_ready_tasks, task, ready_link);
1248 if ((task->flags & TASK_F_PRIVILEGED) != 0)
1249 ENQUEUE(new_priority_tasks, task,
1250 ready_priority_link);
1255 #ifdef USE_WORKER_THREADS
1257 * If we are in privileged execution mode and there are no
1258 * tasks remaining on the current ready queue, then
1259 * we're stuck. Automatically drop privileges at that
1260 * point and continue with the regular ready queue.
1262 if (manager->tasks_running == 0 && empty_readyq(manager)) {
1263 manager->mode = isc_taskmgrmode_normal;
1264 if (!empty_readyq(manager))
1265 BROADCAST(&manager->work_available);
1270 #ifndef USE_WORKER_THREADS
/* Non-threaded: merge the locally-requeued tasks back into the manager. */
1271 ISC_LIST_APPENDLIST(manager->ready_tasks, new_ready_tasks, ready_link);
1272 ISC_LIST_APPENDLIST(manager->ready_priority_tasks, new_priority_tasks,
1273 ready_priority_link);
1274 if (empty_readyq(manager))
1275 manager->mode = isc_taskmgrmode_normal;
1278 UNLOCK(&manager->lock);
/*
 * Worker-thread entry point (threaded builds only): logs start/exit and
 * — on a line missing from this fragmented extract — runs dispatch() on
 * the manager passed as 'uap'.  The OPENSSL_LEAKS block releases
 * per-thread OpenSSL error state before the thread exits.
 */
1281 #ifdef USE_WORKER_THREADS
1282 static isc_threadresult_t
1287 isc__taskmgr_t *manager = uap;
1289 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1290 ISC_MSG_STARTING, "starting"));
1294 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1295 ISC_MSG_EXITING, "exiting"));
1297 #ifdef OPENSSL_LEAKS
1298 ERR_remove_state(0);
1301 return ((isc_threadresult_t)0);
1303 #endif /* USE_WORKER_THREADS */
/*
 * Release all resources owned by a task manager: condition variables and
 * the worker-thread array (threaded builds), both mutexes, and finally
 * the manager structure itself; the mctx reference taken at create time
 * is dropped last.  In shared-manager builds the global pointer is also
 * reset (that line is missing from this fragmented extract).
 */
1306 manager_free(isc__taskmgr_t *manager) {
1309 #ifdef USE_WORKER_THREADS
1310 (void)isc_condition_destroy(&manager->exclusive_granted);
1311 (void)isc_condition_destroy(&manager->work_available);
1312 (void)isc_condition_destroy(&manager->paused);
1313 isc_mem_free(manager->mctx, manager->threads);
1314 #endif /* USE_WORKER_THREADS */
1315 DESTROYLOCK(&manager->lock);
1316 DESTROYLOCK(&manager->excl_lock);
1317 manager->common.impmagic = 0;
1318 manager->common.magic = 0;
/* Save mctx locally: the manager struct is freed before detaching. */
1319 mctx = manager->mctx;
1320 isc_mem_put(mctx, manager, sizeof(*manager));
1321 isc_mem_detach(&mctx);
1323 #ifdef USE_SHARED_MANAGER
1325 #endif /* USE_SHARED_MANAGER */
/*
 * Create a task manager with 'workers' threads (threaded builds) and the
 * given default per-task quantum (0 selects DEFAULT_DEFAULT_QUANTUM).
 * Shared-manager builds reuse/refcount the global singleton instead.
 * Error paths unwind via goto labels in reverse order of acquisition
 * (cleanup_exclusivegranted -> cleanup_workavailable -> threads/locks).
 * NOTE(review): fragmented extract — several cleanup labels, the
 * thread-start success branch, and the shared-manager refcount lines
 * are missing from this view.
 */
1328 ISC_TASKFUNC_SCOPE isc_result_t
1329 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1330 unsigned int default_quantum, isc_taskmgr_t **managerp)
1332 isc_result_t result;
1333 unsigned int i, started = 0;
1334 isc__taskmgr_t *manager;
1337 * Create a new task manager.
1340 REQUIRE(workers > 0);
1341 REQUIRE(managerp != NULL && *managerp == NULL);
1343 #ifndef USE_WORKER_THREADS
/* Shared-manager builds hand back the existing singleton if present. */
1348 #ifdef USE_SHARED_MANAGER
1349 if (taskmgr != NULL) {
1350 if (taskmgr->refs == 0)
1351 return (ISC_R_SHUTTINGDOWN);
1353 *managerp = (isc_taskmgr_t *)taskmgr;
1354 return (ISC_R_SUCCESS);
1356 #endif /* USE_SHARED_MANAGER */
1358 manager = isc_mem_get(mctx, sizeof(*manager));
1359 if (manager == NULL)
1360 return (ISC_R_NOMEMORY);
1361 manager->common.methods = &taskmgrmethods;
1362 manager->common.impmagic = TASK_MANAGER_MAGIC;
1363 manager->common.magic = ISCAPI_TASKMGR_MAGIC;
1364 manager->mode = isc_taskmgrmode_normal;
1365 manager->mctx = NULL;
1366 result = isc_mutex_init(&manager->lock);
1367 if (result != ISC_R_SUCCESS)
1369 result = isc_mutex_init(&manager->excl_lock);
1370 if (result != ISC_R_SUCCESS) {
1371 DESTROYLOCK(&manager->lock);
1375 #ifdef USE_WORKER_THREADS
1376 manager->workers = 0;
1377 manager->threads = isc_mem_allocate(mctx,
1378 workers * sizeof(isc_thread_t));
1379 if (manager->threads == NULL) {
1380 result = ISC_R_NOMEMORY;
/* Three condition variables, each unwound by the label below it. */
1383 if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1384 UNEXPECTED_ERROR(__FILE__, __LINE__,
1385 "isc_condition_init() %s",
1386 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1387 ISC_MSG_FAILED, "failed"));
1388 result = ISC_R_UNEXPECTED;
1389 goto cleanup_threads;
1391 if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1392 UNEXPECTED_ERROR(__FILE__, __LINE__,
1393 "isc_condition_init() %s",
1394 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1395 ISC_MSG_FAILED, "failed"));
1396 result = ISC_R_UNEXPECTED;
1397 goto cleanup_workavailable;
1399 if (isc_condition_init(&manager->paused) != ISC_R_SUCCESS) {
1400 UNEXPECTED_ERROR(__FILE__, __LINE__,
1401 "isc_condition_init() %s",
1402 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1403 ISC_MSG_FAILED, "failed"));
1404 result = ISC_R_UNEXPECTED;
1405 goto cleanup_exclusivegranted;
1407 #endif /* USE_WORKER_THREADS */
1408 if (default_quantum == 0)
1409 default_quantum = DEFAULT_DEFAULT_QUANTUM;
1410 manager->default_quantum = default_quantum;
1411 INIT_LIST(manager->tasks);
1412 INIT_LIST(manager->ready_tasks);
1413 INIT_LIST(manager->ready_priority_tasks);
1414 manager->tasks_running = 0;
1415 manager->exclusive_requested = ISC_FALSE;
1416 manager->pause_requested = ISC_FALSE;
1417 manager->exiting = ISC_FALSE;
1418 manager->excl = NULL;
1420 isc_mem_attach(mctx, &manager->mctx);
1422 #ifdef USE_WORKER_THREADS
/* Start the workers under the manager lock; counting via ->workers. */
1423 LOCK(&manager->lock);
1427 for (i = 0; i < workers; i++) {
1428 if (isc_thread_create(run, manager,
1429 &manager->threads[manager->workers]) ==
1435 UNLOCK(&manager->lock);
/* No thread could be started: tear everything down. */
1438 manager_free(manager);
1439 return (ISC_R_NOTHREADS);
1441 isc_thread_setconcurrency(workers);
1442 #endif /* USE_WORKER_THREADS */
1443 #ifdef USE_SHARED_MANAGER
1446 #endif /* USE_SHARED_MANAGER */
1448 *managerp = (isc_taskmgr_t *)manager;
1450 return (ISC_R_SUCCESS);
1452 #ifdef USE_WORKER_THREADS
1453 cleanup_exclusivegranted:
1454 (void)isc_condition_destroy(&manager->exclusive_granted);
1455 cleanup_workavailable:
1456 (void)isc_condition_destroy(&manager->work_available);
1458 isc_mem_free(mctx, manager->threads);
1460 DESTROYLOCK(&manager->lock);
1463 isc_mem_put(mctx, manager, sizeof(*manager));
1467 ISC_TASKFUNC_SCOPE void
1468 isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
1469 isc__taskmgr_t *manager;
1474 * Destroy '*managerp'.
1477 REQUIRE(managerp != NULL);
1478 manager = (isc__taskmgr_t *)*managerp;
1479 REQUIRE(VALID_MANAGER(manager));
1481 #ifndef USE_WORKER_THREADS
1483 #endif /* USE_WORKER_THREADS */
1485 #ifdef USE_SHARED_MANAGER
1487 if (manager->refs > 0) {
1493 XTHREADTRACE("isc_taskmgr_destroy");
1495 * Only one non-worker thread may ever call this routine.
1496 * If a worker thread wants to initiate shutdown of the
1497 * task manager, it should ask some non-worker thread to call
1498 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1499 * that the startup thread is sleeping on.
1503 * Detach the exclusive task before acquiring the manager lock
1505 LOCK(&manager->excl_lock);
1506 if (manager->excl != NULL)
1507 isc__task_detach((isc_task_t **) &manager->excl);
1508 UNLOCK(&manager->excl_lock);
1511 * Unlike elsewhere, we're going to hold this lock a long time.
1512 * We need to do so, because otherwise the list of tasks could
1513 * change while we were traversing it.
1515 * This is also the only function where we will hold both the
1516 * task manager lock and a task lock at the same time.
1519 LOCK(&manager->lock);
1522 * Make sure we only get called once.
1524 INSIST(!manager->exiting);
1525 manager->exiting = ISC_TRUE;
1528 * If privileged mode was on, turn it off.
1530 manager->mode = isc_taskmgrmode_normal;
1533 * Post shutdown event(s) to every task (if they haven't already been
1536 for (task = HEAD(manager->tasks);
1538 task = NEXT(task, link)) {
1540 if (task_shutdown(task))
1541 push_readyq(manager, task);
1542 UNLOCK(&task->lock);
1544 #ifdef USE_WORKER_THREADS
1546 * Wake up any sleeping workers. This ensures we get work done if
1547 * there's work left to do, and if there are already no tasks left
1548 * it will cause the workers to see manager->exiting.
1550 BROADCAST(&manager->work_available);
1551 UNLOCK(&manager->lock);
1554 * Wait for all the worker threads to exit.
1556 for (i = 0; i < manager->workers; i++)
1557 (void)isc_thread_join(manager->threads[i], NULL);
1558 #else /* USE_WORKER_THREADS */
1560 * Dispatch the shutdown events.
1562 UNLOCK(&manager->lock);
1563 while (isc__taskmgr_ready((isc_taskmgr_t *)manager))
1564 (void)isc__taskmgr_dispatch((isc_taskmgr_t *)manager);
1566 if (!ISC_LIST_EMPTY(manager->tasks))
1567 isc_mem_printallactive(stderr);
1569 INSIST(ISC_LIST_EMPTY(manager->tasks));
1570 #ifdef USE_SHARED_MANAGER
1573 #endif /* USE_WORKER_THREADS */
1575 manager_free(manager);
1580 ISC_TASKFUNC_SCOPE void
1581 isc__taskmgr_setmode(isc_taskmgr_t *manager0, isc_taskmgrmode_t mode) {
1582 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1584 LOCK(&manager->lock);
1585 manager->mode = mode;
1586 UNLOCK(&manager->lock);
1589 ISC_TASKFUNC_SCOPE isc_taskmgrmode_t
1590 isc__taskmgr_mode(isc_taskmgr_t *manager0) {
1591 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1592 isc_taskmgrmode_t mode;
1593 LOCK(&manager->lock);
1594 mode = manager->mode;
1595 UNLOCK(&manager->lock);
1599 #ifndef USE_WORKER_THREADS
1601 isc__taskmgr_ready(isc_taskmgr_t *manager0) {
1602 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1603 isc_boolean_t is_ready;
1605 #ifdef USE_SHARED_MANAGER
1606 if (manager == NULL)
1609 if (manager == NULL)
1612 LOCK(&manager->lock);
1613 is_ready = !empty_readyq(manager);
1614 UNLOCK(&manager->lock);
1620 isc__taskmgr_dispatch(isc_taskmgr_t *manager0) {
1621 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1623 #ifdef USE_SHARED_MANAGER
1624 if (manager == NULL)
1627 if (manager == NULL)
1628 return (ISC_R_NOTFOUND);
1632 return (ISC_R_SUCCESS);
1636 ISC_TASKFUNC_SCOPE void
1637 isc__taskmgr_pause(isc_taskmgr_t *manager0) {
1638 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1639 manager->pause_requested = ISC_TRUE;
1640 LOCK(&manager->lock);
1641 while (manager->tasks_running > 0) {
1642 WAIT(&manager->paused, &manager->lock);
1644 UNLOCK(&manager->lock);
1647 ISC_TASKFUNC_SCOPE void
1648 isc__taskmgr_resume(isc_taskmgr_t *manager0) {
1649 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1651 LOCK(&manager->lock);
1652 if (manager->pause_requested) {
1653 manager->pause_requested = ISC_FALSE;
1654 BROADCAST(&manager->work_available);
1656 UNLOCK(&manager->lock);
1658 #endif /* USE_WORKER_THREADS */
1660 ISC_TASKFUNC_SCOPE void
1661 isc__taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0) {
1662 isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
1663 isc__task_t *task = (isc__task_t *) task0;
1665 REQUIRE(VALID_MANAGER(mgr));
1666 REQUIRE(VALID_TASK(task));
1667 LOCK(&mgr->excl_lock);
1668 if (mgr->excl != NULL)
1669 isc__task_detach((isc_task_t **) &mgr->excl);
1670 isc__task_attach(task0, (isc_task_t **) &mgr->excl);
1671 UNLOCK(&mgr->excl_lock);
1674 ISC_TASKFUNC_SCOPE isc_result_t
1675 isc__taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp) {
1676 isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
1677 isc_result_t result = ISC_R_SUCCESS;
1679 REQUIRE(VALID_MANAGER(mgr));
1680 REQUIRE(taskp != NULL && *taskp == NULL);
1682 LOCK(&mgr->excl_lock);
1683 if (mgr->excl != NULL)
1684 isc__task_attach((isc_task_t *) mgr->excl, taskp);
1686 result = ISC_R_NOTFOUND;
1687 UNLOCK(&mgr->excl_lock);
1692 ISC_TASKFUNC_SCOPE isc_result_t
1693 isc__task_beginexclusive(isc_task_t *task0) {
1694 #ifdef USE_WORKER_THREADS
1695 isc__task_t *task = (isc__task_t *)task0;
1696 isc__taskmgr_t *manager = task->manager;
1698 REQUIRE(task->state == task_state_running);
1699 /* XXX: Require task == manager->excl? */
1701 LOCK(&manager->lock);
1702 if (manager->exclusive_requested) {
1703 UNLOCK(&manager->lock);
1704 return (ISC_R_LOCKBUSY);
1706 manager->exclusive_requested = ISC_TRUE;
1707 while (manager->tasks_running > 1) {
1708 WAIT(&manager->exclusive_granted, &manager->lock);
1710 UNLOCK(&manager->lock);
1714 return (ISC_R_SUCCESS);
1717 ISC_TASKFUNC_SCOPE void
1718 isc__task_endexclusive(isc_task_t *task0) {
1719 #ifdef USE_WORKER_THREADS
1720 isc__task_t *task = (isc__task_t *)task0;
1721 isc__taskmgr_t *manager = task->manager;
1723 REQUIRE(task->state == task_state_running);
1724 LOCK(&manager->lock);
1725 REQUIRE(manager->exclusive_requested);
1726 manager->exclusive_requested = ISC_FALSE;
1727 BROADCAST(&manager->work_available);
1728 UNLOCK(&manager->lock);
1734 ISC_TASKFUNC_SCOPE void
1735 isc__task_setprivilege(isc_task_t *task0, isc_boolean_t priv) {
1736 isc__task_t *task = (isc__task_t *)task0;
1737 isc__taskmgr_t *manager = task->manager;
1738 isc_boolean_t oldpriv;
1741 oldpriv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
1743 task->flags |= TASK_F_PRIVILEGED;
1745 task->flags &= ~TASK_F_PRIVILEGED;
1746 UNLOCK(&task->lock);
1748 if (priv == oldpriv)
1751 LOCK(&manager->lock);
1752 if (priv && ISC_LINK_LINKED(task, ready_link))
1753 ENQUEUE(manager->ready_priority_tasks, task,
1754 ready_priority_link);
1755 else if (!priv && ISC_LINK_LINKED(task, ready_priority_link))
1756 DEQUEUE(manager->ready_priority_tasks, task,
1757 ready_priority_link);
1758 UNLOCK(&manager->lock);
1761 ISC_TASKFUNC_SCOPE isc_boolean_t
1762 isc__task_privilege(isc_task_t *task0) {
1763 isc__task_t *task = (isc__task_t *)task0;
1767 priv = ISC_TF((task->flags & TASK_F_PRIVILEGED) != 0);
1768 UNLOCK(&task->lock);
#ifdef USE_TASKIMPREGISTER
/*
 * Register this task implementation as the default for the generic
 * isc_task API.
 *
 * Fix: the guard previously tested USE_SOCKETIMPREGISTER (the socket
 * module's macro, apparently a copy-paste), which would leave this
 * function compiled out of task builds.
 */
isc_result_t
isc__task_register(void) {
	return (isc_task_register(isc__taskmgr_create));
}
#endif
1780 isc_task_exiting(isc_task_t *t) {
1781 isc__task_t *task = (isc__task_t *)t;
1783 REQUIRE(VALID_TASK(task));
1784 return (TASK_SHUTTINGDOWN(task));
#if defined(HAVE_LIBXML2) && defined(BIND9)
#define TRY0(a) do { xmlrc = (a); if (xmlrc < 0) goto error; } while(0)
/*
 * Render the task manager's state (thread model, per-task details) as XML
 * for the statistics channel.  Returns the last xmlTextWriter result code
 * (negative on error).
 */
int
isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
	isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
	isc__task_t *task = NULL;
	int xmlrc;

	LOCK(&mgr->lock);

	/*
	 * Write out the thread-model, and some details about each depending
	 * on which type is enabled.
	 */
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model"));
#ifdef ISC_PLATFORM_USETHREADS
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
	TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded"));
	TRY0(xmlTextWriterEndElement(writer)); /* type */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->workers));
	TRY0(xmlTextWriterEndElement(writer)); /* worker-threads */
#else /* ISC_PLATFORM_USETHREADS */
	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "type"));
	TRY0(xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded"));
	TRY0(xmlTextWriterEndElement(writer)); /* type */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "references"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->refs));
	TRY0(xmlTextWriterEndElement(writer)); /* references */
#endif /* ISC_PLATFORM_USETHREADS */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d",
					    mgr->default_quantum));
	TRY0(xmlTextWriterEndElement(writer)); /* default-quantum */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running"));
	TRY0(xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running));
	TRY0(xmlTextWriterEndElement(writer)); /* tasks-running */

	TRY0(xmlTextWriterEndElement(writer)); /* thread-model */

	TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks"));
	task = ISC_LIST_HEAD(mgr->tasks);
	while (task != NULL) {
		/* Each task is rendered with its lock held. */
		LOCK(&task->lock);
		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "task"));

		if (task->name[0] != 0) {
			TRY0(xmlTextWriterStartElement(writer,
						       ISC_XMLCHAR "name"));
			TRY0(xmlTextWriterWriteFormatString(writer, "%s",
							    task->name));
			TRY0(xmlTextWriterEndElement(writer)); /* name */
		}

		TRY0(xmlTextWriterStartElement(writer,
					       ISC_XMLCHAR "references"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%d",
						    task->references));
		TRY0(xmlTextWriterEndElement(writer)); /* references */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "id"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%p", task));
		TRY0(xmlTextWriterEndElement(writer)); /* id */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "state"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%s",
						    statenames[task->state]));
		TRY0(xmlTextWriterEndElement(writer)); /* state */

		TRY0(xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum"));
		TRY0(xmlTextWriterWriteFormatString(writer, "%d",
						    task->quantum));
		TRY0(xmlTextWriterEndElement(writer)); /* quantum */

		TRY0(xmlTextWriterEndElement(writer)); /* task */

		UNLOCK(&task->lock);
		task = ISC_LIST_NEXT(task, link);
	}
	TRY0(xmlTextWriterEndElement(writer)); /* tasks */

 error:
	/* On failure inside the loop the current task is still locked. */
	if (task != NULL)
		UNLOCK(&task->lock);
	UNLOCK(&mgr->lock);

	return (xmlrc);
}
#endif /* HAVE_LIBXML2 && BIND9 */