2 * Copyright (C) 2004-2008, 2010-2012 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1998-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
21 * \author Principal Author: Bob Halley
25 * XXXRTH Need to document the states a task can be in, and the rules
26 * for changing states.
31 #include <isc/condition.h>
32 #include <isc/event.h>
33 #include <isc/magic.h>
36 #include <isc/platform.h>
37 #include <isc/string.h>
39 #include <isc/thread.h>
43 #ifndef ISC_PLATFORM_USETHREADS
45 #endif /* ISC_PLATFORM_USETHREADS */
48 #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
49 task, isc_thread_self(), (m))
50 #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
51 (t), isc_thread_self(), (m))
52 #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
53 isc_thread_self(), (m))
57 #define XTHREADTRACE(m)
65 task_state_idle, task_state_ready, task_state_running,
70 static const char *statenames[] = {
71 "idle", "ready", "running", "done",
78 isc_taskmgr_t * manager;
80 /* Locked by task lock. */
82 unsigned int references;
83 isc_eventlist_t events;
84 isc_eventlist_t on_shutdown;
90 /* Locked by task manager lock. */
91 LINK(isc_task_t) link;
92 LINK(isc_task_t) ready_link;
95 #define TASK_F_SHUTTINGDOWN 0x01
97 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
100 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
101 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
108 #ifdef ISC_PLATFORM_USETHREADS
109 unsigned int workers;
110 isc_thread_t * threads;
111 #endif /* ISC_PLATFORM_USETHREADS */
112 /* Locked by task manager lock. */
113 unsigned int default_quantum;
114 LIST(isc_task_t) tasks;
115 isc_tasklist_t ready_tasks;
116 #ifdef ISC_PLATFORM_USETHREADS
117 isc_condition_t work_available;
118 isc_condition_t exclusive_granted;
119 #endif /* ISC_PLATFORM_USETHREADS */
120 unsigned int tasks_running;
121 isc_boolean_t exclusive_requested;
122 isc_boolean_t exiting;
123 #ifndef ISC_PLATFORM_USETHREADS
125 #endif /* ISC_PLATFORM_USETHREADS */
128 #define DEFAULT_TASKMGR_QUANTUM 10
129 #define DEFAULT_DEFAULT_QUANTUM 5
130 #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
132 #ifndef ISC_PLATFORM_USETHREADS
133 static isc_taskmgr_t *taskmgr = NULL;
134 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Final teardown of a task that has completed shutdown: unlink it from
 * the manager's task list, destroy its lock, and return its memory.
 * The REQUIREs below enforce the contract: no queued events, no pending
 * shutdown events, zero references, state == done.
 * NOTE(review): this excerpt is truncated (original line numbers skip),
 * so some statements/braces are not visible here.
 */
141 task_finished(isc_task_t *task) {
142 isc_taskmgr_t *manager = task->manager;
144 REQUIRE(EMPTY(task->events));
145 REQUIRE(EMPTY(task->on_shutdown));
146 REQUIRE(task->references == 0);
147 REQUIRE(task->state == task_state_done);
149 XTRACE("task_finished");
151 LOCK(&manager->lock);
152 UNLINK(manager->tasks, task, link);
153 #ifdef ISC_PLATFORM_USETHREADS
154 if (FINISHED(manager)) {
156 * All tasks have completed and the
157 * task manager is exiting. Wake up
158 * any idle worker threads so they
161 BROADCAST(&manager->work_available);
163 #endif /* ISC_PLATFORM_USETHREADS */
164 UNLOCK(&manager->lock);
/* The task lock must be destroyed before the memory is released. */
166 DESTROYLOCK(&task->lock);
168 isc_mem_put(manager->mctx, task, sizeof(*task));
/*
 * Create a new task attached to 'manager' and return it via '*taskp'.
 * 'quantum' is the per-dispatch event budget; a value of 0 means
 * "use the manager's default_quantum" (see the !manager->exiting block).
 * Returns ISC_R_NOMEMORY on allocation failure and ISC_R_SHUTTINGDOWN
 * if the manager is already exiting.
 * NOTE(review): excerpt is truncated; error-path braces and some
 * statements are not visible here.
 */
172 isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
176 isc_boolean_t exiting;
179 REQUIRE(VALID_MANAGER(manager));
180 REQUIRE(taskp != NULL && *taskp == NULL);
182 task = isc_mem_get(manager->mctx, sizeof(*task));
184 return (ISC_R_NOMEMORY);
185 XTRACE("isc_task_create");
186 task->manager = manager;
187 result = isc_mutex_init(&task->lock);
188 if (result != ISC_R_SUCCESS) {
/* Mutex init failed: give the memory back before returning. */
189 isc_mem_put(manager->mctx, task, sizeof(*task));
192 task->state = task_state_idle;
/* The creator holds the first reference. */
193 task->references = 1;
194 INIT_LIST(task->events);
195 INIT_LIST(task->on_shutdown);
196 task->quantum = quantum;
199 memset(task->name, 0, sizeof(task->name));
201 INIT_LINK(task, link);
202 INIT_LINK(task, ready_link);
/*
 * Register with the manager unless it is shutting down; in that case
 * the task is destroyed again and ISC_R_SHUTTINGDOWN is returned.
 */
205 LOCK(&manager->lock);
206 if (!manager->exiting) {
207 if (task->quantum == 0)
208 task->quantum = manager->default_quantum;
209 APPEND(manager->tasks, task, link);
212 UNLOCK(&manager->lock);
215 DESTROYLOCK(&task->lock);
216 isc_mem_put(manager->mctx, task, sizeof(*task));
217 return (ISC_R_SHUTTINGDOWN);
/* Magic is set last, so VALID_TASK() only passes on a complete task. */
220 task->magic = TASK_MAGIC;
223 return (ISC_R_SUCCESS);
/*
 * Attach '*targetp' to 'source': take an additional reference on the
 * task under its lock.  NOTE(review): the LOCK() call and the final
 * '*targetp = source;' assignment are not visible in this truncated
 * excerpt but are implied by the UNLOCK below.
 */
227 isc_task_attach(isc_task_t *source, isc_task_t **targetp) {
230 * Attach *targetp to source.
233 REQUIRE(VALID_TASK(source));
234 REQUIRE(targetp != NULL && *targetp == NULL);
236 XTTRACE(source, "isc_task_attach");
239 source->references++;
240 UNLOCK(&source->lock);
/*
 * Begin shutting 'task' down (idempotent): set TASK_F_SHUTTINGDOWN and
 * move all on_shutdown events onto the regular event queue in LIFO
 * order.  Presumably returns whether the task was idle and therefore
 * must be made ready by the caller (the return statement is not visible
 * in this truncated excerpt) -- TODO confirm against the full source.
 * Caller must hold the task's lock.
 */
245 static inline isc_boolean_t
246 task_shutdown(isc_task_t *task) {
247 isc_boolean_t was_idle = ISC_FALSE;
248 isc_event_t *event, *prev;
251 * Caller must be holding the task's lock.
254 XTRACE("task_shutdown");
256 if (! TASK_SHUTTINGDOWN(task)) {
257 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
258 ISC_MSG_SHUTTINGDOWN, "shutting down"));
259 task->flags |= TASK_F_SHUTTINGDOWN;
260 if (task->state == task_state_idle) {
/* An idle task can have no queued events; make it ready to run. */
261 INSIST(EMPTY(task->events));
262 task->state = task_state_ready;
265 INSIST(task->state == task_state_ready ||
266 task->state == task_state_running);
268 * Note that we post shutdown events LIFO.
/* Walk on_shutdown from the tail so the last-registered action runs first. */
270 for (event = TAIL(task->on_shutdown);
273 prev = PREV(event, ev_link);
274 DEQUEUE(task->on_shutdown, event, ev_link);
275 ENQUEUE(task->events, event, ev_link);
/*
 * Put 'task' on the manager's ready queue and (threaded build) signal
 * one sleeping worker.  The task must already be in task_state_ready;
 * the caller must NOT hold the task lock (only the manager lock is
 * taken here).
 */
283 task_ready(isc_task_t *task) {
284 isc_taskmgr_t *manager = task->manager;
286 REQUIRE(VALID_MANAGER(manager));
287 REQUIRE(task->state == task_state_ready);
289 XTRACE("task_ready");
291 LOCK(&manager->lock);
293 ENQUEUE(manager->ready_tasks, task, ready_link);
294 #ifdef ISC_PLATFORM_USETHREADS
/* Wake one worker; BROADCAST is unnecessary for a single new task. */
295 SIGNAL(&manager->work_available);
296 #endif /* ISC_PLATFORM_USETHREADS */
298 UNLOCK(&manager->lock);
/*
 * Drop one reference from 'task'.  If that was the last reference and
 * the task is idle, mark it ready so the run loop will observe the
 * zero-reference state and shut it down.  Presumably returns ISC_TRUE
 * in exactly that case (the decrement and return statements fall in
 * the truncated gaps of this excerpt) -- TODO confirm.
 * Caller must hold the task lock.
 */
301 static inline isc_boolean_t
302 task_detach(isc_task_t *task) {
305 * Caller must be holding the task lock.
308 REQUIRE(task->references > 0);
313 if (task->references == 0 && task->state == task_state_idle) {
314 INSIST(EMPTY(task->events));
316 * There are no references to this task, and no
317 * pending events. We could try to optimize and
318 * either initiate shutdown or clean up the task,
319 * depending on its state, but it's easier to just
320 * make the task ready and allow run() or the event
321 * loop to deal with shutting down and termination.
323 task->state = task_state_ready;
/*
 * Public wrapper: detach '*taskp', locking the task around the
 * reference drop, and (per the was_idle result of task_detach) make
 * the task ready outside the task lock -- the task_ready() call falls
 * in the truncated portion of this excerpt.
 */
331 isc_task_detach(isc_task_t **taskp) {
333 isc_boolean_t was_idle;
336 * Detach *taskp from its task.
339 REQUIRE(taskp != NULL);
341 REQUIRE(VALID_TASK(task));
343 XTRACE("isc_task_detach");
346 was_idle = task_detach(task);
/*
 * Enqueue '*eventp' on 'task', consuming the caller's event pointer.
 * If the task was idle it becomes ready, and (presumably) ISC_TRUE is
 * returned so the caller can queue it with task_ready() after releasing
 * the task lock -- the was_idle assignment/return are in the truncated
 * gaps.  Caller must hold the task lock; the task must not be done.
 */
355 static inline isc_boolean_t
356 task_send(isc_task_t *task, isc_event_t **eventp) {
357 isc_boolean_t was_idle = ISC_FALSE;
361 * Caller must be holding the task lock.
364 REQUIRE(eventp != NULL);
366 REQUIRE(event != NULL);
367 REQUIRE(event->ev_type > 0);
368 REQUIRE(task->state != task_state_done);
372 if (task->state == task_state_idle) {
374 INSIST(EMPTY(task->events));
375 task->state = task_state_ready;
377 INSIST(task->state == task_state_ready ||
378 task->state == task_state_running);
379 ENQUEUE(task->events, event, ev_link);
/*
 * Public send: deliver '*eventp' to 'task'.  The task lock is taken
 * only around task_send(); if the task transitioned idle->ready, it is
 * added to the manager's ready queue afterwards (task_ready() call is
 * in the truncated portion).  The long comment below explains why the
 * two locks are never held simultaneously here.
 */
386 isc_task_send(isc_task_t *task, isc_event_t **eventp) {
387 isc_boolean_t was_idle;
390 * Send '*event' to 'task'.
393 REQUIRE(VALID_TASK(task));
395 XTRACE("isc_task_send");
398 * We're trying hard to hold locks for as short a time as possible.
399 * We're also trying to hold as few locks as possible. This is why
400 * some processing is deferred until after the lock is released.
403 was_idle = task_send(task, eventp);
408 * We need to add this task to the ready queue.
410 * We've waited until now to do it because making a task
411 * ready requires locking the manager. If we tried to do
412 * this while holding the task lock, we could deadlock.
414 * We've changed the state to ready, so no one else will
415 * be trying to add this task to the ready queue. The
416 * only way to leave the ready state is by executing the
417 * task. It thus doesn't matter if events are added,
418 * removed, or a shutdown is started in the interval
419 * between the time we released the task lock, and the time
420 * we add the task to the ready queue.
/*
 * Atomically (under one task-lock hold) send '*eventp' to '*taskp' and
 * drop the caller's reference.  At most one of the two operations can
 * report the task as newly idle/ready -- see the INSIST below.
 */
427 isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
428 isc_boolean_t idle1, idle2;
432 * Send '*event' to '*taskp' and then detach '*taskp' from its
436 REQUIRE(taskp != NULL);
438 REQUIRE(VALID_TASK(task));
440 XTRACE("isc_task_sendanddetach");
443 idle1 = task_send(task, eventp);
444 idle2 = task_detach(task);
448 * If idle1, then idle2 shouldn't be true as well since we're holding
449 * the task lock, and thus the task cannot switch from ready back to
452 INSIST(!(idle1 && idle2));
/* An event may be purged unless it carries ISC_EVENTATTR_NOPURGE. */
460 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
/*
 * Move every event on 'task''s queue matching (sender, [first..last],
 * tag) onto '*events'.  NULL sender/tag act as wildcards.  When
 * 'purging', events marked NOPURGE are left in place.  Returns the
 * number of events moved (the count++ and return lines fall in the
 * truncated gaps).  The task lock is taken around the scan (LOCK call
 * not visible in this excerpt).
 */
463 dequeue_events(isc_task_t *task, void *sender, isc_eventtype_t first,
464 isc_eventtype_t last, void *tag,
465 isc_eventlist_t *events, isc_boolean_t purging)
467 isc_event_t *event, *next_event;
468 unsigned int count = 0;
470 REQUIRE(VALID_TASK(task));
471 REQUIRE(last >= first);
473 XTRACE("dequeue_events");
476 * Events matching 'sender', whose type is >= first and <= last, and
477 * whose tag is 'tag' will be dequeued. If 'purging', matching events
478 * which are marked as unpurgable will not be dequeued.
480 * sender == NULL means "any sender", and tag == NULL means "any tag".
/* next_event is cached before dequeueing, since DEQUEUE unlinks ev_link. */
485 for (event = HEAD(task->events); event != NULL; event = next_event) {
486 next_event = NEXT(event, ev_link);
487 if (event->ev_type >= first && event->ev_type <= last &&
488 (sender == NULL || event->ev_sender == sender) &&
489 (tag == NULL || event->ev_tag == tag) &&
490 (!purging || PURGE_OK(event))) {
491 DEQUEUE(task->events, event, ev_link);
492 ENQUEUE(*events, event, ev_link);
/*
 * Purge (dequeue and free) all purgeable events on 'task' matching
 * (sender, [first..last], tag).  Returns the number purged (return
 * statement is in the truncated gap).  Purging never changes the
 * task's state, so no ready-queue handling is needed here.
 */
503 isc_task_purgerange(isc_task_t *task, void *sender, isc_eventtype_t first,
504 isc_eventtype_t last, void *tag)
507 isc_eventlist_t events;
508 isc_event_t *event, *next_event;
511 * Purge events from a task's event queue.
514 XTRACE("isc_task_purgerange");
516 ISC_LIST_INIT(events);
/* purging == TRUE: events marked NOPURGE are skipped by dequeue_events. */
518 count = dequeue_events(task, sender, first, last, tag, &events,
/* Free the dequeued events outside any task lock. */
521 for (event = HEAD(events); event != NULL; event = next_event) {
522 next_event = NEXT(event, ev_link);
523 isc_event_free(&event);
527 * Note that purging never changes the state of the task.
/*
 * Convenience wrapper: purge events of exactly one type -- a
 * single-type range delegated to isc_task_purgerange().
 */
534 isc_task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
538 * Purge events from a task's event queue.
541 XTRACE("isc_task_purge");
543 return (isc_task_purgerange(task, sender, type, type, tag));
/*
 * Purge one specific event pointer from 'task''s queue, if present and
 * purgeable.  'event' may be a dangling/invalid pointer: it is only
 * compared against queued events, never dereferenced until a match is
 * found.  Presumably returns ISC_TRUE iff the event was found and
 * freed (the return statements fall in the truncated gaps) -- TODO
 * confirm against the full source.
 */
547 isc_task_purgeevent(isc_task_t *task, isc_event_t *event) {
548 isc_event_t *curr_event, *next_event;
551 * Purge 'event' from a task's event queue.
553 * XXXRTH: WARNING: This method may be removed before beta.
556 REQUIRE(VALID_TASK(task));
559 * If 'event' is on the task's event queue, it will be purged,
560 * unless it is marked as unpurgeable. 'event' does not have to be
561 * on the task's event queue; in fact, it can even be an invalid
562 * pointer. Purging only occurs if the event is actually on the task's
565 * Purging never changes the state of the task.
569 for (curr_event = HEAD(task->events);
571 curr_event = next_event) {
572 next_event = NEXT(curr_event, ev_link);
/* PURGE_OK is only evaluated on a pointer match, so 'event' is valid here. */
573 if (curr_event == event && PURGE_OK(event)) {
574 DEQUEUE(task->events, curr_event, ev_link);
580 if (curr_event == NULL)
583 isc_event_free(&curr_event);
/*
 * Unsend: dequeue matching events into '*events' WITHOUT freeing them,
 * and (unlike purge) ignore the NOPURGE attribute -- note the purging
 * argument to dequeue_events is FALSE in the (truncated) call below.
 */
589 isc_task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
590 isc_eventtype_t last, void *tag,
591 isc_eventlist_t *events)
594 * Remove events from a task's event queue.
597 XTRACE("isc_task_unsendrange");
599 return (dequeue_events(task, sender, first, last, tag, events,
/*
 * Single-type variant of isc_task_unsendrange(): hand matching events
 * back to the caller on '*events' instead of freeing them.
 */
604 isc_task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
605 void *tag, isc_eventlist_t *events)
608 * Remove events from a task's event queue.
611 XTRACE("isc_task_unsend");
613 return (dequeue_events(task, sender, type, type, tag, events,
/*
 * Register a shutdown action: allocate an ISC_TASKEVENT_SHUTDOWN event
 * for (action, arg) and queue it on task->on_shutdown.  If the task is
 * already shutting down, the event is freed instead (the isc_mem_put
 * below) and ISC_R_SHUTTINGDOWN is returned.  The task lock is taken
 * around the flag check/enqueue (LOCK/UNLOCK fall in truncated gaps).
 */
618 isc_task_onshutdown(isc_task_t *task, isc_taskaction_t action, const void *arg)
620 isc_boolean_t disallowed = ISC_FALSE;
621 isc_result_t result = ISC_R_SUCCESS;
625 * Send a shutdown event with action 'action' and argument 'arg' when
626 * 'task' is shutdown.
629 REQUIRE(VALID_TASK(task));
630 REQUIRE(action != NULL);
632 event = isc_event_allocate(task->manager->mctx,
634 ISC_TASKEVENT_SHUTDOWN,
639 return (ISC_R_NOMEMORY);
642 if (TASK_SHUTTINGDOWN(task)) {
643 disallowed = ISC_TRUE;
644 result = ISC_R_SHUTTINGDOWN;
646 ENQUEUE(task->on_shutdown, event, ev_link);
/* Too late to register: release the unused event. */
650 isc_mem_put(task->manager->mctx, event, sizeof(*event));
/*
 * Public shutdown entry point: run task_shutdown() under the task lock
 * and, if the task was idle, make it ready afterwards (the LOCK/UNLOCK
 * and task_ready() call fall in the truncated portion).
 */
656 isc_task_shutdown(isc_task_t *task) {
657 isc_boolean_t was_idle;
663 REQUIRE(VALID_TASK(task));
666 was_idle = task_shutdown(task);
/*
 * Convenience: shutdown + detach in one call.  '*taskp' is NULLed by
 * isc_task_detach(); actual destruction happens once the run loop
 * finishes the shutdown events.
 */
674 isc_task_destroy(isc_task_t **taskp) {
680 REQUIRE(taskp != NULL);
682 isc_task_shutdown(*taskp);
683 isc_task_detach(taskp);
/*
 * Set the task's debugging name (and, in the truncated portion,
 * presumably its tag).  The memset guarantees NUL termination even
 * though strncpy alone would not; copy length is capped at
 * sizeof(task->name) - 1.
 */
687 isc_task_setname(isc_task_t *task, const char *name, void *tag) {
693 REQUIRE(VALID_TASK(task));
696 memset(task->name, 0, sizeof(task->name));
697 strncpy(task->name, name, sizeof(task->name) - 1);
/*
 * Trivial accessors.  Bodies are almost entirely missing from this
 * truncated excerpt; presumably getname/gettag return task->name and
 * task->tag, and getcurrenttime copies task->now into *t under the
 * task lock -- TODO confirm against the full source.
 */
703 isc_task_getname(isc_task_t *task) {
708 isc_task_gettag(isc_task_t *task) {
713 isc_task_getcurrenttime(isc_task_t *task, isc_stdtime_t *t) {
714 REQUIRE(VALID_TASK(task));
/*
 * Core worker loop shared by both build modes.
 *
 * Threaded build: runs in each worker thread until FINISHED(manager);
 * sleeps on work_available when the ready queue is empty or an
 * exclusive request is pending.  Non-threaded build: dispatches up to
 * DEFAULT_TASKMGR_QUANTUM events per call, collecting requeued tasks
 * on a local list that is appended back to the manager at the end.
 *
 * Lock discipline: the manager lock is held around queue manipulation,
 * the task lock around state changes and event execution decisions;
 * they are never held together here.  Event actions run with NO locks
 * held.  NOTE(review): excerpt is truncated -- several braces, the
 * done/requeue/finished handling lines, and the task_finished() call
 * are not visible.
 */
728 dispatch(isc_taskmgr_t *manager) {
730 #ifndef ISC_PLATFORM_USETHREADS
731 unsigned int total_dispatch_count = 0;
732 isc_tasklist_t ready_tasks;
733 #endif /* ISC_PLATFORM_USETHREADS */
735 REQUIRE(VALID_MANAGER(manager));
738 * Again we're trying to hold the lock for as short a time as possible
739 * and to do as little locking and unlocking as possible.
741 * In both while loops, the appropriate lock must be held before the
742 * while body starts. Code which acquired the lock at the top of
743 * the loop would be more readable, but would result in a lot of
744 * extra locking. Compare:
751 * while (expression) {
756 * Unlocked part here...
763 * Note how if the loop continues we unlock and then immediately lock.
764 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
765 * unlocks. Also note that the lock is not held when the while
766 * condition is tested, which may or may not be important, depending
772 * while (expression) {
776 * Unlocked part here...
783 * For N iterations of the loop, this code does N+1 locks and N+1
784 * unlocks. The while expression is always protected by the lock.
787 #ifndef ISC_PLATFORM_USETHREADS
788 ISC_LIST_INIT(ready_tasks);
790 LOCK(&manager->lock);
791 while (!FINISHED(manager)) {
792 #ifdef ISC_PLATFORM_USETHREADS
794 * For reasons similar to those given in the comment in
795 * isc_task_send() above, it is safe for us to dequeue
796 * the task while only holding the manager lock, and then
797 * change the task to running state while only holding the
/*
 * Wait while there is nothing runnable OR an exclusive task
 * is pending (new work must not start during exclusive mode).
 */
800 while ((EMPTY(manager->ready_tasks) ||
801 manager->exclusive_requested) &&
804 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
806 ISC_MSG_WAIT, "wait"));
807 WAIT(&manager->work_available, &manager->lock);
808 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
810 ISC_MSG_AWAKE, "awake"));
812 #else /* ISC_PLATFORM_USETHREADS */
/* Non-threaded: bail out of the loop once the per-call budget is spent. */
813 if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
814 EMPTY(manager->ready_tasks))
816 #endif /* ISC_PLATFORM_USETHREADS */
817 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
818 ISC_MSG_WORKING, "working"));
820 task = HEAD(manager->ready_tasks);
822 unsigned int dispatch_count = 0;
823 isc_boolean_t done = ISC_FALSE;
824 isc_boolean_t requeue = ISC_FALSE;
825 isc_boolean_t finished = ISC_FALSE;
828 INSIST(VALID_TASK(task));
831 * Note we only unlock the manager lock if we actually
832 * have a task to do. We must reacquire the manager
833 * lock before exiting the 'if (task != NULL)' block.
835 DEQUEUE(manager->ready_tasks, task, ready_link);
836 manager->tasks_running++;
837 UNLOCK(&manager->lock);
840 INSIST(task->state == task_state_ready);
841 task->state = task_state_running;
842 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
843 ISC_MSG_RUNNING, "running"));
/* Refresh the task's cached wall-clock time for this run. */
844 isc_stdtime_get(&task->now);
846 if (!EMPTY(task->events)) {
847 event = HEAD(task->events);
848 DEQUEUE(task->events, event, ev_link);
851 * Execute the event action.
853 XTRACE(isc_msgcat_get(isc_msgcat,
/* The action runs with no locks held; it may send/purge/shutdown. */
857 if (event->ev_action != NULL) {
859 (event->ev_action)(task,event);
863 #ifndef ISC_PLATFORM_USETHREADS
864 total_dispatch_count++;
865 #endif /* ISC_PLATFORM_USETHREADS */
868 if (task->references == 0 &&
869 EMPTY(task->events) &&
870 !TASK_SHUTTINGDOWN(task)) {
871 isc_boolean_t was_idle;
874 * There are no references and no
875 * pending events for this task,
876 * which means it will not become
877 * runnable again via an external
878 * action (such as sending an event
881 * We initiate shutdown to prevent
882 * it from becoming a zombie.
884 * We do this here instead of in
885 * the "if EMPTY(task->events)" block
888 * If we post no shutdown events,
889 * we want the task to finish.
891 * If we did post shutdown events,
892 * will still want the task's
893 * quantum to be applied.
895 was_idle = task_shutdown(task);
899 if (EMPTY(task->events)) {
901 * Nothing else to do for this task
904 XTRACE(isc_msgcat_get(isc_msgcat,
908 if (task->references == 0 &&
909 TASK_SHUTTINGDOWN(task)) {
913 XTRACE(isc_msgcat_get(
/* Unreferenced + shutting down + no events => the task is finished. */
919 task->state = task_state_done;
921 task->state = task_state_idle;
923 } else if (dispatch_count >= task->quantum) {
925 * Our quantum has expired, but
926 * there is more work to be done.
927 * We'll requeue it to the ready
930 * We don't check quantum until
931 * dispatching at least one event,
932 * so the minimum quantum is one.
934 XTRACE(isc_msgcat_get(isc_msgcat,
938 task->state = task_state_ready;
948 LOCK(&manager->lock);
949 manager->tasks_running--;
950 #ifdef ISC_PLATFORM_USETHREADS
/*
 * tasks_running == 1 means only the exclusive requester remains;
 * wake it up (it waits on exclusive_granted in beginexclusive).
 */
951 if (manager->exclusive_requested &&
952 manager->tasks_running == 1) {
953 SIGNAL(&manager->exclusive_granted);
955 #endif /* ISC_PLATFORM_USETHREADS */
958 * We know we're awake, so we don't have
959 * to wakeup any sleeping threads if the
960 * ready queue is empty before we requeue.
962 * A possible optimization if the queue is
963 * empty is to 'goto' the 'if (task != NULL)'
964 * block, avoiding the ENQUEUE of the task
965 * and the subsequent immediate DEQUEUE
966 * (since it is the only executable task).
967 * We don't do this because then we'd be
968 * skipping the exit_requested check. The
969 * cost of ENQUEUE is low anyway, especially
970 * when you consider that we'd have to do
971 * an extra EMPTY check to see if we could
972 * do the optimization. If the ready queue
973 * were usually nonempty, the 'optimization'
974 * might even hurt rather than help.
976 #ifdef ISC_PLATFORM_USETHREADS
977 ENQUEUE(manager->ready_tasks, task,
980 ENQUEUE(ready_tasks, task, ready_link);
985 #ifndef ISC_PLATFORM_USETHREADS
/* Hand locally-requeued tasks back to the manager in one operation. */
986 ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
988 UNLOCK(&manager->lock);
991 #ifdef ISC_PLATFORM_USETHREADS
/*
 * Worker-thread entry point (threaded build only): log start/exit and
 * run dispatch() until the manager finishes.  'uap' is the manager
 * pointer passed by isc_thread_create() in isc_taskmgr_create().
 */
992 static isc_threadresult_t
997 isc_taskmgr_t *manager = uap;
999 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1000 ISC_MSG_STARTING, "starting"));
1004 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1005 ISC_MSG_EXITING, "exiting"));
1007 return ((isc_threadresult_t)0);
1009 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Release all manager resources: condition variables and thread array
 * (threaded build), the manager lock, then the manager structure
 * itself.  The mctx reference is detached last, after the structure is
 * returned to it.
 */
1012 manager_free(isc_taskmgr_t *manager) {
1015 #ifdef ISC_PLATFORM_USETHREADS
1016 (void)isc_condition_destroy(&manager->exclusive_granted);
1017 (void)isc_condition_destroy(&manager->work_available);
1018 isc_mem_free(manager->mctx, manager->threads);
1019 #endif /* ISC_PLATFORM_USETHREADS */
1020 DESTROYLOCK(&manager->lock);
/* Keep a local mctx handle: the manager (and its mctx field) dies first. */
1022 mctx = manager->mctx;
1023 isc_mem_put(mctx, manager, sizeof(*manager));
1024 isc_mem_detach(&mctx);
/*
 * Create a task manager with 'workers' worker threads (threaded build)
 * or register the singleton 'taskmgr' (non-threaded build; an existing
 * manager is reused with, presumably, a bumped refcount -- the refs
 * line falls in a truncated gap).  default_quantum == 0 selects
 * DEFAULT_DEFAULT_QUANTUM.  Cleanup labels unwind in reverse order of
 * acquisition; returns ISC_R_NOTHREADS if no worker thread could be
 * started.  NOTE(review): excerpt is truncated -- some labels and
 * error-path statements are not visible.
 */
1028 isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1029 unsigned int default_quantum, isc_taskmgr_t **managerp)
1031 isc_result_t result;
1032 unsigned int i, started = 0;
1033 isc_taskmgr_t *manager;
1036 * Create a new task manager.
1039 REQUIRE(workers > 0);
1040 REQUIRE(managerp != NULL && *managerp == NULL);
1042 #ifndef ISC_PLATFORM_USETHREADS
/* Non-threaded build: only one manager may exist; share it. */
1047 if (taskmgr != NULL) {
1049 *managerp = taskmgr;
1050 return (ISC_R_SUCCESS);
1052 #endif /* ISC_PLATFORM_USETHREADS */
1054 manager = isc_mem_get(mctx, sizeof(*manager));
1055 if (manager == NULL)
1056 return (ISC_R_NOMEMORY);
1057 manager->magic = TASK_MANAGER_MAGIC;
1058 manager->mctx = NULL;
1059 result = isc_mutex_init(&manager->lock);
1060 if (result != ISC_R_SUCCESS)
1063 #ifdef ISC_PLATFORM_USETHREADS
1064 manager->workers = 0;
1065 manager->threads = isc_mem_allocate(mctx,
1066 workers * sizeof(isc_thread_t));
1067 if (manager->threads == NULL) {
1068 result = ISC_R_NOMEMORY;
1071 if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1072 UNEXPECTED_ERROR(__FILE__, __LINE__,
1073 "isc_condition_init() %s",
1074 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1075 ISC_MSG_FAILED, "failed"));
1076 result = ISC_R_UNEXPECTED;
1077 goto cleanup_threads;
1079 if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1080 UNEXPECTED_ERROR(__FILE__, __LINE__,
1081 "isc_condition_init() %s",
1082 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1083 ISC_MSG_FAILED, "failed"));
1084 result = ISC_R_UNEXPECTED;
1085 goto cleanup_workavailable;
1087 #endif /* ISC_PLATFORM_USETHREADS */
1088 if (default_quantum == 0)
1089 default_quantum = DEFAULT_DEFAULT_QUANTUM;
1090 manager->default_quantum = default_quantum;
1091 INIT_LIST(manager->tasks);
1092 INIT_LIST(manager->ready_tasks);
1093 manager->tasks_running = 0;
1094 manager->exclusive_requested = ISC_FALSE;
1095 manager->exiting = ISC_FALSE;
1097 isc_mem_attach(mctx, &manager->mctx);
1099 #ifdef ISC_PLATFORM_USETHREADS
1100 LOCK(&manager->lock);
/* Start the workers; 'started' (incremented in a truncated line)
 * counts successes so partial failure can be detected below. */
1104 for (i = 0; i < workers; i++) {
1105 if (isc_thread_create(run, manager,
1106 &manager->threads[manager->workers]) ==
1112 UNLOCK(&manager->lock);
/* No threads at all could be started: undo everything. */
1115 manager_free(manager);
1116 return (ISC_R_NOTHREADS);
1118 isc_thread_setconcurrency(workers);
1119 #else /* ISC_PLATFORM_USETHREADS */
1122 #endif /* ISC_PLATFORM_USETHREADS */
1124 *managerp = manager;
1126 return (ISC_R_SUCCESS);
1128 #ifdef ISC_PLATFORM_USETHREADS
1129 cleanup_workavailable:
1130 (void)isc_condition_destroy(&manager->work_available);
1132 isc_mem_free(mctx, manager->threads);
1134 DESTROYLOCK(&manager->lock);
1137 isc_mem_put(mctx, manager, sizeof(*manager));
/*
 * Destroy '*managerp': mark the manager exiting, post shutdown events
 * to every task, then either join all worker threads (threaded build)
 * or drain the ready queue inline (non-threaded build) before freeing
 * the manager.  Non-threaded build: extra references (manager->refs)
 * just drop a ref and return -- the decrement falls in a truncated gap.
 * This is the only function that holds the manager lock and a task
 * lock at the same time (see comment below).
 */
1142 isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
1143 isc_taskmgr_t *manager;
1148 * Destroy '*managerp'.
1151 REQUIRE(managerp != NULL);
1152 manager = *managerp;
1153 REQUIRE(VALID_MANAGER(manager));
1155 #ifndef ISC_PLATFORM_USETHREADS
1158 if (manager->refs > 1) {
1163 #endif /* ISC_PLATFORM_USETHREADS */
1165 XTHREADTRACE("isc_taskmgr_destroy");
1167 * Only one non-worker thread may ever call this routine.
1168 * If a worker thread wants to initiate shutdown of the
1169 * task manager, it should ask some non-worker thread to call
1170 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1171 * that the startup thread is sleeping on.
1175 * Unlike elsewhere, we're going to hold this lock a long time.
1176 * We need to do so, because otherwise the list of tasks could
1177 * change while we were traversing it.
1179 * This is also the only function where we will hold both the
1180 * task manager lock and a task lock at the same time.
1183 LOCK(&manager->lock);
1186 * Make sure we only get called once.
1188 INSIST(!manager->exiting);
1189 manager->exiting = ISC_TRUE;
1192 * Post shutdown event(s) to every task (if they haven't already been
/* Tasks reported idle by task_shutdown() must be queued for execution. */
1195 for (task = HEAD(manager->tasks);
1197 task = NEXT(task, link)) {
1199 if (task_shutdown(task))
1200 ENQUEUE(manager->ready_tasks, task, ready_link);
1201 UNLOCK(&task->lock);
1203 #ifdef ISC_PLATFORM_USETHREADS
1205 * Wake up any sleeping workers. This ensures we get work done if
1206 * there's work left to do, and if there are already no tasks left
1207 * it will cause the workers to see manager->exiting.
1209 BROADCAST(&manager->work_available);
1210 UNLOCK(&manager->lock);
1213 * Wait for all the worker threads to exit.
1215 for (i = 0; i < manager->workers; i++)
1216 (void)isc_thread_join(manager->threads[i], NULL);
1217 #else /* ISC_PLATFORM_USETHREADS */
1219 * Dispatch the shutdown events.
1221 UNLOCK(&manager->lock);
1222 while (isc__taskmgr_ready())
1223 (void)isc__taskmgr_dispatch();
/* Any surviving task at this point is a leak; dump allocations for debug. */
1224 if (!ISC_LIST_EMPTY(manager->tasks))
1225 isc_mem_printallactive(stderr);
1226 INSIST(ISC_LIST_EMPTY(manager->tasks));
1227 #endif /* ISC_PLATFORM_USETHREADS */
1229 manager_free(manager);
1234 #ifndef ISC_PLATFORM_USETHREADS
/*
 * Non-threaded build: report whether the singleton manager has any
 * ready tasks (FALSE when no manager exists -- the early return value
 * falls in a truncated line).
 */
1236 isc__taskmgr_ready(void) {
1237 if (taskmgr == NULL)
1239 return (ISC_TF(!ISC_LIST_EMPTY(taskmgr->ready_tasks)));
/*
 * Non-threaded build: run one dispatch() pass on the singleton manager
 * (the dispatch(manager) call falls in a truncated line).  Returns
 * ISC_R_NOTFOUND when no manager exists.
 */
1243 isc__taskmgr_dispatch(void) {
1244 isc_taskmgr_t *manager = taskmgr;
1246 if (taskmgr == NULL)
1247 return (ISC_R_NOTFOUND);
1251 return (ISC_R_SUCCESS);
1254 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Request exclusive execution for the calling (currently running)
 * task: set exclusive_requested and wait until this task is the only
 * one running.  Workers check exclusive_requested in dispatch() and
 * stop picking up new tasks; the last one SIGNALs exclusive_granted.
 * Fails with ISC_R_LOCKBUSY if another exclusive request is pending.
 * Non-threaded build presumably just succeeds (that branch is in the
 * truncated portion).
 */
1257 isc_task_beginexclusive(isc_task_t *task) {
1258 #ifdef ISC_PLATFORM_USETHREADS
1259 isc_taskmgr_t *manager = task->manager;
1260 REQUIRE(task->state == task_state_running);
1261 LOCK(&manager->lock);
1262 if (manager->exclusive_requested) {
1263 UNLOCK(&manager->lock);
1264 return (ISC_R_LOCKBUSY);
1266 manager->exclusive_requested = ISC_TRUE;
/* tasks_running includes ourselves, hence the "> 1" threshold. */
1267 while (manager->tasks_running > 1) {
1268 WAIT(&manager->exclusive_granted, &manager->lock);
1270 UNLOCK(&manager->lock);
1274 return (ISC_R_SUCCESS);
/*
 * End exclusive mode started by isc_task_beginexclusive(): clear the
 * flag and BROADCAST work_available so all parked workers resume.
 * REQUIREs that exclusive mode is actually active.
 */
1278 isc_task_endexclusive(isc_task_t *task) {
1279 #ifdef ISC_PLATFORM_USETHREADS
1280 isc_taskmgr_t *manager = task->manager;
1281 REQUIRE(task->state == task_state_running);
1282 LOCK(&manager->lock);
1283 REQUIRE(manager->exclusive_requested);
1284 manager->exclusive_requested = ISC_FALSE;
/* Every worker may have queued work, so BROADCAST rather than SIGNAL. */
1285 BROADCAST(&manager->work_available);
1286 UNLOCK(&manager->lock);
/*
 * Report whether 't' has begun shutting down (TASK_F_SHUTTINGDOWN set).
 * Unlocked read -- callers presumably tolerate a slightly stale answer.
 */
1293 isc_task_exiting(isc_task_t *t) {
1294 isc_task_t *task = (isc_task_t *)t;
1296 REQUIRE(VALID_TASK(task));
1297 return (TASK_SHUTTINGDOWN(task));
/*
 * Render task-manager statistics as XML (HAVE_LIBXML2 builds):
 * thread-model details, quantum/running counts, then one <task>
 * element per task with name, references, id (pointer), state and
 * quantum.  Each task's lock is held while its fields are read (the
 * LOCK call falls in a truncated line; the UNLOCK is visible below).
 * Return values of the xmlTextWriter calls are not checked here.
 */
1302 isc_taskmgr_renderxml(isc_taskmgr_t *mgr, xmlTextWriterPtr writer)
1309 * Write out the thread-model, and some details about each depending
1310 * on which type is enabled.
1312 xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model");
1313 #ifdef ISC_PLATFORM_USETHREADS
1314 xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1315 xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded");
1316 xmlTextWriterEndElement(writer); /* type */
1318 xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads");
1319 xmlTextWriterWriteFormatString(writer, "%d", mgr->workers);
1320 xmlTextWriterEndElement(writer); /* worker-threads */
1321 #else /* ISC_PLATFORM_USETHREADS */
1322 xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1323 xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded");
1324 xmlTextWriterEndElement(writer); /* type */
1326 xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1327 xmlTextWriterWriteFormatString(writer, "%d", mgr->refs);
1328 xmlTextWriterEndElement(writer); /* references */
1329 #endif /* ISC_PLATFORM_USETHREADS */
1331 xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum");
1332 xmlTextWriterWriteFormatString(writer, "%d", mgr->default_quantum);
1333 xmlTextWriterEndElement(writer); /* default-quantum */
1335 xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running");
1336 xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running);
1337 xmlTextWriterEndElement(writer); /* tasks-running */
1339 xmlTextWriterEndElement(writer); /* thread-model */
1341 xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks");
1342 task = ISC_LIST_HEAD(mgr->tasks);
1343 while (task != NULL) {
1345 xmlTextWriterStartElement(writer, ISC_XMLCHAR "task");
/* Only emit <name> for tasks that were given one via isc_task_setname(). */
1347 if (task->name[0] != 0) {
1348 xmlTextWriterStartElement(writer, ISC_XMLCHAR "name");
1349 xmlTextWriterWriteFormatString(writer, "%s",
1351 xmlTextWriterEndElement(writer); /* name */
1354 xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1355 xmlTextWriterWriteFormatString(writer, "%d", task->references);
1356 xmlTextWriterEndElement(writer); /* references */
1358 xmlTextWriterStartElement(writer, ISC_XMLCHAR "id");
1359 xmlTextWriterWriteFormatString(writer, "%p", task);
1360 xmlTextWriterEndElement(writer); /* id */
1362 xmlTextWriterStartElement(writer, ISC_XMLCHAR "state");
1363 xmlTextWriterWriteFormatString(writer, "%s",
1364 statenames[task->state]);
1365 xmlTextWriterEndElement(writer); /* state */
1367 xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum");
1368 xmlTextWriterWriteFormatString(writer, "%d", task->quantum);
1369 xmlTextWriterEndElement(writer); /* quantum */
1371 xmlTextWriterEndElement(writer);
1373 UNLOCK(&task->lock);
1374 task = ISC_LIST_NEXT(task, link);
1376 xmlTextWriterEndElement(writer); /* tasks */
1380 #endif /* HAVE_LIBXML2 */