2 * Copyright (C) 2004-2006 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1998-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
18 /* $Id: task.c,v 1.91.18.6 2006/01/04 23:50:23 marka Exp $ */
21 * \author Principal Author: Bob Halley
25 * XXXRTH Need to document the states a task can be in, and the rules
26 * for changing states.
31 #include <isc/condition.h>
32 #include <isc/event.h>
33 #include <isc/magic.h>
36 #include <isc/platform.h>
37 #include <isc/string.h>
39 #include <isc/thread.h>
42 #ifndef ISC_PLATFORM_USETHREADS
44 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * NOTE(review): this extract is line-sampled; interior lines of the
 * original file are missing between many of the lines below.
 */
/* Keep per-task name buffers (see isc_task_setname / task->name). */
46 #define ISC_TASK_NAMES 1
/*
 * Debug tracing macros: print task pointer and/or thread id plus a
 * message to stderr.  XTRACE assumes a local variable named 'task' is
 * in scope; XTTRACE takes the task explicitly.
 */
49 #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
50 task, isc_thread_self(), (m))
51 #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
52 (t), isc_thread_self(), (m))
53 #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
54 isc_thread_self(), (m))
/* Non-tracing build: XTHREADTRACE compiles away (the matching
 * no-op XTRACE/XTTRACE definitions are elided from this extract). */
58 #define XTHREADTRACE(m)
/* Task lifecycle states (enum fragment; task_state_done is elided here). */
66 task_state_idle, task_state_ready, task_state_running,
/* Magic-number validation for isc_task_t objects. */
70 #define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
71 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
/* --- isc_task_t fields (struct declaration lines elided) --- */
76 isc_taskmgr_t * manager;
77 /* Fields below are protected by the per-task lock. */
78 /* Locked by task lock. */
80 unsigned int references;
81 isc_eventlist_t events;
82 isc_eventlist_t on_shutdown;
/* Link fields below are protected by the manager lock, not the task lock. */
90 /* Locked by task manager lock. */
91 LINK(isc_task_t) link;
92 LINK(isc_task_t) ready_link;
/* Task flag bit: shutdown has been initiated on this task. */
95 #define TASK_F_SHUTTINGDOWN 0x01
/* Predicate macro (continuation line elided in this extract). */
97 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
100 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
101 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
/* --- isc_taskmgr_t fields --- */
108 #ifdef ISC_PLATFORM_USETHREADS
109 unsigned int workers;
110 isc_thread_t * threads;
111 #endif /* ISC_PLATFORM_USETHREADS */
112 /* Locked by task manager lock. */
113 unsigned int default_quantum;
114 LIST(isc_task_t) tasks;
115 isc_tasklist_t ready_tasks;
116 #ifdef ISC_PLATFORM_USETHREADS
117 isc_condition_t work_available;
118 isc_condition_t exclusive_granted;
119 #endif /* ISC_PLATFORM_USETHREADS */
120 unsigned int tasks_running;
121 isc_boolean_t exclusive_requested;
122 isc_boolean_t exiting;
/* Non-threaded build carries extra state (e.g. refs) -- elided here. */
123 #ifndef ISC_PLATFORM_USETHREADS
125 #endif /* ISC_PLATFORM_USETHREADS */
/* Events dispatched per manager pass (non-threaded) / per task slice. */
128 #define DEFAULT_TASKMGR_QUANTUM 10
129 #define DEFAULT_DEFAULT_QUANTUM 5
/* Manager is finished when it is exiting and no tasks remain. */
130 #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
/* Non-threaded builds use a single global task manager. */
132 #ifndef ISC_PLATFORM_USETHREADS
133 static isc_taskmgr_t *taskmgr = NULL;
134 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Free a task that has fully completed: no events queued, no shutdown
 * handlers pending, no references, and state == done.  Unlinks it from
 * the manager's task list (under the manager lock) and releases its
 * lock and memory.
 */
141 task_finished(isc_task_t *task) {
142 isc_taskmgr_t *manager = task->manager;
144 REQUIRE(EMPTY(task->events));
145 REQUIRE(EMPTY(task->on_shutdown));
146 REQUIRE(task->references == 0);
147 REQUIRE(task->state == task_state_done);
149 XTRACE("task_finished");
151 LOCK(&manager->lock);
152 UNLINK(manager->tasks, task, link);
153 #ifdef ISC_PLATFORM_USETHREADS
/* If this was the last task of an exiting manager, wake every idle
 * worker so they can observe FINISHED() and exit. */
154 if (FINISHED(manager)) {
156 * All tasks have completed and the
157 * task manager is exiting. Wake up
158 * any idle worker threads so they
161 BROADCAST(&manager->work_available);
163 #endif /* ISC_PLATFORM_USETHREADS */
164 UNLOCK(&manager->lock);
166 DESTROYLOCK(&task->lock);
168 isc_mem_put(manager->mctx, task, sizeof(*task));
/*
 * Create a new task owned by 'manager' with the given event 'quantum'
 * (0 means "use the manager's default_quantum") and return it via
 * '*taskp' with one reference held.
 *
 * Returns ISC_R_NOMEMORY on allocation failure, ISC_R_SHUTTINGDOWN if
 * the manager is exiting, otherwise ISC_R_SUCCESS.
 */
172 isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
176 isc_boolean_t exiting;
179 REQUIRE(VALID_MANAGER(manager));
180 REQUIRE(taskp != NULL && *taskp == NULL);
182 task = isc_mem_get(manager->mctx, sizeof(*task));
184 return (ISC_R_NOMEMORY);
185 XTRACE("isc_task_create");
186 task->manager = manager;
/* If the task's own lock cannot be initialized, give the memory back
 * (error-return line elided from this extract). */
187 result = isc_mutex_init(&task->lock);
188 if (result != ISC_R_SUCCESS) {
189 isc_mem_put(manager->mctx, task, sizeof(*task));
192 task->state = task_state_idle;
193 task->references = 1;
194 INIT_LIST(task->events);
195 INIT_LIST(task->on_shutdown);
196 task->quantum = quantum;
199 #ifdef ISC_TASK_NAMES
200 memset(task->name, 0, sizeof(task->name));
203 INIT_LINK(task, link);
204 INIT_LINK(task, ready_link);
/* Register with the manager unless it is already shutting down. */
207 LOCK(&manager->lock);
208 if (!manager->exiting) {
209 if (task->quantum == 0)
210 task->quantum = manager->default_quantum;
211 APPEND(manager->tasks, task, link);
214 UNLOCK(&manager->lock);
/* Manager was exiting: undo the partial construction. */
217 DESTROYLOCK(&task->lock);
218 isc_mem_put(manager->mctx, task, sizeof(*task));
219 return (ISC_R_SHUTTINGDOWN);
/* Setting the magic last marks the task as fully constructed. */
222 task->magic = TASK_MAGIC;
225 return (ISC_R_SUCCESS);
/*
 * Attach '*targetp' to 'source': bump the reference count under the
 * task lock (the *targetp assignment line is elided in this extract).
 */
229 isc_task_attach(isc_task_t *source, isc_task_t **targetp) {
232 * Attach *targetp to source.
235 REQUIRE(VALID_TASK(source));
236 REQUIRE(targetp != NULL && *targetp == NULL);
238 XTTRACE(source, "isc_task_attach");
241 source->references++;
242 UNLOCK(&source->lock);
/*
 * Initiate shutdown of 'task' (caller holds the task lock): set the
 * SHUTTINGDOWN flag, move an idle task to the ready state, and move
 * all on_shutdown events onto the normal event queue in LIFO order.
 *
 * Returns whether the task was idle (and thus now needs to be queued
 * on the manager's ready list by the caller).
 */
247 static inline isc_boolean_t
248 task_shutdown(isc_task_t *task) {
249 isc_boolean_t was_idle = ISC_FALSE;
250 isc_event_t *event, *prev;
253 * Caller must be holding the task's lock.
256 XTRACE("task_shutdown");
/* Only the first shutdown request does any work. */
258 if (! TASK_SHUTTINGDOWN(task)) {
259 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
260 ISC_MSG_SHUTTINGDOWN, "shutting down"));
261 task->flags |= TASK_F_SHUTTINGDOWN;
262 if (task->state == task_state_idle) {
263 INSIST(EMPTY(task->events));
264 task->state = task_state_ready;
267 INSIST(task->state == task_state_ready ||
268 task->state == task_state_running);
270 * Note that we post shutdown events LIFO.
/* Walk on_shutdown from the tail, moving each event to the run queue
 * (loop condition/continuation lines elided in this extract). */
272 for (event = TAIL(task->on_shutdown);
275 prev = PREV(event, ev_link);
276 DEQUEUE(task->on_shutdown, event, ev_link);
277 ENQUEUE(task->events, event, ev_link);
/*
 * Put a ready task on the manager's ready queue and, in threaded
 * builds, signal one waiting worker.  Takes only the manager lock;
 * the task must already be in the ready state.
 */
285 task_ready(isc_task_t *task) {
286 isc_taskmgr_t *manager = task->manager;
288 REQUIRE(VALID_MANAGER(manager));
289 REQUIRE(task->state == task_state_ready);
291 XTRACE("task_ready");
293 LOCK(&manager->lock);
295 ENQUEUE(manager->ready_tasks, task, ready_link);
296 #ifdef ISC_PLATFORM_USETHREADS
297 SIGNAL(&manager->work_available);
298 #endif /* ISC_PLATFORM_USETHREADS */
300 UNLOCK(&manager->lock);
/*
 * Drop one reference from 'task' (caller holds the task lock).  If
 * that was the last reference and the task is idle with no pending
 * events, mark it ready so the event loop will drive shutdown and
 * termination.  Returns whether the task transitioned from idle
 * (the decrement and return lines are elided in this extract).
 */
303 static inline isc_boolean_t
304 task_detach(isc_task_t *task) {
307 * Caller must be holding the task lock.
310 REQUIRE(task->references > 0);
315 if (task->references == 0 && task->state == task_state_idle) {
316 INSIST(EMPTY(task->events));
318 * There are no references to this task, and no
319 * pending events. We could try to optimize and
320 * either initiate shutdown or clean up the task,
321 * depending on its state, but it's easier to just
322 * make the task ready and allow run() or the event
323 * loop to deal with shutting down and termination.
325 task->state = task_state_ready;
/*
 * Public detach: drop the caller's reference via task_detach() under
 * the task lock; if the task became ready, the elided tail of this
 * function presumably queues it with task_ready() -- TODO confirm
 * against the full source.
 */
333 isc_task_detach(isc_task_t **taskp) {
335 isc_boolean_t was_idle;
338 * Detach *taskp from its task.
341 REQUIRE(taskp != NULL);
343 REQUIRE(VALID_TASK(task));
345 XTRACE("isc_task_detach");
348 was_idle = task_detach(task);
/*
 * Queue '*eventp' on 'task' (caller holds the task lock).  An idle
 * task becomes ready; the function reports (via its return, elided
 * here) whether the caller must add the task to the ready queue.
 * Ownership of the event passes to the task.
 */
357 static inline isc_boolean_t
358 task_send(isc_task_t *task, isc_event_t **eventp) {
359 isc_boolean_t was_idle = ISC_FALSE;
363 * Caller must be holding the task lock.
366 REQUIRE(eventp != NULL);
368 REQUIRE(event != NULL);
369 REQUIRE(event->ev_type > 0);
370 REQUIRE(task->state != task_state_done);
374 if (task->state == task_state_idle) {
376 INSIST(EMPTY(task->events));
377 task->state = task_state_ready;
379 INSIST(task->state == task_state_ready ||
380 task->state == task_state_running);
381 ENQUEUE(task->events, event, ev_link);
/*
 * Public send: deliver '*event' to 'task'.  Queues under the task
 * lock, then (outside that lock, to avoid taking the task and manager
 * locks together) makes the task ready if it was idle.
 */
388 isc_task_send(isc_task_t *task, isc_event_t **eventp) {
389 isc_boolean_t was_idle;
392 * Send '*event' to 'task'.
395 REQUIRE(VALID_TASK(task));
397 XTRACE("isc_task_send");
400 * We're trying hard to hold locks for as short a time as possible.
401 * We're also trying to hold as few locks as possible. This is why
402 * some processing is deferred until after the lock is released.
405 was_idle = task_send(task, eventp);
410 * We need to add this task to the ready queue.
412 * We've waited until now to do it because making a task
413 * ready requires locking the manager. If we tried to do
414 * this while holding the task lock, we could deadlock.
416 * We've changed the state to ready, so no one else will
417 * be trying to add this task to the ready queue. The
418 * only way to leave the ready state is by executing the
419 * task. It thus doesn't matter if events are added,
420 * removed, or a shutdown is started in the interval
421 * between the time we released the task lock, and the time
422 * we add the task to the ready queue.
/*
 * Atomically (under one task-lock acquisition) send '*eventp' to
 * '*taskp' and drop the caller's reference.  At most one of the two
 * operations can observe the idle->ready transition.
 */
429 isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
430 isc_boolean_t idle1, idle2;
434 * Send '*event' to '*taskp' and then detach '*taskp' from its
438 REQUIRE(taskp != NULL);
440 REQUIRE(VALID_TASK(task));
442 XTRACE("isc_task_sendanddetach");
445 idle1 = task_send(task, eventp);
446 idle2 = task_detach(task);
450 * If idle1, then idle2 shouldn't be true as well since we're holding
451 * the task lock, and thus the task cannot switch from ready back to
454 INSIST(!(idle1 && idle2));
/* An event may be purged unless it carries ISC_EVENTATTR_NOPURGE. */
462 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
/*
 * Move every event on 'task's queue matching (sender, [first..last],
 * tag) onto 'events', honoring NOPURGE when 'purging' is true, and
 * count the matches (count++ and the return are elided here).
 * NULL sender/tag act as wildcards.
 */
465 dequeue_events(isc_task_t *task, void *sender, isc_eventtype_t first,
466 isc_eventtype_t last, void *tag,
467 isc_eventlist_t *events, isc_boolean_t purging)
469 isc_event_t *event, *next_event;
470 unsigned int count = 0;
472 REQUIRE(VALID_TASK(task));
473 REQUIRE(last >= first);
475 XTRACE("dequeue_events");
478 * Events matching 'sender', whose type is >= first and <= last, and
479 * whose tag is 'tag' will be dequeued. If 'purging', matching events
480 * which are marked as unpurgable will not be dequeued.
482 * sender == NULL means "any sender", and tag == NULL means "any tag".
/* next_event is saved before unlinking so removal is safe mid-walk. */
487 for (event = HEAD(task->events); event != NULL; event = next_event) {
488 next_event = NEXT(event, ev_link);
489 if (event->ev_type >= first && event->ev_type <= last &&
490 (sender == NULL || event->ev_sender == sender) &&
491 (tag == NULL || event->ev_tag == tag) &&
492 (!purging || PURGE_OK(event))) {
493 DEQUEUE(task->events, event, ev_link);
494 ENQUEUE(*events, event, ev_link);
/*
 * Purge (dequeue and free) matching events from 'task's event queue.
 * Built on dequeue_events() with purging=TRUE (argument elided here);
 * the dequeued events are freed one by one.  Purging never changes
 * the task's state.
 */
505 isc_task_purgerange(isc_task_t *task, void *sender, isc_eventtype_t first,
506 isc_eventtype_t last, void *tag)
509 isc_eventlist_t events;
510 isc_event_t *event, *next_event;
513 * Purge events from a task's event queue.
516 XTRACE("isc_task_purgerange");
518 ISC_LIST_INIT(events);
520 count = dequeue_events(task, sender, first, last, tag, &events,
523 for (event = HEAD(events); event != NULL; event = next_event) {
524 next_event = NEXT(event, ev_link);
525 isc_event_free(&event);
529 * Note that purging never changes the state of the task.
/*
 * Purge events of exactly one type: thin wrapper that calls
 * isc_task_purgerange() with first == last == type.
 */
536 isc_task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
540 * Purge events from a task's event queue.
543 XTRACE("isc_task_purge");
545 return (isc_task_purgerange(task, sender, type, type, tag));
/*
 * Purge one specific event pointer from 'task's queue, if present and
 * purgeable.  'event' is only dereferenced after pointer-identity
 * match, so a stale/invalid pointer that is not on the queue is safe.
 */
549 isc_task_purgeevent(isc_task_t *task, isc_event_t *event) {
550 isc_event_t *curr_event, *next_event;
553 * Purge 'event' from a task's event queue.
555 * XXXRTH: WARNING: This method may be removed before beta.
558 REQUIRE(VALID_TASK(task));
561 * If 'event' is on the task's event queue, it will be purged,
562 * unless it is marked as unpurgeable. 'event' does not have to be
563 * on the task's event queue; in fact, it can even be an invalid
564 * pointer. Purging only occurs if the event is actually on the task's
567 * Purging never changes the state of the task.
571 for (curr_event = HEAD(task->events);
573 curr_event = next_event) {
574 next_event = NEXT(curr_event, ev_link);
575 if (curr_event == event && PURGE_OK(event)) {
576 DEQUEUE(task->events, curr_event, ev_link);
/* Not found (or unpurgeable): report accordingly; otherwise free it
 * (the return statements between these lines are elided). */
582 if (curr_event == NULL)
585 isc_event_free(&curr_event);
/*
 * Remove matching events from 'task's queue into 'events' WITHOUT
 * freeing them (caller takes ownership).  Same matching rules as
 * dequeue_events(); purging flag is FALSE (elided argument).
 */
591 isc_task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
592 isc_eventtype_t last, void *tag,
593 isc_eventlist_t *events)
596 * Remove events from a task's event queue.
599 XTRACE("isc_task_unsendrange");
601 return (dequeue_events(task, sender, first, last, tag, events,
/*
 * Single-type variant of isc_task_unsendrange(): first == last == type.
 */
606 isc_task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
607 void *tag, isc_eventlist_t *events)
610 * Remove events from a task's event queue.
613 XTRACE("isc_task_unsend");
615 return (dequeue_events(task, sender, type, type, tag, events,
/*
 * Register a shutdown handler: allocate an ISC_TASKEVENT_SHUTDOWN
 * event carrying (action, arg) and queue it on task->on_shutdown.
 * If the task is already shutting down, the event is not queued; it
 * is freed and ISC_R_SHUTTINGDOWN is returned.
 */
620 isc_task_onshutdown(isc_task_t *task, isc_taskaction_t action, const void *arg)
622 isc_boolean_t disallowed = ISC_FALSE;
623 isc_result_t result = ISC_R_SUCCESS;
627 * Send a shutdown event with action 'action' and argument 'arg' when
628 * 'task' is shutdown.
631 REQUIRE(VALID_TASK(task));
632 REQUIRE(action != NULL);
634 event = isc_event_allocate(task->manager->mctx,
636 ISC_TASKEVENT_SHUTDOWN,
641 return (ISC_R_NOMEMORY);
644 if (TASK_SHUTTINGDOWN(task)) {
645 disallowed = ISC_TRUE;
646 result = ISC_R_SHUTTINGDOWN;
648 ENQUEUE(task->on_shutdown, event, ev_link);
/* Disallowed case: release the unused event. */
652 isc_mem_put(task->manager->mctx, event, sizeof(*event));
/*
 * Public shutdown entry point: run task_shutdown() under the task
 * lock; the elided tail presumably calls task_ready() when the task
 * was idle -- TODO confirm against the full source.
 */
658 isc_task_shutdown(isc_task_t *task) {
659 isc_boolean_t was_idle;
665 REQUIRE(VALID_TASK(task));
668 was_idle = task_shutdown(task);
/*
 * Convenience: shut the task down and drop the caller's reference
 * (detach also NULLs *taskp).
 */
676 isc_task_destroy(isc_task_t **taskp) {
682 REQUIRE(taskp != NULL);
684 isc_task_shutdown(*taskp);
685 isc_task_detach(taskp);
/*
 * Set the task's debug name (and, per the signature, a tag --
 * assignment elided here).  The name buffer is zeroed first, and
 * strncpy is bounded to size-1, so the result is always
 * NUL-terminated (possibly truncated).
 */
689 isc_task_setname(isc_task_t *task, const char *name, void *tag) {
695 REQUIRE(VALID_TASK(task));
697 #ifdef ISC_TASK_NAMES
699 memset(task->name, 0, sizeof(task->name));
700 strncpy(task->name, name, sizeof(task->name) - 1);
/* Simple accessors; bodies are largely elided in this extract. */
711 isc_task_getname(isc_task_t *task) {
716 isc_task_gettag(isc_task_t *task) {
/* Reports the timestamp the dispatcher recorded when the task last
 * started running (task->now is set in dispatch()). */
721 isc_task_getcurrenttime(isc_task_t *task, isc_stdtime_t *t) {
722 REQUIRE(VALID_TASK(task));
/*
 * Core event loop.  Threaded builds: each worker thread runs this,
 * sleeping on work_available until a task is ready (and exclusive
 * mode is not requested), then executing up to 'quantum' events from
 * one task before requeueing it.  Non-threaded builds: runs at most
 * DEFAULT_TASKMGR_QUANTUM events per call, collecting requeued tasks
 * on a local list that is appended back under the manager lock.
 *
 * NOTE(review): large parts of this function (loop bodies, brace
 * closers, several branches) are elided from this extract; comments
 * below describe only the visible lines.
 */
736 dispatch(isc_taskmgr_t *manager) {
738 #ifndef ISC_PLATFORM_USETHREADS
739 unsigned int total_dispatch_count = 0;
740 isc_tasklist_t ready_tasks;
741 #endif /* ISC_PLATFORM_USETHREADS */
743 REQUIRE(VALID_MANAGER(manager));
746 * Again we're trying to hold the lock for as short a time as possible
747 * and to do as little locking and unlocking as possible.
749 * In both while loops, the appropriate lock must be held before the
750 * while body starts. Code which acquired the lock at the top of
751 * the loop would be more readable, but would result in a lot of
752 * extra locking. Compare:
759 * while (expression) {
764 * Unlocked part here...
771 * Note how if the loop continues we unlock and then immediately lock.
772 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
773 * unlocks. Also note that the lock is not held when the while
774 * condition is tested, which may or may not be important, depending
780 * while (expression) {
784 * Unlocked part here...
791 * For N iterations of the loop, this code does N+1 locks and N+1
792 * unlocks. The while expression is always protected by the lock.
795 #ifndef ISC_PLATFORM_USETHREADS
796 ISC_LIST_INIT(ready_tasks);
798 LOCK(&manager->lock);
/* Outer loop runs until the manager is exiting with no tasks left. */
799 while (!FINISHED(manager)) {
800 #ifdef ISC_PLATFORM_USETHREADS
802 * For reasons similar to those given in the comment in
803 * isc_task_send() above, it is safe for us to dequeue
804 * the task while only holding the manager lock, and then
805 * change the task to running state while only holding the
/* Sleep while there is nothing runnable, or while another task holds
 * (or is acquiring) exclusive mode. */
808 while ((EMPTY(manager->ready_tasks) ||
809 manager->exclusive_requested) &&
812 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
814 ISC_MSG_WAIT, "wait"));
815 WAIT(&manager->work_available, &manager->lock);
816 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
818 ISC_MSG_AWAKE, "awake"));
820 #else /* ISC_PLATFORM_USETHREADS */
/* Non-threaded: bail out of the loop once the per-call budget is
 * spent or nothing is ready. */
821 if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
822 EMPTY(manager->ready_tasks))
824 #endif /* ISC_PLATFORM_USETHREADS */
825 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
826 ISC_MSG_WORKING, "working"));
828 task = HEAD(manager->ready_tasks);
830 unsigned int dispatch_count = 0;
831 isc_boolean_t done = ISC_FALSE;
832 isc_boolean_t requeue = ISC_FALSE;
833 isc_boolean_t finished = ISC_FALSE;
836 INSIST(VALID_TASK(task));
839 * Note we only unlock the manager lock if we actually
840 * have a task to do. We must reacquire the manager
841 * lock before exiting the 'if (task != NULL)' block.
843 DEQUEUE(manager->ready_tasks, task, ready_link);
844 manager->tasks_running++;
845 UNLOCK(&manager->lock);
848 INSIST(task->state == task_state_ready);
849 task->state = task_state_running;
850 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
851 ISC_MSG_RUNNING, "running"));
/* Record when this task slice started (see isc_task_getcurrenttime). */
852 isc_stdtime_get(&task->now);
854 if (!EMPTY(task->events)) {
855 event = HEAD(task->events);
856 DEQUEUE(task->events, event, ev_link);
859 * Execute the event action.
861 XTRACE(isc_msgcat_get(isc_msgcat,
/* The action is invoked without any locks held. */
865 if (event->ev_action != NULL) {
867 (event->ev_action)(task,event);
871 #ifndef ISC_PLATFORM_USETHREADS
872 total_dispatch_count++;
873 #endif /* ISC_PLATFORM_USETHREADS */
/* Orphan prevention: no references, no events, not yet shutting
 * down -> start shutdown so the task cannot become a zombie. */
876 if (task->references == 0 &&
877 EMPTY(task->events) &&
878 !TASK_SHUTTINGDOWN(task)) {
879 isc_boolean_t was_idle;
882 * There are no references and no
883 * pending events for this task,
884 * which means it will not become
885 * runnable again via an external
886 * action (such as sending an event
889 * We initiate shutdown to prevent
890 * it from becoming a zombie.
892 * We do this here instead of in
893 * the "if EMPTY(task->events)" block
896 * If we post no shutdown events,
897 * we want the task to finish.
899 * If we did post shutdown events,
900 * will still want the task's
901 * quantum to be applied.
903 was_idle = task_shutdown(task);
907 if (EMPTY(task->events)) {
909 * Nothing else to do for this task
912 XTRACE(isc_msgcat_get(isc_msgcat,
/* Done vs. idle: an unreferenced, shutting-down task is finished;
 * otherwise it just goes back to idle. */
916 if (task->references == 0 &&
917 TASK_SHUTTINGDOWN(task)) {
921 XTRACE(isc_msgcat_get(
927 task->state = task_state_done;
929 task->state = task_state_idle;
931 } else if (dispatch_count >= task->quantum) {
933 * Our quantum has expired, but
934 * there is more work to be done.
935 * We'll requeue it to the ready
938 * We don't check quantum until
939 * dispatching at least one event,
940 * so the minimum quantum is one.
942 XTRACE(isc_msgcat_get(isc_msgcat,
946 task->state = task_state_ready;
956 LOCK(&manager->lock);
957 manager->tasks_running--;
958 #ifdef ISC_PLATFORM_USETHREADS
/* If an exclusive requester is waiting and we are the last other
 * running task, grant it. */
959 if (manager->exclusive_requested &&
960 manager->tasks_running == 1) {
961 SIGNAL(&manager->exclusive_granted);
963 #endif /* ISC_PLATFORM_USETHREADS */
966 * We know we're awake, so we don't have
967 * to wakeup any sleeping threads if the
968 * ready queue is empty before we requeue.
970 * A possible optimization if the queue is
971 * empty is to 'goto' the 'if (task != NULL)'
972 * block, avoiding the ENQUEUE of the task
973 * and the subsequent immediate DEQUEUE
974 * (since it is the only executable task).
975 * We don't do this because then we'd be
976 * skipping the exit_requested check. The
977 * cost of ENQUEUE is low anyway, especially
978 * when you consider that we'd have to do
979 * an extra EMPTY check to see if we could
980 * do the optimization. If the ready queue
981 * were usually nonempty, the 'optimization'
982 * might even hurt rather than help.
984 #ifdef ISC_PLATFORM_USETHREADS
985 ENQUEUE(manager->ready_tasks, task,
988 ENQUEUE(ready_tasks, task, ready_link);
993 #ifndef ISC_PLATFORM_USETHREADS
994 ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
996 UNLOCK(&manager->lock);
999 #ifdef ISC_PLATFORM_USETHREADS
/*
 * Worker thread entry point: 'uap' is the task manager; the thread
 * spends its life in dispatch() (call elided here) and returns 0 on
 * manager shutdown.
 */
1000 static isc_threadresult_t
1005 isc_taskmgr_t *manager = uap;
1007 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1008 ISC_MSG_STARTING, "starting"));
1012 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1013 ISC_MSG_EXITING, "exiting"));
1015 return ((isc_threadresult_t)0);
1017 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Release all manager resources: condition variables and thread array
 * (threaded builds), the manager lock, and finally the manager memory
 * itself -- detaching from the memory context only after the final
 * isc_mem_put, since the put uses it.
 */
1020 manager_free(isc_taskmgr_t *manager) {
1023 #ifdef ISC_PLATFORM_USETHREADS
1024 (void)isc_condition_destroy(&manager->exclusive_granted);
1025 (void)isc_condition_destroy(&manager->work_available);
1026 isc_mem_free(manager->mctx, manager->threads);
1027 #endif /* ISC_PLATFORM_USETHREADS */
1028 DESTROYLOCK(&manager->lock);
1030 mctx = manager->mctx;
1031 isc_mem_put(mctx, manager, sizeof(*manager));
1032 isc_mem_detach(&mctx);
/*
 * Create a task manager with 'workers' threads (threaded builds) and
 * the given default per-task quantum (0 -> DEFAULT_DEFAULT_QUANTUM).
 * Non-threaded builds reuse/refcount a single global manager.
 *
 * Error paths unwind via the cleanup_* labels at the bottom; several
 * intermediate lines (labels, refcount bookkeeping) are elided from
 * this extract.
 */
1036 isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1037 unsigned int default_quantum, isc_taskmgr_t **managerp)
1039 isc_result_t result;
1040 unsigned int i, started = 0;
1041 isc_taskmgr_t *manager;
1044 * Create a new task manager.
1047 REQUIRE(workers > 0);
1048 REQUIRE(managerp != NULL && *managerp == NULL);
1050 #ifndef ISC_PLATFORM_USETHREADS
/* Non-threaded: hand back the existing global manager if present
 * (the refcount increment is elided here). */
1055 if (taskmgr != NULL) {
1057 *managerp = taskmgr;
1058 return (ISC_R_SUCCESS);
1060 #endif /* ISC_PLATFORM_USETHREADS */
1062 manager = isc_mem_get(mctx, sizeof(*manager));
1063 if (manager == NULL)
1064 return (ISC_R_NOMEMORY);
1065 manager->magic = TASK_MANAGER_MAGIC;
1066 manager->mctx = NULL;
1067 result = isc_mutex_init(&manager->lock);
1068 if (result != ISC_R_SUCCESS)
1071 #ifdef ISC_PLATFORM_USETHREADS
1072 manager->workers = 0;
1073 manager->threads = isc_mem_allocate(mctx,
1074 workers * sizeof(isc_thread_t));
1075 if (manager->threads == NULL) {
1076 result = ISC_R_NOMEMORY;
1079 if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1080 UNEXPECTED_ERROR(__FILE__, __LINE__,
1081 "isc_condition_init() %s",
1082 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1083 ISC_MSG_FAILED, "failed"));
1084 result = ISC_R_UNEXPECTED;
1085 goto cleanup_threads;
1087 if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1088 UNEXPECTED_ERROR(__FILE__, __LINE__,
1089 "isc_condition_init() %s",
1090 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1091 ISC_MSG_FAILED, "failed"));
1092 result = ISC_R_UNEXPECTED;
1093 goto cleanup_workavailable;
1095 #endif /* ISC_PLATFORM_USETHREADS */
1096 if (default_quantum == 0)
1097 default_quantum = DEFAULT_DEFAULT_QUANTUM;
1098 manager->default_quantum = default_quantum;
1099 INIT_LIST(manager->tasks);
1100 INIT_LIST(manager->ready_tasks);
1101 manager->tasks_running = 0;
1102 manager->exclusive_requested = ISC_FALSE;
1103 manager->exiting = ISC_FALSE;
1105 isc_mem_attach(mctx, &manager->mctx);
1107 #ifdef ISC_PLATFORM_USETHREADS
1108 LOCK(&manager->lock);
/* Spawn the worker threads; each runs run() -> dispatch().  If no
 * thread could be started, tear everything down. */
1112 for (i = 0; i < workers; i++) {
1113 if (isc_thread_create(run, manager,
1114 &manager->threads[manager->workers]) ==
1120 UNLOCK(&manager->lock);
1123 manager_free(manager);
1124 return (ISC_R_NOTHREADS);
1126 isc_thread_setconcurrency(workers);
1127 #else /* ISC_PLATFORM_USETHREADS */
1130 #endif /* ISC_PLATFORM_USETHREADS */
1132 *managerp = manager;
1134 return (ISC_R_SUCCESS);
/* Error unwinding (labels partially elided). */
1136 #ifdef ISC_PLATFORM_USETHREADS
1137 cleanup_workavailable:
1138 (void)isc_condition_destroy(&manager->work_available);
1140 isc_mem_free(mctx, manager->threads);
1142 DESTROYLOCK(&manager->lock);
1145 isc_mem_put(mctx, manager, sizeof(*manager));
/*
 * Destroy '*managerp': mark the manager exiting, post shutdown events
 * to every task, then (threaded) wake and join all workers, or
 * (non-threaded) drain the queue inline.  Must be called exactly once
 * and only from a non-worker thread.
 */
1150 isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
1151 isc_taskmgr_t *manager;
1156 * Destroy '*managerp'.
1159 REQUIRE(managerp != NULL);
1160 manager = *managerp;
1161 REQUIRE(VALID_MANAGER(manager));
1163 #ifndef ISC_PLATFORM_USETHREADS
/* Non-threaded: the global manager is refcounted; if other refs
 * remain, just drop one (decrement/return lines elided). */
1166 if (manager->refs > 1) {
1171 #endif /* ISC_PLATFORM_USETHREADS */
1173 XTHREADTRACE("isc_taskmgr_destroy");
1175 * Only one non-worker thread may ever call this routine.
1176 * If a worker thread wants to initiate shutdown of the
1177 * task manager, it should ask some non-worker thread to call
1178 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1179 * that the startup thread is sleeping on.
1183 * Unlike elsewhere, we're going to hold this lock a long time.
1184 * We need to do so, because otherwise the list of tasks could
1185 * change while we were traversing it.
1187 * This is also the only function where we will hold both the
1188 * task manager lock and a task lock at the same time.
1191 LOCK(&manager->lock);
1194 * Make sure we only get called once.
1196 INSIST(!manager->exiting);
1197 manager->exiting = ISC_TRUE;
1200 * Post shutdown event(s) to every task (if they haven't already been
/* Walk the task list; each idle task that task_shutdown() wakes is
 * queued ready here directly (we already hold the manager lock). */
1203 for (task = HEAD(manager->tasks);
1205 task = NEXT(task, link)) {
1207 if (task_shutdown(task))
1208 ENQUEUE(manager->ready_tasks, task, ready_link);
1209 UNLOCK(&task->lock);
1211 #ifdef ISC_PLATFORM_USETHREADS
1213 * Wake up any sleeping workers. This ensures we get work done if
1214 * there's work left to do, and if there are already no tasks left
1215 * it will cause the workers to see manager->exiting.
1217 BROADCAST(&manager->work_available);
1218 UNLOCK(&manager->lock);
1221 * Wait for all the worker threads to exit.
1223 for (i = 0; i < manager->workers; i++)
1224 (void)isc_thread_join(manager->threads[i], NULL);
1225 #else /* ISC_PLATFORM_USETHREADS */
1227 * Dispatch the shutdown events.
1229 UNLOCK(&manager->lock);
1230 while (isc__taskmgr_ready())
1231 (void)isc__taskmgr_dispatch();
/* Any task still alive here is a leak; dump allocations to aid
 * debugging before asserting. */
1232 if (!ISC_LIST_EMPTY(manager->tasks))
1233 isc_mem_printallactive(stderr);
1234 INSIST(ISC_LIST_EMPTY(manager->tasks));
1235 #endif /* ISC_PLATFORM_USETHREADS */
1237 manager_free(manager);
1242 #ifndef ISC_PLATFORM_USETHREADS
/*
 * Non-threaded helper: report whether the global manager exists and
 * has runnable tasks (the false-return for NULL is elided here).
 */
1244 isc__taskmgr_ready(void) {
1245 if (taskmgr == NULL)
1247 return (ISC_TF(!ISC_LIST_EMPTY(taskmgr->ready_tasks)));
/*
 * Non-threaded helper: run one dispatch() pass on the global manager
 * (the dispatch(manager) call is elided here).  ISC_R_NOTFOUND when
 * no manager exists.
 */
1251 isc__taskmgr_dispatch(void) {
1252 isc_taskmgr_t *manager = taskmgr;
1254 if (taskmgr == NULL)
1255 return (ISC_R_NOTFOUND);
1259 return (ISC_R_SUCCESS);
1262 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Request exclusive execution for the calling task (which must be
 * running).  Fails with ISC_R_LOCKBUSY if another task already holds
 * exclusivity; otherwise blocks on exclusive_granted until this task
 * is the only one running.  Non-threaded builds trivially succeed.
 */
1265 isc_task_beginexclusive(isc_task_t *task) {
1266 #ifdef ISC_PLATFORM_USETHREADS
1267 isc_taskmgr_t *manager = task->manager;
1268 REQUIRE(task->state == task_state_running);
1269 LOCK(&manager->lock);
1270 if (manager->exclusive_requested) {
1271 UNLOCK(&manager->lock);
1272 return (ISC_R_LOCKBUSY);
1274 manager->exclusive_requested = ISC_TRUE;
/* Wait until we are the sole running task; dispatch() signals
 * exclusive_granted as the others finish. */
1275 while (manager->tasks_running > 1) {
1276 WAIT(&manager->exclusive_granted, &manager->lock);
1278 UNLOCK(&manager->lock);
1282 return (ISC_R_SUCCESS);
/*
 * Release exclusive mode previously obtained with
 * isc_task_beginexclusive(): clear the flag and wake all workers
 * blocked on work_available so normal dispatching resumes.
 */
1286 isc_task_endexclusive(isc_task_t *task) {
1287 #ifdef ISC_PLATFORM_USETHREADS
1288 isc_taskmgr_t *manager = task->manager;
1289 REQUIRE(task->state == task_state_running);
1290 LOCK(&manager->lock);
1291 REQUIRE(manager->exclusive_requested);
1292 manager->exclusive_requested = ISC_FALSE;
1293 BROADCAST(&manager->work_available);
1294 UNLOCK(&manager->lock);