2 * Copyright (C) 2004-2012 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1998-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
21 * \author Principal Author: Bob Halley
25 * XXXRTH Need to document the states a task can be in, and the rules
26 * for changing states.
31 #include <isc/condition.h>
32 #include <isc/event.h>
33 #include <isc/magic.h>
36 #include <isc/platform.h>
37 #include <isc/string.h>
39 #include <isc/thread.h>
44 #include <openssl/err.h>
48 * For BIND9 internal applications:
49 * when built with threads we use multiple worker threads shared by the whole
51 * when built without threads we share a single global task manager and use
52 * an integrated event loop for socket, timer, and other generic task events.
53 * For generic library:
54 * we don't use either of them: an application can have multiple task managers
55 * whether or not it's threaded, and if the application is threaded each thread
56 * is expected to have a separate manager; no "worker threads" are shared by
57 * the application threads.
/*
 * Build-mode selection.  With thread support, multiple worker threads
 * service the task manager(s); without, a single shared global manager
 * is used (NOTE(review): this excerpt is missing lines, e.g. the #else
 * between the two defines — confirm against the full file).
 */
60 #ifdef ISC_PLATFORM_USETHREADS
61 #define USE_WORKER_THREADS
63 #define USE_SHARED_MANAGER
64 #endif /* ISC_PLATFORM_USETHREADS */
67 #ifndef USE_WORKER_THREADS
69 #endif /* USE_WORKER_THREADS */
/*
 * Debug tracing macros (presumably guarded by a tracing #ifdef that is
 * not visible in this excerpt).  XTRACE assumes a local variable named
 * 'task' is in scope at the expansion site; XTTRACE takes the task
 * explicitly; XTHREADTRACE logs thread-only context.
 */
72 #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
73 task, isc_thread_self(), (m))
74 #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
75 (t), isc_thread_self(), (m))
76 #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
77 isc_thread_self(), (m))
/* Non-tracing build: tracing compiles to nothing. */
81 #define XTHREADTRACE(m)
/*
 * Task lifecycle states (enum declaration is truncated in this excerpt;
 * a task_state_done member presumably follows — TODO confirm).
 */
89 task_state_idle, task_state_ready, task_state_running,
93 #if defined(HAVE_LIBXML2) && defined(BIND9)
/* Printable state names, indexed by the state enum, for XML statistics. */
94 static const char *statenames[] = {
95 "idle", "ready", "running", "done",
/* Magic values used to validate isc__task_t pointers at runtime. */
99 #define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
100 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
102 typedef struct isc__task isc__task_t;
103 typedef struct isc__taskmgr isc__taskmgr_t;
/*
 * Private task object.  NOTE(review): the struct header and several
 * members (lock, state, flags, name, tag, now, ...) are missing from
 * this excerpt.  Locking discipline is annotated per member group.
 */
108 isc__taskmgr_t * manager;
110 /* Locked by task lock. */
112 unsigned int references;
113 isc_eventlist_t events;
114 isc_eventlist_t on_shutdown;
115 unsigned int quantum;
120 /* Locked by task manager lock. */
121 LINK(isc__task_t) link;
122 LINK(isc__task_t) ready_link;
/* Task flag bits; TASK_SHUTTINGDOWN tests the shutting-down flag. */
125 #define TASK_F_SHUTTINGDOWN 0x01
127 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
130 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
131 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
133 typedef ISC_LIST(isc__task_t) isc__tasklist_t;
/*
 * Private task manager object.  NOTE(review): several members (mctx,
 * lock, excl, refs, ...) are missing from this excerpt — confirm
 * against the full file.
 */
135 struct isc__taskmgr {
137 isc_taskmgr_t common;
140 #ifdef ISC_PLATFORM_USETHREADS
141 unsigned int workers;
142 isc_thread_t * threads;
143 #endif /* ISC_PLATFORM_USETHREADS */
144 /* Locked by task manager lock. */
145 unsigned int default_quantum;
146 LIST(isc__task_t) tasks;
147 isc__tasklist_t ready_tasks;
148 #ifdef ISC_PLATFORM_USETHREADS
149 isc_condition_t work_available;
150 isc_condition_t exclusive_granted;
151 #endif /* ISC_PLATFORM_USETHREADS */
152 unsigned int tasks_running;
153 isc_boolean_t exclusive_requested;
154 isc_boolean_t exiting;
156 #ifdef USE_SHARED_MANAGER
158 #endif /* ISC_PLATFORM_USETHREADS */
/*
 * Scheduling quanta: per-dispatch() event budget for the non-threaded
 * build, and the default per-task quantum when the caller passes 0.
 * FINISHED() is true once the manager is exiting and has no tasks left.
 */
161 #define DEFAULT_TASKMGR_QUANTUM 10
162 #define DEFAULT_DEFAULT_QUANTUM 5
163 #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
/* Singleton manager used by the non-threaded (shared-manager) build. */
165 #ifdef USE_SHARED_MANAGER
166 static isc__taskmgr_t *taskmgr = NULL;
167 #endif /* USE_SHARED_MANAGER */
/*
 * Forward declarations of the implementation functions and the method
 * tables that plug them into the isc_task/isc_taskmgr API objects.
 * ISC_TASKFUNC_SCOPE is empty (public) or 'static' depending on the
 * build environment.
 */
170 * The following can be either static or public, depending on build environment.
174 #define ISC_TASKFUNC_SCOPE
176 #define ISC_TASKFUNC_SCOPE static
179 ISC_TASKFUNC_SCOPE isc_result_t
180 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
182 ISC_TASKFUNC_SCOPE void
183 isc__task_attach(isc_task_t *source0, isc_task_t **targetp);
184 ISC_TASKFUNC_SCOPE void
185 isc__task_detach(isc_task_t **taskp);
186 ISC_TASKFUNC_SCOPE void
187 isc__task_send(isc_task_t *task0, isc_event_t **eventp);
188 ISC_TASKFUNC_SCOPE void
189 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp);
190 ISC_TASKFUNC_SCOPE unsigned int
191 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
192 isc_eventtype_t last, void *tag);
193 ISC_TASKFUNC_SCOPE unsigned int
194 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
196 ISC_TASKFUNC_SCOPE isc_boolean_t
197 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event);
198 ISC_TASKFUNC_SCOPE unsigned int
199 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
200 isc_eventtype_t last, void *tag,
201 isc_eventlist_t *events);
202 ISC_TASKFUNC_SCOPE unsigned int
203 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
204 void *tag, isc_eventlist_t *events);
205 ISC_TASKFUNC_SCOPE isc_result_t
206 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
208 ISC_TASKFUNC_SCOPE void
209 isc__task_shutdown(isc_task_t *task0);
210 ISC_TASKFUNC_SCOPE void
211 isc__task_destroy(isc_task_t **taskp);
212 ISC_TASKFUNC_SCOPE void
213 isc__task_setname(isc_task_t *task0, const char *name, void *tag);
214 ISC_TASKFUNC_SCOPE const char *
215 isc__task_getname(isc_task_t *task0);
216 ISC_TASKFUNC_SCOPE void *
217 isc__task_gettag(isc_task_t *task0);
218 ISC_TASKFUNC_SCOPE void
219 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t);
220 ISC_TASKFUNC_SCOPE isc_result_t
221 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
222 unsigned int default_quantum, isc_taskmgr_t **managerp);
223 ISC_TASKFUNC_SCOPE void
224 isc__taskmgr_destroy(isc_taskmgr_t **managerp);
225 ISC_TASKFUNC_SCOPE void
226 isc__taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0);
227 ISC_TASKFUNC_SCOPE isc_result_t
228 isc__taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp);
229 ISC_TASKFUNC_SCOPE isc_result_t
230 isc__task_beginexclusive(isc_task_t *task);
231 ISC_TASKFUNC_SCOPE void
232 isc__task_endexclusive(isc_task_t *task0);
/*
 * Method table wiring the isc__task_* implementations to the generic
 * isc_task_t API.  The extra void* members exist only to reference
 * otherwise-unused static functions (avoids compiler warnings).
 */
234 static struct isc__taskmethods {
235 isc_taskmethods_t methods;
238 * The following are defined just for avoiding unused static functions.
241 void *purgeevent, *unsendrange, *getname, *gettag, *getcurrenttime;
249 isc__task_sendanddetach,
251 isc__task_onshutdown,
255 isc__task_purgerange,
256 isc__task_beginexclusive,
257 isc__task_endexclusive
261 (void *)isc__task_purgeevent, (void *)isc__task_unsendrange,
262 (void *)isc__task_getname, (void *)isc__task_gettag,
263 (void *)isc__task_getcurrenttime
/* Method table for the task manager API object. */
267 static isc_taskmgrmethods_t taskmgrmethods = {
268 isc__taskmgr_destroy,
270 isc__taskmgr_setexcltask,
271 isc__taskmgr_excltask
/*
 * task_finished():
 * Tear down and free a task that has reached task_state_done with no
 * references and no queued events.  Caller must NOT hold any locks.
 * In a threaded build, wakes idle workers if this was the last task of
 * an exiting manager so they can observe FINISHED() and exit.
 */
279 task_finished(isc__task_t *task) {
280 isc__taskmgr_t *manager = task->manager;
282 REQUIRE(EMPTY(task->events));
283 REQUIRE(EMPTY(task->on_shutdown));
284 REQUIRE(task->references == 0);
285 REQUIRE(task->state == task_state_done);
287 XTRACE("task_finished");
289 LOCK(&manager->lock);
290 UNLINK(manager->tasks, task, link);
291 #ifdef USE_WORKER_THREADS
292 if (FINISHED(manager)) {
294 * All tasks have completed and the
295 * task manager is exiting. Wake up
296 * any idle worker threads so they
299 BROADCAST(&manager->work_available);
301 #endif /* USE_WORKER_THREADS */
302 UNLOCK(&manager->lock);
/* Invalidate magics before freeing to catch use-after-free via VALID_TASK. */
304 DESTROYLOCK(&task->lock);
305 task->common.impmagic = 0;
306 task->common.magic = 0;
307 isc_mem_put(manager->mctx, task, sizeof(*task));
/*
 * isc__task_create():
 * Allocate and initialize a new task with one reference, owned by
 * 'manager0'.  quantum == 0 means "use the manager's default".
 * Returns ISC_R_NOMEMORY on allocation failure and ISC_R_SHUTTINGDOWN
 * if the manager is already exiting; on success stores the task in
 * *taskp (caller owns the reference).
 */
310 ISC_TASKFUNC_SCOPE isc_result_t
311 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
314 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
316 isc_boolean_t exiting;
319 REQUIRE(VALID_MANAGER(manager));
320 REQUIRE(taskp != NULL && *taskp == NULL);
322 task = isc_mem_get(manager->mctx, sizeof(*task));
324 return (ISC_R_NOMEMORY);
325 XTRACE("isc_task_create");
326 task->manager = manager;
327 result = isc_mutex_init(&task->lock);
328 if (result != ISC_R_SUCCESS) {
329 isc_mem_put(manager->mctx, task, sizeof(*task));
332 task->state = task_state_idle;
333 task->references = 1;
334 INIT_LIST(task->events);
335 INIT_LIST(task->on_shutdown);
336 task->quantum = quantum;
339 memset(task->name, 0, sizeof(task->name));
341 INIT_LINK(task, link);
342 INIT_LINK(task, ready_link);
/*
 * Only link the task into the manager while the manager is not
 * exiting; otherwise unwind and report shutdown (the 'exiting'
 * snapshot and its test are in lines missing from this excerpt).
 */
345 LOCK(&manager->lock);
346 if (!manager->exiting) {
347 if (task->quantum == 0)
348 task->quantum = manager->default_quantum;
349 APPEND(manager->tasks, task, link);
352 UNLOCK(&manager->lock);
355 DESTROYLOCK(&task->lock);
356 isc_mem_put(manager->mctx, task, sizeof(*task));
357 return (ISC_R_SHUTTINGDOWN);
/* Success: stamp API magics and hand the task to the caller. */
360 task->common.methods = (isc_taskmethods_t *)&taskmethods;
361 task->common.magic = ISCAPI_TASK_MAGIC;
362 task->common.impmagic = TASK_MAGIC;
363 *taskp = (isc_task_t *)task;
365 return (ISC_R_SUCCESS);
/*
 * isc__task_attach():
 * Add a reference to 'source0' and store it in *targetp.
 * (The LOCK matching the UNLOCK below is in a line missing from this
 * excerpt.)
 */
368 ISC_TASKFUNC_SCOPE void
369 isc__task_attach(isc_task_t *source0, isc_task_t **targetp) {
370 isc__task_t *source = (isc__task_t *)source0;
373 * Attach *targetp to source.
376 REQUIRE(VALID_TASK(source));
377 REQUIRE(targetp != NULL && *targetp == NULL);
379 XTTRACE(source, "isc_task_attach");
382 source->references++;
383 UNLOCK(&source->lock);
385 *targetp = (isc_task_t *)source;
/*
 * task_shutdown():
 * Mark the task as shutting down and move its on_shutdown events onto
 * the event queue (LIFO order).  Returns ISC_TRUE when the task was
 * idle and has therefore just become ready (caller must then queue it
 * via task_ready()).  Caller must hold the task's lock.
 */
388 static inline isc_boolean_t
389 task_shutdown(isc__task_t *task) {
390 isc_boolean_t was_idle = ISC_FALSE;
391 isc_event_t *event, *prev;
394 * Caller must be holding the task's lock.
397 XTRACE("task_shutdown");
/* Idempotent: a task already shutting down is left untouched. */
399 if (! TASK_SHUTTINGDOWN(task)) {
400 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
401 ISC_MSG_SHUTTINGDOWN, "shutting down"));
402 task->flags |= TASK_F_SHUTTINGDOWN;
403 if (task->state == task_state_idle) {
404 INSIST(EMPTY(task->events));
405 task->state = task_state_ready;
408 INSIST(task->state == task_state_ready ||
409 task->state == task_state_running);
411 * Note that we post shutdown events LIFO.
413 for (event = TAIL(task->on_shutdown);
416 prev = PREV(event, ev_link);
417 DEQUEUE(task->on_shutdown, event, ev_link);
418 ENQUEUE(task->events, event, ev_link);
/*
 * task_ready():
 * Append a task in task_state_ready to the manager's ready queue and,
 * in a threaded build, signal one waiting worker.  Takes the manager
 * lock; the task lock must NOT be held (lock-order: manager after task
 * would deadlock — see the comment in isc__task_send()).
 */
426 task_ready(isc__task_t *task) {
427 isc__taskmgr_t *manager = task->manager;
429 REQUIRE(VALID_MANAGER(manager));
430 REQUIRE(task->state == task_state_ready);
432 XTRACE("task_ready");
434 LOCK(&manager->lock);
436 ENQUEUE(manager->ready_tasks, task, ready_link);
437 #ifdef USE_WORKER_THREADS
438 SIGNAL(&manager->work_available);
439 #endif /* USE_WORKER_THREADS */
441 UNLOCK(&manager->lock);
/*
 * task_detach():
 * Drop one reference.  Returns ISC_TRUE when the now-unreferenced,
 * idle task was moved to task_state_ready so the caller can queue it
 * for the run loop to shut down and destroy.  Caller must hold the
 * task lock.
 */
444 static inline isc_boolean_t
445 task_detach(isc__task_t *task) {
448 * Caller must be holding the task lock.
451 REQUIRE(task->references > 0);
456 if (task->references == 0 && task->state == task_state_idle) {
457 INSIST(EMPTY(task->events));
459 * There are no references to this task, and no
460 * pending events. We could try to optimize and
461 * either initiate shutdown or clean up the task,
462 * depending on its state, but it's easier to just
463 * make the task ready and allow run() or the event
464 * loop to deal with shutting down and termination.
466 task->state = task_state_ready;
/*
 * isc__task_detach():
 * Public wrapper around task_detach(): locks the task, drops a
 * reference, and (in lines missing from this excerpt) queues the task
 * via task_ready() if it just became ready, then NULLs *taskp.
 */
473 ISC_TASKFUNC_SCOPE void
474 isc__task_detach(isc_task_t **taskp) {
476 isc_boolean_t was_idle;
479 * Detach *taskp from its task.
482 REQUIRE(taskp != NULL);
483 task = (isc__task_t *)*taskp;
484 REQUIRE(VALID_TASK(task));
486 XTRACE("isc_task_detach");
489 was_idle = task_detach(task);
/*
 * task_send():
 * Enqueue *eventp on the task (consuming the caller's event pointer in
 * lines missing from this excerpt).  Returns ISC_TRUE when the task
 * transitioned idle -> ready, in which case the caller must queue it
 * with task_ready().  Caller must hold the task lock.
 */
498 static inline isc_boolean_t
499 task_send(isc__task_t *task, isc_event_t **eventp) {
500 isc_boolean_t was_idle = ISC_FALSE;
504 * Caller must be holding the task lock.
507 REQUIRE(eventp != NULL);
509 REQUIRE(event != NULL);
510 REQUIRE(event->ev_type > 0);
511 REQUIRE(task->state != task_state_done);
515 if (task->state == task_state_idle) {
517 INSIST(EMPTY(task->events));
518 task->state = task_state_ready;
520 INSIST(task->state == task_state_ready ||
521 task->state == task_state_running);
522 ENQUEUE(task->events, event, ev_link);
/*
 * isc__task_send():
 * Public event delivery: lock the task, enqueue the event, and — after
 * releasing the task lock — add the task to the manager's ready queue
 * if it was idle.  The two-phase locking avoids holding task and
 * manager locks simultaneously (deadlock avoidance, explained below).
 */
528 ISC_TASKFUNC_SCOPE void
529 isc__task_send(isc_task_t *task0, isc_event_t **eventp) {
530 isc__task_t *task = (isc__task_t *)task0;
531 isc_boolean_t was_idle;
534 * Send '*event' to 'task'.
537 REQUIRE(VALID_TASK(task));
539 XTRACE("isc_task_send");
542 * We're trying hard to hold locks for as short a time as possible.
543 * We're also trying to hold as few locks as possible. This is why
544 * some processing is deferred until after the lock is released.
547 was_idle = task_send(task, eventp);
552 * We need to add this task to the ready queue.
554 * We've waited until now to do it because making a task
555 * ready requires locking the manager. If we tried to do
556 * this while holding the task lock, we could deadlock.
558 * We've changed the state to ready, so no one else will
559 * be trying to add this task to the ready queue. The
560 * only way to leave the ready state is by executing the
561 * task. It thus doesn't matter if events are added,
562 * removed, or a shutdown is started in the interval
563 * between the time we released the task lock, and the time
564 * we add the task to the ready queue.
/*
 * isc__task_sendanddetach():
 * Atomically (under one task-lock acquisition) send an event and drop
 * a reference.  At most one of the two operations can have made the
 * task ready, hence the INSIST below; the ready-queueing and *taskp
 * NULLing are in lines missing from this excerpt.
 */
570 ISC_TASKFUNC_SCOPE void
571 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
572 isc_boolean_t idle1, idle2;
576 * Send '*event' to '*taskp' and then detach '*taskp' from its
580 REQUIRE(taskp != NULL);
581 task = (isc__task_t *)*taskp;
582 REQUIRE(VALID_TASK(task));
584 XTRACE("isc_task_sendanddetach");
587 idle1 = task_send(task, eventp);
588 idle2 = task_detach(task);
592 * If idle1, then idle2 shouldn't be true as well since we're holding
593 * the task lock, and thus the task cannot switch from ready back to
596 INSIST(!(idle1 && idle2));
/* An event may be purged unless it carries ISC_EVENTATTR_NOPURGE. */
604 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
/*
 * dequeue_events():
 * Move events matching (sender, [first..last], tag) from the task's
 * queue onto 'events', honoring NOPURGE when 'purging'.  Returns the
 * number moved ('count' is incremented in a line missing from this
 * excerpt).  NULL sender/tag act as wildcards.
 */
607 dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
608 isc_eventtype_t last, void *tag,
609 isc_eventlist_t *events, isc_boolean_t purging)
611 isc_event_t *event, *next_event;
612 unsigned int count = 0;
614 REQUIRE(VALID_TASK(task));
615 REQUIRE(last >= first);
617 XTRACE("dequeue_events");
620 * Events matching 'sender', whose type is >= first and <= last, and
621 * whose tag is 'tag' will be dequeued. If 'purging', matching events
622 * which are marked as unpurgable will not be dequeued.
624 * sender == NULL means "any sender", and tag == NULL means "any tag".
/* next_event is saved before DEQUEUE unlinks the current node. */
629 for (event = HEAD(task->events); event != NULL; event = next_event) {
630 next_event = NEXT(event, ev_link);
631 if (event->ev_type >= first && event->ev_type <= last &&
632 (sender == NULL || event->ev_sender == sender) &&
633 (tag == NULL || event->ev_tag == tag) &&
634 (!purging || PURGE_OK(event))) {
635 DEQUEUE(task->events, event, ev_link);
636 ENQUEUE(*events, event, ev_link);
/*
 * isc__task_purgerange():
 * Dequeue matching purgeable events and free them.  Returns the number
 * purged.  Purging never changes the task's state.
 */
646 ISC_TASKFUNC_SCOPE unsigned int
647 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
648 isc_eventtype_t last, void *tag)
650 isc__task_t *task = (isc__task_t *)task0;
652 isc_eventlist_t events;
653 isc_event_t *event, *next_event;
656 * Purge events from a task's event queue.
659 XTRACE("isc_task_purgerange");
661 ISC_LIST_INIT(events);
663 count = dequeue_events(task, sender, first, last, tag, &events,
/* Free each dequeued event; list links die with the events. */
666 for (event = HEAD(events); event != NULL; event = next_event) {
667 next_event = NEXT(event, ev_link);
668 isc_event_free(&event);
672 * Note that purging never changes the state of the task.
/*
 * isc__task_purge():
 * Convenience wrapper: purge events of a single type (a one-element
 * range [type..type]).
 */
678 ISC_TASKFUNC_SCOPE unsigned int
679 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
683 * Purge events from a task's event queue.
686 XTRACE("isc_task_purge");
688 return (isc__task_purgerange(task, sender, type, type, tag));
/*
 * isc__task_purgeevent():
 * Purge one specific event pointer if it is actually on this task's
 * queue and is purgeable.  Returns a boolean (presumably ISC_TRUE when
 * the event was found and freed — the return statements are in lines
 * missing from this excerpt).
 */
691 ISC_TASKFUNC_SCOPE isc_boolean_t
692 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event) {
693 isc__task_t *task = (isc__task_t *)task0;
694 isc_event_t *curr_event, *next_event;
697 * Purge 'event' from a task's event queue.
699 * XXXRTH: WARNING: This method may be removed before beta.
702 REQUIRE(VALID_TASK(task));
705 * If 'event' is on the task's event queue, it will be purged,
706 * unless it is marked as unpurgeable. 'event' does not have to be
707 * on the task's event queue; in fact, it can even be an invalid
708 * pointer. Purging only occurs if the event is actually on the task's
711 * Purging never changes the state of the task.
/* Scan by identity: 'event' is only dereferenced when found on-queue. */
715 for (curr_event = HEAD(task->events);
717 curr_event = next_event) {
718 next_event = NEXT(curr_event, ev_link);
719 if (curr_event == event && PURGE_OK(event)) {
720 DEQUEUE(task->events, curr_event, ev_link);
726 if (curr_event == NULL)
729 isc_event_free(&curr_event);
/*
 * isc__task_unsendrange():
 * Like purgerange, but the dequeued events are returned to the caller
 * on 'events' (not freed), and NOPURGE is ignored (purging=FALSE).
 */
734 ISC_TASKFUNC_SCOPE unsigned int
735 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
736 isc_eventtype_t last, void *tag,
737 isc_eventlist_t *events)
740 * Remove events from a task's event queue.
743 XTRACE("isc_task_unsendrange");
745 return (dequeue_events((isc__task_t *)task, sender, first,
746 last, tag, events, ISC_FALSE));
/*
 * isc__task_unsend():
 * Single-type variant of unsendrange: remove matching events of one
 * type and hand them back on 'events'.
 */
749 ISC_TASKFUNC_SCOPE unsigned int
750 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
751 void *tag, isc_eventlist_t *events)
754 * Remove events from a task's event queue.
757 XTRACE("isc_task_unsend");
759 return (dequeue_events((isc__task_t *)task, sender, type,
760 type, tag, events, ISC_FALSE));
/*
 * isc__task_onshutdown():
 * Register a callback to run when the task shuts down, delivered as an
 * ISC_TASKEVENT_SHUTDOWN event.  If the task is already shutting down,
 * the event is freed instead and ISC_R_SHUTTINGDOWN is returned.
 * Returns ISC_R_NOMEMORY if the event cannot be allocated.
 */
763 ISC_TASKFUNC_SCOPE isc_result_t
764 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
767 isc__task_t *task = (isc__task_t *)task0;
768 isc_boolean_t disallowed = ISC_FALSE;
769 isc_result_t result = ISC_R_SUCCESS;
773 * Send a shutdown event with action 'action' and argument 'arg' when
774 * 'task' is shutdown.
777 REQUIRE(VALID_TASK(task));
778 REQUIRE(action != NULL);
780 event = isc_event_allocate(task->manager->mctx,
782 ISC_TASKEVENT_SHUTDOWN,
787 return (ISC_R_NOMEMORY);
790 if (TASK_SHUTTINGDOWN(task)) {
791 disallowed = ISC_TRUE;
792 result = ISC_R_SHUTTINGDOWN;
794 ENQUEUE(task->on_shutdown, event, ev_link);
/* Too late to register: free the unused event outside the task lock. */
798 isc_mem_put(task->manager->mctx, event, sizeof(*event));
/*
 * isc__task_shutdown():
 * Public wrapper around task_shutdown(); if the task just became
 * ready, it is queued via task_ready() (in lines missing from this
 * excerpt).
 */
803 ISC_TASKFUNC_SCOPE void
804 isc__task_shutdown(isc_task_t *task0) {
805 isc__task_t *task = (isc__task_t *)task0;
806 isc_boolean_t was_idle;
812 REQUIRE(VALID_TASK(task));
815 was_idle = task_shutdown(task);
/*
 * isc__task_destroy():
 * Convenience: shut the task down, then drop the caller's reference.
 * Actual destruction happens once all references and events drain.
 */
822 ISC_TASKFUNC_SCOPE void
823 isc__task_destroy(isc_task_t **taskp) {
829 REQUIRE(taskp != NULL);
831 isc_task_shutdown(*taskp);
832 isc_task_detach(taskp);
/*
 * isc__task_setname():
 * Set the task's debugging name (truncated to the name buffer, always
 * NUL-terminated via the memset) and its tag (tag assignment is in a
 * line missing from this excerpt).
 */
835 ISC_TASKFUNC_SCOPE void
836 isc__task_setname(isc_task_t *task0, const char *name, void *tag) {
837 isc__task_t *task = (isc__task_t *)task0;
843 REQUIRE(VALID_TASK(task));
846 memset(task->name, 0, sizeof(task->name));
847 strncpy(task->name, name, sizeof(task->name) - 1);
/* isc__task_getname(): return the task's debugging name. */
852 ISC_TASKFUNC_SCOPE const char *
853 isc__task_getname(isc_task_t *task0) {
854 isc__task_t *task = (isc__task_t *)task0;
856 REQUIRE(VALID_TASK(task));
/* isc__task_gettag(): return the task's opaque tag pointer. */
861 ISC_TASKFUNC_SCOPE void *
862 isc__task_gettag(isc_task_t *task0) {
863 isc__task_t *task = (isc__task_t *)task0;
865 REQUIRE(VALID_TASK(task));
/*
 * isc__task_getcurrenttime():
 * Report the timestamp recorded when the task last started running
 * (the *t assignment is in a line missing from this excerpt).
 */
870 ISC_TASKFUNC_SCOPE void
871 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
872 isc__task_t *task = (isc__task_t *)task0;
874 REQUIRE(VALID_TASK(task));
/*
 * dispatch():
 * Core scheduling loop.  Threaded build: each worker thread runs this
 * until FINISHED(manager), sleeping on work_available when the ready
 * queue is empty or exclusive mode is requested.  Non-threaded build:
 * one pass dispatches at most DEFAULT_TASKMGR_QUANTUM events, parking
 * still-runnable tasks on a local list that is re-appended at the end.
 * Per task: run up to 'quantum' events, then either finish the task
 * (done), idle it, or requeue it.  (Excerpt is missing many lines,
 * including most closing braces — read alongside the full file.)
 */
888 dispatch(isc__taskmgr_t *manager) {
890 #ifndef USE_WORKER_THREADS
891 unsigned int total_dispatch_count = 0;
892 isc__tasklist_t ready_tasks;
893 #endif /* USE_WORKER_THREADS */
895 REQUIRE(VALID_MANAGER(manager));
898 * Again we're trying to hold the lock for as short a time as possible
899 * and to do as little locking and unlocking as possible.
901 * In both while loops, the appropriate lock must be held before the
902 * while body starts. Code which acquired the lock at the top of
903 * the loop would be more readable, but would result in a lot of
904 * extra locking. Compare:
911 * while (expression) {
916 * Unlocked part here...
923 * Note how if the loop continues we unlock and then immediately lock.
924 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
925 * unlocks. Also note that the lock is not held when the while
926 * condition is tested, which may or may not be important, depending
932 * while (expression) {
936 * Unlocked part here...
943 * For N iterations of the loop, this code does N+1 locks and N+1
944 * unlocks. The while expression is always protected by the lock.
947 #ifndef USE_WORKER_THREADS
948 ISC_LIST_INIT(ready_tasks);
950 LOCK(&manager->lock);
951 while (!FINISHED(manager)) {
952 #ifdef USE_WORKER_THREADS
954 * For reasons similar to those given in the comment in
955 * isc_task_send() above, it is safe for us to dequeue
956 * the task while only holding the manager lock, and then
957 * change the task to running state while only holding the
/*
 * Sleep until there is work and exclusive mode is not in
 * effect (loop re-checks the predicate after each wakeup,
 * guarding against spurious wakeups).
 */
960 while ((EMPTY(manager->ready_tasks) ||
961 manager->exclusive_requested) &&
964 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
966 ISC_MSG_WAIT, "wait"));
967 WAIT(&manager->work_available, &manager->lock);
968 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
970 ISC_MSG_AWAKE, "awake"));
972 #else /* USE_WORKER_THREADS */
/* Non-threaded: stop after this pass's global event budget is spent. */
973 if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
974 EMPTY(manager->ready_tasks))
976 #endif /* USE_WORKER_THREADS */
977 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
978 ISC_MSG_WORKING, "working"));
980 task = HEAD(manager->ready_tasks);
982 unsigned int dispatch_count = 0;
983 isc_boolean_t done = ISC_FALSE;
984 isc_boolean_t requeue = ISC_FALSE;
985 isc_boolean_t finished = ISC_FALSE;
988 INSIST(VALID_TASK(task));
991 * Note we only unlock the manager lock if we actually
992 * have a task to do. We must reacquire the manager
993 * lock before exiting the 'if (task != NULL)' block.
995 DEQUEUE(manager->ready_tasks, task, ready_link);
996 manager->tasks_running++;
997 UNLOCK(&manager->lock);
1000 INSIST(task->state == task_state_ready);
1001 task->state = task_state_running;
1002 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1003 ISC_MSG_RUNNING, "running"));
/* Stamp task->now once per scheduling slice (see getcurrenttime). */
1004 isc_stdtime_get(&task->now);
1006 if (!EMPTY(task->events)) {
1007 event = HEAD(task->events);
1008 DEQUEUE(task->events, event, ev_link);
1011 * Execute the event action.
1013 XTRACE(isc_msgcat_get(isc_msgcat,
/* The task lock is dropped while the user action runs. */
1017 if (event->ev_action != NULL) {
1018 UNLOCK(&task->lock);
1025 #ifndef USE_WORKER_THREADS
1026 total_dispatch_count++;
1027 #endif /* USE_WORKER_THREADS */
1030 if (task->references == 0 &&
1031 EMPTY(task->events) &&
1032 !TASK_SHUTTINGDOWN(task)) {
1033 isc_boolean_t was_idle;
1036 * There are no references and no
1037 * pending events for this task,
1038 * which means it will not become
1039 * runnable again via an external
1040 * action (such as sending an event
1043 * We initiate shutdown to prevent
1044 * it from becoming a zombie.
1046 * We do this here instead of in
1047 * the "if EMPTY(task->events)" block
1050 * If we post no shutdown events,
1051 * we want the task to finish.
1053 * If we did post shutdown events,
1054 * will still want the task's
1055 * quantum to be applied.
1057 was_idle = task_shutdown(task);
1061 if (EMPTY(task->events)) {
1063 * Nothing else to do for this task
1066 XTRACE(isc_msgcat_get(isc_msgcat,
1070 if (task->references == 0 &&
1071 TASK_SHUTTINGDOWN(task)) {
1075 XTRACE(isc_msgcat_get(
1080 finished = ISC_TRUE;
1081 task->state = task_state_done;
1083 task->state = task_state_idle;
1085 } else if (dispatch_count >= task->quantum) {
1087 * Our quantum has expired, but
1088 * there is more work to be done.
1089 * We'll requeue it to the ready
1092 * We don't check quantum until
1093 * dispatching at least one event,
1094 * so the minimum quantum is one.
1096 XTRACE(isc_msgcat_get(isc_msgcat,
1100 task->state = task_state_ready;
1105 UNLOCK(&task->lock);
1108 task_finished(task);
1110 LOCK(&manager->lock);
1111 manager->tasks_running--;
1112 #ifdef USE_WORKER_THREADS
/*
 * tasks_running == 1 here means only the exclusive
 * requester itself remains; grant it exclusivity.
 */
1113 if (manager->exclusive_requested &&
1114 manager->tasks_running == 1) {
1115 SIGNAL(&manager->exclusive_granted);
1117 #endif /* USE_WORKER_THREADS */
1120 * We know we're awake, so we don't have
1121 * to wakeup any sleeping threads if the
1122 * ready queue is empty before we requeue.
1124 * A possible optimization if the queue is
1125 * empty is to 'goto' the 'if (task != NULL)'
1126 * block, avoiding the ENQUEUE of the task
1127 * and the subsequent immediate DEQUEUE
1128 * (since it is the only executable task).
1129 * We don't do this because then we'd be
1130 * skipping the exit_requested check. The
1131 * cost of ENQUEUE is low anyway, especially
1132 * when you consider that we'd have to do
1133 * an extra EMPTY check to see if we could
1134 * do the optimization. If the ready queue
1135 * were usually nonempty, the 'optimization'
1136 * might even hurt rather than help.
1138 #ifdef USE_WORKER_THREADS
1139 ENQUEUE(manager->ready_tasks, task,
1142 ENQUEUE(ready_tasks, task, ready_link);
1147 #ifndef USE_WORKER_THREADS
/* Return unrun tasks to the manager for the next dispatch pass. */
1148 ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
1150 UNLOCK(&manager->lock);
/*
 * run():
 * Worker-thread entry point (threaded build): just calls dispatch()
 * until the manager finishes (the call itself is in a line missing
 * from this excerpt), then cleans up OpenSSL per-thread error state
 * if that workaround is enabled.
 */
1153 #ifdef USE_WORKER_THREADS
1154 static isc_threadresult_t
1159 isc__taskmgr_t *manager = uap;
1161 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1162 ISC_MSG_STARTING, "starting"));
1166 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1167 ISC_MSG_EXITING, "exiting"));
1169 #ifdef OPENSSL_LEAKS
1170 ERR_remove_state(0);
1173 return ((isc_threadresult_t)0);
1175 #endif /* USE_WORKER_THREADS */
/*
 * manager_free():
 * Release all manager resources: condition variables and thread array
 * (threaded build), the manager lock, the manager itself, and the
 * memory-context reference.  Magics are cleared first so stale
 * pointers fail VALID_MANAGER.
 */
1178 manager_free(isc__taskmgr_t *manager) {
1181 #ifdef USE_WORKER_THREADS
1182 (void)isc_condition_destroy(&manager->exclusive_granted);
1183 (void)isc_condition_destroy(&manager->work_available);
1184 isc_mem_free(manager->mctx, manager->threads);
1185 #endif /* USE_WORKER_THREADS */
1186 DESTROYLOCK(&manager->lock);
1187 manager->common.impmagic = 0;
1188 manager->common.magic = 0;
/* Keep mctx alive until after the manager block is returned to it. */
1189 mctx = manager->mctx;
1190 isc_mem_put(mctx, manager, sizeof(*manager));
1191 isc_mem_detach(&mctx);
/* Shared-manager build presumably resets the global 'taskmgr' here. */
1193 #ifdef USE_SHARED_MANAGER
1195 #endif /* USE_SHARED_MANAGER */
/*
 * isc__taskmgr_create():
 * Create a task manager with 'workers' threads (threaded build) or
 * reuse/create the shared singleton (non-threaded build).  On thread
 * build: allocates the thread array, initializes the two condition
 * variables, spawns the workers, and fails with ISC_R_NOTHREADS if
 * none could be started.  Error paths unwind via the cleanup labels
 * at the bottom.  (Excerpt is missing lines, including some cleanup
 * labels and the refs accounting.)
 */
1198 ISC_TASKFUNC_SCOPE isc_result_t
1199 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1200 unsigned int default_quantum, isc_taskmgr_t **managerp)
1202 isc_result_t result;
1203 unsigned int i, started = 0;
1204 isc__taskmgr_t *manager;
1207 * Create a new task manager.
1210 REQUIRE(workers > 0);
1211 REQUIRE(managerp != NULL && *managerp == NULL);
1213 #ifndef USE_WORKER_THREADS
/* Shared-manager build: hand back the existing singleton if alive. */
1218 #ifdef USE_SHARED_MANAGER
1219 if (taskmgr != NULL) {
1220 if (taskmgr->refs == 0)
1221 return (ISC_R_SHUTTINGDOWN);
1223 *managerp = (isc_taskmgr_t *)taskmgr;
1224 return (ISC_R_SUCCESS);
1226 #endif /* USE_SHARED_MANAGER */
1228 manager = isc_mem_get(mctx, sizeof(*manager));
1229 if (manager == NULL)
1230 return (ISC_R_NOMEMORY);
1231 manager->common.methods = &taskmgrmethods;
1232 manager->common.impmagic = TASK_MANAGER_MAGIC;
1233 manager->common.magic = ISCAPI_TASKMGR_MAGIC;
1234 manager->mctx = NULL;
1235 result = isc_mutex_init(&manager->lock);
1236 if (result != ISC_R_SUCCESS)
1239 #ifdef USE_WORKER_THREADS
1240 manager->workers = 0;
1241 manager->threads = isc_mem_allocate(mctx,
1242 workers * sizeof(isc_thread_t));
1243 if (manager->threads == NULL) {
1244 result = ISC_R_NOMEMORY;
1247 if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1248 UNEXPECTED_ERROR(__FILE__, __LINE__,
1249 "isc_condition_init() %s",
1250 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1251 ISC_MSG_FAILED, "failed"));
1252 result = ISC_R_UNEXPECTED;
1253 goto cleanup_threads;
1255 if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1256 UNEXPECTED_ERROR(__FILE__, __LINE__,
1257 "isc_condition_init() %s",
1258 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1259 ISC_MSG_FAILED, "failed"));
1260 result = ISC_R_UNEXPECTED;
1261 goto cleanup_workavailable;
1263 #endif /* USE_WORKER_THREADS */
1264 if (default_quantum == 0)
1265 default_quantum = DEFAULT_DEFAULT_QUANTUM;
1266 manager->default_quantum = default_quantum;
1267 INIT_LIST(manager->tasks);
1268 INIT_LIST(manager->ready_tasks);
1269 manager->tasks_running = 0;
1270 manager->exclusive_requested = ISC_FALSE;
1271 manager->exiting = ISC_FALSE;
1272 manager->excl = NULL;
1274 isc_mem_attach(mctx, &manager->mctx);
1276 #ifdef USE_WORKER_THREADS
/*
 * Spawn the worker threads under the manager lock; 'workers' counts
 * only successful creations.  If none start, the manager is torn down.
 */
1277 LOCK(&manager->lock);
1281 for (i = 0; i < workers; i++) {
1282 if (isc_thread_create(run, manager,
1283 &manager->threads[manager->workers]) ==
1289 UNLOCK(&manager->lock);
1292 manager_free(manager);
1293 return (ISC_R_NOTHREADS);
1295 isc_thread_setconcurrency(workers);
1296 #endif /* USE_WORKER_THREADS */
1297 #ifdef USE_SHARED_MANAGER
1300 #endif /* USE_SHARED_MANAGER */
1302 *managerp = (isc_taskmgr_t *)manager;
1304 return (ISC_R_SUCCESS);
/* Unwind paths for partial initialization (labels partly elided). */
1306 #ifdef USE_WORKER_THREADS
1307 cleanup_workavailable:
1308 (void)isc_condition_destroy(&manager->work_available);
1310 isc_mem_free(mctx, manager->threads);
1312 DESTROYLOCK(&manager->lock);
1315 isc_mem_put(mctx, manager, sizeof(*manager));
/*
 * isc__taskmgr_destroy():
 * Shut down and free *managerp.  Posts shutdown to every task, then
 * either joins the worker threads (threaded build) or drains the
 * ready queue inline (non-threaded build) before freeing the manager.
 * Shared-manager build decrements a refcount and returns early while
 * other users remain (refcount lines partly elided in this excerpt).
 */
1319 ISC_TASKFUNC_SCOPE void
1320 isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
1321 isc__taskmgr_t *manager;
1326 * Destroy '*managerp'.
1329 REQUIRE(managerp != NULL);
1330 manager = (isc__taskmgr_t *)*managerp;
1331 REQUIRE(VALID_MANAGER(manager));
1333 #ifndef USE_WORKER_THREADS
1335 #endif /* USE_WORKER_THREADS */
1337 #ifdef USE_SHARED_MANAGER
1339 if (manager->refs > 0) {
1345 XTHREADTRACE("isc_taskmgr_destroy");
1347 * Only one non-worker thread may ever call this routine.
1348 * If a worker thread wants to initiate shutdown of the
1349 * task manager, it should ask some non-worker thread to call
1350 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1351 * that the startup thread is sleeping on.
1355 * Detach the exclusive task before acquiring the manager lock
1357 if (manager->excl != NULL)
1358 isc__task_detach((isc_task_t **) &manager->excl);
1361 * Unlike elsewhere, we're going to hold this lock a long time.
1362 * We need to do so, because otherwise the list of tasks could
1363 * change while we were traversing it.
1365 * This is also the only function where we will hold both the
1366 * task manager lock and a task lock at the same time.
1369 LOCK(&manager->lock);
1372 * Make sure we only get called once.
1374 INSIST(!manager->exiting);
1375 manager->exiting = ISC_TRUE;
1378 * Post shutdown event(s) to every task (if they haven't already been
/*
 * Tasks made ready by shutdown go straight onto the ready queue;
 * per-task LOCK is in a line missing from this excerpt.
 */
1381 for (task = HEAD(manager->tasks);
1383 task = NEXT(task, link)) {
1385 if (task_shutdown(task))
1386 ENQUEUE(manager->ready_tasks, task, ready_link);
1387 UNLOCK(&task->lock);
1389 #ifdef USE_WORKER_THREADS
1391 * Wake up any sleeping workers. This ensures we get work done if
1392 * there's work left to do, and if there are already no tasks left
1393 * it will cause the workers to see manager->exiting.
1395 BROADCAST(&manager->work_available);
1396 UNLOCK(&manager->lock);
1399 * Wait for all the worker threads to exit.
1401 for (i = 0; i < manager->workers; i++)
1402 (void)isc_thread_join(manager->threads[i], NULL);
1403 #else /* USE_WORKER_THREADS */
1405 * Dispatch the shutdown events.
1407 UNLOCK(&manager->lock);
1408 while (isc__taskmgr_ready((isc_taskmgr_t *)manager))
1409 (void)isc__taskmgr_dispatch((isc_taskmgr_t *)manager);
/* Leak diagnostics: any surviving task at this point is a bug. */
1411 if (!ISC_LIST_EMPTY(manager->tasks))
1412 isc_mem_printallactive(stderr);
1414 INSIST(ISC_LIST_EMPTY(manager->tasks));
1415 #ifdef USE_SHARED_MANAGER
1418 #endif /* USE_WORKER_THREADS */
1420 manager_free(manager);
/*
 * Non-threaded build helpers for the integrated event loop:
 * isc__taskmgr_ready() reports whether any task is runnable;
 * isc__taskmgr_dispatch() runs one dispatch() pass (the dispatch()
 * call is in a line missing from this excerpt).  Both fall back to
 * the shared singleton when passed NULL.
 */
1425 #ifndef USE_WORKER_THREADS
1427 isc__taskmgr_ready(isc_taskmgr_t *manager0) {
1428 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1430 #ifdef USE_SHARED_MANAGER
1431 if (manager == NULL)
1434 if (manager == NULL)
1436 return (ISC_TF(!ISC_LIST_EMPTY(manager->ready_tasks)));
1440 isc__taskmgr_dispatch(isc_taskmgr_t *manager0) {
1441 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1443 #ifdef USE_SHARED_MANAGER
1444 if (manager == NULL)
1447 if (manager == NULL)
1448 return (ISC_R_NOTFOUND);
1452 return (ISC_R_SUCCESS);
1455 #endif /* USE_WORKER_THREADS */
1457 ISC_TASKFUNC_SCOPE void
1458 isc__taskmgr_setexcltask(isc_taskmgr_t *mgr0, isc_task_t *task0) {
1459 isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
1460 isc__task_t *task = (isc__task_t *) task0;
1462 REQUIRE(VALID_MANAGER(mgr));
1463 REQUIRE(VALID_TASK(task));
1464 if (mgr->excl != NULL)
1465 isc__task_detach((isc_task_t **) &mgr->excl);
1466 isc__task_attach(task0, (isc_task_t **) &mgr->excl);
1469 ISC_TASKFUNC_SCOPE isc_result_t
1470 isc__taskmgr_excltask(isc_taskmgr_t *mgr0, isc_task_t **taskp) {
1471 isc__taskmgr_t *mgr = (isc__taskmgr_t *) mgr0;
1473 REQUIRE(VALID_MANAGER(mgr));
1474 REQUIRE(taskp != NULL && *taskp == NULL);
1476 if (mgr->excl == NULL)
1477 return (ISC_R_NOTFOUND);
1479 isc__task_attach((isc_task_t *) mgr->excl, taskp);
1480 return (ISC_R_SUCCESS);
1483 ISC_TASKFUNC_SCOPE isc_result_t
1484 isc__task_beginexclusive(isc_task_t *task0) {
1485 #ifdef USE_WORKER_THREADS
1486 isc__task_t *task = (isc__task_t *)task0;
1487 isc__taskmgr_t *manager = task->manager;
1489 REQUIRE(task->state == task_state_running);
1490 /* XXX: Require task == manager->excl? */
1492 LOCK(&manager->lock);
1493 if (manager->exclusive_requested) {
1494 UNLOCK(&manager->lock);
1495 return (ISC_R_LOCKBUSY);
1497 manager->exclusive_requested = ISC_TRUE;
1498 while (manager->tasks_running > 1) {
1499 WAIT(&manager->exclusive_granted, &manager->lock);
1501 UNLOCK(&manager->lock);
1505 return (ISC_R_SUCCESS);
1508 ISC_TASKFUNC_SCOPE void
1509 isc__task_endexclusive(isc_task_t *task0) {
1510 #ifdef USE_WORKER_THREADS
1511 isc__task_t *task = (isc__task_t *)task0;
1512 isc__taskmgr_t *manager = task->manager;
1514 REQUIRE(task->state == task_state_running);
1515 LOCK(&manager->lock);
1516 REQUIRE(manager->exclusive_requested);
1517 manager->exclusive_requested = ISC_FALSE;
1518 BROADCAST(&manager->work_available);
1519 UNLOCK(&manager->lock);
1525 #ifdef USE_SOCKETIMPREGISTER
1527 isc__task_register() {
1528 return (isc_task_register(isc__taskmgr_create));
1533 isc_task_exiting(isc_task_t *t) {
1534 isc__task_t *task = (isc__task_t *)t;
1536 REQUIRE(VALID_TASK(task));
1537 return (TASK_SHUTTINGDOWN(task));
1541 #if defined(HAVE_LIBXML2) && defined(BIND9)
1543 isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
1544 isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
1550 * Write out the thread-model, and some details about each depending
1551 * on which type is enabled.
1553 xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model");
1554 #ifdef ISC_PLATFORM_USETHREADS
1555 xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1556 xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded");
1557 xmlTextWriterEndElement(writer); /* type */
1559 xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads");
1560 xmlTextWriterWriteFormatString(writer, "%d", mgr->workers);
1561 xmlTextWriterEndElement(writer); /* worker-threads */
1562 #else /* ISC_PLATFORM_USETHREADS */
1563 xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1564 xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded");
1565 xmlTextWriterEndElement(writer); /* type */
1567 xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1568 xmlTextWriterWriteFormatString(writer, "%d", mgr->refs);
1569 xmlTextWriterEndElement(writer); /* references */
1570 #endif /* ISC_PLATFORM_USETHREADS */
1572 xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum");
1573 xmlTextWriterWriteFormatString(writer, "%d", mgr->default_quantum);
1574 xmlTextWriterEndElement(writer); /* default-quantum */
1576 xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running");
1577 xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running);
1578 xmlTextWriterEndElement(writer); /* tasks-running */
1580 xmlTextWriterEndElement(writer); /* thread-model */
1582 xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks");
1583 task = ISC_LIST_HEAD(mgr->tasks);
1584 while (task != NULL) {
1586 xmlTextWriterStartElement(writer, ISC_XMLCHAR "task");
1588 if (task->name[0] != 0) {
1589 xmlTextWriterStartElement(writer, ISC_XMLCHAR "name");
1590 xmlTextWriterWriteFormatString(writer, "%s",
1592 xmlTextWriterEndElement(writer); /* name */
1595 xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1596 xmlTextWriterWriteFormatString(writer, "%d", task->references);
1597 xmlTextWriterEndElement(writer); /* references */
1599 xmlTextWriterStartElement(writer, ISC_XMLCHAR "id");
1600 xmlTextWriterWriteFormatString(writer, "%p", task);
1601 xmlTextWriterEndElement(writer); /* id */
1603 xmlTextWriterStartElement(writer, ISC_XMLCHAR "state");
1604 xmlTextWriterWriteFormatString(writer, "%s",
1605 statenames[task->state]);
1606 xmlTextWriterEndElement(writer); /* state */
1608 xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum");
1609 xmlTextWriterWriteFormatString(writer, "%d", task->quantum);
1610 xmlTextWriterEndElement(writer); /* quantum */
1612 xmlTextWriterEndElement(writer);
1614 UNLOCK(&task->lock);
1615 task = ISC_LIST_NEXT(task, link);
1617 xmlTextWriterEndElement(writer); /* tasks */
1621 #endif /* HAVE_LIBXML2 && BIND9 */