2 * Copyright (C) 2004-2011 Internet Systems Consortium, Inc. ("ISC")
3 * Copyright (C) 1998-2003 Internet Software Consortium.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
10 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
11 * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
12 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
13 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
14 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
15 * PERFORMANCE OF THIS SOFTWARE.
18 /* $Id: task.c,v 1.115.14.2 2011-02-28 01:20:03 tbox Exp $ */
21 * \author Principal Author: Bob Halley
25 * XXXRTH Need to document the states a task can be in, and the rules
26 * for changing states.
31 #include <isc/condition.h>
32 #include <isc/event.h>
33 #include <isc/magic.h>
36 #include <isc/platform.h>
37 #include <isc/string.h>
39 #include <isc/thread.h>
44 #include <openssl/err.h>
48 * For BIND9 internal applications:
49 * when built with threads we use multiple worker threads shared by the whole
51 * when built without threads we share a single global task manager and use
52 * an integrated event loop for socket, timer, and other generic task events.
53 * For generic library:
54 * we don't use either of them: an application can have multiple task managers
55 * whether or not it's threaded, and if the application is threaded each thread
56 * is expected to have a separate manager; no "worker threads" are shared by
57 * the application threads.
60 #ifdef ISC_PLATFORM_USETHREADS
61 #define USE_WORKER_THREADS
63 #define USE_SHARED_MANAGER
64 #endif /* ISC_PLATFORM_USETHREADS */
67 #ifndef USE_WORKER_THREADS
69 #endif /* USE_WORKER_THREADS */
72 #define XTRACE(m) fprintf(stderr, "task %p thread %lu: %s\n", \
73 task, isc_thread_self(), (m))
74 #define XTTRACE(t, m) fprintf(stderr, "task %p thread %lu: %s\n", \
75 (t), isc_thread_self(), (m))
76 #define XTHREADTRACE(m) fprintf(stderr, "thread %lu: %s\n", \
77 isc_thread_self(), (m))
81 #define XTHREADTRACE(m)
89 task_state_idle, task_state_ready, task_state_running,
93 #if defined(HAVE_LIBXML2) && defined(BIND9)
94 static const char *statenames[] = {
95 "idle", "ready", "running", "done",
99 #define TASK_MAGIC ISC_MAGIC('T', 'A', 'S', 'K')
100 #define VALID_TASK(t) ISC_MAGIC_VALID(t, TASK_MAGIC)
102 typedef struct isc__task isc__task_t;
103 typedef struct isc__taskmgr isc__taskmgr_t;
108 isc__taskmgr_t * manager;
110 /* Locked by task lock. */
112 unsigned int references;
113 isc_eventlist_t events;
114 isc_eventlist_t on_shutdown;
115 unsigned int quantum;
120 /* Locked by task manager lock. */
121 LINK(isc__task_t) link;
122 LINK(isc__task_t) ready_link;
125 #define TASK_F_SHUTTINGDOWN 0x01
127 #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
130 #define TASK_MANAGER_MAGIC ISC_MAGIC('T', 'S', 'K', 'M')
131 #define VALID_MANAGER(m) ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)
133 typedef ISC_LIST(isc__task_t) isc__tasklist_t;
135 struct isc__taskmgr {
137 isc_taskmgr_t common;
140 #ifdef ISC_PLATFORM_USETHREADS
141 unsigned int workers;
142 isc_thread_t * threads;
143 #endif /* ISC_PLATFORM_USETHREADS */
144 /* Locked by task manager lock. */
145 unsigned int default_quantum;
146 LIST(isc__task_t) tasks;
147 isc__tasklist_t ready_tasks;
148 #ifdef ISC_PLATFORM_USETHREADS
149 isc_condition_t work_available;
150 isc_condition_t exclusive_granted;
151 #endif /* ISC_PLATFORM_USETHREADS */
152 unsigned int tasks_running;
153 isc_boolean_t exclusive_requested;
154 isc_boolean_t exiting;
155 #ifdef USE_SHARED_MANAGER
157 #endif /* ISC_PLATFORM_USETHREADS */
160 #define DEFAULT_TASKMGR_QUANTUM 10
161 #define DEFAULT_DEFAULT_QUANTUM 5
162 #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
164 #ifdef USE_SHARED_MANAGER
165 static isc__taskmgr_t *taskmgr = NULL;
166 #endif /* USE_SHARED_MANAGER */
169 * The following can be either static or public, depending on build environment.
173 #define ISC_TASKFUNC_SCOPE
175 #define ISC_TASKFUNC_SCOPE static
178 ISC_TASKFUNC_SCOPE isc_result_t
179 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
181 ISC_TASKFUNC_SCOPE void
182 isc__task_attach(isc_task_t *source0, isc_task_t **targetp);
183 ISC_TASKFUNC_SCOPE void
184 isc__task_detach(isc_task_t **taskp);
185 ISC_TASKFUNC_SCOPE void
186 isc__task_send(isc_task_t *task0, isc_event_t **eventp);
187 ISC_TASKFUNC_SCOPE void
188 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp);
189 ISC_TASKFUNC_SCOPE unsigned int
190 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
191 isc_eventtype_t last, void *tag);
192 ISC_TASKFUNC_SCOPE unsigned int
193 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
195 ISC_TASKFUNC_SCOPE isc_boolean_t
196 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event);
197 ISC_TASKFUNC_SCOPE unsigned int
198 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
199 isc_eventtype_t last, void *tag,
200 isc_eventlist_t *events);
201 ISC_TASKFUNC_SCOPE unsigned int
202 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
203 void *tag, isc_eventlist_t *events);
204 ISC_TASKFUNC_SCOPE isc_result_t
205 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
207 ISC_TASKFUNC_SCOPE void
208 isc__task_shutdown(isc_task_t *task0);
209 ISC_TASKFUNC_SCOPE void
210 isc__task_destroy(isc_task_t **taskp);
211 ISC_TASKFUNC_SCOPE void
212 isc__task_setname(isc_task_t *task0, const char *name, void *tag);
213 ISC_TASKFUNC_SCOPE const char *
214 isc__task_getname(isc_task_t *task0);
215 ISC_TASKFUNC_SCOPE void *
216 isc__task_gettag(isc_task_t *task0);
217 ISC_TASKFUNC_SCOPE void
218 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t);
219 ISC_TASKFUNC_SCOPE isc_result_t
220 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
221 unsigned int default_quantum, isc_taskmgr_t **managerp);
222 ISC_TASKFUNC_SCOPE void
223 isc__taskmgr_destroy(isc_taskmgr_t **managerp);
224 ISC_TASKFUNC_SCOPE isc_result_t
225 isc__task_beginexclusive(isc_task_t *task);
226 ISC_TASKFUNC_SCOPE void
227 isc__task_endexclusive(isc_task_t *task0);
229 static struct isc__taskmethods {
230 isc_taskmethods_t methods;
233 * The following are defined just for avoiding unused static functions.
236 void *purgeevent, *unsendrange, *getname, *gettag, *getcurrenttime;
244 isc__task_sendanddetach,
246 isc__task_onshutdown,
250 isc__task_purgerange,
251 isc__task_beginexclusive,
252 isc__task_endexclusive
256 (void *)isc__task_purgeevent, (void *)isc__task_unsendrange,
257 (void *)isc__task_getname, (void *)isc__task_gettag,
258 (void *)isc__task_getcurrenttime
262 static isc_taskmgrmethods_t taskmgrmethods = {
263 isc__taskmgr_destroy,
/*
 * Final teardown of a fully-completed task: unlink it from the manager's
 * task list, wake idle workers if the whole manager is now finished, and
 * free the task's lock and memory.
 *
 * Preconditions (enforced by the REQUIREs below): no queued events, no
 * pending shutdown events, zero references, state == done.
 *
 * NOTE(review): this excerpt skips some original lines (embedded numbering
 * jumps), so lock/brace structure shown here is incomplete — verify against
 * the full source.
 */
272 task_finished(isc__task_t *task) {
273 isc__taskmgr_t *manager = task->manager;
275 REQUIRE(EMPTY(task->events));
276 REQUIRE(EMPTY(task->on_shutdown));
277 REQUIRE(task->references == 0);
278 REQUIRE(task->state == task_state_done);
280 XTRACE("task_finished");
/* Manager lock protects the manager-wide task list. */
282 LOCK(&manager->lock);
283 UNLINK(manager->tasks, task, link);
284 #ifdef USE_WORKER_THREADS
285 if (FINISHED(manager)) {
287 * All tasks have completed and the
288 * task manager is exiting. Wake up
289 * any idle worker threads so they
/* Wake everyone: all workers must observe the exiting manager. */
292 BROADCAST(&manager->work_available);
294 #endif /* USE_WORKER_THREADS */
295 UNLOCK(&manager->lock);
/* The task's own lock dies with the task. */
297 DESTROYLOCK(&task->lock);
/* Clear magic numbers so stale pointers fail VALID_TASK() checks. */
298 task->common.impmagic = 0;
299 task->common.magic = 0;
300 isc_mem_put(manager->mctx, task, sizeof(*task));
/*
 * Create a task attached to 'manager0' with the given 'quantum' (maximum
 * events dispatched per scheduling slice; 0 selects the manager default)
 * and return it via *taskp with one reference held.
 *
 * Returns ISC_R_SUCCESS, ISC_R_NOMEMORY on allocation/mutex failure, or
 * ISC_R_SHUTTINGDOWN if the manager is already exiting.
 *
 * NOTE(review): excerpt is missing lines (e.g. the NULL check after
 * isc_mem_get and the exiting-branch braces) — confirm against full source.
 */
303 ISC_TASKFUNC_SCOPE isc_result_t
304 isc__task_create(isc_taskmgr_t *manager0, unsigned int quantum,
307 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
309 isc_boolean_t exiting;
312 REQUIRE(VALID_MANAGER(manager));
313 REQUIRE(taskp != NULL && *taskp == NULL);
315 task = isc_mem_get(manager->mctx, sizeof(*task));
317 return (ISC_R_NOMEMORY);
318 XTRACE("isc_task_create");
319 task->manager = manager;
320 result = isc_mutex_init(&task->lock);
321 if (result != ISC_R_SUCCESS) {
/* Mutex init failed: release the task memory before returning. */
322 isc_mem_put(manager->mctx, task, sizeof(*task));
/* Fresh task: idle, one reference (the caller's), empty queues. */
325 task->state = task_state_idle;
326 task->references = 1;
327 INIT_LIST(task->events);
328 INIT_LIST(task->on_shutdown);
329 task->quantum = quantum;
332 memset(task->name, 0, sizeof(task->name));
334 INIT_LINK(task, link);
335 INIT_LINK(task, ready_link);
/* Register with the manager unless it is already shutting down. */
338 LOCK(&manager->lock);
339 if (!manager->exiting) {
340 if (task->quantum == 0)
341 task->quantum = manager->default_quantum;
342 APPEND(manager->tasks, task, link);
345 UNLOCK(&manager->lock);
/* Manager was exiting: undo the partial construction. */
348 DESTROYLOCK(&task->lock);
349 isc_mem_put(manager->mctx, task, sizeof(*task));
350 return (ISC_R_SHUTTINGDOWN);
/* Stamp API magic last, once the task is fully constructed. */
353 task->common.methods = (isc_taskmethods_t *)&taskmethods;
354 task->common.magic = ISCAPI_TASK_MAGIC;
355 task->common.impmagic = TASK_MAGIC;
356 *taskp = (isc_task_t *)task;
358 return (ISC_R_SUCCESS);
/*
 * Attach *targetp to 'source0': bump the source task's reference count
 * (under the task lock) and hand the caller a second handle to it.
 */
361 ISC_TASKFUNC_SCOPE void
362 isc__task_attach(isc_task_t *source0, isc_task_t **targetp) {
363 isc__task_t *source = (isc__task_t *)source0;
366 * Attach *targetp to source.
369 REQUIRE(VALID_TASK(source));
370 REQUIRE(targetp != NULL && *targetp == NULL);
372 XTTRACE(source, "isc_task_attach");
/* NOTE(review): matching LOCK(&source->lock) is absent from this excerpt. */
375 source->references++;
376 UNLOCK(&source->lock);
378 *targetp = (isc_task_t *)source;
/*
 * Mark 'task' as shutting down and move its on_shutdown events onto the
 * ordinary event queue, in LIFO order (most recently registered shutdown
 * action runs first).
 *
 * Caller must hold the task's lock.  Returns ISC_TRUE (via was_idle) when
 * the task was idle and has just been made ready, meaning the caller must
 * enqueue it on the manager's ready queue after dropping the task lock.
 */
381 static inline isc_boolean_t
382 task_shutdown(isc__task_t *task) {
383 isc_boolean_t was_idle = ISC_FALSE;
384 isc_event_t *event, *prev;
387 * Caller must be holding the task's lock.
390 XTRACE("task_shutdown");
/* Only the first shutdown request does any work; later calls are no-ops. */
392 if (! TASK_SHUTTINGDOWN(task)) {
393 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
394 ISC_MSG_SHUTTINGDOWN, "shutting down"));
395 task->flags |= TASK_F_SHUTTINGDOWN;
396 if (task->state == task_state_idle) {
397 INSIST(EMPTY(task->events));
398 task->state = task_state_ready;
401 INSIST(task->state == task_state_ready ||
402 task->state == task_state_running);
404 * Note that we post shutdown events LIFO.
/* Walk on_shutdown from the tail; each iteration moves one event. */
406 for (event = TAIL(task->on_shutdown);
409 prev = PREV(event, ev_link);
410 DEQUEUE(task->on_shutdown, event, ev_link);
411 ENQUEUE(task->events, event, ev_link);
/*
 * Place 'task' (already in the ready state) on its manager's ready queue
 * and, in the threaded build, wake one worker to service it.  Takes the
 * manager lock; the task lock must NOT be held (see the deadlock note in
 * isc__task_send()).
 */
419 task_ready(isc__task_t *task) {
420 isc__taskmgr_t *manager = task->manager;
422 REQUIRE(VALID_MANAGER(manager));
423 REQUIRE(task->state == task_state_ready);
425 XTRACE("task_ready");
427 LOCK(&manager->lock);
429 ENQUEUE(manager->ready_tasks, task, ready_link);
430 #ifdef USE_WORKER_THREADS
/* One new ready task -> one worker is enough; SIGNAL, not BROADCAST. */
431 SIGNAL(&manager->work_available);
432 #endif /* USE_WORKER_THREADS */
434 UNLOCK(&manager->lock);
/*
 * Drop one reference from 'task' (caller holds the task lock).  If that
 * was the last reference and the task is idle with no pending events,
 * promote it to ready so the run loop can shut it down and destroy it;
 * the return value tells the caller to do the manager-side enqueue.
 *
 * NOTE(review): the reference-decrement line itself is absent from this
 * excerpt — confirm against the full source.
 */
437 static inline isc_boolean_t
438 task_detach(isc__task_t *task) {
441 * Caller must be holding the task lock.
444 REQUIRE(task->references > 0);
449 if (task->references == 0 && task->state == task_state_idle) {
450 INSIST(EMPTY(task->events));
452 * There are no references to this task, and no
453 * pending events. We could try to optimize and
454 * either initiate shutdown or clean up the task,
455 * depending on its state, but it's easier to just
456 * make the task ready and allow run() or the event
457 * loop to deal with shutting down and termination.
459 task->state = task_state_ready;
/*
 * Public detach: drop *taskp's reference under the task lock and NULL the
 * caller's pointer; if the task became ready as a result, it is handed to
 * the manager's ready queue (outside the task lock, to avoid deadlock).
 */
466 ISC_TASKFUNC_SCOPE void
467 isc__task_detach(isc_task_t **taskp) {
469 isc_boolean_t was_idle;
472 * Detach *taskp from its task.
475 REQUIRE(taskp != NULL);
476 task = (isc__task_t *)*taskp;
477 REQUIRE(VALID_TASK(task));
479 XTRACE("isc_task_detach");
/* task_detach() requires the task lock; LOCK line absent from excerpt. */
482 was_idle = task_detach(task);
/*
 * Append *eventp to the task's event queue (caller holds the task lock)
 * and consume the caller's event pointer.  Returns ISC_TRUE when the task
 * was idle and has just transitioned to ready, which obliges the caller to
 * enqueue it on the manager's ready queue after releasing the task lock.
 */
491 static inline isc_boolean_t
492 task_send(isc__task_t *task, isc_event_t **eventp) {
493 isc_boolean_t was_idle = ISC_FALSE;
497 * Caller must be holding the task lock.
500 REQUIRE(eventp != NULL);
502 REQUIRE(event != NULL);
/* Type 0 is reserved; all real events are > 0. */
503 REQUIRE(event->ev_type > 0);
504 REQUIRE(task->state != task_state_done);
508 if (task->state == task_state_idle) {
510 INSIST(EMPTY(task->events));
511 task->state = task_state_ready;
513 INSIST(task->state == task_state_ready ||
514 task->state == task_state_running);
515 ENQUEUE(task->events, event, ev_link);
/*
 * Public send: queue '*eventp' on 'task0'.  The task lock is held only for
 * the queue manipulation; if the task went idle->ready, the manager-side
 * enqueue (task_ready) is deferred until after the task lock is dropped,
 * because taking the manager lock while holding a task lock could deadlock.
 */
521 ISC_TASKFUNC_SCOPE void
522 isc__task_send(isc_task_t *task0, isc_event_t **eventp) {
523 isc__task_t *task = (isc__task_t *)task0;
524 isc_boolean_t was_idle;
527 * Send '*event' to 'task'.
530 REQUIRE(VALID_TASK(task));
532 XTRACE("isc_task_send");
535 * We're trying hard to hold locks for as short a time as possible.
536 * We're also trying to hold as few locks as possible. This is why
537 * some processing is deferred until after the lock is released.
/* NOTE(review): LOCK(&task->lock)/UNLOCK pair absent from this excerpt. */
540 was_idle = task_send(task, eventp);
545 * We need to add this task to the ready queue.
547 * We've waited until now to do it because making a task
548 * ready requires locking the manager. If we tried to do
549 * this while holding the task lock, we could deadlock.
551 * We've changed the state to ready, so no one else will
552 * be trying to add this task to the ready queue. The
553 * only way to leave the ready state is by executing the
554 * task. It thus doesn't matter if events are added,
555 * removed, or a shutdown is started in the interval
556 * between the time we released the task lock, and the time
557 * we add the task to the ready queue.
/*
 * Atomically (under one task-lock hold) send '*eventp' to '*taskp' and
 * drop the caller's reference.  At most one of the two operations can have
 * made the task ready — asserted below — so a single deferred task_ready()
 * suffices.
 */
563 ISC_TASKFUNC_SCOPE void
564 isc__task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
565 isc_boolean_t idle1, idle2;
569 * Send '*event' to '*taskp' and then detach '*taskp' from its
573 REQUIRE(taskp != NULL);
574 task = (isc__task_t *)*taskp;
575 REQUIRE(VALID_TASK(task));
577 XTRACE("isc_task_sendanddetach");
/* Both helpers require the task lock; LOCK line absent from excerpt. */
580 idle1 = task_send(task, eventp);
581 idle2 = task_detach(task);
585 * If idle1, then idle2 shouldn't be true as well since we're holding
586 * the task lock, and thus the task cannot switch from ready back to
589 INSIST(!(idle1 && idle2));
/* An event may be purged unless it carries ISC_EVENTATTR_NOPURGE. */
597 #define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
/*
 * Common engine for purge/unsend: move every event on 'task''s queue that
 * matches (sender, [first..last], tag) onto 'events', honouring NOPURGE
 * when 'purging' is true.  Returns the number of events moved.
 * sender == NULL and tag == NULL act as wildcards.
 */
600 dequeue_events(isc__task_t *task, void *sender, isc_eventtype_t first,
601 isc_eventtype_t last, void *tag,
602 isc_eventlist_t *events, isc_boolean_t purging)
604 isc_event_t *event, *next_event;
605 unsigned int count = 0;
607 REQUIRE(VALID_TASK(task));
608 REQUIRE(last >= first);
610 XTRACE("dequeue_events");
613 * Events matching 'sender', whose type is >= first and <= last, and
614 * whose tag is 'tag' will be dequeued. If 'purging', matching events
615 * which are marked as unpurgable will not be dequeued.
617 * sender == NULL means "any sender", and tag == NULL means "any tag".
/* Capture next_event first: DEQUEUE unlinks 'event' from the list. */
622 for (event = HEAD(task->events); event != NULL; event = next_event) {
623 next_event = NEXT(event, ev_link);
624 if (event->ev_type >= first && event->ev_type <= last &&
625 (sender == NULL || event->ev_sender == sender) &&
626 (tag == NULL || event->ev_tag == tag) &&
627 (!purging || PURGE_OK(event))) {
628 DEQUEUE(task->events, event, ev_link);
629 ENQUEUE(*events, event, ev_link);
/*
 * Purge (dequeue AND free) all purgeable events on 'task0' matching
 * (sender, [first..last], tag).  Returns the count of events purged.
 * Purging never changes the task's state.
 */
639 ISC_TASKFUNC_SCOPE unsigned int
640 isc__task_purgerange(isc_task_t *task0, void *sender, isc_eventtype_t first,
641 isc_eventtype_t last, void *tag)
643 isc__task_t *task = (isc__task_t *)task0;
645 isc_eventlist_t events;
646 isc_event_t *event, *next_event;
649 * Purge events from a task's event queue.
652 XTRACE("isc_task_purgerange");
654 ISC_LIST_INIT(events);
/* Collect matches onto a local list, then free them outside the queue. */
656 count = dequeue_events(task, sender, first, last, tag, &events,
659 for (event = HEAD(events); event != NULL; event = next_event) {
660 next_event = NEXT(event, ev_link);
661 isc_event_free(&event);
665 * Note that purging never changes the state of the task.
/*
 * Convenience wrapper: purge events of exactly one type — a degenerate
 * range [type..type] passed to isc__task_purgerange().
 */
671 ISC_TASKFUNC_SCOPE unsigned int
672 isc__task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
676 * Purge events from a task's event queue.
679 XTRACE("isc_task_purge");
681 return (isc__task_purgerange(task, sender, type, type, tag));
/*
 * Purge one specific event pointer from 'task0''s queue, if it is present
 * and purgeable.  'event' may even be a stale/invalid pointer: it is only
 * compared, never dereferenced, until a queue entry matches it.  Returns
 * whether the event was found and freed.
 */
684 ISC_TASKFUNC_SCOPE isc_boolean_t
685 isc__task_purgeevent(isc_task_t *task0, isc_event_t *event) {
686 isc__task_t *task = (isc__task_t *)task0;
687 isc_event_t *curr_event, *next_event;
690 * Purge 'event' from a task's event queue.
692 * XXXRTH: WARNING: This method may be removed before beta.
695 REQUIRE(VALID_TASK(task));
698 * If 'event' is on the task's event queue, it will be purged,
699 * unless it is marked as unpurgeable. 'event' does not have to be
700 * on the task's event queue; in fact, it can even be an invalid
701 * pointer. Purging only occurs if the event is actually on the task's
704 * Purging never changes the state of the task.
/* Linear scan; save next_event before DEQUEUE unlinks the current one. */
708 for (curr_event = HEAD(task->events);
710 curr_event = next_event) {
711 next_event = NEXT(curr_event, ev_link);
712 if (curr_event == event && PURGE_OK(event)) {
713 DEQUEUE(task->events, curr_event, ev_link);
/* Not found (or unpurgeable): report failure to the caller. */
719 if (curr_event == NULL)
722 isc_event_free(&curr_event);
/*
 * Remove matching events from the task's queue WITHOUT freeing them:
 * ownership transfers to the caller via 'events'.  Unlike purging, this
 * ignores the NOPURGE attribute (purging == ISC_FALSE).
 */
727 ISC_TASKFUNC_SCOPE unsigned int
728 isc__task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
729 isc_eventtype_t last, void *tag,
730 isc_eventlist_t *events)
733 * Remove events from a task's event queue.
736 XTRACE("isc_task_unsendrange");
738 return (dequeue_events((isc__task_t *)task, sender, first,
739 last, tag, events, ISC_FALSE));
/*
 * Single-type variant of isc__task_unsendrange(): remove events of exactly
 * 'type' to the caller-supplied 'events' list without freeing them.
 */
742 ISC_TASKFUNC_SCOPE unsigned int
743 isc__task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
744 void *tag, isc_eventlist_t *events)
747 * Remove events from a task's event queue.
750 XTRACE("isc_task_unsend");
752 return (dequeue_events((isc__task_t *)task, sender, type,
753 type, tag, events, ISC_FALSE));
/*
 * Register a shutdown action: allocate an ISC_TASKEVENT_SHUTDOWN event and
 * queue it on the task's on_shutdown list, to be delivered when the task
 * shuts down.  If the task is ALREADY shutting down, the registration is
 * refused (ISC_R_SHUTTINGDOWN) and the event is released instead of queued.
 * Returns ISC_R_NOMEMORY if the event cannot be allocated.
 */
756 ISC_TASKFUNC_SCOPE isc_result_t
757 isc__task_onshutdown(isc_task_t *task0, isc_taskaction_t action,
760 isc__task_t *task = (isc__task_t *)task0;
761 isc_boolean_t disallowed = ISC_FALSE;
762 isc_result_t result = ISC_R_SUCCESS;
766 * Send a shutdown event with action 'action' and argument 'arg' when
767 * 'task' is shutdown.
770 REQUIRE(VALID_TASK(task));
771 REQUIRE(action != NULL);
773 event = isc_event_allocate(task->manager->mctx,
775 ISC_TASKEVENT_SHUTDOWN,
780 return (ISC_R_NOMEMORY);
783 if (TASK_SHUTTINGDOWN(task)) {
784 disallowed = ISC_TRUE;
785 result = ISC_R_SHUTTINGDOWN;
787 ENQUEUE(task->on_shutdown, event, ev_link);
/* Disallowed path: the freshly-allocated event must not leak. */
791 isc_mem_put(task->manager->mctx, event, sizeof(*event));
/*
 * Public shutdown: run task_shutdown() under the task lock; if that moved
 * the task from idle to ready, hand it to the manager's ready queue after
 * the lock is released.
 */
796 ISC_TASKFUNC_SCOPE void
797 isc__task_shutdown(isc_task_t *task0) {
798 isc__task_t *task = (isc__task_t *)task0;
799 isc_boolean_t was_idle;
805 REQUIRE(VALID_TASK(task));
/* NOTE(review): LOCK(&task->lock) line absent from this excerpt. */
808 was_idle = task_shutdown(task);
/*
 * Destroy = shutdown + detach: initiate shutdown of *taskp, then drop the
 * caller's reference (which NULLs *taskp).  Actual destruction happens
 * once the run loop finishes the task.
 */
815 ISC_TASKFUNC_SCOPE void
816 isc__task_destroy(isc_task_t **taskp) {
822 REQUIRE(taskp != NULL);
824 isc_task_shutdown(*taskp);
825 isc_task_detach(taskp);
/*
 * Set the task's debugging name (and tag — assignment line not visible in
 * this excerpt).  The name buffer is zeroed first and strncpy is bounded
 * to size-1, so the stored name is always NUL-terminated.
 */
828 ISC_TASKFUNC_SCOPE void
829 isc__task_setname(isc_task_t *task0, const char *name, void *tag) {
830 isc__task_t *task = (isc__task_t *)task0;
836 REQUIRE(VALID_TASK(task));
/* memset-then-strncpy guarantees NUL termination despite strncpy's quirks. */
839 memset(task->name, 0, sizeof(task->name));
840 strncpy(task->name, name, sizeof(task->name) - 1);
/*
 * Accessor for the task's debugging name (return statement not visible in
 * this excerpt).
 */
845 ISC_TASKFUNC_SCOPE const char *
846 isc__task_getname(isc_task_t *task0) {
847 isc__task_t *task = (isc__task_t *)task0;
849 REQUIRE(VALID_TASK(task));
/*
 * Accessor for the task's opaque tag pointer (return statement not visible
 * in this excerpt).
 */
854 ISC_TASKFUNC_SCOPE void *
855 isc__task_gettag(isc_task_t *task0) {
856 isc__task_t *task = (isc__task_t *)task0;
858 REQUIRE(VALID_TASK(task));
/*
 * Report the time the task last started running an event ('task->now',
 * set in dispatch()) via '*t'.  Body past the REQUIRE is not visible in
 * this excerpt.
 */
863 ISC_TASKFUNC_SCOPE void
864 isc__task_getcurrenttime(isc_task_t *task0, isc_stdtime_t *t) {
865 isc__task_t *task = (isc__task_t *)task0;
867 REQUIRE(VALID_TASK(task));
/*
 * Core scheduler loop.  Threaded build: each worker runs this until the
 * manager finishes, blocking on work_available when the ready queue is
 * empty or an exclusive request is pending.  Non-threaded build: one pass
 * dispatches up to DEFAULT_TASKMGR_QUANTUM events, parking still-runnable
 * tasks on a local list that is re-appended to the ready queue at the end.
 *
 * Per ready task: dequeue under the manager lock, run up to task->quantum
 * events under the task lock (dropping it around each action), then decide
 * done/idle/requeue.  Tasks with no references, no events, and no shutdown
 * in progress are auto-shut-down here to avoid zombies.
 *
 * NOTE(review): excerpt is missing lines throughout (inner while header,
 * action invocation, requeue/done bookkeeping) — structure below is partial.
 */
881 dispatch(isc__taskmgr_t *manager) {
883 #ifndef USE_WORKER_THREADS
884 unsigned int total_dispatch_count = 0;
885 isc__tasklist_t ready_tasks;
886 #endif /* USE_WORKER_THREADS */
888 REQUIRE(VALID_MANAGER(manager));
891 * Again we're trying to hold the lock for as short a time as possible
892 * and to do as little locking and unlocking as possible.
894 * In both while loops, the appropriate lock must be held before the
895 * while body starts. Code which acquired the lock at the top of
896 * the loop would be more readable, but would result in a lot of
897 * extra locking. Compare:
904 * while (expression) {
909 * Unlocked part here...
916 * Note how if the loop continues we unlock and then immediately lock.
917 * For N iterations of the loop, this code does 2N+1 locks and 2N+1
918 * unlocks. Also note that the lock is not held when the while
919 * condition is tested, which may or may not be important, depending
925 * while (expression) {
929 * Unlocked part here...
936 * For N iterations of the loop, this code does N+1 locks and N+1
937 * unlocks. The while expression is always protected by the lock.
940 #ifndef USE_WORKER_THREADS
941 ISC_LIST_INIT(ready_tasks);
943 LOCK(&manager->lock);
944 while (!FINISHED(manager)) {
945 #ifdef USE_WORKER_THREADS
947 * For reasons similar to those given in the comment in
948 * isc_task_send() above, it is safe for us to dequeue
949 * the task while only holding the manager lock, and then
950 * change the task to running state while only holding the
/* Sleep while there is nothing runnable, or exclusive mode blocks us. */
953 while ((EMPTY(manager->ready_tasks) ||
954 manager->exclusive_requested) &&
957 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
959 ISC_MSG_WAIT, "wait"));
960 WAIT(&manager->work_available, &manager->lock);
961 XTHREADTRACE(isc_msgcat_get(isc_msgcat,
963 ISC_MSG_AWAKE, "awake"));
965 #else /* USE_WORKER_THREADS */
/* Non-threaded: bound the work done per dispatch() call. */
966 if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
967 EMPTY(manager->ready_tasks))
969 #endif /* USE_WORKER_THREADS */
970 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
971 ISC_MSG_WORKING, "working"));
973 task = HEAD(manager->ready_tasks);
/* Per-task scheduling state for this slice. */
975 unsigned int dispatch_count = 0;
976 isc_boolean_t done = ISC_FALSE;
977 isc_boolean_t requeue = ISC_FALSE;
978 isc_boolean_t finished = ISC_FALSE;
981 INSIST(VALID_TASK(task));
984 * Note we only unlock the manager lock if we actually
985 * have a task to do. We must reacquire the manager
986 * lock before exiting the 'if (task != NULL)' block.
988 DEQUEUE(manager->ready_tasks, task, ready_link);
989 manager->tasks_running++;
990 UNLOCK(&manager->lock);
993 INSIST(task->state == task_state_ready);
994 task->state = task_state_running;
995 XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
996 ISC_MSG_RUNNING, "running"));
/* Stamp task->now: this is what isc_task_getcurrenttime() reports. */
997 isc_stdtime_get(&task->now);
999 if (!EMPTY(task->events)) {
1000 event = HEAD(task->events);
1001 DEQUEUE(task->events, event, ev_link);
1004 * Execute the event action.
1006 XTRACE(isc_msgcat_get(isc_msgcat,
/* Action runs with the task lock dropped so the task can be sent to. */
1010 if (event->ev_action != NULL) {
1011 UNLOCK(&task->lock);
1018 #ifndef USE_WORKER_THREADS
1019 total_dispatch_count++;
1020 #endif /* USE_WORKER_THREADS */
/* Orphaned task (no refs, no events, not yet shutting down): reap it. */
1023 if (task->references == 0 &&
1024 EMPTY(task->events) &&
1025 !TASK_SHUTTINGDOWN(task)) {
1026 isc_boolean_t was_idle;
1029 * There are no references and no
1030 * pending events for this task,
1031 * which means it will not become
1032 * runnable again via an external
1033 * action (such as sending an event
1036 * We initiate shutdown to prevent
1037 * it from becoming a zombie.
1039 * We do this here instead of in
1040 * the "if EMPTY(task->events)" block
1043 * If we post no shutdown events,
1044 * we want the task to finish.
1046 * If we did post shutdown events,
1047 * will still want the task's
1048 * quantum to be applied.
1050 was_idle = task_shutdown(task);
1054 if (EMPTY(task->events)) {
1056 * Nothing else to do for this task
1059 XTRACE(isc_msgcat_get(isc_msgcat,
/* Unreferenced and shut down -> done; otherwise park as idle. */
1063 if (task->references == 0 &&
1064 TASK_SHUTTINGDOWN(task)) {
1068 XTRACE(isc_msgcat_get(
1073 finished = ISC_TRUE;
1074 task->state = task_state_done;
1076 task->state = task_state_idle;
1078 } else if (dispatch_count >= task->quantum) {
1080 * Our quantum has expired, but
1081 * there is more work to be done.
1082 * We'll requeue it to the ready
1085 * We don't check quantum until
1086 * dispatching at least one event,
1087 * so the minimum quantum is one.
1089 XTRACE(isc_msgcat_get(isc_msgcat,
1093 task->state = task_state_ready;
1098 UNLOCK(&task->lock);
1101 task_finished(task);
1103 LOCK(&manager->lock);
1104 manager->tasks_running--;
1105 #ifdef USE_WORKER_THREADS
/* == 1: this worker is the exclusive requester's last obstacle. */
1106 if (manager->exclusive_requested &&
1107 manager->tasks_running == 1) {
1108 SIGNAL(&manager->exclusive_granted);
1110 #endif /* USE_WORKER_THREADS */
1113 * We know we're awake, so we don't have
1114 * to wakeup any sleeping threads if the
1115 * ready queue is empty before we requeue.
1117 * A possible optimization if the queue is
1118 * empty is to 'goto' the 'if (task != NULL)'
1119 * block, avoiding the ENQUEUE of the task
1120 * and the subsequent immediate DEQUEUE
1121 * (since it is the only executable task).
1122 * We don't do this because then we'd be
1123 * skipping the exit_requested check. The
1124 * cost of ENQUEUE is low anyway, especially
1125 * when you consider that we'd have to do
1126 * an extra EMPTY check to see if we could
1127 * do the optimization. If the ready queue
1128 * were usually nonempty, the 'optimization'
1129 * might even hurt rather than help.
1131 #ifdef USE_WORKER_THREADS
1132 ENQUEUE(manager->ready_tasks, task,
1135 ENQUEUE(ready_tasks, task, ready_link);
1140 #ifndef USE_WORKER_THREADS
/* Return any still-runnable tasks to the shared ready queue. */
1141 ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
1143 UNLOCK(&manager->lock);
/*
 * Worker-thread entry point (threaded build only): trace start, run
 * dispatch() until the manager finishes, trace exit, and release
 * thread-local OpenSSL error state when OPENSSL_LEAKS is defined.
 */
1146 #ifdef USE_WORKER_THREADS
1147 static isc_threadresult_t
1152 isc__taskmgr_t *manager = uap;
1154 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1155 ISC_MSG_STARTING, "starting"));
1159 XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1160 ISC_MSG_EXITING, "exiting"));
1162 #ifdef OPENSSL_LEAKS
/* Free this thread's OpenSSL error queue to avoid a per-thread leak. */
1163 ERR_remove_state(0);
1166 return ((isc_threadresult_t)0);
1168 #endif /* USE_WORKER_THREADS */
/*
 * Release all manager resources: condition variables and thread array
 * (threaded build), the manager lock, the manager struct itself, and the
 * manager's reference on its memory context.  Magic numbers are cleared
 * first so stale pointers fail VALID_MANAGER().
 */
1171 manager_free(isc__taskmgr_t *manager) {
1174 #ifdef USE_WORKER_THREADS
1175 (void)isc_condition_destroy(&manager->exclusive_granted);
1176 (void)isc_condition_destroy(&manager->work_available);
1177 isc_mem_free(manager->mctx, manager->threads);
1178 #endif /* USE_WORKER_THREADS */
1179 DESTROYLOCK(&manager->lock);
1180 manager->common.impmagic = 0;
1181 manager->common.magic = 0;
/* Save mctx: the manager struct (holding the pointer) is freed from it. */
1182 mctx = manager->mctx;
1183 isc_mem_put(mctx, manager, sizeof(*manager));
1184 isc_mem_detach(&mctx);
1186 #ifdef USE_SHARED_MANAGER
1188 #endif /* USE_SHARED_MANAGER */
/*
 * Create a task manager with 'workers' worker threads (threaded build)
 * and the given default per-task quantum (0 selects
 * DEFAULT_DEFAULT_QUANTUM).  In a shared-manager (non-threaded) build an
 * existing global manager is reference-counted and returned instead of
 * creating a new one.
 *
 * Returns ISC_R_SUCCESS, ISC_R_NOMEMORY, ISC_R_UNEXPECTED (condition-var
 * init), ISC_R_NOTHREADS (no worker thread could be created), or
 * ISC_R_SHUTTINGDOWN (shared manager already exiting).
 *
 * NOTE(review): excerpt is missing lines (e.g. goto targets after mutex
 * failure, the thread-create success branch) — verify against full source.
 */
1191 ISC_TASKFUNC_SCOPE isc_result_t
1192 isc__taskmgr_create(isc_mem_t *mctx, unsigned int workers,
1193 unsigned int default_quantum, isc_taskmgr_t **managerp)
1195 isc_result_t result;
1196 unsigned int i, started = 0;
1197 isc__taskmgr_t *manager;
1200 * Create a new task manager.
1203 REQUIRE(workers > 0);
1204 REQUIRE(managerp != NULL && *managerp == NULL);
1206 #ifndef USE_WORKER_THREADS
1211 #ifdef USE_SHARED_MANAGER
/* Shared-manager build: reuse the global manager if one exists. */
1212 if (taskmgr != NULL) {
1213 if (taskmgr->refs == 0)
1214 return (ISC_R_SHUTTINGDOWN);
1216 *managerp = (isc_taskmgr_t *)taskmgr;
1217 return (ISC_R_SUCCESS);
1219 #endif /* USE_SHARED_MANAGER */
1221 manager = isc_mem_get(mctx, sizeof(*manager));
1222 if (manager == NULL)
1223 return (ISC_R_NOMEMORY);
1224 manager->common.methods = &taskmgrmethods;
1225 manager->common.impmagic = TASK_MANAGER_MAGIC;
1226 manager->common.magic = ISCAPI_TASKMGR_MAGIC;
1227 manager->mctx = NULL;
1228 result = isc_mutex_init(&manager->lock);
1229 if (result != ISC_R_SUCCESS)
1232 #ifdef USE_WORKER_THREADS
/* 'workers' counts successfully started threads, incremented later. */
1233 manager->workers = 0;
1234 manager->threads = isc_mem_allocate(mctx,
1235 workers * sizeof(isc_thread_t));
1236 if (manager->threads == NULL) {
1237 result = ISC_R_NOMEMORY;
1240 if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
1241 UNEXPECTED_ERROR(__FILE__, __LINE__,
1242 "isc_condition_init() %s",
1243 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1244 ISC_MSG_FAILED, "failed"));
1245 result = ISC_R_UNEXPECTED;
1246 goto cleanup_threads;
1248 if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
1249 UNEXPECTED_ERROR(__FILE__, __LINE__,
1250 "isc_condition_init() %s",
1251 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
1252 ISC_MSG_FAILED, "failed"));
1253 result = ISC_R_UNEXPECTED;
1254 goto cleanup_workavailable;
1256 #endif /* USE_WORKER_THREADS */
1257 if (default_quantum == 0)
1258 default_quantum = DEFAULT_DEFAULT_QUANTUM;
1259 manager->default_quantum = default_quantum;
1260 INIT_LIST(manager->tasks);
1261 INIT_LIST(manager->ready_tasks);
1262 manager->tasks_running = 0;
1263 manager->exclusive_requested = ISC_FALSE;
1264 manager->exiting = ISC_FALSE;
1266 isc_mem_attach(mctx, &manager->mctx);
1268 #ifdef USE_WORKER_THREADS
/* Hold the manager lock so new workers block until startup completes. */
1269 LOCK(&manager->lock);
1273 for (i = 0; i < workers; i++) {
1274 if (isc_thread_create(run, manager,
1275 &manager->threads[manager->workers]) ==
1281 UNLOCK(&manager->lock);
/* No threads at all could be started: undo everything. */
1284 manager_free(manager);
1285 return (ISC_R_NOTHREADS);
1287 isc_thread_setconcurrency(workers);
1288 #endif /* USE_WORKER_THREADS */
1289 #ifdef USE_SHARED_MANAGER
1292 #endif /* USE_SHARED_MANAGER */
1294 *managerp = (isc_taskmgr_t *)manager;
1296 return (ISC_R_SUCCESS);
/* Error unwinding: each label tears down what was built before its goto. */
1298 #ifdef USE_WORKER_THREADS
1299 cleanup_workavailable:
1300 (void)isc_condition_destroy(&manager->work_available);
1302 isc_mem_free(mctx, manager->threads);
1304 DESTROYLOCK(&manager->lock);
1307 isc_mem_put(mctx, manager, sizeof(*manager));
/*
 * Destroy the manager: mark it exiting, post shutdown events to every
 * task, then either join all worker threads (threaded build) or run the
 * event loop to completion inline (non-threaded build), and finally free
 * the manager.  In a shared-manager build a remaining reference only
 * decrements the refcount.  Must be called exactly once, by a non-worker
 * thread.
 */
1311 ISC_TASKFUNC_SCOPE void
1312 isc__taskmgr_destroy(isc_taskmgr_t **managerp) {
1313 isc__taskmgr_t *manager;
1318 * Destroy '*managerp'.
1321 REQUIRE(managerp != NULL);
1322 manager = (isc__taskmgr_t *)*managerp;
1323 REQUIRE(VALID_MANAGER(manager));
1325 #ifndef USE_WORKER_THREADS
1327 #endif /* USE_WORKER_THREADS */
1329 #ifdef USE_SHARED_MANAGER
/* Shared build: other holders remain; just drop our reference. */
1331 if (manager->refs > 0) {
1337 XTHREADTRACE("isc_taskmgr_destroy");
1339 * Only one non-worker thread may ever call this routine.
1340 * If a worker thread wants to initiate shutdown of the
1341 * task manager, it should ask some non-worker thread to call
1342 * isc_taskmgr_destroy(), e.g. by signalling a condition variable
1343 * that the startup thread is sleeping on.
1347 * Unlike elsewhere, we're going to hold this lock a long time.
1348 * We need to do so, because otherwise the list of tasks could
1349 * change while we were traversing it.
1351 * This is also the only function where we will hold both the
1352 * task manager lock and a task lock at the same time.
1355 LOCK(&manager->lock);
1358 * Make sure we only get called once.
1360 INSIST(!manager->exiting);
1361 manager->exiting = ISC_TRUE;
1364 * Post shutdown event(s) to every task (if they haven't already been
/* task_shutdown() returning true means the task just became ready. */
1367 for (task = HEAD(manager->tasks);
1369 task = NEXT(task, link)) {
1371 if (task_shutdown(task))
1372 ENQUEUE(manager->ready_tasks, task, ready_link);
1373 UNLOCK(&task->lock);
1375 #ifdef USE_WORKER_THREADS
1377 * Wake up any sleeping workers. This ensures we get work done if
1378 * there's work left to do, and if there are already no tasks left
1379 * it will cause the workers to see manager->exiting.
1381 BROADCAST(&manager->work_available);
1382 UNLOCK(&manager->lock);
1385 * Wait for all the worker threads to exit.
1387 for (i = 0; i < manager->workers; i++)
1388 (void)isc_thread_join(manager->threads[i], NULL);
1389 #else /* USE_WORKER_THREADS */
1391 * Dispatch the shutdown events.
1393 UNLOCK(&manager->lock);
/* Drain the ready queue inline until nothing is left to run. */
1394 while (isc__taskmgr_ready((isc_taskmgr_t *)manager))
1395 (void)isc__taskmgr_dispatch((isc_taskmgr_t *)manager);
/* Leftover tasks at this point are leaks — dump allocations for debugging. */
1397 if (!ISC_LIST_EMPTY(manager->tasks))
1398 isc_mem_printallactive(stderr);
1400 INSIST(ISC_LIST_EMPTY(manager->tasks));
1401 #ifdef USE_SHARED_MANAGER
1404 #endif /* USE_WORKER_THREADS */
1406 manager_free(manager);
/*
 * Non-threaded build only: report whether the manager (or, in the shared
 * build, the global manager when NULL is passed) has any ready tasks.
 */
1411 #ifndef USE_WORKER_THREADS
1413 isc__taskmgr_ready(isc_taskmgr_t *manager0) {
1414 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1416 #ifdef USE_SHARED_MANAGER
/* NULL selects the process-wide shared manager (fallback lines elided). */
1417 if (manager == NULL)
1420 if (manager == NULL)
1422 return (ISC_TF(!ISC_LIST_EMPTY(manager->ready_tasks)));
/*
 * Non-threaded build only: run one bounded dispatch pass over the
 * manager's ready queue (the dispatch() call itself is elided from this
 * excerpt).  Returns ISC_R_NOTFOUND when no manager is available,
 * ISC_R_SUCCESS otherwise.
 */
1426 isc__taskmgr_dispatch(isc_taskmgr_t *manager0) {
1427 isc__taskmgr_t *manager = (isc__taskmgr_t *)manager0;
1429 #ifdef USE_SHARED_MANAGER
/* NULL selects the process-wide shared manager (fallback lines elided). */
1430 if (manager == NULL)
1433 if (manager == NULL)
1434 return (ISC_R_NOTFOUND);
1438 return (ISC_R_SUCCESS);
1441 #endif /* USE_WORKER_THREADS */
/*
 * Request exclusive execution for the calling (currently running) task:
 * set exclusive_requested, then wait until this task is the only one
 * running.  Fails with ISC_R_LOCKBUSY if another task already holds the
 * exclusive request.  No-op in the non-threaded build (body elided here).
 */
1443 ISC_TASKFUNC_SCOPE isc_result_t
1444 isc__task_beginexclusive(isc_task_t *task0) {
1445 #ifdef USE_WORKER_THREADS
1446 isc__task_t *task = (isc__task_t *)task0;
1447 isc__taskmgr_t *manager = task->manager;
1448 REQUIRE(task->state == task_state_running);
1449 LOCK(&manager->lock);
1450 if (manager->exclusive_requested) {
1451 UNLOCK(&manager->lock);
1452 return (ISC_R_LOCKBUSY);
1454 manager->exclusive_requested = ISC_TRUE;
/* > 1: ourselves plus at least one other; wait for the others to finish. */
1455 while (manager->tasks_running > 1) {
1456 WAIT(&manager->exclusive_granted, &manager->lock);
1458 UNLOCK(&manager->lock);
1462 return (ISC_R_SUCCESS);
/*
 * isc__task_endexclusive():
 * Release exclusive mode previously obtained via
 * isc__task_beginexclusive(): under the manager lock, clear
 * exclusive_requested (REQUIREing that it was in fact set) and
 * broadcast work_available so paused worker threads resume picking
 * up ready tasks.  Must be called from a running task.
 *
 * NOTE(review): lossy extract -- the closing brace, the #else
 * (non-threaded) branch, and #endif are not visible here.
 */
1465 ISC_TASKFUNC_SCOPE void
1466 isc__task_endexclusive(isc_task_t *task0) {
1467 #ifdef USE_WORKER_THREADS
1468 isc__task_t *task = (isc__task_t *)task0;
1469 isc__taskmgr_t *manager = task->manager;
1471 REQUIRE(task->state == task_state_running);
1472 LOCK(&manager->lock);
1473 REQUIRE(manager->exclusive_requested);
1474 manager->exclusive_requested = ISC_FALSE;
/* Wake all workers blocked waiting for exclusivity to end. */
1475 BROADCAST(&manager->work_available);
1476 UNLOCK(&manager->lock);
1482 #ifdef USE_SOCKETIMPREGISTER
/*
 * isc__task_register():
 * Register this implementation's manager-creation routine
 * (isc__taskmgr_create) with the generic isc_task registration layer.
 *
 * NOTE(review): the guard at the line above reads
 * USE_SOCKETIMPREGISTER, but for task code the analogous guard is
 * conventionally USE_TASKIMPREGISTER -- verify this is not a
 * copy/paste slip (or an artifact of this lossy extract) against the
 * full source.  The return-type line and #endif are not visible here.
 */
1484 isc__task_register() {
1485 return (isc_task_register(isc__taskmgr_create));
/*
 * isc_task_exiting():
 * Public predicate: true when the task has been told to shut down
 * (the TASK_SHUTTINGDOWN flag test).  Validates the task's magic
 * number first via REQUIRE(VALID_TASK()).
 *
 * NOTE(review): the return-type line and closing brace are not
 * visible in this lossy extract.
 */
1490 isc_task_exiting(isc_task_t *t) {
1491 isc__task_t *task = (isc__task_t *)t;
1493 REQUIRE(VALID_TASK(task));
1494 return (TASK_SHUTTINGDOWN(task));
1498 #if defined(HAVE_LIBXML2) && defined(BIND9)
/*
 * isc_taskmgr_renderxml():
 * Emit the task manager's state as XML (for the BIND statistics
 * channel): a <thread-model> section (type "threaded" plus
 * worker-threads, or "non-threaded" plus references, depending on
 * ISC_PLATFORM_USETHREADS), then default-quantum and tasks-running,
 * then a <tasks> list with one <task> element per task carrying
 * name (if set), references, id (the task's address), state, and
 * quantum.
 *
 * NOTE(review): lossy extract -- local declarations (the 'task'
 * iterator), the LOCK/UNLOCK of mgr->lock, the per-task LOCK that
 * must pair with the UNLOCK(&task->lock) visible below, the
 * task->name continuation argument of the WriteFormatString at
 * original line 1547, and the function's closing lines are not
 * visible here.  Confirm against the complete source.
 * NOTE(review): xmlTextWriter return codes are never checked in this
 * function; a write failure is silently ignored.
 */
1500 isc_taskmgr_renderxml(isc_taskmgr_t *mgr0, xmlTextWriterPtr writer) {
1501 isc__taskmgr_t *mgr = (isc__taskmgr_t *)mgr0;
1507 * Write out the thread-model, and some details about each depending
1508 * on which type is enabled.
1510 xmlTextWriterStartElement(writer, ISC_XMLCHAR "thread-model");
1511 #ifdef ISC_PLATFORM_USETHREADS
1512 xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1513 xmlTextWriterWriteString(writer, ISC_XMLCHAR "threaded");
1514 xmlTextWriterEndElement(writer); /* type */
1516 xmlTextWriterStartElement(writer, ISC_XMLCHAR "worker-threads");
1517 xmlTextWriterWriteFormatString(writer, "%d", mgr->workers);
1518 xmlTextWriterEndElement(writer); /* worker-threads */
1519 #else /* ISC_PLATFORM_USETHREADS */
1520 xmlTextWriterStartElement(writer, ISC_XMLCHAR "type");
1521 xmlTextWriterWriteString(writer, ISC_XMLCHAR "non-threaded");
1522 xmlTextWriterEndElement(writer); /* type */
1524 xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1525 xmlTextWriterWriteFormatString(writer, "%d", mgr->refs);
1526 xmlTextWriterEndElement(writer); /* references */
1527 #endif /* ISC_PLATFORM_USETHREADS */
1529 xmlTextWriterStartElement(writer, ISC_XMLCHAR "default-quantum");
1530 xmlTextWriterWriteFormatString(writer, "%d", mgr->default_quantum);
1531 xmlTextWriterEndElement(writer); /* default-quantum */
1533 xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks-running");
1534 xmlTextWriterWriteFormatString(writer, "%d", mgr->tasks_running);
1535 xmlTextWriterEndElement(writer); /* tasks-running */
1537 xmlTextWriterEndElement(writer); /* thread-model */
/* Walk the manager's task list, one <task> element per entry. */
1539 xmlTextWriterStartElement(writer, ISC_XMLCHAR "tasks");
1540 task = ISC_LIST_HEAD(mgr->tasks);
1541 while (task != NULL) {
1543 xmlTextWriterStartElement(writer, ISC_XMLCHAR "task");
/* <name> is emitted only for tasks that were given one. */
1545 if (task->name[0] != 0) {
1546 xmlTextWriterStartElement(writer, ISC_XMLCHAR "name");
1547 xmlTextWriterWriteFormatString(writer, "%s",
1549 xmlTextWriterEndElement(writer); /* name */
1552 xmlTextWriterStartElement(writer, ISC_XMLCHAR "references");
1553 xmlTextWriterWriteFormatString(writer, "%d", task->references);
1554 xmlTextWriterEndElement(writer); /* references */
/* The task's address doubles as its stable identifier. */
1556 xmlTextWriterStartElement(writer, ISC_XMLCHAR "id");
1557 xmlTextWriterWriteFormatString(writer, "%p", task);
1558 xmlTextWriterEndElement(writer); /* id */
1560 xmlTextWriterStartElement(writer, ISC_XMLCHAR "state");
1561 xmlTextWriterWriteFormatString(writer, "%s",
1562 statenames[task->state]);
1563 xmlTextWriterEndElement(writer); /* state */
1565 xmlTextWriterStartElement(writer, ISC_XMLCHAR "quantum");
1566 xmlTextWriterWriteFormatString(writer, "%d", task->quantum);
1567 xmlTextWriterEndElement(writer); /* quantum */
1569 xmlTextWriterEndElement(writer);
/* Matching per-task LOCK is in lines missing from this extract. */
1571 UNLOCK(&task->lock);
1572 task = ISC_LIST_NEXT(task, link);
1574 xmlTextWriterEndElement(writer); /* tasks */
1578 #endif /* HAVE_LIBXML2 && BIND9 */