/*
 * Copyright (C) 2004-2006  Internet Systems Consortium, Inc. ("ISC")
 * Copyright (C) 1998-2003  Internet Software Consortium.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
 * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
 * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/* $Id: task.c,v 1.91.18.6 2006/01/04 23:50:23 marka Exp $ */

/*! \file
 * \author Principal Author: Bob Halley
 */

/*
 * XXXRTH  Need to document the states a task can be in, and the rules
 * for changing states.
 */
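/*
 * A minimal usage sketch of this interface (illustrative only; it assumes
 * an isc_mem_t context 'mctx' created by the application and an
 * application-defined event type MYAPP_EVENT greater than zero, and it
 * omits error handling):
 *
 *      static void
 *      my_action(isc_task_t *task, isc_event_t *event) {
 *              ... handle the event ...
 *              isc_event_free(&event);
 *      }
 *
 *      isc_taskmgr_t *manager = NULL;
 *      isc_task_t *task = NULL;
 *      isc_event_t *event;
 *
 *      isc_taskmgr_create(mctx, 4, 0, &manager);
 *      isc_task_create(manager, 0, &task);
 *      event = isc_event_allocate(mctx, NULL, MYAPP_EVENT, my_action, NULL,
 *                                 sizeof(*event));
 *      isc_task_send(task, &event);
 *      isc_task_detach(&task);
 *      isc_taskmgr_destroy(&manager);
 */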

#include <config.h>

#include <isc/condition.h>
#include <isc/event.h>
#include <isc/magic.h>
#include <isc/mem.h>
#include <isc/msgs.h>
#include <isc/platform.h>
#include <isc/string.h>
#include <isc/task.h>
#include <isc/thread.h>
#include <isc/util.h>

#ifndef ISC_PLATFORM_USETHREADS
#include "task_p.h"
#endif /* ISC_PLATFORM_USETHREADS */

#define ISC_TASK_NAMES 1

#ifdef ISC_TASK_TRACE
#define XTRACE(m)               fprintf(stderr, "task %p thread %lu: %s\n", \
                                       task, isc_thread_self(), (m))
#define XTTRACE(t, m)           fprintf(stderr, "task %p thread %lu: %s\n", \
                                       (t), isc_thread_self(), (m))
#define XTHREADTRACE(m)         fprintf(stderr, "thread %lu: %s\n", \
                                       isc_thread_self(), (m))
#else
#define XTRACE(m)
#define XTTRACE(t, m)
#define XTHREADTRACE(m)
#endif

/***
 *** Types.
 ***/

typedef enum {
        task_state_idle, task_state_ready, task_state_running,
        task_state_done
} task_state_t;
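
/*
 * A task is always in exactly one of these states.  As implemented below:
 * an idle task has no queued events and is not on the manager's ready
 * queue; it becomes ready when an event is sent to it (task_send()), when
 * shutdown is requested (task_shutdown()), or when its last reference is
 * dropped (task_detach()).  A ready task sits on the manager's ready queue
 * until dispatch() picks it up and marks it running.  A running task
 * returns to ready if its quantum expires with events still queued, goes
 * back to idle if its event queue drains, or becomes done if its queue
 * drains while it has no references and is shutting down; done tasks are
 * unlinked and freed by task_finished().
 */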

#define TASK_MAGIC                      ISC_MAGIC('T', 'A', 'S', 'K')
#define VALID_TASK(t)                   ISC_MAGIC_VALID(t, TASK_MAGIC)

struct isc_task {
        /* Not locked. */
        unsigned int                    magic;
        isc_taskmgr_t *                 manager;
        isc_mutex_t                     lock;
        /* Locked by task lock. */
        task_state_t                    state;
        unsigned int                    references;
        isc_eventlist_t                 events;
        isc_eventlist_t                 on_shutdown;
        unsigned int                    quantum;
        unsigned int                    flags;
        isc_stdtime_t                   now;
#ifdef ISC_TASK_NAMES
        char                            name[16];
        void *                          tag;
#endif
        /* Locked by task manager lock. */
        LINK(isc_task_t)                link;
        LINK(isc_task_t)                ready_link;
};

#define TASK_F_SHUTTINGDOWN             0x01

#define TASK_SHUTTINGDOWN(t)            (((t)->flags & TASK_F_SHUTTINGDOWN) \
                                         != 0)

#define TASK_MANAGER_MAGIC              ISC_MAGIC('T', 'S', 'K', 'M')
#define VALID_MANAGER(m)                ISC_MAGIC_VALID(m, TASK_MANAGER_MAGIC)

struct isc_taskmgr {
        /* Not locked. */
        unsigned int                    magic;
        isc_mem_t *                     mctx;
        isc_mutex_t                     lock;
#ifdef ISC_PLATFORM_USETHREADS
        unsigned int                    workers;
        isc_thread_t *                  threads;
#endif /* ISC_PLATFORM_USETHREADS */
        /* Locked by task manager lock. */
        unsigned int                    default_quantum;
        LIST(isc_task_t)                tasks;
        isc_tasklist_t                  ready_tasks;
#ifdef ISC_PLATFORM_USETHREADS
        isc_condition_t                 work_available;
        isc_condition_t                 exclusive_granted;
#endif /* ISC_PLATFORM_USETHREADS */
        unsigned int                    tasks_running;
        isc_boolean_t                   exclusive_requested;
        isc_boolean_t                   exiting;
#ifndef ISC_PLATFORM_USETHREADS
        unsigned int                    refs;
#endif /* ISC_PLATFORM_USETHREADS */
};

#define DEFAULT_TASKMGR_QUANTUM         10
#define DEFAULT_DEFAULT_QUANTUM         5
#define FINISHED(m)                     ((m)->exiting && EMPTY((m)->tasks))
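
/*
 * DEFAULT_TASKMGR_QUANTUM bounds the number of events a single call to
 * dispatch() will run in the non-threaded case before returning to the
 * caller.  DEFAULT_DEFAULT_QUANTUM is the per-task quantum used when
 * neither isc_taskmgr_create() nor isc_task_create() supplies one.
 * FINISHED() is true once the manager is exiting and every task has been
 * unlinked by task_finished().
 */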

#ifndef ISC_PLATFORM_USETHREADS
static isc_taskmgr_t *taskmgr = NULL;
#endif /* ISC_PLATFORM_USETHREADS */

/***
 *** Tasks.
 ***/

static void
task_finished(isc_task_t *task) {
        isc_taskmgr_t *manager = task->manager;

        REQUIRE(EMPTY(task->events));
        REQUIRE(EMPTY(task->on_shutdown));
        REQUIRE(task->references == 0);
        REQUIRE(task->state == task_state_done);

        XTRACE("task_finished");

        LOCK(&manager->lock);
        UNLINK(manager->tasks, task, link);
#ifdef ISC_PLATFORM_USETHREADS
        if (FINISHED(manager)) {
                /*
                 * All tasks have completed and the
                 * task manager is exiting.  Wake up
                 * any idle worker threads so they
                 * can exit.
                 */
                BROADCAST(&manager->work_available);
        }
#endif /* ISC_PLATFORM_USETHREADS */
        UNLOCK(&manager->lock);

        DESTROYLOCK(&task->lock);
        task->magic = 0;
        isc_mem_put(manager->mctx, task, sizeof(*task));
}

isc_result_t
isc_task_create(isc_taskmgr_t *manager, unsigned int quantum,
                isc_task_t **taskp)
{
        isc_task_t *task;
        isc_boolean_t exiting;
        isc_result_t result;

        REQUIRE(VALID_MANAGER(manager));
        REQUIRE(taskp != NULL && *taskp == NULL);

        task = isc_mem_get(manager->mctx, sizeof(*task));
        if (task == NULL)
                return (ISC_R_NOMEMORY);
        XTRACE("isc_task_create");
        task->manager = manager;
        result = isc_mutex_init(&task->lock);
        if (result != ISC_R_SUCCESS) {
                isc_mem_put(manager->mctx, task, sizeof(*task));
                return (result);
        }
        task->state = task_state_idle;
        task->references = 1;
        INIT_LIST(task->events);
        INIT_LIST(task->on_shutdown);
        task->quantum = quantum;
        task->flags = 0;
        task->now = 0;
#ifdef ISC_TASK_NAMES
        memset(task->name, 0, sizeof(task->name));
        task->tag = NULL;
#endif
        INIT_LINK(task, link);
        INIT_LINK(task, ready_link);

        exiting = ISC_FALSE;
        LOCK(&manager->lock);
        if (!manager->exiting) {
                if (task->quantum == 0)
                        task->quantum = manager->default_quantum;
                APPEND(manager->tasks, task, link);
        } else
                exiting = ISC_TRUE;
        UNLOCK(&manager->lock);

        if (exiting) {
                DESTROYLOCK(&task->lock);
                isc_mem_put(manager->mctx, task, sizeof(*task));
                return (ISC_R_SHUTTINGDOWN);
        }

        task->magic = TASK_MAGIC;
        *taskp = task;

        return (ISC_R_SUCCESS);
}

void
isc_task_attach(isc_task_t *source, isc_task_t **targetp) {

        /*
         * Attach *targetp to source.
         */

        REQUIRE(VALID_TASK(source));
        REQUIRE(targetp != NULL && *targetp == NULL);

        XTTRACE(source, "isc_task_attach");

        LOCK(&source->lock);
        source->references++;
        UNLOCK(&source->lock);

        *targetp = source;
}

static inline isc_boolean_t
task_shutdown(isc_task_t *task) {
        isc_boolean_t was_idle = ISC_FALSE;
        isc_event_t *event, *prev;

        /*
         * Caller must be holding the task's lock.
         */

        XTRACE("task_shutdown");

        if (! TASK_SHUTTINGDOWN(task)) {
                XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
                                      ISC_MSG_SHUTTINGDOWN, "shutting down"));
                task->flags |= TASK_F_SHUTTINGDOWN;
                if (task->state == task_state_idle) {
                        INSIST(EMPTY(task->events));
                        task->state = task_state_ready;
                        was_idle = ISC_TRUE;
                }
                INSIST(task->state == task_state_ready ||
                       task->state == task_state_running);
                /*
                 * Note that we post shutdown events LIFO.
                 */
                for (event = TAIL(task->on_shutdown);
                     event != NULL;
                     event = prev) {
                        prev = PREV(event, ev_link);
                        DEQUEUE(task->on_shutdown, event, ev_link);
                        ENQUEUE(task->events, event, ev_link);
                }
        }

        return (was_idle);
}

static inline void
task_ready(isc_task_t *task) {
        isc_taskmgr_t *manager = task->manager;

        REQUIRE(VALID_MANAGER(manager));
        REQUIRE(task->state == task_state_ready);

        XTRACE("task_ready");

        LOCK(&manager->lock);

        ENQUEUE(manager->ready_tasks, task, ready_link);
#ifdef ISC_PLATFORM_USETHREADS
        SIGNAL(&manager->work_available);
#endif /* ISC_PLATFORM_USETHREADS */

        UNLOCK(&manager->lock);
}

static inline isc_boolean_t
task_detach(isc_task_t *task) {

        /*
         * Caller must be holding the task lock.
         */

        REQUIRE(task->references > 0);

        XTRACE("detach");

        task->references--;
        if (task->references == 0 && task->state == task_state_idle) {
                INSIST(EMPTY(task->events));
                /*
                 * There are no references to this task, and no
                 * pending events.  We could try to optimize and
                 * either initiate shutdown or clean up the task,
                 * depending on its state, but it's easier to just
                 * make the task ready and allow run() or the event
                 * loop to deal with shutting down and termination.
                 */
                task->state = task_state_ready;
                return (ISC_TRUE);
        }

        return (ISC_FALSE);
}

void
isc_task_detach(isc_task_t **taskp) {
        isc_task_t *task;
        isc_boolean_t was_idle;

        /*
         * Detach *taskp from its task.
         */

        REQUIRE(taskp != NULL);
        task = *taskp;
        REQUIRE(VALID_TASK(task));

        XTRACE("isc_task_detach");

        LOCK(&task->lock);
        was_idle = task_detach(task);
        UNLOCK(&task->lock);

        if (was_idle)
                task_ready(task);

        *taskp = NULL;
}

static inline isc_boolean_t
task_send(isc_task_t *task, isc_event_t **eventp) {
        isc_boolean_t was_idle = ISC_FALSE;
        isc_event_t *event;

        /*
         * Caller must be holding the task lock.
         */

        REQUIRE(eventp != NULL);
        event = *eventp;
        REQUIRE(event != NULL);
        REQUIRE(event->ev_type > 0);
        REQUIRE(task->state != task_state_done);

        XTRACE("task_send");

        if (task->state == task_state_idle) {
                was_idle = ISC_TRUE;
                INSIST(EMPTY(task->events));
                task->state = task_state_ready;
        }
        INSIST(task->state == task_state_ready ||
               task->state == task_state_running);
        ENQUEUE(task->events, event, ev_link);
        *eventp = NULL;

        return (was_idle);
}

void
isc_task_send(isc_task_t *task, isc_event_t **eventp) {
        isc_boolean_t was_idle;

        /*
         * Send '*event' to 'task'.
         */

        REQUIRE(VALID_TASK(task));

        XTRACE("isc_task_send");

        /*
         * We're trying hard to hold locks for as short a time as possible.
         * We're also trying to hold as few locks as possible.  This is why
         * some processing is deferred until after the lock is released.
         */
        LOCK(&task->lock);
        was_idle = task_send(task, eventp);
        UNLOCK(&task->lock);

        if (was_idle) {
                /*
                 * We need to add this task to the ready queue.
                 *
                 * We've waited until now to do it because making a task
                 * ready requires locking the manager.  If we tried to do
                 * this while holding the task lock, we could deadlock.
                 *
                 * We've changed the state to ready, so no one else will
                 * be trying to add this task to the ready queue.  The
                 * only way to leave the ready state is by executing the
                 * task.  It thus doesn't matter if events are added,
                 * removed, or a shutdown is started in the interval
                 * between the time we released the task lock, and the time
                 * we add the task to the ready queue.
                 */
                task_ready(task);
        }
}

void
isc_task_sendanddetach(isc_task_t **taskp, isc_event_t **eventp) {
        isc_boolean_t idle1, idle2;
        isc_task_t *task;

        /*
         * Send '*event' to '*taskp' and then detach '*taskp' from its
         * task.
         */

        REQUIRE(taskp != NULL);
        task = *taskp;
        REQUIRE(VALID_TASK(task));

        XTRACE("isc_task_sendanddetach");

        LOCK(&task->lock);
        idle1 = task_send(task, eventp);
        idle2 = task_detach(task);
        UNLOCK(&task->lock);

        /*
         * If idle1, then idle2 shouldn't be true as well since we're holding
         * the task lock, and thus the task cannot switch from ready back to
         * idle.
         */
        INSIST(!(idle1 && idle2));

        if (idle1 || idle2)
                task_ready(task);

        *taskp = NULL;
}

#define PURGE_OK(event) (((event)->ev_attributes & ISC_EVENTATTR_NOPURGE) == 0)
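
/*
 * Events whose sender set ISC_EVENTATTR_NOPURGE are not PURGE_OK() and
 * therefore survive the purge operations below; the unsend operations
 * ignore the flag.
 */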

static unsigned int
dequeue_events(isc_task_t *task, void *sender, isc_eventtype_t first,
               isc_eventtype_t last, void *tag,
               isc_eventlist_t *events, isc_boolean_t purging)
{
        isc_event_t *event, *next_event;
        unsigned int count = 0;

        REQUIRE(VALID_TASK(task));
        REQUIRE(last >= first);

        XTRACE("dequeue_events");

        /*
         * Events matching 'sender', whose type is >= first and <= last, and
         * whose tag is 'tag' will be dequeued.  If 'purging', matching events
         * which are marked as unpurgable will not be dequeued.
         *
         * sender == NULL means "any sender", and tag == NULL means "any tag".
         */

        LOCK(&task->lock);

        for (event = HEAD(task->events); event != NULL; event = next_event) {
                next_event = NEXT(event, ev_link);
                if (event->ev_type >= first && event->ev_type <= last &&
                    (sender == NULL || event->ev_sender == sender) &&
                    (tag == NULL || event->ev_tag == tag) &&
                    (!purging || PURGE_OK(event))) {
                        DEQUEUE(task->events, event, ev_link);
                        ENQUEUE(*events, event, ev_link);
                        count++;
                }
        }

        UNLOCK(&task->lock);

        return (count);
}

unsigned int
isc_task_purgerange(isc_task_t *task, void *sender, isc_eventtype_t first,
                    isc_eventtype_t last, void *tag)
{
        unsigned int count;
        isc_eventlist_t events;
        isc_event_t *event, *next_event;

        /*
         * Purge events from a task's event queue.
         */

        XTRACE("isc_task_purgerange");

        ISC_LIST_INIT(events);

        count = dequeue_events(task, sender, first, last, tag, &events,
                               ISC_TRUE);

        for (event = HEAD(events); event != NULL; event = next_event) {
                next_event = NEXT(event, ev_link);
                isc_event_free(&event);
        }

        /*
         * Note that purging never changes the state of the task.
         */

        return (count);
}

unsigned int
isc_task_purge(isc_task_t *task, void *sender, isc_eventtype_t type,
               void *tag)
{
        /*
         * Purge events from a task's event queue.
         */

        XTRACE("isc_task_purge");

        return (isc_task_purgerange(task, sender, type, type, tag));
}

isc_boolean_t
isc_task_purgeevent(isc_task_t *task, isc_event_t *event) {
        isc_event_t *curr_event, *next_event;

        /*
         * Purge 'event' from a task's event queue.
         *
         * XXXRTH:  WARNING:  This method may be removed before beta.
         */

        REQUIRE(VALID_TASK(task));

        /*
         * If 'event' is on the task's event queue, it will be purged,
         * unless it is marked as unpurgeable.  'event' does not have to be
         * on the task's event queue; in fact, it can even be an invalid
         * pointer.  Purging only occurs if the event is actually on the task's
         * event queue.
         *
         * Purging never changes the state of the task.
         */

        LOCK(&task->lock);
        for (curr_event = HEAD(task->events);
             curr_event != NULL;
             curr_event = next_event) {
                next_event = NEXT(curr_event, ev_link);
                if (curr_event == event && PURGE_OK(event)) {
                        DEQUEUE(task->events, curr_event, ev_link);
                        break;
                }
        }
        UNLOCK(&task->lock);

        if (curr_event == NULL)
                return (ISC_FALSE);

        isc_event_free(&curr_event);

        return (ISC_TRUE);
}

unsigned int
isc_task_unsendrange(isc_task_t *task, void *sender, isc_eventtype_t first,
                     isc_eventtype_t last, void *tag,
                     isc_eventlist_t *events)
{
        /*
         * Remove events from a task's event queue.
         */

        XTRACE("isc_task_unsendrange");

        return (dequeue_events(task, sender, first, last, tag, events,
                               ISC_FALSE));
}

unsigned int
isc_task_unsend(isc_task_t *task, void *sender, isc_eventtype_t type,
                void *tag, isc_eventlist_t *events)
{
        /*
         * Remove events from a task's event queue.
         */

        XTRACE("isc_task_unsend");

        return (dequeue_events(task, sender, type, type, tag, events,
                               ISC_FALSE));
}

isc_result_t
isc_task_onshutdown(isc_task_t *task, isc_taskaction_t action, const void *arg)
{
        isc_boolean_t disallowed = ISC_FALSE;
        isc_result_t result = ISC_R_SUCCESS;
        isc_event_t *event;

        /*
         * Send a shutdown event with action 'action' and argument 'arg' when
         * 'task' is shutdown.
         */

        REQUIRE(VALID_TASK(task));
        REQUIRE(action != NULL);

        event = isc_event_allocate(task->manager->mctx,
                                   NULL,
                                   ISC_TASKEVENT_SHUTDOWN,
                                   action,
                                   arg,
                                   sizeof(*event));
        if (event == NULL)
                return (ISC_R_NOMEMORY);

        LOCK(&task->lock);
        if (TASK_SHUTTINGDOWN(task)) {
                disallowed = ISC_TRUE;
                result = ISC_R_SHUTTINGDOWN;
        } else
                ENQUEUE(task->on_shutdown, event, ev_link);
        UNLOCK(&task->lock);

        if (disallowed)
                isc_mem_put(task->manager->mctx, event, sizeof(*event));

        return (result);
}

void
isc_task_shutdown(isc_task_t *task) {
        isc_boolean_t was_idle;

        /*
         * Shutdown 'task'.
         */

        REQUIRE(VALID_TASK(task));

        LOCK(&task->lock);
        was_idle = task_shutdown(task);
        UNLOCK(&task->lock);

        if (was_idle)
                task_ready(task);
}

void
isc_task_destroy(isc_task_t **taskp) {

        /*
         * Destroy '*taskp'.
         */

        REQUIRE(taskp != NULL);

        isc_task_shutdown(*taskp);
        isc_task_detach(taskp);
}

void
isc_task_setname(isc_task_t *task, const char *name, void *tag) {

        /*
         * Name 'task'.
         */

        REQUIRE(VALID_TASK(task));

#ifdef ISC_TASK_NAMES
        LOCK(&task->lock);
        memset(task->name, 0, sizeof(task->name));
        strncpy(task->name, name, sizeof(task->name) - 1);
        task->tag = tag;
        UNLOCK(&task->lock);
#else
        UNUSED(name);
        UNUSED(tag);
#endif

}

const char *
isc_task_getname(isc_task_t *task) {
        return (task->name);
}

void *
isc_task_gettag(isc_task_t *task) {
        return (task->tag);
}

void
isc_task_getcurrenttime(isc_task_t *task, isc_stdtime_t *t) {
        REQUIRE(VALID_TASK(task));
        REQUIRE(t != NULL);

        LOCK(&task->lock);

        *t = task->now;

        UNLOCK(&task->lock);
}

/***
 *** Task Manager.
 ***/
static void
dispatch(isc_taskmgr_t *manager) {
        isc_task_t *task;
#ifndef ISC_PLATFORM_USETHREADS
        unsigned int total_dispatch_count = 0;
        isc_tasklist_t ready_tasks;
#endif /* ISC_PLATFORM_USETHREADS */

        REQUIRE(VALID_MANAGER(manager));

        /*
         * Again we're trying to hold the lock for as short a time as possible
         * and to do as little locking and unlocking as possible.
         *
         * In both while loops, the appropriate lock must be held before the
         * while body starts.  Code which acquired the lock at the top of
         * the loop would be more readable, but would result in a lot of
         * extra locking.  Compare:
         *
         * Straightforward:
         *
         *      LOCK();
         *      ...
         *      UNLOCK();
         *      while (expression) {
         *              LOCK();
         *              ...
         *              UNLOCK();
         *
         *              Unlocked part here...
         *
         *              LOCK();
         *              ...
         *              UNLOCK();
         *      }
         *
         * Note how if the loop continues we unlock and then immediately lock.
         * For N iterations of the loop, this code does 2N+1 locks and 2N+1
         * unlocks.  Also note that the lock is not held when the while
         * condition is tested, which may or may not be important, depending
         * on the expression.
         *
         * As written:
         *
         *      LOCK();
         *      while (expression) {
         *              ...
         *              UNLOCK();
         *
         *              Unlocked part here...
         *
         *              LOCK();
         *              ...
         *      }
         *      UNLOCK();
         *
         * For N iterations of the loop, this code does N+1 locks and N+1
         * unlocks.  The while expression is always protected by the lock.
         */

#ifndef ISC_PLATFORM_USETHREADS
        ISC_LIST_INIT(ready_tasks);
#endif
        LOCK(&manager->lock);
        while (!FINISHED(manager)) {
#ifdef ISC_PLATFORM_USETHREADS
                /*
                 * For reasons similar to those given in the comment in
                 * isc_task_send() above, it is safe for us to dequeue
                 * the task while only holding the manager lock, and then
                 * change the task to running state while only holding the
                 * task lock.
                 */
                while ((EMPTY(manager->ready_tasks) ||
                        manager->exclusive_requested) &&
                        !FINISHED(manager))
                {
                        XTHREADTRACE(isc_msgcat_get(isc_msgcat,
                                                    ISC_MSGSET_GENERAL,
                                                    ISC_MSG_WAIT, "wait"));
                        WAIT(&manager->work_available, &manager->lock);
                        XTHREADTRACE(isc_msgcat_get(isc_msgcat,
                                                    ISC_MSGSET_TASK,
                                                    ISC_MSG_AWAKE, "awake"));
                }
#else /* ISC_PLATFORM_USETHREADS */
                if (total_dispatch_count >= DEFAULT_TASKMGR_QUANTUM ||
                    EMPTY(manager->ready_tasks))
                        break;
#endif /* ISC_PLATFORM_USETHREADS */
                XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_TASK,
                                            ISC_MSG_WORKING, "working"));

                task = HEAD(manager->ready_tasks);
                if (task != NULL) {
                        unsigned int dispatch_count = 0;
                        isc_boolean_t done = ISC_FALSE;
                        isc_boolean_t requeue = ISC_FALSE;
                        isc_boolean_t finished = ISC_FALSE;
                        isc_event_t *event;

                        INSIST(VALID_TASK(task));

                        /*
                         * Note we only unlock the manager lock if we actually
                         * have a task to do.  We must reacquire the manager
                         * lock before exiting the 'if (task != NULL)' block.
                         */
                        DEQUEUE(manager->ready_tasks, task, ready_link);
                        manager->tasks_running++;
                        UNLOCK(&manager->lock);

                        LOCK(&task->lock);
                        INSIST(task->state == task_state_ready);
                        task->state = task_state_running;
                        XTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
                                              ISC_MSG_RUNNING, "running"));
                        isc_stdtime_get(&task->now);
                        do {
                                if (!EMPTY(task->events)) {
                                        event = HEAD(task->events);
                                        DEQUEUE(task->events, event, ev_link);

                                        /*
                                         * Execute the event action.
                                         */
                                        XTRACE(isc_msgcat_get(isc_msgcat,
                                                            ISC_MSGSET_TASK,
                                                            ISC_MSG_EXECUTE,
                                                            "execute action"));
                                        if (event->ev_action != NULL) {
                                                UNLOCK(&task->lock);
                                                (event->ev_action)(task,event);
                                                LOCK(&task->lock);
                                        }
                                        dispatch_count++;
#ifndef ISC_PLATFORM_USETHREADS
                                        total_dispatch_count++;
#endif /* ISC_PLATFORM_USETHREADS */
                                }

                                if (task->references == 0 &&
                                    EMPTY(task->events) &&
                                    !TASK_SHUTTINGDOWN(task)) {
                                        isc_boolean_t was_idle;

                                        /*
                                         * There are no references and no
                                         * pending events for this task,
                                         * which means it will not become
                                         * runnable again via an external
                                         * action (such as sending an event
                                         * or detaching).
                                         *
                                         * We initiate shutdown to prevent
                                         * it from becoming a zombie.
                                         *
                                         * We do this here instead of in
                                         * the "if EMPTY(task->events)" block
                                         * below because:
                                         *
                                         *      If we post no shutdown events,
                                         *      we want the task to finish.
                                         *
                                         *      If we did post shutdown events,
                                         *      we still want the task's
                                         *      quantum to be applied.
                                         */
                                        was_idle = task_shutdown(task);
                                        INSIST(!was_idle);
                                }

                                if (EMPTY(task->events)) {
                                        /*
                                         * Nothing else to do for this task
                                         * right now.
                                         */
                                        XTRACE(isc_msgcat_get(isc_msgcat,
                                                              ISC_MSGSET_TASK,
                                                              ISC_MSG_EMPTY,
                                                              "empty"));
                                        if (task->references == 0 &&
                                            TASK_SHUTTINGDOWN(task)) {
                                                /*
                                                 * The task is done.
                                                 */
                                                XTRACE(isc_msgcat_get(
                                                               isc_msgcat,
                                                               ISC_MSGSET_TASK,
                                                               ISC_MSG_DONE,
                                                               "done"));
                                                finished = ISC_TRUE;
                                                task->state = task_state_done;
                                        } else
                                                task->state = task_state_idle;
                                        done = ISC_TRUE;
                                } else if (dispatch_count >= task->quantum) {
                                        /*
                                         * Our quantum has expired, but
                                         * there is more work to be done.
                                         * We'll requeue it to the ready
                                         * queue later.
                                         *
                                         * We don't check quantum until
                                         * dispatching at least one event,
                                         * so the minimum quantum is one.
                                         */
                                        XTRACE(isc_msgcat_get(isc_msgcat,
                                                              ISC_MSGSET_TASK,
                                                              ISC_MSG_QUANTUM,
                                                              "quantum"));
                                        task->state = task_state_ready;
                                        requeue = ISC_TRUE;
                                        done = ISC_TRUE;
                                }
                        } while (!done);
                        UNLOCK(&task->lock);

                        if (finished)
                                task_finished(task);

                        LOCK(&manager->lock);
                        manager->tasks_running--;
#ifdef ISC_PLATFORM_USETHREADS
                        if (manager->exclusive_requested &&
                            manager->tasks_running == 1) {
                                SIGNAL(&manager->exclusive_granted);
                        }
#endif /* ISC_PLATFORM_USETHREADS */
                        if (requeue) {
                                /*
                                 * We know we're awake, so we don't have
                                 * to wakeup any sleeping threads if the
                                 * ready queue is empty before we requeue.
                                 *
                                 * A possible optimization if the queue is
                                 * empty is to 'goto' the 'if (task != NULL)'
                                 * block, avoiding the ENQUEUE of the task
                                 * and the subsequent immediate DEQUEUE
                                 * (since it is the only executable task).
                                 * We don't do this because then we'd be
                                 * skipping the exit_requested check.  The
                                 * cost of ENQUEUE is low anyway, especially
                                 * when you consider that we'd have to do
                                 * an extra EMPTY check to see if we could
                                 * do the optimization.  If the ready queue
                                 * were usually nonempty, the 'optimization'
                                 * might even hurt rather than help.
                                 */
#ifdef ISC_PLATFORM_USETHREADS
                                ENQUEUE(manager->ready_tasks, task,
                                        ready_link);
#else
                                ENQUEUE(ready_tasks, task, ready_link);
#endif
                        }
                }
        }
#ifndef ISC_PLATFORM_USETHREADS
        ISC_LIST_APPENDLIST(manager->ready_tasks, ready_tasks, ready_link);
#endif
        UNLOCK(&manager->lock);
}

#ifdef ISC_PLATFORM_USETHREADS
static isc_threadresult_t
#ifdef _WIN32
WINAPI
#endif
run(void *uap) {
        isc_taskmgr_t *manager = uap;

        XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
                                    ISC_MSG_STARTING, "starting"));

        dispatch(manager);

        XTHREADTRACE(isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
                                    ISC_MSG_EXITING, "exiting"));

        return ((isc_threadresult_t)0);
}
#endif /* ISC_PLATFORM_USETHREADS */

static void
manager_free(isc_taskmgr_t *manager) {
        isc_mem_t *mctx;

#ifdef ISC_PLATFORM_USETHREADS
        (void)isc_condition_destroy(&manager->exclusive_granted);
        (void)isc_condition_destroy(&manager->work_available);
        isc_mem_free(manager->mctx, manager->threads);
#endif /* ISC_PLATFORM_USETHREADS */
        DESTROYLOCK(&manager->lock);
        manager->magic = 0;
        mctx = manager->mctx;
        isc_mem_put(mctx, manager, sizeof(*manager));
        isc_mem_detach(&mctx);
}

isc_result_t
isc_taskmgr_create(isc_mem_t *mctx, unsigned int workers,
                   unsigned int default_quantum, isc_taskmgr_t **managerp)
{
        isc_result_t result;
        unsigned int i, started = 0;
        isc_taskmgr_t *manager;

        /*
         * Create a new task manager.
         */

        REQUIRE(workers > 0);
        REQUIRE(managerp != NULL && *managerp == NULL);

#ifndef ISC_PLATFORM_USETHREADS
        UNUSED(i);
        UNUSED(started);
        UNUSED(workers);

        if (taskmgr != NULL) {
                taskmgr->refs++;
                *managerp = taskmgr;
                return (ISC_R_SUCCESS);
        }
#endif /* ISC_PLATFORM_USETHREADS */

        manager = isc_mem_get(mctx, sizeof(*manager));
        if (manager == NULL)
                return (ISC_R_NOMEMORY);
        manager->magic = TASK_MANAGER_MAGIC;
        manager->mctx = NULL;
        result = isc_mutex_init(&manager->lock);
        if (result != ISC_R_SUCCESS)
                goto cleanup_mgr;

#ifdef ISC_PLATFORM_USETHREADS
        manager->workers = 0;
        manager->threads = isc_mem_allocate(mctx,
                                            workers * sizeof(isc_thread_t));
        if (manager->threads == NULL) {
                result = ISC_R_NOMEMORY;
                goto cleanup_lock;
        }
        if (isc_condition_init(&manager->work_available) != ISC_R_SUCCESS) {
                UNEXPECTED_ERROR(__FILE__, __LINE__,
                                 "isc_condition_init() %s",
                                 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
                                                ISC_MSG_FAILED, "failed"));
                result = ISC_R_UNEXPECTED;
                goto cleanup_threads;
        }
        if (isc_condition_init(&manager->exclusive_granted) != ISC_R_SUCCESS) {
                UNEXPECTED_ERROR(__FILE__, __LINE__,
                                 "isc_condition_init() %s",
                                 isc_msgcat_get(isc_msgcat, ISC_MSGSET_GENERAL,
                                                ISC_MSG_FAILED, "failed"));
                result = ISC_R_UNEXPECTED;
                goto cleanup_workavailable;
        }
#endif /* ISC_PLATFORM_USETHREADS */
        if (default_quantum == 0)
                default_quantum = DEFAULT_DEFAULT_QUANTUM;
        manager->default_quantum = default_quantum;
        INIT_LIST(manager->tasks);
        INIT_LIST(manager->ready_tasks);
        manager->tasks_running = 0;
        manager->exclusive_requested = ISC_FALSE;
        manager->exiting = ISC_FALSE;

        isc_mem_attach(mctx, &manager->mctx);

#ifdef ISC_PLATFORM_USETHREADS
        LOCK(&manager->lock);
        /*
         * Start workers.
         */
        for (i = 0; i < workers; i++) {
                if (isc_thread_create(run, manager,
                                      &manager->threads[manager->workers]) ==
                    ISC_R_SUCCESS) {
                        manager->workers++;
                        started++;
                }
        }
        UNLOCK(&manager->lock);

        if (started == 0) {
                manager_free(manager);
                return (ISC_R_NOTHREADS);
        }
        isc_thread_setconcurrency(workers);
#else /* ISC_PLATFORM_USETHREADS */
        manager->refs = 1;
        taskmgr = manager;
#endif /* ISC_PLATFORM_USETHREADS */

        *managerp = manager;

        return (ISC_R_SUCCESS);

#ifdef ISC_PLATFORM_USETHREADS
 cleanup_workavailable:
        (void)isc_condition_destroy(&manager->work_available);
 cleanup_threads:
        isc_mem_free(mctx, manager->threads);
 cleanup_lock:
        DESTROYLOCK(&manager->lock);
#endif
 cleanup_mgr:
        isc_mem_put(mctx, manager, sizeof(*manager));
        return (result);
}

void
isc_taskmgr_destroy(isc_taskmgr_t **managerp) {
        isc_taskmgr_t *manager;
        isc_task_t *task;
        unsigned int i;

        /*
         * Destroy '*managerp'.
         */

        REQUIRE(managerp != NULL);
        manager = *managerp;
        REQUIRE(VALID_MANAGER(manager));

#ifndef ISC_PLATFORM_USETHREADS
        UNUSED(i);

        if (manager->refs > 1) {
                manager->refs--;
                *managerp = NULL;
                return;
        }
#endif /* ISC_PLATFORM_USETHREADS */

        XTHREADTRACE("isc_taskmgr_destroy");
        /*
         * Only one non-worker thread may ever call this routine.
         * If a worker thread wants to initiate shutdown of the
         * task manager, it should ask some non-worker thread to call
         * isc_taskmgr_destroy(), e.g. by signalling a condition variable
         * that the startup thread is sleeping on.
         */

        /*
         * Unlike elsewhere, we're going to hold this lock a long time.
         * We need to do so, because otherwise the list of tasks could
         * change while we were traversing it.
         *
         * This is also the only function where we will hold both the
         * task manager lock and a task lock at the same time.
         */

        LOCK(&manager->lock);

        /*
         * Make sure we only get called once.
         */
        INSIST(!manager->exiting);
        manager->exiting = ISC_TRUE;

        /*
         * Post shutdown event(s) to every task (if they haven't already been
         * posted).
         */
        for (task = HEAD(manager->tasks);
             task != NULL;
             task = NEXT(task, link)) {
                LOCK(&task->lock);
                if (task_shutdown(task))
                        ENQUEUE(manager->ready_tasks, task, ready_link);
                UNLOCK(&task->lock);
        }
#ifdef ISC_PLATFORM_USETHREADS
        /*
         * Wake up any sleeping workers.  This ensures we get work done if
         * there's work left to do, and if there are already no tasks left
         * it will cause the workers to see manager->exiting.
         */
        BROADCAST(&manager->work_available);
        UNLOCK(&manager->lock);

        /*
         * Wait for all the worker threads to exit.
         */
        for (i = 0; i < manager->workers; i++)
                (void)isc_thread_join(manager->threads[i], NULL);
#else /* ISC_PLATFORM_USETHREADS */
        /*
         * Dispatch the shutdown events.
         */
        UNLOCK(&manager->lock);
        while (isc__taskmgr_ready())
                (void)isc__taskmgr_dispatch();
        if (!ISC_LIST_EMPTY(manager->tasks))
                isc_mem_printallactive(stderr);
        INSIST(ISC_LIST_EMPTY(manager->tasks));
#endif /* ISC_PLATFORM_USETHREADS */

        manager_free(manager);

        *managerp = NULL;
}

#ifndef ISC_PLATFORM_USETHREADS
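/*
 * Without threads there are no workers to run dispatch(), so these two
 * private entry points (declared in task_p.h) let the rest of the program
 * check for ready tasks and run them by calling dispatch() directly;
 * isc_taskmgr_destroy() above uses them to drain the final shutdown events.
 */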
isc_boolean_t
isc__taskmgr_ready(void) {
        if (taskmgr == NULL)
                return (ISC_FALSE);
        return (ISC_TF(!ISC_LIST_EMPTY(taskmgr->ready_tasks)));
}

isc_result_t
isc__taskmgr_dispatch(void) {
        isc_taskmgr_t *manager = taskmgr;

        if (taskmgr == NULL)
                return (ISC_R_NOTFOUND);

        dispatch(manager);

        return (ISC_R_SUCCESS);
}

#endif /* ISC_PLATFORM_USETHREADS */

isc_result_t
isc_task_beginexclusive(isc_task_t *task) {
#ifdef ISC_PLATFORM_USETHREADS
        isc_taskmgr_t *manager = task->manager;
        REQUIRE(task->state == task_state_running);
        LOCK(&manager->lock);
        if (manager->exclusive_requested) {
                UNLOCK(&manager->lock);
                return (ISC_R_LOCKBUSY);
        }
        manager->exclusive_requested = ISC_TRUE;
        while (manager->tasks_running > 1) {
                WAIT(&manager->exclusive_granted, &manager->lock);
        }
        UNLOCK(&manager->lock);
#else
        UNUSED(task);
#endif
        return (ISC_R_SUCCESS);
}

void
isc_task_endexclusive(isc_task_t *task) {
#ifdef ISC_PLATFORM_USETHREADS
        isc_taskmgr_t *manager = task->manager;
        REQUIRE(task->state == task_state_running);
        LOCK(&manager->lock);
        REQUIRE(manager->exclusive_requested);
        manager->exclusive_requested = ISC_FALSE;
        BROADCAST(&manager->work_available);
        UNLOCK(&manager->lock);
#else
        UNUSED(task);
#endif
}