/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed
 * with this work for additional information regarding copyright
 * ownership.  The ASF licenses this file to you under the Apache
 * License, Version 2.0 (the "License"); you may not use this file
 * except in compliance with the License.  You may obtain a copy of
 * the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.  See the License for the specific language governing
 * permissions and limitations under the License.
 */
#include <assert.h>
#include "apr_thread_pool.h"
#include "apr_ring.h"
#include "apr_thread_cond.h"
#include "apr_portable.h"

#if APR_HAS_THREADS

#define TASK_PRIORITY_SEGS 4
#define TASK_PRIORITY_SEG(x) (((x)->dispatch.priority & 0xFF) / 64)
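
/*
 * A quick worked example of the segment math above (illustrative):
 * priorities 0..63 map to segment 0, 64..127 to segment 1, 128..191 to
 * segment 2, and 192..255 to segment 3.  So APR_THREAD_TASK_PRIORITY_HIGHEST
 * (255) lands in segment 3 and APR_THREAD_TASK_PRIORITY_NORMAL (127) in
 * segment 1.
 */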
typedef struct apr_thread_pool_task
{
    APR_RING_ENTRY(apr_thread_pool_task) link;
    apr_thread_start_t func;
    void *param;
    void *owner;
    union
    {
        apr_byte_t priority;
        apr_time_t time;
    } dispatch;
} apr_thread_pool_task_t;

APR_RING_HEAD(apr_thread_pool_tasks, apr_thread_pool_task);

struct apr_thread_list_elt
{
    APR_RING_ENTRY(apr_thread_list_elt) link;
    apr_thread_t *thd;
    volatile void *current_owner;
    volatile enum { TH_RUN, TH_STOP, TH_PROBATION } state;
};

APR_RING_HEAD(apr_thread_list, apr_thread_list_elt);
struct apr_thread_pool
{
    apr_pool_t *pool;
    volatile apr_size_t thd_max;
    volatile apr_size_t idle_max;
    volatile apr_interval_time_t idle_wait;
    volatile apr_size_t thd_cnt;
    volatile apr_size_t idle_cnt;
    volatile apr_size_t task_cnt;
    volatile apr_size_t scheduled_task_cnt;
    volatile apr_size_t threshold;
    volatile apr_size_t tasks_run;
    volatile apr_size_t tasks_high;
    volatile apr_size_t thd_high;
    volatile apr_size_t thd_timed_out;
    struct apr_thread_pool_tasks *tasks;
    struct apr_thread_pool_tasks *scheduled_tasks;
    struct apr_thread_list *busy_thds;
    struct apr_thread_list *idle_thds;
    apr_thread_mutex_t *lock;
    apr_thread_cond_t *cond;
    volatile int terminated;
    struct apr_thread_pool_tasks *recycled_tasks;
    struct apr_thread_list *recycled_thds;
    apr_thread_pool_task_t *task_idx[TASK_PRIORITY_SEGS];
};
static apr_status_t thread_pool_construct(apr_thread_pool_t * me,
                                          apr_size_t init_threads,
                                          apr_size_t max_threads)
{
    apr_status_t rv;
    int i;

    me->thd_max = max_threads;
    me->idle_max = init_threads;
    me->threshold = init_threads / 2;
    rv = apr_thread_mutex_create(&me->lock, APR_THREAD_MUTEX_NESTED,
                                 me->pool);
    if (APR_SUCCESS != rv) {
        return rv;
    }
    rv = apr_thread_cond_create(&me->cond, me->pool);
    if (APR_SUCCESS != rv) {
        apr_thread_mutex_destroy(me->lock);
        return rv;
    }
    me->tasks = apr_palloc(me->pool, sizeof(*me->tasks));
    if (!me->tasks) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->tasks, apr_thread_pool_task, link);
    me->scheduled_tasks = apr_palloc(me->pool, sizeof(*me->scheduled_tasks));
    if (!me->scheduled_tasks) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->scheduled_tasks, apr_thread_pool_task, link);
    me->recycled_tasks = apr_palloc(me->pool, sizeof(*me->recycled_tasks));
    if (!me->recycled_tasks) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->recycled_tasks, apr_thread_pool_task, link);
    me->busy_thds = apr_palloc(me->pool, sizeof(*me->busy_thds));
    if (!me->busy_thds) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->busy_thds, apr_thread_list_elt, link);
    me->idle_thds = apr_palloc(me->pool, sizeof(*me->idle_thds));
    if (!me->idle_thds) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->idle_thds, apr_thread_list_elt, link);
    me->recycled_thds = apr_palloc(me->pool, sizeof(*me->recycled_thds));
    if (!me->recycled_thds) {
        goto CATCH_ENOMEM;
    }
    APR_RING_INIT(me->recycled_thds, apr_thread_list_elt, link);
    me->thd_cnt = me->idle_cnt = me->task_cnt = me->scheduled_task_cnt = 0;
    me->tasks_run = me->tasks_high = me->thd_high = me->thd_timed_out = 0;
    for (i = 0; i < TASK_PRIORITY_SEGS; i++) {
        me->task_idx[i] = NULL;
    }
    goto FINAL_EXIT;
  CATCH_ENOMEM:
    rv = APR_ENOMEM;
    apr_thread_mutex_destroy(me->lock);
    apr_thread_cond_destroy(me->cond);
  FINAL_EXIT:
    return rv;
}
/*
 * NOTE: This function is not thread safe by itself. The caller should hold
 * the lock.
 */
static apr_thread_pool_task_t *pop_task(apr_thread_pool_t * me)
{
    apr_thread_pool_task_t *task = NULL;
    int seg;

    /* check for scheduled tasks */
    if (me->scheduled_task_cnt > 0) {
        task = APR_RING_FIRST(me->scheduled_tasks);
        assert(task != NULL);
        assert(task !=
               APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
                                 link));
        /* if the task's dispatch time has arrived, return it now */
        if (task->dispatch.time <= apr_time_now()) {
            --me->scheduled_task_cnt;
            APR_RING_REMOVE(task, link);
            return task;
        }
    }
    /* check for normal tasks if we're not returning a scheduled task */
    if (me->task_cnt == 0) {
        return NULL;
    }

    task = APR_RING_FIRST(me->tasks);
    assert(task != NULL);
    assert(task != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link));
    --me->task_cnt;
    seg = TASK_PRIORITY_SEG(task);
    if (task == me->task_idx[seg]) {
        me->task_idx[seg] = APR_RING_NEXT(task, link);
        if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
                                                   apr_thread_pool_task, link)
            || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
            me->task_idx[seg] = NULL;
        }
    }
    APR_RING_REMOVE(task, link);
    return task;
}
static apr_interval_time_t waiting_time(apr_thread_pool_t * me)
{
    apr_thread_pool_task_t *task = NULL;

    task = APR_RING_FIRST(me->scheduled_tasks);
    assert(task != NULL);
    assert(task !=
           APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
                             link));
    return task->dispatch.time - apr_time_now();
}
/*
 * NOTE: This function is not thread safe by itself. The caller should hold
 * the lock.
 */
static struct apr_thread_list_elt *elt_new(apr_thread_pool_t * me,
                                           apr_thread_t * t)
{
    struct apr_thread_list_elt *elt;

    if (APR_RING_EMPTY(me->recycled_thds, apr_thread_list_elt, link)) {
        elt = apr_pcalloc(me->pool, sizeof(*elt));
        if (NULL == elt) {
            return NULL;
        }
    }
    else {
        elt = APR_RING_FIRST(me->recycled_thds);
        APR_RING_REMOVE(elt, link);
    }

    APR_RING_ELEM_INIT(elt, link);
    elt->thd = t;
    elt->current_owner = NULL;
    elt->state = TH_RUN;
    return elt;
}
/*
 * The worker thread function. Takes a task from the queue and performs it if
 * there is one; otherwise, puts itself on the idle thread list and waits for
 * a signal to wake up.
 * A thread that is asked to stop after finishing a task terminates directly
 * by detaching and exiting. Otherwise, the thread remains on the idle thread
 * list and must be joined.
 */
static void *APR_THREAD_FUNC thread_pool_func(apr_thread_t * t, void *param)
{
    apr_thread_pool_t *me = param;
    apr_thread_pool_task_t *task = NULL;
    apr_interval_time_t wait;
    struct apr_thread_list_elt *elt;

    apr_thread_mutex_lock(me->lock);
    elt = elt_new(me, t);
    if (!elt) {
        apr_thread_mutex_unlock(me->lock);
        apr_thread_exit(t, APR_ENOMEM);
    }

    while (!me->terminated && elt->state != TH_STOP) {
        /* If this is not a new element, it was just awakened from idle */
        if (APR_RING_NEXT(elt, link) != elt) {
            --me->idle_cnt;
            APR_RING_REMOVE(elt, link);
        }

        APR_RING_INSERT_TAIL(me->busy_thds, elt, apr_thread_list_elt, link);
        task = pop_task(me);
        while (NULL != task && !me->terminated) {
            ++me->tasks_run;
            elt->current_owner = task->owner;
            apr_thread_mutex_unlock(me->lock);
            apr_thread_data_set(task, "apr_thread_pool_task", NULL, t);
            task->func(t, task->param);
            apr_thread_mutex_lock(me->lock);
            APR_RING_INSERT_TAIL(me->recycled_tasks, task,
                                 apr_thread_pool_task, link);
            elt->current_owner = NULL;
            if (TH_STOP == elt->state) {
                break;
            }
            task = pop_task(me);
        }
        assert(NULL == elt->current_owner);
        if (TH_STOP != elt->state)
            APR_RING_REMOVE(elt, link);

        /* Check whether this busy thread has been asked to stop;
         * such a thread is not joinable and must detach itself */
        if ((me->idle_cnt >= me->idle_max
             && !(me->scheduled_task_cnt && 0 >= me->idle_max)
             && !me->idle_wait)
            || me->terminated || elt->state != TH_RUN) {
            --me->thd_cnt;
            if ((TH_PROBATION == elt->state) && me->idle_wait)
                ++me->thd_timed_out;
            APR_RING_INSERT_TAIL(me->recycled_thds, elt,
                                 apr_thread_list_elt, link);
            apr_thread_mutex_unlock(me->lock);
            apr_thread_detach(t);
            apr_thread_exit(t, APR_SUCCESS);
            return NULL;        /* should not be here, safety net */
        }

        /* busy thread becomes idle */
        ++me->idle_cnt;
        APR_RING_INSERT_TAIL(me->idle_thds, elt, apr_thread_list_elt, link);

        /*
         * If there is a scheduled task, always wake up in time to perform it,
         * since there is no guarantee that a currently idle thread will be
         * the one scheduled for the next scheduled task.
         */
        if (me->scheduled_task_cnt)
            wait = waiting_time(me);
        else if (me->idle_cnt > me->idle_max) {
            wait = me->idle_wait;
            elt->state = TH_PROBATION;
        }
        else
            wait = -1;

        if (wait >= 0) {
            apr_thread_cond_timedwait(me->cond, me->lock, wait);
        }
        else {
            apr_thread_cond_wait(me->cond, me->lock);
        }
    }

    /* idle thread that has been asked to stop; it will be joined */
    --me->thd_cnt;
    apr_thread_mutex_unlock(me->lock);
    apr_thread_exit(t, APR_SUCCESS);
    return NULL;                /* should not be here, safety net */
}
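
/*
 * A compact sketch of the worker states used above (a reading of this file,
 * not normative documentation):
 *
 *   TH_RUN        normal worker: runs tasks, then idles on the condition.
 *   TH_PROBATION  idled beyond idle_max with idle_wait set; if it is not
 *                 put back to work, it exits and bumps thd_timed_out.
 *   TH_STOP       asked to stop by trim_threads() or cleanup; a busy thread
 *                 detaches and exits on its own, an idle one exits and is
 *                 joined by trim_idle_threads().
 */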
static apr_status_t thread_pool_cleanup(void *me)
{
    apr_thread_pool_t *_myself = me;

    _myself->terminated = 1;
    apr_thread_pool_idle_max_set(_myself, 0);
    while (_myself->thd_cnt) {
        apr_sleep(20 * 1000);   /* poll for thread exit, 20 ms at a time */
    }
    apr_thread_mutex_destroy(_myself->lock);
    apr_thread_cond_destroy(_myself->cond);
    return APR_SUCCESS;
}
APU_DECLARE(apr_status_t) apr_thread_pool_create(apr_thread_pool_t ** me,
                                                 apr_size_t init_threads,
                                                 apr_size_t max_threads,
                                                 apr_pool_t * pool)
{
    apr_thread_t *t;
    apr_status_t rv = APR_SUCCESS;
    apr_thread_pool_t *tp;

    *me = NULL;
    tp = apr_pcalloc(pool, sizeof(apr_thread_pool_t));

    /*
     * This pool will be used by different threads. As we cannot ensure that
     * our caller won't use the pool without acquiring the mutex, we must
     * create a new sub pool.
     */
    rv = apr_pool_create(&tp->pool, pool);
    if (APR_SUCCESS != rv)
        return rv;
    rv = thread_pool_construct(tp, init_threads, max_threads);
    if (APR_SUCCESS != rv)
        return rv;
    apr_pool_pre_cleanup_register(tp->pool, tp, thread_pool_cleanup);

    while (init_threads) {
        /* Grab the mutex as apr_thread_create() and thread_pool_func() will
         * allocate from (*me)->pool. This is dangerous if there are multiple
         * initial threads to create.
         */
        apr_thread_mutex_lock(tp->lock);
        rv = apr_thread_create(&t, NULL, thread_pool_func, tp, tp->pool);
        apr_thread_mutex_unlock(tp->lock);
        if (APR_SUCCESS != rv) {
            break;
        }
        tp->thd_cnt++;
        if (tp->thd_cnt > tp->thd_high) {
            tp->thd_high = tp->thd_cnt;
        }
        --init_threads;
    }

    if (rv == APR_SUCCESS) {
        *me = tp;
    }

    return rv;
}
APU_DECLARE(apr_status_t) apr_thread_pool_destroy(apr_thread_pool_t * me)
{
    apr_pool_destroy(me->pool);
    return APR_SUCCESS;
}
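
/*
 * Usage sketch (illustrative only; assumes a caller-defined task function
 * my_task, its argument my_arg, and an existing apr_pool_t *pool):
 *
 *     apr_thread_pool_t *tp;
 *     if (apr_thread_pool_create(&tp, 4, 16, pool) == APR_SUCCESS) {
 *         apr_thread_pool_push(tp, my_task, my_arg,
 *                              APR_THREAD_TASK_PRIORITY_NORMAL, NULL);
 *         ...
 *         apr_thread_pool_destroy(tp);
 *     }
 */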
/*
 * NOTE: This function is not thread safe by itself. The caller should hold
 * the lock.
 */
static apr_thread_pool_task_t *task_new(apr_thread_pool_t * me,
                                        apr_thread_start_t func,
                                        void *param, apr_byte_t priority,
                                        void *owner, apr_time_t time)
{
    apr_thread_pool_task_t *t;

    if (APR_RING_EMPTY(me->recycled_tasks, apr_thread_pool_task, link)) {
        t = apr_pcalloc(me->pool, sizeof(*t));
        if (NULL == t) {
            return NULL;
        }
    }
    else {
        t = APR_RING_FIRST(me->recycled_tasks);
        APR_RING_REMOVE(t, link);
    }

    APR_RING_ELEM_INIT(t, link);
    t->func = func;
    t->param = param;
    t->owner = owner;
    if (time > 0) {
        t->dispatch.time = apr_time_now() + time;
    }
    else {
        t->dispatch.priority = priority;
    }
    return t;
}
/*
 * Test whether the task is the only one within its priority segment.
 * If it is not, return the first element with the same or lower priority.
 * Otherwise, add the task into the queue and return NULL.
 *
 * NOTE: This function is not thread safe by itself. The caller should hold
 * the lock.
 */
static apr_thread_pool_task_t *add_if_empty(apr_thread_pool_t * me,
                                            apr_thread_pool_task_t * const t)
{
    int seg;
    int next;
    apr_thread_pool_task_t *t_next;

    seg = TASK_PRIORITY_SEG(t);
    if (me->task_idx[seg]) {
        assert(APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
               me->task_idx[seg]);
        t_next = me->task_idx[seg];
        while (t_next->dispatch.priority > t->dispatch.priority) {
            t_next = APR_RING_NEXT(t_next, link);
            if (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) ==
                t_next) {
                break;
            }
        }
        return t_next;
    }

    for (next = seg - 1; next >= 0; next--) {
        if (me->task_idx[next]) {
            APR_RING_INSERT_BEFORE(me->task_idx[next], t, link);
            break;
        }
    }
    if (0 > next) {
        APR_RING_INSERT_TAIL(me->tasks, t, apr_thread_pool_task, link);
    }
    me->task_idx[seg] = t;
    return NULL;
}
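
/*
 * A worked example of the segment index above (illustrative): with tasks of
 * priorities 200, 130, and 40 queued, the ring is ordered 200, 130, 40 and
 * task_idx[3], task_idx[2], task_idx[0] point at them respectively. Adding
 * a new priority-135 task finds task_idx[2] non-NULL, so add_if_empty()
 * returns the priority-130 task and the caller inserts the new task before
 * it, keeping the ring sorted by descending priority.
 */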
/*
 * Schedule a task to run in "time" microseconds. Find the spot in the ring
 * where the time fits, so the earliest deadline stays at the head and a
 * waiting thread wakes up when that time is reached.
 */
static apr_status_t schedule_task(apr_thread_pool_t *me,
                                  apr_thread_start_t func, void *param,
                                  void *owner, apr_interval_time_t time)
{
    apr_thread_pool_task_t *t;
    apr_thread_pool_task_t *t_loc;
    apr_thread_t *thd;
    apr_status_t rv = APR_SUCCESS;

    apr_thread_mutex_lock(me->lock);

    t = task_new(me, func, param, 0, owner, time);
    if (NULL == t) {
        apr_thread_mutex_unlock(me->lock);
        return APR_ENOMEM;
    }
    t_loc = APR_RING_FIRST(me->scheduled_tasks);
    while (NULL != t_loc) {
        /* if the time is less than the entry's, insert ahead of it */
        if (t->dispatch.time < t_loc->dispatch.time) {
            ++me->scheduled_task_cnt;
            APR_RING_INSERT_BEFORE(t_loc, t, link);
            break;
        }
        else {
            t_loc = APR_RING_NEXT(t_loc, link);
            if (t_loc ==
                APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
                                  link)) {
                ++me->scheduled_task_cnt;
                APR_RING_INSERT_TAIL(me->scheduled_tasks, t,
                                     apr_thread_pool_task, link);
                break;
            }
        }
    }

    /* there should be at least one thread for scheduled tasks */
    if (0 == me->thd_cnt) {
        rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
        if (APR_SUCCESS == rv) {
            ++me->thd_cnt;
            if (me->thd_cnt > me->thd_high)
                me->thd_high = me->thd_cnt;
        }
    }
    apr_thread_cond_signal(me->cond);
    apr_thread_mutex_unlock(me->lock);
    return rv;
}
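
/*
 * Usage sketch for the scheduling path (illustrative; my_task and my_arg
 * are assumed to be defined by the caller): run a task once, five seconds
 * from now.
 *
 *     apr_thread_pool_schedule(tp, my_task, my_arg,
 *                              apr_time_from_sec(5), NULL);
 */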
static apr_status_t add_task(apr_thread_pool_t *me, apr_thread_start_t func,
                             void *param, apr_byte_t priority, int push,
                             void *owner)
{
    apr_thread_pool_task_t *t;
    apr_thread_pool_task_t *t_loc;
    apr_thread_t *thd;
    apr_status_t rv = APR_SUCCESS;

    apr_thread_mutex_lock(me->lock);

    t = task_new(me, func, param, priority, owner, 0);
    if (NULL == t) {
        apr_thread_mutex_unlock(me->lock);
        return APR_ENOMEM;
    }

    t_loc = add_if_empty(me, t);
    if (NULL == t_loc) {
        goto FINAL_EXIT;
    }

    if (push) {
        while (APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link) !=
               t_loc && t_loc->dispatch.priority >= t->dispatch.priority) {
            t_loc = APR_RING_NEXT(t_loc, link);
        }
    }
    APR_RING_INSERT_BEFORE(t_loc, t, link);
    if (!push) {
        if (t_loc == me->task_idx[TASK_PRIORITY_SEG(t)]) {
            me->task_idx[TASK_PRIORITY_SEG(t)] = t;
        }
    }

  FINAL_EXIT:
    ++me->task_cnt;
    if (me->task_cnt > me->tasks_high)
        me->tasks_high = me->task_cnt;
    if (0 == me->thd_cnt || (0 == me->idle_cnt && me->thd_cnt < me->thd_max &&
                             me->task_cnt > me->threshold)) {
        rv = apr_thread_create(&thd, NULL, thread_pool_func, me, me->pool);
        if (APR_SUCCESS == rv) {
            ++me->thd_cnt;
            if (me->thd_cnt > me->thd_high)
                me->thd_high = me->thd_cnt;
        }
    }
    apr_thread_cond_signal(me->cond);
    apr_thread_mutex_unlock(me->lock);
    return rv;
}
APU_DECLARE(apr_status_t) apr_thread_pool_push(apr_thread_pool_t *me,
                                               apr_thread_start_t func,
                                               void *param, apr_byte_t priority,
                                               void *owner)
{
    return add_task(me, func, param, priority, 1, owner);
}

APU_DECLARE(apr_status_t) apr_thread_pool_schedule(apr_thread_pool_t *me,
                                                   apr_thread_start_t func,
                                                   void *param,
                                                   apr_interval_time_t time,
                                                   void *owner)
{
    return schedule_task(me, func, param, owner, time);
}

APU_DECLARE(apr_status_t) apr_thread_pool_top(apr_thread_pool_t *me,
                                              apr_thread_start_t func,
                                              void *param, apr_byte_t priority,
                                              void *owner)
{
    return add_task(me, func, param, priority, 0, owner);
}
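
/*
 * Note on push vs. top: both honor priority ordering; within the same
 * priority, push() queues behind existing tasks (FIFO) while top() queues
 * ahead of them (LIFO), which is what the push/!push branches in add_task()
 * implement.
 */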
static apr_status_t remove_scheduled_tasks(apr_thread_pool_t *me,
                                           void *owner)
{
    apr_thread_pool_task_t *t_loc;
    apr_thread_pool_task_t *next;

    t_loc = APR_RING_FIRST(me->scheduled_tasks);
    while (t_loc !=
           APR_RING_SENTINEL(me->scheduled_tasks, apr_thread_pool_task,
                             link)) {
        next = APR_RING_NEXT(t_loc, link);
        /* if this task belongs to the owner, remove it */
        if (t_loc->owner == owner) {
            --me->scheduled_task_cnt;
            APR_RING_REMOVE(t_loc, link);
        }
        t_loc = next;
    }
    return APR_SUCCESS;
}
static apr_status_t remove_tasks(apr_thread_pool_t *me, void *owner)
{
    apr_thread_pool_task_t *t_loc;
    apr_thread_pool_task_t *next;
    int seg;

    t_loc = APR_RING_FIRST(me->tasks);
    while (t_loc != APR_RING_SENTINEL(me->tasks, apr_thread_pool_task, link)) {
        next = APR_RING_NEXT(t_loc, link);
        if (t_loc->owner == owner) {
            --me->task_cnt;
            seg = TASK_PRIORITY_SEG(t_loc);
            if (t_loc == me->task_idx[seg]) {
                me->task_idx[seg] = APR_RING_NEXT(t_loc, link);
                if (me->task_idx[seg] == APR_RING_SENTINEL(me->tasks,
                                                           apr_thread_pool_task,
                                                           link)
                    || TASK_PRIORITY_SEG(me->task_idx[seg]) != seg) {
                    me->task_idx[seg] = NULL;
                }
            }
            APR_RING_REMOVE(t_loc, link);
        }
        t_loc = next;
    }
    return APR_SUCCESS;
}
static void wait_on_busy_threads(apr_thread_pool_t *me, void *owner)
{
#ifndef NDEBUG
    apr_os_thread_t *os_thread;
#endif
    struct apr_thread_list_elt *elt;

    apr_thread_mutex_lock(me->lock);
    elt = APR_RING_FIRST(me->busy_thds);
    while (elt != APR_RING_SENTINEL(me->busy_thds, apr_thread_list_elt, link)) {
        if (elt->current_owner != owner) {
            elt = APR_RING_NEXT(elt, link);
            continue;
        }
#ifndef NDEBUG
        /* make sure the thread is not the one calling tasks_cancel */
        apr_os_thread_get(&os_thread, elt->thd);
#ifdef WIN32
        /* hack for apr win32 bug */
        assert(!apr_os_thread_equal(apr_os_thread_current(), os_thread));
#else
        assert(!apr_os_thread_equal(apr_os_thread_current(), *os_thread));
#endif
#endif
        while (elt->current_owner == owner) {
            apr_thread_mutex_unlock(me->lock);
            apr_sleep(200 * 1000);
            apr_thread_mutex_lock(me->lock);
        }
        elt = APR_RING_FIRST(me->busy_thds);
    }
    apr_thread_mutex_unlock(me->lock);
    return;
}
APU_DECLARE(apr_status_t) apr_thread_pool_tasks_cancel(apr_thread_pool_t *me,
                                                       void *owner)
{
    apr_status_t rv = APR_SUCCESS;

    apr_thread_mutex_lock(me->lock);
    if (me->task_cnt > 0) {
        rv = remove_tasks(me, owner);
    }
    if (me->scheduled_task_cnt > 0) {
        rv = remove_scheduled_tasks(me, owner);
    }
    apr_thread_mutex_unlock(me->lock);
    wait_on_busy_threads(me, owner);

    return rv;
}
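
/*
 * Usage sketch (illustrative): cancel every task that was queued with a
 * given owner cookie; once this returns it is safe to tear down whatever
 * that owner references, since tasks_cancel() also waits for busy threads
 * that are currently running the owner's tasks.
 *
 *     apr_thread_pool_tasks_cancel(tp, my_owner);
 */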
APU_DECLARE(apr_size_t) apr_thread_pool_tasks_count(apr_thread_pool_t *me)
{
    return me->task_cnt;
}

APU_DECLARE(apr_size_t)
    apr_thread_pool_scheduled_tasks_count(apr_thread_pool_t *me)
{
    return me->scheduled_task_cnt;
}

APU_DECLARE(apr_size_t) apr_thread_pool_threads_count(apr_thread_pool_t *me)
{
    return me->thd_cnt;
}

APU_DECLARE(apr_size_t) apr_thread_pool_busy_count(apr_thread_pool_t *me)
{
    return me->thd_cnt - me->idle_cnt;
}

APU_DECLARE(apr_size_t) apr_thread_pool_idle_count(apr_thread_pool_t *me)
{
    return me->idle_cnt;
}

APU_DECLARE(apr_size_t)
    apr_thread_pool_tasks_run_count(apr_thread_pool_t * me)
{
    return me->tasks_run;
}

APU_DECLARE(apr_size_t)
    apr_thread_pool_tasks_high_count(apr_thread_pool_t * me)
{
    return me->tasks_high;
}

APU_DECLARE(apr_size_t)
    apr_thread_pool_threads_high_count(apr_thread_pool_t * me)
{
    return me->thd_high;
}

APU_DECLARE(apr_size_t)
    apr_thread_pool_threads_idle_timeout_count(apr_thread_pool_t * me)
{
    return me->thd_timed_out;
}

APU_DECLARE(apr_size_t) apr_thread_pool_idle_max_get(apr_thread_pool_t *me)
{
    return me->idle_max;
}

APU_DECLARE(apr_interval_time_t)
    apr_thread_pool_idle_wait_get(apr_thread_pool_t * me)
{
    return me->idle_wait;
}
/*
 * This function stops threads in excess of *cnt, trimming the chosen (idle
 * or busy) list down to *cnt threads. On return, *cnt holds the number of
 * threads actually asked to stop.
 * NOTE: Busy threads may become idle while this function runs.
 */
static struct apr_thread_list_elt *trim_threads(apr_thread_pool_t *me,
                                                apr_size_t *cnt, int idle)
{
    struct apr_thread_list *thds;
    apr_size_t n, n_dbg, i;
    struct apr_thread_list_elt *head, *tail, *elt;

    apr_thread_mutex_lock(me->lock);
    if (idle) {
        thds = me->idle_thds;
        n = me->idle_cnt;
    }
    else {
        thds = me->busy_thds;
        n = me->thd_cnt - me->idle_cnt;
    }
    if (n <= *cnt) {
        apr_thread_mutex_unlock(me->lock);
        *cnt = 0;
        return NULL;
    }
    n -= *cnt;

    head = APR_RING_FIRST(thds);
    for (i = 0; i < *cnt; i++) {
        head = APR_RING_NEXT(head, link);
    }
    tail = APR_RING_LAST(thds);
    if (idle) {
        APR_RING_UNSPLICE(head, tail, link);
        me->idle_cnt = *cnt;
    }

    n_dbg = 0;
    for (elt = head; elt != tail; elt = APR_RING_NEXT(elt, link)) {
        elt->state = TH_STOP;
        n_dbg++;
    }
    elt->state = TH_STOP;
    n_dbg++;
    assert(n == n_dbg);
    *cnt = n;

    apr_thread_mutex_unlock(me->lock);

    APR_RING_PREV(head, link) = NULL;
    APR_RING_NEXT(tail, link) = NULL;
    return head;
}
static apr_size_t trim_idle_threads(apr_thread_pool_t *me, apr_size_t cnt)
{
    apr_size_t n_dbg;
    struct apr_thread_list_elt *elt, *head, *tail;
    apr_status_t rv;

    elt = trim_threads(me, &cnt, 1);

    apr_thread_mutex_lock(me->lock);
    apr_thread_cond_broadcast(me->cond);
    apr_thread_mutex_unlock(me->lock);

    n_dbg = 0;
    if (NULL != (head = elt)) {
        while (elt) {
            tail = elt;
            apr_thread_join(&rv, elt->thd);
            elt = APR_RING_NEXT(elt, link);
            ++n_dbg;
        }
        apr_thread_mutex_lock(me->lock);
        APR_RING_SPLICE_TAIL(me->recycled_thds, head, tail,
                             apr_thread_list_elt, link);
        apr_thread_mutex_unlock(me->lock);
    }
    assert(cnt == n_dbg);

    return cnt;
}
/* Don't join busy threads, for performance reasons: there is no telling how
 * long the current task will take to perform.
 */
static apr_size_t trim_busy_threads(apr_thread_pool_t *me, apr_size_t cnt)
{
    trim_threads(me, &cnt, 0);
    return cnt;
}

APU_DECLARE(apr_size_t) apr_thread_pool_idle_max_set(apr_thread_pool_t *me,
                                                     apr_size_t cnt)
{
    me->idle_max = cnt;
    cnt = trim_idle_threads(me, cnt);
    return cnt;
}
APU_DECLARE(apr_interval_time_t)
    apr_thread_pool_idle_wait_set(apr_thread_pool_t * me,
                                  apr_interval_time_t timeout)
{
    apr_interval_time_t oldtime;

    oldtime = me->idle_wait;
    me->idle_wait = timeout;

    return oldtime;
}

APU_DECLARE(apr_size_t) apr_thread_pool_thread_max_get(apr_thread_pool_t *me)
{
    return me->thd_max;
}
/*
 * This function stops extra working threads down to the new limit.
 * NOTE: Busy threads may become idle while this function runs.
 */
APU_DECLARE(apr_size_t) apr_thread_pool_thread_max_set(apr_thread_pool_t *me,
                                                       apr_size_t cnt)
{
    apr_size_t n;

    me->thd_max = cnt;
    if (0 == cnt || me->thd_cnt <= cnt) {
        return 0;
    }

    n = me->thd_cnt - cnt;
    if (n >= me->idle_cnt) {
        trim_busy_threads(me, n - me->idle_cnt);
        trim_idle_threads(me, 0);
    }
    else {
        trim_idle_threads(me, me->idle_cnt - n);
    }
    return n;
}
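
/*
 * A worked example of the arithmetic above (illustrative): with thd_cnt = 10,
 * idle_cnt = 3, and a new max of 6, n = 4 threads must go. Since n >=
 * idle_cnt, trim_busy_threads(me, 1) asks one busy thread to stop and
 * trim_idle_threads(me, 0) stops and joins all three idle ones.
 */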
APU_DECLARE(apr_size_t) apr_thread_pool_threshold_get(apr_thread_pool_t *me)
{
    return me->threshold;
}

APU_DECLARE(apr_size_t) apr_thread_pool_threshold_set(apr_thread_pool_t *me,
                                                      apr_size_t val)
{
    apr_size_t ov;

    ov = me->threshold;
    me->threshold = val;
    return ov;
}
APU_DECLARE(apr_status_t) apr_thread_pool_task_owner_get(apr_thread_t *thd,
                                                         void **owner)
{
    apr_status_t rv;
    apr_thread_pool_task_t *task;
    void *data;

    rv = apr_thread_data_get(&data, "apr_thread_pool_task", thd);
    if (rv != APR_SUCCESS) {
        return rv;
    }

    task = data;
    if (!task) {
        *owner = NULL;
        return APR_BADARG;
    }

    *owner = task->owner;
    return APR_SUCCESS;
}
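
/*
 * Usage sketch (illustrative): from inside a running task, recover the
 * owner cookie the task was queued with.
 *
 *     void *owner;
 *     if (apr_thread_pool_task_owner_get(thd, &owner) == APR_SUCCESS) {
 *         ... owner is the value passed to push/schedule/top ...
 *     }
 */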
#endif /* APR_HAS_THREADS */

/* vim: set ts=4 sw=4 et cin tw=80: */