1 /* Licensed to the Apache Software Foundation (ASF) under one or more
2 * contributor license agreements. See the NOTICE file distributed with
3 * this work for additional information regarding copyright ownership.
4 * The ASF licenses this file to You under the Apache License, Version 2.0
5 * (the "License"); you may not use this file except in compliance with
6 * the License. You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "apr_private.h"
20 #include "apr_atomic.h"
21 #include "apr_portable.h" /* for get_os_proc */
22 #include "apr_strings.h"
23 #include "apr_general.h"
24 #include "apr_pools.h"
25 #include "apr_allocator.h"
27 #include "apr_thread_mutex.h"
30 #define APR_WANT_MEMFUNC
35 #include <stdlib.h> /* for malloc, free and abort */
39 #include <unistd.h> /* for getpid and sysconf */
42 #if APR_ALLOCATOR_USES_MMAP
51 * XXX: This is not optimal when using --enable-allocator-uses-mmap on
52 * XXX: machines with large pagesize, but currently the sink is assumed
53 * XXX: to be index 0, so MIN_ALLOC must be at least two pages.
55 #define MIN_ALLOC (2 * BOUNDARY_SIZE)
58 #if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
59 static unsigned int boundary_index;
60 static unsigned int boundary_size;
61 #define BOUNDARY_INDEX boundary_index
62 #define BOUNDARY_SIZE boundary_size
64 #define BOUNDARY_INDEX 12
65 #define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)
69 * Timing constants for killing subprocesses
70 * There is a total 3-second delay between sending a SIGINT
71 * and sending of the final SIGKILL.
72 * TIMEOUT_INTERVAL should be set to TIMEOUT_USECS / 64
73 * for the exponential timeout algorithm.
75 #define TIMEOUT_USECS 3000000
76 #define TIMEOUT_INTERVAL 46875
81 * @note The max_free_index and current_free_index fields are not really
82 * indices, but quantities of BOUNDARY_SIZE big memory blocks.
85 struct apr_allocator_t {
86 /** largest used index into free[], always < MAX_INDEX */
87 apr_uint32_t max_index;
88 /** Total size (in BOUNDARY_SIZE multiples) of unused memory before
89 * blocks are given back. @see apr_allocator_max_free_set().
90 * @note Initialized to APR_ALLOCATOR_MAX_FREE_UNLIMITED,
91 * which means to never give back blocks.
93 apr_uint32_t max_free_index;
95 * Memory size (in BOUNDARY_SIZE multiples) that currently must be freed
96 * before blocks are given back. Range: 0..max_free_index
98 apr_uint32_t current_free_index;
100 apr_thread_mutex_t *mutex;
101 #endif /* APR_HAS_THREADS */
104 * Lists of free nodes. Slot 0 is used for oversized nodes,
105 * and the slots 1..MAX_INDEX-1 contain nodes of sizes
106 * (i+1) * BOUNDARY_SIZE. Example for BOUNDARY_INDEX == 12:
107 * slot 0: nodes larger than 81920
111 * slot 19: size 81920
113 apr_memnode_t *free[MAX_INDEX];
116 #define SIZEOF_ALLOCATOR_T APR_ALIGN_DEFAULT(sizeof(apr_allocator_t))
123 APR_DECLARE(apr_status_t) apr_allocator_create(apr_allocator_t **allocator)
125 apr_allocator_t *new_allocator;
129 if ((new_allocator = malloc(SIZEOF_ALLOCATOR_T)) == NULL)
132 memset(new_allocator, 0, SIZEOF_ALLOCATOR_T);
133 new_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;
135 *allocator = new_allocator;
140 APR_DECLARE(void) apr_allocator_destroy(apr_allocator_t *allocator)
143 apr_memnode_t *node, **ref;
145 for (index = 0; index < MAX_INDEX; index++) {
146 ref = &allocator->free[index];
147 while ((node = *ref) != NULL) {
149 #if APR_ALLOCATOR_USES_MMAP
150 munmap(node, (node->index+1) << BOUNDARY_INDEX);
161 APR_DECLARE(void) apr_allocator_mutex_set(apr_allocator_t *allocator,
162 apr_thread_mutex_t *mutex)
164 allocator->mutex = mutex;
167 APR_DECLARE(apr_thread_mutex_t *) apr_allocator_mutex_get(
168 apr_allocator_t *allocator)
170 return allocator->mutex;
172 #endif /* APR_HAS_THREADS */
174 APR_DECLARE(void) apr_allocator_owner_set(apr_allocator_t *allocator,
177 allocator->owner = pool;
180 APR_DECLARE(apr_pool_t *) apr_allocator_owner_get(apr_allocator_t *allocator)
182 return allocator->owner;
185 APR_DECLARE(void) apr_allocator_max_free_set(apr_allocator_t *allocator,
188 apr_uint32_t max_free_index;
189 apr_uint32_t size = (APR_UINT32_TRUNC_CAST)in_size;
192 apr_thread_mutex_t *mutex;
194 mutex = apr_allocator_mutex_get(allocator);
196 apr_thread_mutex_lock(mutex);
197 #endif /* APR_HAS_THREADS */
199 max_free_index = APR_ALIGN(size, BOUNDARY_SIZE) >> BOUNDARY_INDEX;
200 allocator->current_free_index += max_free_index;
201 allocator->current_free_index -= allocator->max_free_index;
202 allocator->max_free_index = max_free_index;
203 if (allocator->current_free_index > max_free_index)
204 allocator->current_free_index = max_free_index;
208 apr_thread_mutex_unlock(mutex);
213 apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t in_size)
215 apr_memnode_t *node, **ref;
216 apr_uint32_t max_index;
217 apr_size_t size, i, index;
219 /* Round up the block size to the next boundary, but always
220 * allocate at least a certain size (MIN_ALLOC).
222 size = APR_ALIGN(in_size + APR_MEMNODE_T_SIZE, BOUNDARY_SIZE);
223 if (size < in_size) {
226 if (size < MIN_ALLOC)
229 /* Find the index for this node size by
230 * dividing its size by the boundary size
232 index = (size >> BOUNDARY_INDEX) - 1;
234 if (index > APR_UINT32_MAX) {
238 /* First see if there are any nodes in the area we know
239 * our node will fit into.
241 if (index <= allocator->max_index) {
243 if (allocator->mutex)
244 apr_thread_mutex_lock(allocator->mutex);
245 #endif /* APR_HAS_THREADS */
247 /* Walk the free list to see if there are
248 * any nodes on it of the requested size
250 * NOTE: an optimization would be to check
251 * allocator->free[index] first and if no
252 * node is present, directly use
253 * allocator->free[max_index]. This seems
254 * like overkill though and could cause
257 max_index = allocator->max_index;
258 ref = &allocator->free[index];
260 while (*ref == NULL && i < max_index) {
265 if ((node = *ref) != NULL) {
266 /* If we have found a node and it doesn't have any
267 * nodes waiting in line behind it _and_ we are on
268 * the highest available index, find the new highest
271 if ((*ref = node->next) == NULL && i >= max_index) {
276 while (*ref == NULL && max_index > 0);
278 allocator->max_index = max_index;
281 allocator->current_free_index += node->index + 1;
282 if (allocator->current_free_index > allocator->max_free_index)
283 allocator->current_free_index = allocator->max_free_index;
286 if (allocator->mutex)
287 apr_thread_mutex_unlock(allocator->mutex);
288 #endif /* APR_HAS_THREADS */
291 node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
297 if (allocator->mutex)
298 apr_thread_mutex_unlock(allocator->mutex);
299 #endif /* APR_HAS_THREADS */
302 /* If we found nothing, seek the sink (at index 0), if
305 else if (allocator->free[0]) {
307 if (allocator->mutex)
308 apr_thread_mutex_lock(allocator->mutex);
309 #endif /* APR_HAS_THREADS */
311 /* Walk the free list to see if there are
312 * any nodes on it of the requested size
314 ref = &allocator->free[0];
315 while ((node = *ref) != NULL && index > node->index)
321 allocator->current_free_index += node->index + 1;
322 if (allocator->current_free_index > allocator->max_free_index)
323 allocator->current_free_index = allocator->max_free_index;
326 if (allocator->mutex)
327 apr_thread_mutex_unlock(allocator->mutex);
328 #endif /* APR_HAS_THREADS */
331 node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
337 if (allocator->mutex)
338 apr_thread_mutex_unlock(allocator->mutex);
339 #endif /* APR_HAS_THREADS */
342 /* If we haven't got a suitable node, malloc a new one
345 #if APR_ALLOCATOR_USES_MMAP
346 if ((node = mmap(NULL, size, PROT_READ|PROT_WRITE,
347 MAP_PRIVATE|MAP_ANON, -1, 0)) == MAP_FAILED)
349 if ((node = malloc(size)) == NULL)
354 node->index = (APR_UINT32_TRUNC_CAST)index;
355 node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
356 node->endp = (char *)node + size;
362 void allocator_free(apr_allocator_t *allocator, apr_memnode_t *node)
364 apr_memnode_t *next, *freelist = NULL;
365 apr_uint32_t index, max_index;
366 apr_uint32_t max_free_index, current_free_index;
369 if (allocator->mutex)
370 apr_thread_mutex_lock(allocator->mutex);
371 #endif /* APR_HAS_THREADS */
373 max_index = allocator->max_index;
374 max_free_index = allocator->max_free_index;
375 current_free_index = allocator->current_free_index;
377 /* Walk the list of submitted nodes and free them one by one,
378 * shoving them in the right 'size' buckets as we go.
384 if (max_free_index != APR_ALLOCATOR_MAX_FREE_UNLIMITED
385 && index + 1 > current_free_index) {
386 node->next = freelist;
389 else if (index < MAX_INDEX) {
390 /* Add the node to the appropiate 'size' bucket. Adjust
391 * the max_index when appropiate.
393 if ((node->next = allocator->free[index]) == NULL
394 && index > max_index) {
397 allocator->free[index] = node;
398 if (current_free_index >= index + 1)
399 current_free_index -= index + 1;
401 current_free_index = 0;
404 /* This node is too large to keep in a specific size bucket,
405 * just add it to the sink (at index 0).
407 node->next = allocator->free[0];
408 allocator->free[0] = node;
409 if (current_free_index >= index + 1)
410 current_free_index -= index + 1;
412 current_free_index = 0;
414 } while ((node = next) != NULL);
416 allocator->max_index = max_index;
417 allocator->current_free_index = current_free_index;
420 if (allocator->mutex)
421 apr_thread_mutex_unlock(allocator->mutex);
422 #endif /* APR_HAS_THREADS */
424 while (freelist != NULL) {
426 freelist = node->next;
427 #if APR_ALLOCATOR_USES_MMAP
428 munmap(node, (node->index+1) << BOUNDARY_INDEX);
435 APR_DECLARE(apr_memnode_t *) apr_allocator_alloc(apr_allocator_t *allocator,
438 return allocator_alloc(allocator, size);
441 APR_DECLARE(void) apr_allocator_free(apr_allocator_t *allocator,
444 allocator_free(allocator, node);
453 #define APR_POOL_DEBUG_GENERAL 0x01
454 #define APR_POOL_DEBUG_VERBOSE 0x02
455 #define APR_POOL_DEBUG_LIFETIME 0x04
456 #define APR_POOL_DEBUG_OWNER 0x08
457 #define APR_POOL_DEBUG_VERBOSE_ALLOC 0x10
459 #define APR_POOL_DEBUG_VERBOSE_ALL (APR_POOL_DEBUG_VERBOSE \
460 | APR_POOL_DEBUG_VERBOSE_ALLOC)
467 typedef struct cleanup_t cleanup_t;
469 /** A list of processes */
470 struct process_chain {
471 /** The process ID */
473 apr_kill_conditions_e kill_how;
474 /** The next process in the list */
475 struct process_chain *next;
481 typedef struct debug_node_t debug_node_t;
483 struct debug_node_t {
490 #define SIZEOF_DEBUG_NODE_T APR_ALIGN_DEFAULT(sizeof(debug_node_t))
492 #endif /* APR_POOL_DEBUG */
494 /* The ref field in the apr_pool_t struct holds a
495 * pointer to the pointer referencing this pool.
496 * It is used for parent, child, sibling management.
497 * Look at apr_pool_create_ex() and apr_pool_destroy()
498 * to see how it is used.
506 cleanup_t *free_cleanups;
507 apr_allocator_t *allocator;
508 struct process_chain *subprocesses;
509 apr_abortfunc_t abort_fn;
510 apr_hash_t *user_data;
514 apr_memnode_t *active;
515 apr_memnode_t *self; /* The node containing the pool itself */
516 char *self_first_avail;
518 #else /* APR_POOL_DEBUG */
519 apr_pool_t *joined; /* the caller has guaranteed that this pool
520 * will survive as long as ->joined */
522 const char *file_line;
523 apr_uint32_t creation_flags;
524 unsigned int stat_alloc;
525 unsigned int stat_total_alloc;
526 unsigned int stat_clear;
528 apr_os_thread_t owner;
529 apr_thread_mutex_t *mutex;
530 #endif /* APR_HAS_THREADS */
531 #endif /* APR_POOL_DEBUG */
533 apr_os_proc_t owner_proc;
534 #endif /* defined(NETWARE) */
535 cleanup_t *pre_cleanups;
538 #define SIZEOF_POOL_T APR_ALIGN_DEFAULT(sizeof(apr_pool_t))
545 static apr_byte_t apr_pools_initialized = 0;
546 static apr_pool_t *global_pool = NULL;
549 static apr_allocator_t *global_allocator = NULL;
550 #endif /* !APR_POOL_DEBUG */
552 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
553 static apr_file_t *file_stderr = NULL;
554 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
560 static void run_cleanups(cleanup_t **c);
561 static void free_proc_chain(struct process_chain *procs);
564 static void pool_destroy_debug(apr_pool_t *pool, const char *file_line);
572 APR_DECLARE(apr_status_t) apr_pool_initialize(void)
576 if (apr_pools_initialized++)
579 #if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
580 boundary_size = sysconf(_SC_PAGESIZE);
582 while ( (1 << boundary_index) < boundary_size)
584 boundary_size = (1 << boundary_index);
587 if ((rv = apr_allocator_create(&global_allocator)) != APR_SUCCESS) {
588 apr_pools_initialized = 0;
592 if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
593 global_allocator)) != APR_SUCCESS) {
594 apr_allocator_destroy(global_allocator);
595 global_allocator = NULL;
596 apr_pools_initialized = 0;
600 apr_pool_tag(global_pool, "apr_global_pool");
602 /* This has to happen here because mutexes might be backed by
603 * atomics. It used to be snug and safe in apr_initialize().
605 * Warning: apr_atomic_init() must always be called, by any
606 * means possible, from apr_initialize().
608 if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS) {
614 apr_thread_mutex_t *mutex;
616 if ((rv = apr_thread_mutex_create(&mutex,
617 APR_THREAD_MUTEX_DEFAULT,
618 global_pool)) != APR_SUCCESS) {
622 apr_allocator_mutex_set(global_allocator, mutex);
624 #endif /* APR_HAS_THREADS */
626 apr_allocator_owner_set(global_allocator, global_pool);
631 APR_DECLARE(void) apr_pool_terminate(void)
633 if (!apr_pools_initialized)
636 if (--apr_pools_initialized)
639 apr_pool_destroy(global_pool); /* This will also destroy the mutex */
642 global_allocator = NULL;
646 /* Node list management helper macros; list_insert() inserts 'node'
648 #define list_insert(node, point) do { \
649 node->ref = point->ref; \
651 node->next = point; \
652 point->ref = &node->next; \
655 /* list_remove() removes 'node' from its list. */
656 #define list_remove(node) do { \
657 *node->ref = node->next; \
658 node->next->ref = node->ref; \
661 /* Returns the amount of free space in the given node. */
662 #define node_free_space(node_) ((apr_size_t)(node_->endp - node_->first_avail))
668 APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t in_size)
670 apr_memnode_t *active, *node;
672 apr_size_t size, free_index;
674 size = APR_ALIGN_DEFAULT(in_size);
675 if (size < in_size) {
677 pool->abort_fn(APR_ENOMEM);
681 active = pool->active;
683 /* If the active node has enough bytes left, use it. */
684 if (size <= node_free_space(active)) {
685 mem = active->first_avail;
686 active->first_avail += size;
692 if (size <= node_free_space(node)) {
696 if ((node = allocator_alloc(pool->allocator, size)) == NULL) {
698 pool->abort_fn(APR_ENOMEM);
704 node->free_index = 0;
706 mem = node->first_avail;
707 node->first_avail += size;
709 list_insert(node, active);
713 free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
714 BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
716 active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
718 if (free_index >= node->free_index)
724 while (free_index < node->free_index);
727 list_insert(active, node);
732 /* Provide an implementation of apr_pcalloc for backward compatibility
733 * with code built before apr_pcalloc was a macro
740 APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size);
741 APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
745 if ((mem = apr_palloc(pool, size)) != NULL) {
746 memset(mem, 0, size);
754 * Pool creation/destruction
757 APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
759 apr_memnode_t *active;
761 /* Run pre destroy cleanups */
762 run_cleanups(&pool->pre_cleanups);
763 pool->pre_cleanups = NULL;
765 /* Destroy the subpools. The subpools will detach themselves from
766 * this pool thus this loop is safe and easy.
769 apr_pool_destroy(pool->child);
772 run_cleanups(&pool->cleanups);
773 pool->cleanups = NULL;
774 pool->free_cleanups = NULL;
776 /* Free subprocesses */
777 free_proc_chain(pool->subprocesses);
778 pool->subprocesses = NULL;
780 /* Clear the user data. */
781 pool->user_data = NULL;
783 /* Find the node attached to the pool structure, reset it, make
784 * it the active node and free the rest of the nodes.
786 active = pool->active = pool->self;
787 active->first_avail = pool->self_first_avail;
789 if (active->next == active)
793 allocator_free(pool->allocator, active->next);
794 active->next = active;
795 active->ref = &active->next;
798 APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
800 apr_memnode_t *active;
801 apr_allocator_t *allocator;
803 /* Run pre destroy cleanups */
804 run_cleanups(&pool->pre_cleanups);
805 pool->pre_cleanups = NULL;
807 /* Destroy the subpools. The subpools will detach themselve from
808 * this pool thus this loop is safe and easy.
811 apr_pool_destroy(pool->child);
814 run_cleanups(&pool->cleanups);
816 /* Free subprocesses */
817 free_proc_chain(pool->subprocesses);
819 /* Remove the pool from the parents child list */
822 apr_thread_mutex_t *mutex;
824 if ((mutex = apr_allocator_mutex_get(pool->parent->allocator)) != NULL)
825 apr_thread_mutex_lock(mutex);
826 #endif /* APR_HAS_THREADS */
828 if ((*pool->ref = pool->sibling) != NULL)
829 pool->sibling->ref = pool->ref;
833 apr_thread_mutex_unlock(mutex);
834 #endif /* APR_HAS_THREADS */
837 /* Find the block attached to the pool structure. Save a copy of the
838 * allocator pointer, because the pool struct soon will be no more.
840 allocator = pool->allocator;
845 if (apr_allocator_owner_get(allocator) == pool) {
846 /* Make sure to remove the lock, since it is highly likely to
849 apr_allocator_mutex_set(allocator, NULL);
851 #endif /* APR_HAS_THREADS */
853 /* Free all the nodes in the pool (including the node holding the
854 * pool struct), by giving them back to the allocator.
856 allocator_free(allocator, active);
858 /* If this pool happens to be the owner of the allocator, free
859 * everything in the allocator (that includes the pool struct
860 * and the allocator). Don't worry about destroying the optional mutex
861 * in the allocator, it will have been destroyed by the cleanup function.
863 if (apr_allocator_owner_get(allocator) == pool) {
864 apr_allocator_destroy(allocator);
868 APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
870 apr_abortfunc_t abort_fn,
871 apr_allocator_t *allocator)
879 parent = global_pool;
881 /* parent will always be non-NULL here except the first time a
882 * pool is created, in which case allocator is guaranteed to be
885 if (!abort_fn && parent)
886 abort_fn = parent->abort_fn;
888 if (allocator == NULL)
889 allocator = parent->allocator;
891 if ((node = allocator_alloc(allocator,
892 MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
894 abort_fn(APR_ENOMEM);
900 node->ref = &node->next;
902 pool = (apr_pool_t *)node->first_avail;
903 node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
905 pool->allocator = allocator;
906 pool->active = pool->self = node;
907 pool->abort_fn = abort_fn;
909 pool->cleanups = NULL;
910 pool->free_cleanups = NULL;
911 pool->pre_cleanups = NULL;
912 pool->subprocesses = NULL;
913 pool->user_data = NULL;
917 pool->owner_proc = (apr_os_proc_t)getnlmhandle();
918 #endif /* defined(NETWARE) */
920 if ((pool->parent = parent) != NULL) {
922 apr_thread_mutex_t *mutex;
924 if ((mutex = apr_allocator_mutex_get(parent->allocator)) != NULL)
925 apr_thread_mutex_lock(mutex);
926 #endif /* APR_HAS_THREADS */
928 if ((pool->sibling = parent->child) != NULL)
929 pool->sibling->ref = &pool->sibling;
931 parent->child = pool;
932 pool->ref = &parent->child;
936 apr_thread_mutex_unlock(mutex);
937 #endif /* APR_HAS_THREADS */
940 pool->sibling = NULL;
949 /* Deprecated. Renamed to apr_pool_create_unmanaged_ex
951 APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
952 apr_abortfunc_t abort_fn,
953 apr_allocator_t *allocator)
955 return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
958 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
959 apr_abortfunc_t abort_fn,
960 apr_allocator_t *allocator)
964 apr_allocator_t *pool_allocator;
968 if (!apr_pools_initialized)
970 if ((pool_allocator = allocator) == NULL) {
971 if ((pool_allocator = malloc(SIZEOF_ALLOCATOR_T)) == NULL) {
973 abort_fn(APR_ENOMEM);
977 memset(pool_allocator, 0, SIZEOF_ALLOCATOR_T);
978 pool_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;
980 if ((node = allocator_alloc(pool_allocator,
981 MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
983 abort_fn(APR_ENOMEM);
989 node->ref = &node->next;
991 pool = (apr_pool_t *)node->first_avail;
992 node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
994 pool->allocator = pool_allocator;
995 pool->active = pool->self = node;
996 pool->abort_fn = abort_fn;
998 pool->cleanups = NULL;
999 pool->free_cleanups = NULL;
1000 pool->pre_cleanups = NULL;
1001 pool->subprocesses = NULL;
1002 pool->user_data = NULL;
1004 pool->parent = NULL;
1005 pool->sibling = NULL;
1009 pool->owner_proc = (apr_os_proc_t)getnlmhandle();
1010 #endif /* defined(NETWARE) */
1012 pool_allocator->owner = pool;
1023 * apr_psprintf is implemented by writing directly into the current
1024 * block of the pool, starting right at first_avail. If there's
1025 * insufficient room, then a new block is allocated and the earlier
1026 * output is copied over. The new block isn't linked into the pool
1027 * until all the output is done.
1029 * Note that this is completely safe because nothing else can
1030 * allocate in this apr_pool_t while apr_psprintf is running. alarms are
1031 * blocked, and the only thing outside of apr_pools.c that's invoked
1032 * is apr_vformatter -- which was purposefully written to be
1033 * self-contained with no callouts.
1036 struct psprintf_data {
1037 apr_vformatter_buff_t vbuff;
1038 apr_memnode_t *node;
1040 apr_byte_t got_a_new_node;
1041 apr_memnode_t *free;
1044 #define APR_PSPRINTF_MIN_STRINGSIZE 32
1046 static int psprintf_flush(apr_vformatter_buff_t *vbuff)
1048 struct psprintf_data *ps = (struct psprintf_data *)vbuff;
1049 apr_memnode_t *node, *active;
1050 apr_size_t cur_len, size;
1053 apr_size_t free_index;
1057 strp = ps->vbuff.curpos;
1058 cur_len = strp - active->first_avail;
1059 size = cur_len << 1;
1061 /* Make sure that we don't try to use a block that has less
1062 * than APR_PSPRINTF_MIN_STRINGSIZE bytes left in it. This
1063 * also catches the case where size == 0, which would result
1064 * in reusing a block that can't even hold the NUL byte.
1066 if (size < APR_PSPRINTF_MIN_STRINGSIZE)
1067 size = APR_PSPRINTF_MIN_STRINGSIZE;
1069 node = active->next;
1070 if (!ps->got_a_new_node && size <= node_free_space(node)) {
1073 list_insert(node, active);
1075 node->free_index = 0;
1077 pool->active = node;
1079 free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
1080 BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
1082 active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
1083 node = active->next;
1084 if (free_index < node->free_index) {
1088 while (free_index < node->free_index);
1090 list_remove(active);
1091 list_insert(active, node);
1094 node = pool->active;
1097 if ((node = allocator_alloc(pool->allocator, size)) == NULL)
1100 if (ps->got_a_new_node) {
1101 active->next = ps->free;
1105 ps->got_a_new_node = 1;
1108 memcpy(node->first_avail, active->first_avail, cur_len);
1111 ps->vbuff.curpos = node->first_avail + cur_len;
1112 ps->vbuff.endpos = node->endp - 1; /* Save a byte for NUL terminator */
1117 APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
1119 struct psprintf_data ps;
1122 apr_memnode_t *active, *node;
1123 apr_size_t free_index;
1125 ps.node = active = pool->active;
1127 ps.vbuff.curpos = ps.node->first_avail;
1129 /* Save a byte for the NUL terminator */
1130 ps.vbuff.endpos = ps.node->endp - 1;
1131 ps.got_a_new_node = 0;
1134 /* Make sure that the first node passed to apr_vformatter has at least
1135 * room to hold the NUL terminator.
1137 if (ps.node->first_avail == ps.node->endp) {
1138 if (psprintf_flush(&ps.vbuff) == -1) {
1139 if (pool->abort_fn) {
1140 pool->abort_fn(APR_ENOMEM);
1147 if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
1149 pool->abort_fn(APR_ENOMEM);
1154 strp = ps.vbuff.curpos;
1157 size = strp - ps.node->first_avail;
1158 size = APR_ALIGN_DEFAULT(size);
1159 strp = ps.node->first_avail;
1160 ps.node->first_avail += size;
1163 allocator_free(pool->allocator, ps.free);
1166 * Link the node in if it's a new one
1168 if (!ps.got_a_new_node)
1171 active = pool->active;
1174 node->free_index = 0;
1176 list_insert(node, active);
1178 pool->active = node;
1180 free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
1181 BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;
1183 active->free_index = (APR_UINT32_TRUNC_CAST)free_index;
1184 node = active->next;
1186 if (free_index >= node->free_index)
1192 while (free_index < node->free_index);
1194 list_remove(active);
1195 list_insert(active, node);
1201 #else /* APR_POOL_DEBUG */
1203 * Debug helper functions
1208 * Walk the pool tree rooted at pool, depth first. When fn returns
1209 * anything other than 0, abort the traversal and return the value
1212 static int apr_pool_walk_tree(apr_pool_t *pool,
1213 int (*fn)(apr_pool_t *pool, void *data),
1219 rv = fn(pool, data);
1225 apr_thread_mutex_lock(pool->mutex);
1227 #endif /* APR_HAS_THREADS */
1229 child = pool->child;
1231 rv = apr_pool_walk_tree(child, fn, data);
1235 child = child->sibling;
1240 apr_thread_mutex_unlock(pool->mutex);
1242 #endif /* APR_HAS_THREADS */
1247 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
/* Write one line describing a pool event (action + location, and when
 * 'deref' is set also byte counts and allocation statistics) to
 * file_stderr.  NOTE(review): large runs of this function are elided
 * in this copy — the apr_file_printf format strings and several
 * argument lines are missing; restore them from the canonical
 * apr_pools.c before building.
 */
1248 static void apr_pool_log_event(apr_pool_t *pool, const char *event,
1249 const char *file_line, int deref)
1253 apr_file_printf(file_stderr,
1258 #endif /* APR_HAS_THREADS */
1261 "(%10lu/%10lu/%10lu) "
1266 (unsigned long)getpid(),
1268 (unsigned long)apr_os_thread_current(),
1269 #endif /* APR_HAS_THREADS */
1271 (unsigned long)apr_pool_num_bytes(pool, 0),
1272 (unsigned long)apr_pool_num_bytes(pool, 1),
1273 (unsigned long)apr_pool_num_bytes(global_pool, 1),
1276 pool->stat_alloc, pool->stat_total_alloc, pool->stat_clear);
1279 apr_file_printf(file_stderr,
1284 #endif /* APR_HAS_THREADS */
1291 (unsigned long)getpid(),
1293 (unsigned long)apr_os_thread_current(),
1294 #endif /* APR_HAS_THREADS */
1301 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1303 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME)
1304 static int pool_is_child_of(apr_pool_t *parent, void *data)
1306 apr_pool_t *pool = (apr_pool_t *)data;
1308 return (pool == parent);
1311 static int apr_pool_is_child_of(apr_pool_t *pool, apr_pool_t *parent)
1316 return apr_pool_walk_tree(parent, pool_is_child_of, pool);
1318 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) */
1320 static void apr_pool_check_integrity(apr_pool_t *pool)
1322 /* Rule of thumb: use of the global pool is always
1323 * ok, since the only user is apr_pools.c. Unless
1324 * people have searched for the top level parent and
1325 * started to use that...
1327 if (pool == global_pool || global_pool == NULL)
1331 * This basically checks to see if the pool being used is still
1332 * a relative to the global pool. If not it was previously
1333 * destroyed, in which case we abort().
1335 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME)
1336 if (!apr_pool_is_child_of(pool, global_pool)) {
1337 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
1338 apr_pool_log_event(pool, "LIFE",
1339 __FILE__ ":apr_pool_integrity check", 0);
1340 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1343 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) */
1345 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_OWNER)
1347 if (!apr_os_thread_equal(pool->owner, apr_os_thread_current())) {
1348 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
1349 apr_pool_log_event(pool, "THREAD",
1350 __FILE__ ":apr_pool_integrity check", 0);
1351 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1354 #endif /* APR_HAS_THREADS */
1355 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_OWNER) */
1360 * Initialization (debug)
/* Debug-mode initialization of the pools subsystem: create the global
 * pool, initialize atomics, and (in verbose builds) open the log
 * target named by $APR_POOL_DEBUG_LOG, falling back to stderr.
 * NOTE(review): several lines are elided in this copy (rv/logpath
 * declarations, error returns, the log header format string); restore
 * from the canonical apr_pools.c before building.
 */
1363 APR_DECLARE(apr_status_t) apr_pool_initialize(void)
1366 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
1368 apr_file_t *debug_log = NULL;
1371 if (apr_pools_initialized++)
1374 #if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
1375 boundary_size = sysconf(_SC_PAGESIZE);
1376 boundary_index = 12;
1377 while ( (1 << boundary_index) < boundary_size)
1379 boundary_size = (1 << boundary_index);
/* Since the debug code works a bit differently than the
 * regular pools code, we ask for a lock here.  The regular
 * pools code has got this lock embedded in the global
 * allocator, a concept unknown to debug mode.
 */
1387 if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
1388 NULL)) != APR_SUCCESS) {
1392 apr_pool_tag(global_pool, "APR global pool");
1394 apr_pools_initialized = 1;
1396 /* This has to happen here because mutexes might be backed by
1397 * atomics. It used to be snug and safe in apr_initialize().
1399 if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS) {
1403 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
1404 rv = apr_env_get(&logpath, "APR_POOL_DEBUG_LOG", global_pool);
/* Don't pass file_stderr directly to apr_file_open() here, since
 * apr_file_open() can call back to apr_pool_log_event() and that
 * may attempt to use the then non-NULL but partially set up file.
 */
1410 if (rv == APR_SUCCESS) {
1411 apr_file_open(&debug_log, logpath, APR_APPEND|APR_WRITE|APR_CREATE,
1412 APR_OS_DEFAULT, global_pool);
1415 apr_file_open_stderr(&debug_log, global_pool);
1418 /* debug_log is now a file handle. */
1419 file_stderr = debug_log;
1422 apr_file_printf(file_stderr,
1426 #endif /* APR_HAS_THREADS */
1427 "] ACTION (SIZE /POOL SIZE /TOTAL SIZE) "
1428 "POOL \"TAG\" <__FILE__:__LINE__> (ALLOCS/TOTAL ALLOCS/CLEARS)\n");
1430 apr_pool_log_event(global_pool, "GLOBAL", __FILE__ ":apr_pool_initialize", 0);
1432 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1437 APR_DECLARE(void) apr_pool_terminate(void)
1439 if (!apr_pools_initialized)
1442 if (--apr_pools_initialized)
1445 apr_pool_destroy(global_pool); /* This will also destroy the mutex */
1448 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
1450 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1455 * Memory allocation (debug)
1458 static void *pool_alloc(apr_pool_t *pool, apr_size_t size)
1463 if ((mem = malloc(size)) == NULL) {
1465 pool->abort_fn(APR_ENOMEM);
1471 if (node == NULL || node->index == 64) {
1472 if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL) {
1475 pool->abort_fn(APR_ENOMEM);
1480 memset(node, 0, SIZEOF_DEBUG_NODE_T);
1482 node->next = pool->nodes;
1487 node->beginp[node->index] = mem;
1488 node->endp[node->index] = (char *)mem + size;
1492 pool->stat_total_alloc++;
1497 APR_DECLARE(void *) apr_palloc_debug(apr_pool_t *pool, apr_size_t size,
1498 const char *file_line)
1502 apr_pool_check_integrity(pool);
1504 mem = pool_alloc(pool, size);
1506 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC)
1507 apr_pool_log_event(pool, "PALLOC", file_line, 1);
1508 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) */
1513 APR_DECLARE(void *) apr_pcalloc_debug(apr_pool_t *pool, apr_size_t size,
1514 const char *file_line)
1518 apr_pool_check_integrity(pool);
1520 mem = pool_alloc(pool, size);
1521 memset(mem, 0, size);
1523 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC)
1524 apr_pool_log_event(pool, "PCALLOC", file_line, 1);
1525 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) */
1532 * Pool creation/destruction (debug)
/* Byte scribbled over freed pool memory to make use-after-free visible. */
1535 #define POOL_POISON_BYTE 'A'
/* Core of clearing a pool in debug mode: run pre-cleanups, destroy all
 * child pools, run cleanups, reap subprocesses, drop user data, then
 * poison and free every tracked allocation. */
1537 static void pool_clear_debug(apr_pool_t *pool, const char *file_line)
1542 /* Run pre destroy cleanups */
1543 run_cleanups(&pool->pre_cleanups);
1544 pool->pre_cleanups = NULL;
1546 /* Destroy the subpools. The subpools will detach themselves from
1547 * this pool thus this loop is safe and easy.
1550 pool_destroy_debug(pool->child, file_line);
1553 run_cleanups(&pool->cleanups);
1554 pool->free_cleanups = NULL;
1555 pool->cleanups = NULL;
1557 /* If new child pools showed up, this is a reason to raise a flag */
1561 /* Free subprocesses */
1562 free_proc_chain(pool->subprocesses);
1563 pool->subprocesses = NULL;
1565 /* Clear the user data. */
1566 pool->user_data = NULL;
1568 /* Free the blocks, scribbling over them first to help highlight
1569 * use-after-free issues. */
1570 while ((node = pool->nodes) != NULL) {
1571 pool->nodes = node->next;
1573 for (index = 0; index < node->index; index++) {
1574 memset(node->beginp[index], POOL_POISON_BYTE,
1575 (char *)node->endp[index] - (char *)node->beginp[index]);
1576 free(node->beginp[index]);
/* Poison the tracking node itself before releasing it as well. */
1579 memset(node, POOL_POISON_BYTE, SIZEOF_DEBUG_NODE_T);
/* Per-clear allocation counter restarts at zero. */
1583 pool->stat_alloc = 0;
/* Public debug apr_pool_clear(): wraps pool_clear_debug() with
 * integrity checking, optional event logging, and careful handling of
 * the pool's mutex, which is destroyed by the registered cleanups and
 * must be recreated afterwards. */
1587 APR_DECLARE(void) apr_pool_clear_debug(apr_pool_t *pool,
1588 const char *file_line)
1591 apr_thread_mutex_t *mutex = NULL;
1594 apr_pool_check_integrity(pool);
1596 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
1597 apr_pool_log_event(pool, "CLEAR", file_line, 1);
1598 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */
1601 if (pool->parent != NULL)
1602 mutex = pool->parent->mutex;
1604 /* Lock the parent mutex before clearing so that if we have our
1605 * own mutex it won't be accessed by apr_pool_walk_tree after
1606 * it has been destroyed.
/* Only lock when the parent's mutex is distinct from our own,
 * otherwise we would deadlock / double-lock. */
1608 if (mutex != NULL && mutex != pool->mutex) {
1609 apr_thread_mutex_lock(mutex);
1613 pool_clear_debug(pool, file_line);
1616 /* If we had our own mutex, it will have been destroyed by
1617 * the registered cleanups. Recreate the mutex. Unlock
1618 * the mutex we obtained above.
1620 if (mutex != pool->mutex) {
1621 (void)apr_thread_mutex_create(&pool->mutex,
1622 APR_THREAD_MUTEX_NESTED, pool);
1625 (void)apr_thread_mutex_unlock(mutex);
1627 #endif /* APR_HAS_THREADS */
/* Destroy a pool in debug mode: clear it, unlink it from the parent's
 * child list (under the parent's mutex), destroy an owned allocator,
 * and free the pool structure itself. */
1630 static void pool_destroy_debug(apr_pool_t *pool, const char *file_line)
1632 apr_pool_check_integrity(pool);
1634 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
1635 apr_pool_log_event(pool, "DESTROY", file_line, 1);
1636 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */
1638 pool_clear_debug(pool, file_line);
1640 /* Remove the pool from the parents child list */
1643 apr_thread_mutex_t *mutex;
1645 if ((mutex = pool->parent->mutex) != NULL)
1646 apr_thread_mutex_lock(mutex);
1647 #endif /* APR_HAS_THREADS */
/* Splice this pool out of the doubly-anchored sibling list. */
1649 if ((*pool->ref = pool->sibling) != NULL)
1650 pool->sibling->ref = pool->ref;
1654 apr_thread_mutex_unlock(mutex);
1655 #endif /* APR_HAS_THREADS */
/* Only destroy the allocator if this pool owns it. */
1658 if (pool->allocator != NULL
1659 && apr_allocator_owner_get(pool->allocator) == pool) {
1660 apr_allocator_destroy(pool->allocator);
1663 /* Free the pool itself */
/* Public debug apr_pool_destroy(): refuses (logs/aborts) when the pool
 * has been joined to another pool's lifetime, then delegates to
 * pool_destroy_debug(). */
1667 APR_DECLARE(void) apr_pool_destroy_debug(apr_pool_t *pool,
1668 const char *file_line)
1671 /* Joined pools must not be explicitly destroyed; the caller
1672 * has broken the guarantee. */
1673 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
1674 apr_pool_log_event(pool, "LIFE",
1675 __FILE__ ":apr_pool_destroy abort on joined", 0);
1676 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
1680 pool_destroy_debug(pool, file_line);
/* Debug variant of apr_pool_create_ex(): the pool struct comes from a
 * plain malloc(); a NULL parent defaults to global_pool; allocator and
 * abort function are inherited from the parent when not supplied. */
1683 APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool,
1685 apr_abortfunc_t abort_fn,
1686 apr_allocator_t *allocator,
1687 const char *file_line)
/* NULL parent means "child of the global pool". */
1694 parent = global_pool;
1697 apr_pool_check_integrity(parent);
1700 allocator = parent->allocator;
1703 if (!abort_fn && parent)
1704 abort_fn = parent->abort_fn;
1706 if ((pool = malloc(SIZEOF_POOL_T)) == NULL) {
1708 abort_fn(APR_ENOMEM);
1713 memset(pool, 0, SIZEOF_POOL_T);
1715 pool->allocator = allocator;
1716 pool->abort_fn = abort_fn;
1717 pool->tag = file_line;
1718 pool->file_line = file_line;
/* Head-insert the new pool into the parent's child list, under the
 * parent's mutex where threads are available. */
1720 if ((pool->parent = parent) != NULL) {
1723 apr_thread_mutex_lock(parent->mutex);
1724 #endif /* APR_HAS_THREADS */
1725 if ((pool->sibling = parent->child) != NULL)
1726 pool->sibling->ref = &pool->sibling;
1728 parent->child = pool;
1729 pool->ref = &parent->child;
1733 apr_thread_mutex_unlock(parent->mutex);
1734 #endif /* APR_HAS_THREADS */
1737 pool->sibling = NULL;
/* Record owning thread (and NetWare NLM handle) for integrity checks. */
1742 pool->owner = apr_os_thread_current();
1743 #endif /* APR_HAS_THREADS */
1745 pool->owner_proc = (apr_os_proc_t)getnlmhandle();
1746 #endif /* defined(NETWARE) */
/* A pool that does not share its parent's allocator gets its own mutex;
 * otherwise it shares the parent's (see the else branch below). */
1749 if (parent == NULL || parent->allocator != allocator) {
1753 /* No matter what the creation flags say, always create
1754 * a lock. Without it integrity_check and apr_pool_num_bytes
1755 * blow up (because they traverse pools child lists that
1756 * possibly belong to another thread, in combination with
1757 * the pool having no lock). However, this might actually
1758 * hide problems like creating a child pool of a pool
1759 * belonging to another thread.
1761 if ((rv = apr_thread_mutex_create(&pool->mutex,
1762 APR_THREAD_MUTEX_NESTED, pool)) != APR_SUCCESS) {
1766 #endif /* APR_HAS_THREADS */
1771 pool->mutex = parent->mutex;
1772 #endif /* APR_HAS_THREADS */
1777 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
1778 apr_pool_log_event(pool, "CREATE", file_line, 1)
1779 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */
/* Deprecated alias kept for API compatibility: forwards to the
 * "unmanaged" debug constructor. */
1784 APR_DECLARE(apr_status_t) apr_pool_create_core_ex_debug(apr_pool_t **newpool,
1785 apr_abortfunc_t abort_fn,
1786 apr_allocator_t *allocator,
1787 const char *file_line)
1789 return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn, allocator,
/* Debug variant of apr_pool_create_unmanaged_ex(): creates a root pool
 * with no parent.  If no allocator is supplied, a new one is created
 * and owned by the pool. */
1793 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex_debug(apr_pool_t **newpool,
1794 apr_abortfunc_t abort_fn,
1795 apr_allocator_t *allocator,
1796 const char *file_line)
1799 apr_allocator_t *pool_allocator;
1803 if ((pool = malloc(SIZEOF_POOL_T)) == NULL) {
1805 abort_fn(APR_ENOMEM);
1810 memset(pool, 0, SIZEOF_POOL_T);
1812 pool->abort_fn = abort_fn;
1813 pool->tag = file_line;
1814 pool->file_line = file_line;
/* Record owning thread (and NetWare NLM handle) for integrity checks. */
1817 pool->owner = apr_os_thread_current();
1818 #endif /* APR_HAS_THREADS */
1820 pool->owner_proc = (apr_os_proc_t)getnlmhandle();
1821 #endif /* defined(NETWARE) */
/* Create a private allocator when the caller didn't pass one; the
 * pool becomes its owner and destroys it on pool destruction. */
1823 if ((pool_allocator = allocator) == NULL) {
1825 if ((rv = apr_allocator_create(&pool_allocator)) != APR_SUCCESS) {
1830 pool_allocator->owner = pool;
1832 pool->allocator = pool_allocator;
/* Only a pool with its own (private) allocator needs its own mutex. */
1834 if (pool->allocator != allocator) {
1838 /* No matter what the creation flags say, always create
1839 * a lock. Without it integrity_check and apr_pool_num_bytes
1840 * blow up (because they traverse pools child lists that
1841 * possibly belong to another thread, in combination with
1842 * the pool having no lock). However, this might actually
1843 * hide problems like creating a child pool of a pool
1844 * belonging to another thread.
1846 if ((rv = apr_thread_mutex_create(&pool->mutex,
1847 APR_THREAD_MUTEX_NESTED, pool)) != APR_SUCCESS) {
1851 #endif /* APR_HAS_THREADS */
1856 #if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
1857 apr_pool_log_event(pool, "CREATE", file_line, 1);
1858 #endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */
1864 * "Print" functions (debug)
/* State for apr_pvsprintf(): the formatter buffer plus the malloc'd
 * backing store it grows into. */
1867 struct psprintf_data {
1868 apr_vformatter_buff_t vbuff;
/* Grow callback for apr_vformatter(): doubles the buffer (presumably —
 * the size update is elided here; TODO confirm against full source)
 * and repositions curpos/endpos into the reallocated block. */
1873 static int psprintf_flush(apr_vformatter_buff_t *vbuff)
1875 struct psprintf_data *ps = (struct psprintf_data *)vbuff;
1878 size = ps->vbuff.curpos - ps->mem;
/* NOTE(review): ps->mem = realloc(ps->mem, ...) loses the original
 * pointer on failure; the old buffer is leaked on the OOM path. */
1881 if ((ps->mem = realloc(ps->mem, ps->size)) == NULL)
1884 ps->vbuff.curpos = ps->mem + size;
/* Keep one byte in reserve for the NUL terminator. */
1885 ps->vbuff.endpos = ps->mem + ps->size - 1;
/* Debug apr_pvsprintf(): formats into a malloc'd buffer (grown by
 * psprintf_flush), NUL-terminates, then records the buffer in the
 * pool's debug node list just like pool_alloc() would. */
1890 APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
1892 struct psprintf_data ps;
1895 apr_pool_check_integrity(pool);
1898 ps.mem = malloc(ps.size);
1899 ps.vbuff.curpos = ps.mem;
1901 /* Save a byte for the NUL terminator */
1902 ps.vbuff.endpos = ps.mem + ps.size - 1;
1904 if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
1906 pool->abort_fn(APR_ENOMEM);
1911 *ps.vbuff.curpos++ = '\0';
/* Track the formatted buffer in the pool's debug nodes (64 slots
 * per node, same scheme as pool_alloc). */
1917 if (node == NULL || node->index == 64) {
1918 if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL) {
1920 pool->abort_fn(APR_ENOMEM);
1925 node->next = pool->nodes;
1930 node->beginp[node->index] = ps.mem;
1931 node->endp[node->index] = ps.mem + ps.size;
/* Tie sub's lifetime to p's for lifetime-checking purposes; sub must
 * be a direct child of p (the enforcement code is elided here). */
1942 APR_DECLARE(void) apr_pool_join(apr_pool_t *p, apr_pool_t *sub)
1945 if (sub->parent != p) {
/* Walk-tree callback for apr_pool_find(): scans the pool's tracked
 * allocations for the address *pmem; on a hit the pool is reported
 * back through the same data pointer. */
1952 static int pool_find(apr_pool_t *pool, void *data)
1954 void **pmem = (void **)data;
1961 for (index = 0; index < node->index; index++) {
/* Half-open range test: begin <= addr < end. */
1962 if (node->beginp[index] <= *pmem
1963 && node->endp[index] > *pmem) {
/* Find which pool (if any) an address was allocated from by walking
 * the whole pool tree from global_pool with pool_find(). */
1975 APR_DECLARE(apr_pool_t *) apr_pool_find(const void *mem)
1977 void *pool = (void *)mem;
1979 if (apr_pool_walk_tree(global_pool, pool_find, &pool))
/* Walk-tree callback for apr_pool_num_bytes(): accumulates the byte
 * size of every tracked allocation in this pool into *psize. */
1985 static int pool_num_bytes(apr_pool_t *pool, void *data)
1987 apr_size_t *psize = (apr_size_t *)data;
1994 for (index = 0; index < node->index; index++) {
1995 *psize += (char *)node->endp[index] - (char *)node->beginp[index];
/* Total bytes allocated from a pool; with recurse != 0 the whole
 * subtree is walked, otherwise only this pool is counted. */
2004 APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *pool, int recurse)
2006 apr_size_t size = 0;
2009 pool_num_bytes(pool, &size);
2014 apr_pool_walk_tree(pool, pool_num_bytes, &size);
/* Intentionally empty in this build: pool locking is a no-op here. */
2019 APR_DECLARE(void) apr_pool_lock(apr_pool_t *pool, int flag)
2023 #endif /* !APR_POOL_DEBUG */
/* NetWare only: destroy every child of global_pool that belongs to the
 * current NLM, restarting the scan after each destroy (destruction
 * unlinks the pool from the child list). */
2026 void netware_pool_proc_cleanup ()
2028 apr_pool_t *pool = global_pool->child;
2029 apr_os_proc_t owner_proc = (apr_os_proc_t)getnlmhandle();
2032 if (pool->owner_proc == owner_proc) {
2033 apr_pool_destroy (pool);
/* Restart from the head: the list changed under us. */
2034 pool = global_pool->child;
2037 pool = pool->sibling;
2042 #endif /* defined(NETWARE) */
2046 * "Print" functions (common)
/* Varargs front-end for apr_pvsprintf(): pool-allocated sprintf. */
2049 APR_DECLARE_NONSTD(char *) apr_psprintf(apr_pool_t *p, const char *fmt, ...)
2055 res = apr_pvsprintf(p, fmt, ap);
/* Set the function invoked when an allocation from this pool fails. */
2064 APR_DECLARE(void) apr_pool_abort_set(apr_abortfunc_t abort_fn,
2067 pool->abort_fn = abort_fn;
/* Return the pool's current abort function (may be NULL). */
2070 APR_DECLARE(apr_abortfunc_t) apr_pool_abort_get(apr_pool_t *pool)
2072 return pool->abort_fn;
/* Return the pool's parent.  NetWare special-cases the global pool so
 * applications see their own pool as the topmost one. */
2075 APR_DECLARE(apr_pool_t *) apr_pool_parent_get(apr_pool_t *pool)
2078 /* On NetWare, don't return the global_pool, return the application pool
2079 as the top most pool */
2080 if (pool->parent == global_pool)
2084 return pool->parent;
/* Return the allocator this pool draws memory from. */
2087 APR_DECLARE(apr_allocator_t *) apr_pool_allocator_get(apr_pool_t *pool)
2089 return pool->allocator;
2092 /* return TRUE if a is an ancestor of b
2093 * NULL is considered an ancestor of all pools
2095 APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b)
2101 /* Find the pool with the longest lifetime guaranteed by the
/* Attach a human-readable tag to the pool for debugging/logging. */
2118 APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag)
2125 * User data management
/* Store (key -> data) in the pool's user-data hash, duplicating the
 * key into the pool on first insertion so the caller's key need not
 * outlive the call.  An optional cleanup is registered for data. */
2128 APR_DECLARE(apr_status_t) apr_pool_userdata_set(const void *data, const char *key,
2129 apr_status_t (*cleanup) (void *),
2133 apr_pool_check_integrity(pool);
2134 #endif /* APR_POOL_DEBUG */
/* Hash is created lazily on first use. */
2136 if (pool->user_data == NULL)
2137 pool->user_data = apr_hash_make(pool);
/* New key: copy it into the pool; existing key: reuse the stored copy. */
2139 if (apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING) == NULL) {
2140 char *new_key = apr_pstrdup(pool, key);
2141 apr_hash_set(pool->user_data, new_key, APR_HASH_KEY_STRING, data);
2144 apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING, data);
/* Same function for plain and child cleanup. */
2148 apr_pool_cleanup_register(pool, data, cleanup, cleanup);
/* Like apr_pool_userdata_set() but stores the caller's key pointer
 * directly (no copy); the key must outlive the pool entry. */
2153 APR_DECLARE(apr_status_t) apr_pool_userdata_setn(const void *data,
2155 apr_status_t (*cleanup)(void *),
2159 apr_pool_check_integrity(pool);
2160 #endif /* APR_POOL_DEBUG */
2162 if (pool->user_data == NULL)
2163 pool->user_data = apr_hash_make(pool);
2165 apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING, data);
2168 apr_pool_cleanup_register(pool, data, cleanup, cleanup);
/* Look up user data by key; *data is the stored pointer, or NULL when
 * the key is absent or no user data was ever set. */
2173 APR_DECLARE(apr_status_t) apr_pool_userdata_get(void **data, const char *key,
2177 apr_pool_check_integrity(pool);
2178 #endif /* APR_POOL_DEBUG */
2180 if (pool->user_data == NULL) {
2184 *data = apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING);
/* Singly-linked cleanup record: plain cleanup runs on pool
 * clear/destroy, child cleanup runs in a child process before exec. */
2196 struct cleanup_t *next;
2198 apr_status_t (*plain_cleanup_fn)(void *data);
2199 apr_status_t (*child_cleanup_fn)(void *data);
/* Register a cleanup pair for data on pool p.  Cleanup records are
 * recycled from p->free_cleanups when available, otherwise allocated
 * from the pool; new records are pushed on the cleanups list head. */
2202 APR_DECLARE(void) apr_pool_cleanup_register(apr_pool_t *p, const void *data,
2203 apr_status_t (*plain_cleanup_fn)(void *data),
2204 apr_status_t (*child_cleanup_fn)(void *data))
2209 apr_pool_check_integrity(p);
2210 #endif /* APR_POOL_DEBUG */
2213 if (p->free_cleanups) {
2214 /* reuse a cleanup structure */
2215 c = p->free_cleanups;
2216 p->free_cleanups = c->next;
2218 c = apr_palloc(p, sizeof(cleanup_t));
2221 c->plain_cleanup_fn = plain_cleanup_fn;
2222 c->child_cleanup_fn = child_cleanup_fn;
/* LIFO: cleanups run in reverse order of registration. */
2223 c->next = p->cleanups;
/* Register a pre-cleanup: runs before child pools are destroyed when
 * the pool is cleared/destroyed.  Same recycling scheme as
 * apr_pool_cleanup_register(). */
2228 APR_DECLARE(void) apr_pool_pre_cleanup_register(apr_pool_t *p, const void *data,
2229 apr_status_t (*plain_cleanup_fn)(void *data))
2234 apr_pool_check_integrity(p);
2235 #endif /* APR_POOL_DEBUG */
2238 if (p->free_cleanups) {
2239 /* reuse a cleanup structure */
2240 c = p->free_cleanups;
2241 p->free_cleanups = c->next;
2243 c = apr_palloc(p, sizeof(cleanup_t));
2246 c->plain_cleanup_fn = plain_cleanup_fn;
/* LIFO push onto the pre-cleanup list. */
2247 c->next = p->pre_cleanups;
2248 p->pre_cleanups = c;
/* Remove the (data, cleanup_fn) registration from both the cleanup and
 * pre-cleanup lists, moving the record to the freelist for reuse.
 * Includes cheap cycle detection to catch a corrupted list. */
2252 APR_DECLARE(void) apr_pool_cleanup_kill(apr_pool_t *p, const void *data,
2253 apr_status_t (*cleanup_fn)(void *))
2255 cleanup_t *c, **lastp;
2258 apr_pool_check_integrity(p);
2259 #endif /* APR_POOL_DEBUG */
2265 lastp = &p->cleanups;
2268 /* Some cheap loop detection to catch a corrupt list: */
2270 || (c->next && c == c->next->next)
2271 || (c->next && c->next->next && c == c->next->next->next)) {
2276 if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
2278 /* move to freelist */
2279 c->next = p->free_cleanups;
2280 p->free_cleanups = c;
2288 /* Remove any pre-cleanup as well */
2289 c = p->pre_cleanups;
2290 lastp = &p->pre_cleanups;
2293 /* Some cheap loop detection to catch a corrupt list: */
2295 || (c->next && c == c->next->next)
2296 || (c->next && c->next->next && c == c->next->next->next)) {
2301 if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
2303 /* move to freelist */
2304 c->next = p->free_cleanups;
2305 p->free_cleanups = c;
/* Replace the child-cleanup function of an existing (data,
 * plain_cleanup_fn) registration; matching entries are updated in
 * place, nothing is added or removed. */
2315 APR_DECLARE(void) apr_pool_child_cleanup_set(apr_pool_t *p, const void *data,
2316 apr_status_t (*plain_cleanup_fn)(void *),
2317 apr_status_t (*child_cleanup_fn)(void *))
2322 apr_pool_check_integrity(p);
2323 #endif /* APR_POOL_DEBUG */
2330 if (c->data == data && c->plain_cleanup_fn == plain_cleanup_fn) {
2331 c->child_cleanup_fn = child_cleanup_fn;
/* Unregister the cleanup and run it immediately, returning its status.
 * Guarantees the cleanup fires exactly once. */
2339 APR_DECLARE(apr_status_t) apr_pool_cleanup_run(apr_pool_t *p, void *data,
2340 apr_status_t (*cleanup_fn)(void *))
2342 apr_pool_cleanup_kill(p, data, cleanup_fn);
2343 return (*cleanup_fn)(data);
/* Run every plain cleanup on the list (LIFO order as stored). */
2346 static void run_cleanups(cleanup_t **cref)
2348 cleanup_t *c = *cref;
2352 (*c->plain_cleanup_fn)((void *)c->data);
2357 #if !defined(WIN32) && !defined(OS2)
/* Run every child cleanup on the list (used in the child between
 * fork and exec). */
2359 static void run_child_cleanups(cleanup_t **cref)
2361 cleanup_t *c = *cref;
2365 (*c->child_cleanup_fn)((void *)c->data);
/* Recursively run child cleanups for a pool and all its descendants. */
2370 static void cleanup_pool_for_exec(apr_pool_t *p)
2372 run_child_cleanups(&p->cleanups);
2374 for (p = p->child; p; p = p->sibling)
2375 cleanup_pool_for_exec(p);
/* Unix path: run child cleanups for the entire pool tree before exec. */
2378 APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
2380 cleanup_pool_for_exec(global_pool);
2383 #else /* !defined(WIN32) && !defined(OS2) */
/* Windows/OS2 path: spawn-based process creation needs no pre-exec
 * cleanup, so this is deliberately a no-op. */
2385 APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
2388 * Don't need to do anything on NT or OS/2, because
2389 * these platforms will spawn the new process - not
2390 * fork for exec. All handles that are not inheritable,
2391 * will be automatically closed. The only problem is
2392 * with file handles that are open, but there isn't
2393 * much that can be done about that (except if the
2394 * child decides to go out and close them, or the
2395 * developer quits opening them shared)
/* Placeholder cleanup for registrations that need only one of the
 * plain/child slots filled with a real function. */
2402 APR_DECLARE_NONSTD(apr_status_t) apr_pool_cleanup_null(void *data)
2404 /* do nothing cleanup routine */
2408 /* Subprocesses don't use the generic cleanup interface because
2409 * we don't want multiple subprocesses to result in multiple
2410 * three-second pauses; the subprocesses have to be "freed" all
2411 * at once. If other resources are introduced with the same property,
2412 * we might want to fold support for that into the generic interface.
2413 * For now, it's a special case.
/* Attach a subprocess to the pool so free_proc_chain() reaps/kills it
 * (per `how`) when the pool is cleared or destroyed. */
2415 APR_DECLARE(void) apr_pool_note_subprocess(apr_pool_t *pool, apr_proc_t *proc,
2416 apr_kill_conditions_e how)
2418 struct process_chain *pc = apr_palloc(pool, sizeof(struct process_chain));
/* Head-insert into the pool's subprocess chain. */
2422 pc->next = pool->subprocesses;
2423 pool->subprocesses = pc;
/* Dispose of all subprocesses noted on a pool: reap already-dead ones,
 * send SIGTERM where a graceful shutdown is requested, wait with an
 * exponentially growing sleep, then SIGKILL whatever survived and
 * finally wait() so no zombies are left behind. */
2426 static void free_proc_chain(struct process_chain *procs)
2428 /* Dispose of the subprocesses we've spawned off in the course of
2429 * whatever it was we're cleaning up now. This may involve killing
2430 * some of them off...
2432 struct process_chain *pc;
2433 int need_timeout = 0;
2434 apr_time_t timeout_interval;
2437 return; /* No work. Whew! */
2439 /* First, check to see if we need to do the SIGTERM, sleep, SIGKILL
2440 * dance with any of the processes we're cleaning up. If we've got
2441 * any kill-on-sight subprocesses, ditch them now as well, so they
2442 * don't waste any more cycles doing whatever it is that they shouldn't
2446 #ifndef NEED_WAITPID
2447 /* Pick up all defunct processes */
2448 for (pc = procs; pc; pc = pc->next) {
/* Already exited: nothing further to do for this one. */
2449 if (apr_proc_wait(pc->proc, NULL, NULL, APR_NOWAIT) != APR_CHILD_NOTDONE)
2450 pc->kill_how = APR_KILL_NEVER;
2452 #endif /* !defined(NEED_WAITPID) */
2454 for (pc = procs; pc; pc = pc->next) {
2456 if ((pc->kill_how == APR_KILL_AFTER_TIMEOUT)
2457 || (pc->kill_how == APR_KILL_ONLY_ONCE)) {
2459 * Subprocess may be dead already. Only need the timeout if not.
2460 * Note: apr_proc_kill on Windows is TerminateProcess(), which is
2461 * similar to a SIGKILL, so always give the process a timeout
2462 * under Windows before killing it.
2464 if (apr_proc_kill(pc->proc, SIGTERM) == APR_SUCCESS)
2467 else if (pc->kill_how == APR_KILL_ALWAYS) {
2468 #else /* WIN32 knows only one fast, clean method of killing processes today */
2469 if (pc->kill_how != APR_KILL_NEVER) {
2471 pc->kill_how = APR_KILL_ALWAYS;
2473 apr_proc_kill(pc->proc, SIGKILL);
2477 /* Sleep only if we have to. The sleep algorithm grows
2478 * by a factor of two on each iteration. TIMEOUT_INTERVAL
2479 * is equal to TIMEOUT_USECS / 64.
2482 timeout_interval = TIMEOUT_INTERVAL;
2483 apr_sleep(timeout_interval);
2486 /* check the status of the subprocesses */
2488 for (pc = procs; pc; pc = pc->next) {
2489 if (pc->kill_how == APR_KILL_AFTER_TIMEOUT) {
2490 if (apr_proc_wait(pc->proc, NULL, NULL, APR_NOWAIT)
2491 == APR_CHILD_NOTDONE)
2492 need_timeout = 1; /* subprocess is still active */
2494 pc->kill_how = APR_KILL_NEVER; /* subprocess has exited */
/* Give up once the accumulated wait reaches the total timeout. */
2498 if (timeout_interval >= TIMEOUT_USECS) {
2501 apr_sleep(timeout_interval);
/* Exponential backoff: double the sleep each iteration. */
2502 timeout_interval *= 2;
2504 } while (need_timeout);
2507 /* OK, the scripts we just timed out for have had a chance to clean up
2508 * --- now, just get rid of them, and also clean up the system accounting
2511 for (pc = procs; pc; pc = pc->next) {
2512 if (pc->kill_how == APR_KILL_AFTER_TIMEOUT)
2513 apr_proc_kill(pc->proc, SIGKILL);
2516 /* Now wait for all the signaled processes to die */
2517 for (pc = procs; pc; pc = pc->next) {
2518 if (pc->kill_how != APR_KILL_NEVER)
2519 (void)apr_proc_wait(pc->proc, NULL, NULL, APR_WAIT);
2525 * Pool creation/destruction stubs, for people who are running
2526 * mixed release/debug environments.
/* Release-build stubs for the *_debug entry points: each simply
 * forwards to its non-debug counterpart, discarding file_line, so
 * code compiled with pool debugging links against a release APR. */
2530 APR_DECLARE(void *) apr_palloc_debug(apr_pool_t *pool, apr_size_t size,
2531 const char *file_line)
2533 return apr_palloc(pool, size);
2536 APR_DECLARE(void *) apr_pcalloc_debug(apr_pool_t *pool, apr_size_t size,
2537 const char *file_line)
2539 return apr_pcalloc(pool, size);
2542 APR_DECLARE(void) apr_pool_clear_debug(apr_pool_t *pool,
2543 const char *file_line)
2545 apr_pool_clear(pool);
2548 APR_DECLARE(void) apr_pool_destroy_debug(apr_pool_t *pool,
2549 const char *file_line)
2551 apr_pool_destroy(pool);
2554 APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool,
2556 apr_abortfunc_t abort_fn,
2557 apr_allocator_t *allocator,
2558 const char *file_line)
2560 return apr_pool_create_ex(newpool, parent, abort_fn, allocator);
2563 APR_DECLARE(apr_status_t) apr_pool_create_core_ex_debug(apr_pool_t **newpool,
2564 apr_abortfunc_t abort_fn,
2565 apr_allocator_t *allocator,
2566 const char *file_line)
2568 return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
2571 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex_debug(apr_pool_t **newpool,
2572 apr_abortfunc_t abort_fn,
2573 apr_allocator_t *allocator,
2574 const char *file_line)
2576 return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
2579 #else /* APR_POOL_DEBUG */
/* Debug-build forwarders: with APR_POOL_DEBUG the public names are
 * macros in apr_pools.h; these #undef'd real functions forward to the
 * *_debug versions with an "undefined" location so code compiled
 * without debugging links against a debug APR. */
2582 APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size);
2584 APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
2586 return apr_palloc_debug(pool, size, "undefined");
2590 APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size);
2592 APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
2594 return apr_pcalloc_debug(pool, size, "undefined");
2597 #undef apr_pool_clear
2598 APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool);
2600 APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
2602 apr_pool_clear_debug(pool, "undefined");
2605 #undef apr_pool_destroy
2606 APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool);
2608 APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
2610 apr_pool_destroy_debug(pool, "undefined");
2613 #undef apr_pool_create_ex
2614 APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
2616 apr_abortfunc_t abort_fn,
2617 apr_allocator_t *allocator);
2619 APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
2621 apr_abortfunc_t abort_fn,
2622 apr_allocator_t *allocator)
2624 return apr_pool_create_ex_debug(newpool, parent,
2625 abort_fn, allocator,
2629 #undef apr_pool_create_core_ex
2630 APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
2631 apr_abortfunc_t abort_fn,
2632 apr_allocator_t *allocator);
2634 APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
2635 apr_abortfunc_t abort_fn,
2636 apr_allocator_t *allocator)
2638 return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn,
2639 allocator, "undefined");
2642 #undef apr_pool_create_unmanaged_ex
2643 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
2644 apr_abortfunc_t abort_fn,
2645 apr_allocator_t *allocator);
2647 APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
2648 apr_abortfunc_t abort_fn,
2649 apr_allocator_t *allocator)
2651 return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn,
2652 allocator, "undefined");