/* Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "apr_private.h"

#include "apr_atomic.h"
#include "apr_portable.h" /* for get_os_proc */
#include "apr_strings.h"
#include "apr_general.h"
#include "apr_pools.h"
#include "apr_allocator.h"
#include "apr_thread_mutex.h"
#include "apr_support.h"
#define APR_WANT_MEMFUNC
#include "apr_want.h"

#include <stdlib.h>     /* for malloc, free and abort */
#include <unistd.h>     /* for getpid and sysconf */

#if APR_ALLOCATOR_GUARD_PAGES && !APR_ALLOCATOR_USES_MMAP
#define APR_ALLOCATOR_USES_MMAP   1
#endif

#if APR_ALLOCATOR_USES_MMAP
#include <sys/mman.h>
#endif

#if HAVE_VALGRIND
#define REDZONE APR_ALIGN_DEFAULT(8)
int apr_running_on_valgrind = 0;
#define APR_IF_VALGRIND(x)                                      \
    do { if (apr_running_on_valgrind) { x; } } while (0)
#else
#define APR_IF_VALGRIND(x)
#endif /* HAVE_VALGRIND */

#define APR_VALGRIND_NOACCESS(addr_, size_)                     \
    APR_IF_VALGRIND(VALGRIND_MAKE_MEM_NOACCESS(addr_, size_))
#define APR_VALGRIND_UNDEFINED(addr_, size_)                    \
    APR_IF_VALGRIND(VALGRIND_MAKE_MEM_UNDEFINED(addr_, size_))

#if APR_POOL_CONCURRENCY_CHECK && !APR_HAS_THREADS
#error pool-concurrency-check does not make sense without threads
#endif

/*
 * XXX: This is not optimal when using --enable-allocator-uses-mmap on
 * XXX: machines with large pagesize, but currently the sink is assumed
 * XXX: to be index 0, so MIN_ALLOC must be at least two pages.
 */
#define MIN_ALLOC (2 * BOUNDARY_SIZE)
#define MAX_INDEX   20

#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
static unsigned int boundary_index;
static unsigned int boundary_size;
#define BOUNDARY_INDEX boundary_index
#define BOUNDARY_SIZE boundary_size
#else
#define BOUNDARY_INDEX 12
#define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)
#endif

#if APR_ALLOCATOR_GUARD_PAGES
#if defined(_SC_PAGESIZE)
#define GUARDPAGE_SIZE boundary_size
#else
#error Cannot determine page size
#endif /* _SC_PAGESIZE */
#else
#define GUARDPAGE_SIZE 0
#endif /* APR_ALLOCATOR_GUARD_PAGES */

/*
 * Timing constants for killing subprocesses.
 * There is a total 3-second delay between sending a SIGTERM
 * and sending of the final SIGKILL.
 * TIMEOUT_INTERVAL should be set to TIMEOUT_USECS / 64
 * for the exponential timeout algorithm.
 */
#define TIMEOUT_USECS    3000000
#define TIMEOUT_INTERVAL   46875
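
/*
 * Worked example of the schedule used by free_proc_chain() below: it
 * sleeps TIMEOUT_INTERVAL once, then re-sleeps and doubles the interval
 * until the interval reaches TIMEOUT_USECS.  The total time slept is
 *
 *     46875 * (1 + 1 + 2 + 4 + 8 + 16 + 32) = 46875 * 64 = 3000000 usecs,
 *
 * i.e. exactly the advertised 3 seconds.
 */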

/** The allocator structure.
 *
 * @note The max_free_index and current_free_index fields are not really
 * indices, but counts of BOUNDARY_SIZE-sized memory blocks.
 */
struct apr_allocator_t {
    /** largest used index into free[], always < MAX_INDEX */
    apr_size_t        max_index;
    /** Total size (in BOUNDARY_SIZE multiples) of unused memory before
     * blocks are given back. @see apr_allocator_max_free_set().
     * @note Initialized to APR_ALLOCATOR_MAX_FREE_UNLIMITED,
     * which means to never give back blocks.
     */
    apr_size_t        max_free_index;
    /**
     * Memory size (in BOUNDARY_SIZE multiples) that currently must be freed
     * before blocks are given back. Range: 0..max_free_index
     */
    apr_size_t        current_free_index;
#if APR_HAS_THREADS
    apr_thread_mutex_t *mutex;
#endif /* APR_HAS_THREADS */
    apr_pool_t         *owner;
    /**
     * Lists of free nodes. Slot 0 is used for oversized nodes,
     * and the slots 1..MAX_INDEX-1 contain nodes of sizes
     * (i+1) * BOUNDARY_SIZE. Example for BOUNDARY_INDEX == 12:
     * slot  0: nodes larger than 81920
     * slot  1: size  8192
     * ...
     * slot 19: size 81920
     */
    apr_memnode_t      *free[MAX_INDEX];
};

#define SIZEOF_ALLOCATOR_T  APR_ALIGN_DEFAULT(sizeof(apr_allocator_t))
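
/*
 * Illustrative sketch (not part of APR): how a node size maps onto a
 * free[] slot with the defaults above (BOUNDARY_INDEX 12, so
 * BOUNDARY_SIZE 4096 and MAX_INDEX 20).  A 20480-byte node gets index
 * (20480 >> 12) - 1 == 4 and lives in free[4]; anything bigger than
 * 20 * 4096 == 81920 bytes goes to the oversized sink, free[0].
 */
#if 0 /* example only, never compiled */
static apr_size_t example_slot_for(apr_size_t node_size)
{
    apr_size_t index = (node_size >> BOUNDARY_INDEX) - 1; /* as in allocator_alloc() */
    return (index < MAX_INDEX) ? index : 0;               /* 0 is the sink */
}
#endif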

/*
 * Allocator
 */

APR_DECLARE(apr_status_t) apr_allocator_create(apr_allocator_t **allocator)
{
    apr_allocator_t *new_allocator;

    if ((new_allocator = malloc(SIZEOF_ALLOCATOR_T)) == NULL)
        return APR_ENOMEM;

    memset(new_allocator, 0, SIZEOF_ALLOCATOR_T);
    new_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;

    *allocator = new_allocator;

    return APR_SUCCESS;
}

APR_DECLARE(void) apr_allocator_destroy(apr_allocator_t *allocator)
{
    apr_size_t index;
    apr_memnode_t *node, **ref;

    for (index = 0; index < MAX_INDEX; index++) {
        ref = &allocator->free[index];
        while ((node = *ref) != NULL) {
            *ref = node->next;
#if APR_ALLOCATOR_USES_MMAP
            munmap((char *)node - GUARDPAGE_SIZE,
                   2 * GUARDPAGE_SIZE + ((node->index+1) << BOUNDARY_INDEX));
#else
            free(node);
#endif
        }
    }

    free(allocator);
}

#if APR_HAS_THREADS
APR_DECLARE(void) apr_allocator_mutex_set(apr_allocator_t *allocator,
                                          apr_thread_mutex_t *mutex)
{
    allocator->mutex = mutex;
}

APR_DECLARE(apr_thread_mutex_t *) apr_allocator_mutex_get(
        apr_allocator_t *allocator)
{
    return allocator->mutex;
}
#endif /* APR_HAS_THREADS */
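
/*
 * Usage sketch (assumptions: a threaded build; error handling elided):
 * create an allocator, guard it with a mutex allocated from a pool, and
 * make that pool the allocator's owner, mirroring what
 * apr_pool_initialize() does for the global pool further down.
 */
#if 0 /* example only, never compiled */
apr_allocator_t *a;
apr_thread_mutex_t *m;
apr_pool_t *p;

if (apr_allocator_create(&a) == APR_SUCCESS
    && apr_pool_create_ex(&p, NULL, NULL, a) == APR_SUCCESS) {
    apr_thread_mutex_create(&m, APR_THREAD_MUTEX_DEFAULT, p);
    apr_allocator_mutex_set(a, m);
    apr_allocator_owner_set(a, p);  /* destroying p now also frees a */
}
#endif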

APR_DECLARE(void) apr_allocator_owner_set(apr_allocator_t *allocator,
                                          apr_pool_t *pool)
{
    allocator->owner = pool;
}

APR_DECLARE(apr_pool_t *) apr_allocator_owner_get(apr_allocator_t *allocator)
{
    return allocator->owner;
}

APR_DECLARE(void) apr_allocator_max_free_set(apr_allocator_t *allocator,
                                             apr_size_t in_size)
{
    apr_size_t max_free_index;
    apr_size_t size = in_size;

#if APR_HAS_THREADS
    apr_thread_mutex_t *mutex;

    mutex = apr_allocator_mutex_get(allocator);
    if (mutex != NULL)
        apr_thread_mutex_lock(mutex);
#endif /* APR_HAS_THREADS */

    max_free_index = APR_ALIGN(size, BOUNDARY_SIZE) >> BOUNDARY_INDEX;
    allocator->current_free_index += max_free_index;
    allocator->current_free_index -= allocator->max_free_index;
    allocator->max_free_index = max_free_index;
    if (allocator->current_free_index > max_free_index)
        allocator->current_free_index = max_free_index;

#if APR_HAS_THREADS
    if (mutex != NULL)
        apr_thread_mutex_unlock(mutex);
#endif /* APR_HAS_THREADS */
}
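
/*
 * Worked example: the threshold is stored in BOUNDARY_SIZE blocks.  With
 * 4096-byte boundaries, apr_allocator_max_free_set(allocator, 1048576)
 * yields max_free_index = APR_ALIGN(1048576, 4096) >> 12 = 256, i.e. up
 * to 256 free blocks (1 MiB) are retained before allocator_free() starts
 * handing nodes back to the system.
 */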

static APR_INLINE
apr_size_t allocator_align(apr_size_t in_size)
{
    apr_size_t size = in_size;

    /* Round up the block size to the next boundary, but always
     * allocate at least a certain size (MIN_ALLOC).
     */
    size = APR_ALIGN(size + APR_MEMNODE_T_SIZE, BOUNDARY_SIZE);
    if (size < in_size) {
        return 0;
    }
    if (size < MIN_ALLOC) {
        size = MIN_ALLOC;
    }

    return size;
}

APR_DECLARE(apr_size_t) apr_allocator_align(apr_allocator_t *allocator,
                                            apr_size_t size)
{
    return allocator_align(size);
}
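
/*
 * Worked example: with 4096-byte boundaries and an APR_MEMNODE_T_SIZE of,
 * say, 32 bytes (the real value is the aligned sizeof(apr_memnode_t)),
 * a request for 5000 usable bytes becomes
 *     APR_ALIGN(5000 + 32, 4096) = 8192,
 * a two-boundary node.  Tiny requests round up to at least MIN_ALLOC,
 * and a size that overflows the addition reports 0 (failure).
 */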

static
apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t in_size)
{
    apr_memnode_t *node, **ref;
    apr_size_t max_index;
    apr_size_t size, i, index;

    /* Round up the block size to the next boundary, but always
     * allocate at least a certain size (MIN_ALLOC).
     */
    size = allocator_align(in_size);
    if (!size) {
        return NULL;
    }

    /* Find the index for this node size by
     * dividing its size by the boundary size
     */
    index = (size >> BOUNDARY_INDEX) - 1;

    if (index > APR_UINT32_MAX) {
        return NULL;
    }

    /* First see if there are any nodes in the area we know
     * our node will fit into.
     */
    if (index <= allocator->max_index) {
#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_lock(allocator->mutex);
#endif /* APR_HAS_THREADS */

        /* Walk the free list to see if there are
         * any nodes on it of the requested size
         *
         * NOTE: an optimization would be to check
         * allocator->free[index] first and if no
         * node is present, directly use
         * allocator->free[max_index].  This seems
         * like overkill though and could cause
         * memory waste.
         */
        max_index = allocator->max_index;
        ref = &allocator->free[index];
        i = index;
        while (*ref == NULL && i < max_index) {
            ref++;
            i++;
        }

        if ((node = *ref) != NULL) {
            /* If we have found a node and it doesn't have any
             * nodes waiting in line behind it _and_ we are on
             * the highest available index, find the new highest
             * available index
             */
            if ((*ref = node->next) == NULL && i >= max_index) {
                do {
                    ref--;
                    max_index--;
                }
                while (*ref == NULL && max_index);

                allocator->max_index = max_index;
            }

            allocator->current_free_index += node->index + 1;
            if (allocator->current_free_index > allocator->max_free_index)
                allocator->current_free_index = allocator->max_free_index;

#if APR_HAS_THREADS
            if (allocator->mutex)
                apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */

            goto have_node;
        }

#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */
    }

    /* If we found nothing, seek the sink (at index 0), if
     * it is not empty.
     */
    else if (allocator->free[0]) {
#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_lock(allocator->mutex);
#endif /* APR_HAS_THREADS */

        /* Walk the free list to see if there are
         * any nodes on it of the requested size
         */
        ref = &allocator->free[0];
        while ((node = *ref) != NULL && index > node->index)
            ref = &node->next;

        if (node) {
            *ref = node->next;

            allocator->current_free_index += node->index + 1;
            if (allocator->current_free_index > allocator->max_free_index)
                allocator->current_free_index = allocator->max_free_index;

#if APR_HAS_THREADS
            if (allocator->mutex)
                apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */

            goto have_node;
        }

#if APR_HAS_THREADS
        if (allocator->mutex)
            apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */
    }

    /* If we haven't got a suitable node, malloc a new one
     * and initialize it.
     */
#if APR_ALLOCATOR_GUARD_PAGES
    if ((node = mmap(NULL, size + 2 * GUARDPAGE_SIZE, PROT_NONE,
                     MAP_PRIVATE|MAP_ANON, -1, 0)) == MAP_FAILED)
#elif APR_ALLOCATOR_USES_MMAP
    if ((node = mmap(NULL, size, PROT_READ|PROT_WRITE,
                     MAP_PRIVATE|MAP_ANON, -1, 0)) == MAP_FAILED)
#else
    if ((node = malloc(size)) == NULL)
#endif
        return NULL;

#if APR_ALLOCATOR_GUARD_PAGES
    node = (apr_memnode_t *)((char *)node + GUARDPAGE_SIZE);
    if (mprotect(node, size, PROT_READ|PROT_WRITE) != 0) {
        munmap((char *)node - GUARDPAGE_SIZE, size + 2 * GUARDPAGE_SIZE);
        return NULL;
    }
#endif
    node->index = (apr_uint32_t)index;
    node->endp = (char *)node + size;

have_node:
    node->next = NULL;
    node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;

    APR_VALGRIND_UNDEFINED(node->first_avail, size - APR_MEMNODE_T_SIZE);

    return node;
}

static
void allocator_free(apr_allocator_t *allocator, apr_memnode_t *node)
{
    apr_memnode_t *next, *freelist = NULL;
    apr_size_t index, max_index;
    apr_size_t max_free_index, current_free_index;

#if APR_HAS_THREADS
    if (allocator->mutex)
        apr_thread_mutex_lock(allocator->mutex);
#endif /* APR_HAS_THREADS */

    max_index = allocator->max_index;
    max_free_index = allocator->max_free_index;
    current_free_index = allocator->current_free_index;

    /* Walk the list of submitted nodes and free them one by one,
     * shoving them in the right 'size' buckets as we go.
     */
    do {
        next = node->next;
        index = node->index;

        APR_VALGRIND_NOACCESS((char *)node + APR_MEMNODE_T_SIZE,
                              (node->index+1) << BOUNDARY_INDEX);

        if (max_free_index != APR_ALLOCATOR_MAX_FREE_UNLIMITED
            && index + 1 > current_free_index) {
            node->next = freelist;
            freelist = node;
        }
        else if (index < MAX_INDEX) {
            /* Add the node to the appropriate 'size' bucket.  Adjust
             * the max_index when appropriate.
             */
            if ((node->next = allocator->free[index]) == NULL
                && index > max_index) {
                max_index = index;
            }
            allocator->free[index] = node;
            if (current_free_index >= index + 1)
                current_free_index -= index + 1;
            else
                current_free_index = 0;
        }
        else {
            /* This node is too large to keep in a specific size bucket,
             * just add it to the sink (at index 0).
             */
            node->next = allocator->free[0];
            allocator->free[0] = node;
            if (current_free_index >= index + 1)
                current_free_index -= index + 1;
            else
                current_free_index = 0;
        }
    } while ((node = next) != NULL);

    allocator->max_index = max_index;
    allocator->current_free_index = current_free_index;

#if APR_HAS_THREADS
    if (allocator->mutex)
        apr_thread_mutex_unlock(allocator->mutex);
#endif /* APR_HAS_THREADS */

    while (freelist != NULL) {
        node = freelist;
        freelist = node->next;
#if APR_ALLOCATOR_USES_MMAP
        munmap((char *)node - GUARDPAGE_SIZE,
               2 * GUARDPAGE_SIZE + ((node->index+1) << BOUNDARY_INDEX));
#else
        free(node);
#endif
    }
}

APR_DECLARE(apr_memnode_t *) apr_allocator_alloc(apr_allocator_t *allocator,
                                                 apr_size_t size)
{
    return allocator_alloc(allocator, size);
}

APR_DECLARE(void) apr_allocator_free(apr_allocator_t *allocator,
                                     apr_memnode_t *node)
{
    allocator_free(allocator, node);
}
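
/*
 * Usage sketch for the wrappers above (assumption: 'a' is an allocator
 * created with apr_allocator_create()).  Note that allocator_free()
 * walks a NULL-terminated chain linked through node->next.
 */
#if 0 /* example only, never compiled */
apr_memnode_t *node = apr_allocator_alloc(a, 8000);
if (node) {
    /* [node->first_avail, node->endp) is the usable span */
    memset(node->first_avail, 0, node->endp - node->first_avail);
    node->next = NULL;
    apr_allocator_free(a, node);
}
#endif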

/*
 * Debug level
 */

#define APR_POOL_DEBUG_GENERAL       0x01
#define APR_POOL_DEBUG_VERBOSE       0x02
#define APR_POOL_DEBUG_LIFETIME      0x04
#define APR_POOL_DEBUG_OWNER         0x08
#define APR_POOL_DEBUG_VERBOSE_ALLOC 0x10

#define APR_POOL_DEBUG_VERBOSE_ALL (APR_POOL_DEBUG_VERBOSE \
                                    | APR_POOL_DEBUG_VERBOSE_ALLOC)

/*
 * Structures
 */

typedef struct cleanup_t cleanup_t;

/** A list of processes */
struct process_chain {
    /** The process ID */
    apr_proc_t *proc;
    apr_kill_conditions_e kill_how;
    /** The next process in the list */
    struct process_chain *next;
};

#if APR_POOL_DEBUG
typedef struct debug_node_t debug_node_t;

struct debug_node_t {
    debug_node_t *next;
    apr_size_t    index;
    void         *beginp[64];
    void         *endp[64];
};

#define SIZEOF_DEBUG_NODE_T APR_ALIGN_DEFAULT(sizeof(debug_node_t))

#endif /* APR_POOL_DEBUG */

/* The ref field in the apr_pool_t struct holds a
 * pointer to the pointer referencing this pool.
 * It is used for parent, child, sibling management.
 * Look at apr_pool_create_ex() and apr_pool_destroy()
 * to see how it is used.
 */
struct apr_pool_t {
    apr_pool_t           *parent;
    apr_pool_t           *child;
    apr_pool_t           *sibling;
    apr_pool_t          **ref;
    cleanup_t            *cleanups;
    cleanup_t            *free_cleanups;
    apr_allocator_t      *allocator;
    struct process_chain *subprocesses;
    apr_abortfunc_t       abort_fn;
    apr_hash_t           *user_data;
    const char           *tag;

#if !APR_POOL_DEBUG
    apr_memnode_t        *active;
    apr_memnode_t        *self; /* The node containing the pool itself */
    char                 *self_first_avail;

#else /* APR_POOL_DEBUG */
    apr_pool_t           *joined; /* the caller has guaranteed that this pool
                                   * will survive as long as ->joined */
    debug_node_t         *nodes;
    const char           *file_line;
    apr_uint32_t          creation_flags;
    unsigned int          stat_alloc;
    unsigned int          stat_total_alloc;
    unsigned int          stat_clear;
#if APR_HAS_THREADS
    apr_os_thread_t       owner;
    apr_thread_mutex_t   *mutex;
#endif /* APR_HAS_THREADS */
#endif /* APR_POOL_DEBUG */
#if defined(NETWARE)
    apr_os_proc_t         owner_proc;
#endif /* defined(NETWARE) */
    cleanup_t            *pre_cleanups;
#if APR_POOL_CONCURRENCY_CHECK
    volatile apr_uint32_t in_use;
    apr_os_thread_t       in_use_by;
#endif /* APR_POOL_CONCURRENCY_CHECK */
};

#define SIZEOF_POOL_T       APR_ALIGN_DEFAULT(sizeof(apr_pool_t))

static apr_byte_t   apr_pools_initialized = 0;
static apr_pool_t  *global_pool = NULL;

#if !APR_POOL_DEBUG
static apr_allocator_t *global_allocator = NULL;
#endif /* !APR_POOL_DEBUG */

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
static apr_file_t *file_stderr = NULL;
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */

static void run_cleanups(cleanup_t **c);
static void free_proc_chain(struct process_chain *procs);

#if APR_POOL_DEBUG
static void pool_destroy_debug(apr_pool_t *pool, const char *file_line);
#endif /* APR_POOL_DEBUG */

#if !APR_POOL_DEBUG
/*
 * Initialization
 */

APR_DECLARE(apr_status_t) apr_pool_initialize(void)
{
    apr_status_t rv;

    if (apr_pools_initialized++)
        return APR_SUCCESS;

#if HAVE_VALGRIND
    apr_running_on_valgrind = RUNNING_ON_VALGRIND;
#endif

#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
    boundary_size = sysconf(_SC_PAGESIZE);
    boundary_index = 12;
    while ((1 << boundary_index) < boundary_size)
        boundary_index++;
    boundary_size = (1 << boundary_index);
#endif

    if ((rv = apr_allocator_create(&global_allocator)) != APR_SUCCESS) {
        apr_pools_initialized = 0;
        return rv;
    }

    if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
                                 global_allocator)) != APR_SUCCESS) {
        apr_allocator_destroy(global_allocator);
        global_allocator = NULL;
        apr_pools_initialized = 0;
        return rv;
    }

    apr_pool_tag(global_pool, "apr_global_pool");

    /* This has to happen here because mutexes might be backed by
     * atomics.  It used to be snug and safe in apr_initialize().
     *
     * Warning: apr_atomic_init() must always be called, by any
     * means possible, from apr_initialize().
     */
    if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS) {
        return rv;
    }

#if APR_HAS_THREADS
    {
        apr_thread_mutex_t *mutex;

        if ((rv = apr_thread_mutex_create(&mutex,
                                          APR_THREAD_MUTEX_DEFAULT,
                                          global_pool)) != APR_SUCCESS) {
            return rv;
        }

        apr_allocator_mutex_set(global_allocator, mutex);
    }
#endif /* APR_HAS_THREADS */

    apr_allocator_owner_set(global_allocator, global_pool);

    return APR_SUCCESS;
}
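
/*
 * Usage sketch: the matching bootstrap/teardown pair (apr_initialize()
 * normally performs this for you).
 */
#if 0 /* example only, never compiled */
apr_pool_t *p;

apr_pool_initialize();
apr_pool_create_ex(&p, NULL, NULL, NULL);   /* becomes a child of the global pool */
/* ... allocate from p ... */
apr_pool_destroy(p);
apr_pool_terminate();
#endif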

APR_DECLARE(void) apr_pool_terminate(void)
{
    if (!apr_pools_initialized)
        return;

    if (--apr_pools_initialized)
        return;

    apr_pool_destroy(global_pool); /* This will also destroy the mutex */
    global_pool = NULL;

    global_allocator = NULL;
}

/* Node list management helper macros; list_insert() inserts 'node'
 * before 'point'. */
#define list_insert(node, point) do {           \
    node->ref = point->ref;                     \
    *node->ref = node;                          \
    node->next = point;                         \
    point->ref = &node->next;                   \
} while (0)

/* list_remove() removes 'node' from its list. */
#define list_remove(node) do {                  \
    *node->ref = node->next;                    \
    node->next->ref = node->ref;                \
} while (0)

/* Returns the amount of free space in the given node. */
#define node_free_space(node_) ((apr_size_t)(node_->endp - node_->first_avail))
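
/*
 * A minimal trace of the ring invariants (every node's ref points at
 * the pointer that points at the node), assuming a one-node ring where
 * active->next == active and active->ref == &active->next:
 */
#if 0 /* example only, never compiled */
list_insert(node, active);
/* now: active->next == node, node->next == active,
 *      node->ref == &active->next, active->ref == &node->next */
list_remove(node);
/* back to the one-node ring */
#endif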

/*
 * Helpers to mark pool as in-use/free.  Used for finding thread-unsafe
 * concurrent accesses from different threads.
 */
#if APR_POOL_CONCURRENCY_CHECK

#define IDLE        0
#define IN_USE      1
#define DESTROYED   2

static const char * const in_use_string[] = { "idle", "in use", "destroyed" };

static void pool_concurrency_abort(apr_pool_t *pool, apr_uint32_t new, apr_uint32_t old)
{
    fprintf(stderr, "pool concurrency check: pool %p(%s), thread cur %lx "
                    "in use by %lx, state %s -> %s \n",
            pool, pool->tag, (unsigned long)apr_os_thread_current(),
            (unsigned long)pool->in_use_by,
            in_use_string[old], in_use_string[new]);
    abort();
}

static APR_INLINE void pool_concurrency_set_used(apr_pool_t *pool)
{
    apr_uint32_t old;

    old = apr_atomic_cas32(&pool->in_use, IN_USE, IDLE);
    if (old != IDLE)
        pool_concurrency_abort(pool, IN_USE, old);

    pool->in_use_by = apr_os_thread_current();
}

static APR_INLINE void pool_concurrency_set_idle(apr_pool_t *pool)
{
    apr_uint32_t old;

    old = apr_atomic_cas32(&pool->in_use, IDLE, IN_USE);
    if (old != IN_USE)
        pool_concurrency_abort(pool, IDLE, old);
}

static APR_INLINE void pool_concurrency_init(apr_pool_t *pool)
{
    pool->in_use = IDLE;
}

static APR_INLINE void pool_concurrency_set_destroyed(apr_pool_t *pool)
{
    apr_uint32_t old;

    old = apr_atomic_cas32(&pool->in_use, DESTROYED, IDLE);
    if (old != IDLE)
        pool_concurrency_abort(pool, DESTROYED, old);
    pool->in_use_by = apr_os_thread_current();
}
#else
static APR_INLINE void pool_concurrency_init(apr_pool_t *pool) { }
static APR_INLINE void pool_concurrency_set_used(apr_pool_t *pool) { }
static APR_INLINE void pool_concurrency_set_idle(apr_pool_t *pool) { }
static APR_INLINE void pool_concurrency_set_destroyed(apr_pool_t *pool) { }
#endif /* APR_POOL_CONCURRENCY_CHECK */

/*
 * Memory allocation
 */

APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t in_size)
{
    apr_memnode_t *active, *node;
    void *mem;
    apr_size_t size, free_index;

    pool_concurrency_set_used(pool);
    size = APR_ALIGN_DEFAULT(in_size);
#if HAVE_VALGRIND
    if (apr_running_on_valgrind)
        size += 2 * REDZONE;
#endif
    if (size < in_size) {
        pool_concurrency_set_idle(pool);
        if (pool->abort_fn)
            pool->abort_fn(APR_ENOMEM);

        return NULL;
    }
    active = pool->active;

    /* If the active node has enough bytes left, use it. */
    if (size <= node_free_space(active)) {
        mem = active->first_avail;
        active->first_avail += size;
        goto have_mem;
    }

    node = active->next;
    if (size <= node_free_space(node)) {
        list_remove(node);
    }
    else {
        if ((node = allocator_alloc(pool->allocator, size)) == NULL) {
            pool_concurrency_set_idle(pool);
            if (pool->abort_fn)
                pool->abort_fn(APR_ENOMEM);

            return NULL;
        }
    }

    node->free_index = 0;

    mem = node->first_avail;
    node->first_avail += size;

    list_insert(node, active);

    pool->active = node;

    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;

    active->free_index = free_index;
    node = active->next;
    if (free_index >= node->free_index)
        goto have_mem;

    do {
        node = node->next;
    }
    while (free_index < node->free_index);

    list_remove(active);
    list_insert(active, node);

have_mem:
#if HAVE_VALGRIND
    if (!apr_running_on_valgrind) {
        pool_concurrency_set_idle(pool);
        return mem;
    }
    else {
        mem = (char *)mem + REDZONE;
        VALGRIND_MEMPOOL_ALLOC(pool, mem, in_size);
        pool_concurrency_set_idle(pool);
        return mem;
    }
#else
    pool_concurrency_set_idle(pool);
    return mem;
#endif
}

/* Provide an implementation of apr_pcalloc for backward compatibility
 * with code built before apr_pcalloc was a macro
 */

#undef apr_pcalloc

APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size);
APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
{
    void *mem;

    if ((mem = apr_palloc(pool, size)) != NULL) {
        memset(mem, 0, size);
    }

    return mem;
}
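
/*
 * Usage sketch: pool allocations are not freed individually; they are
 * reclaimed en masse when the pool is cleared or destroyed.
 */
#if 0 /* example only, never compiled */
int  *v = apr_palloc(pool, 100 * sizeof(int));  /* uninitialized memory */
char *z = apr_pcalloc(pool, 256);               /* zero-filled memory */
#endif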

/*
 * Pool creation/destruction
 */

APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
{
    apr_memnode_t *active;

    /* Run pre-destroy cleanups */
    run_cleanups(&pool->pre_cleanups);

    pool_concurrency_set_used(pool);
    pool->pre_cleanups = NULL;
    pool_concurrency_set_idle(pool);

    /* Destroy the subpools.  The subpools will detach themselves from
     * this pool thus this loop is safe and easy.
     */
    while (pool->child)
        apr_pool_destroy(pool->child);

    /* Run cleanups */
    run_cleanups(&pool->cleanups);

    pool_concurrency_set_used(pool);
    pool->cleanups = NULL;
    pool->free_cleanups = NULL;

    /* Free subprocesses */
    free_proc_chain(pool->subprocesses);
    pool->subprocesses = NULL;

    /* Clear the user data. */
    pool->user_data = NULL;

    /* Find the node attached to the pool structure, reset it, make
     * it the active node and free the rest of the nodes.
     */
    active = pool->active = pool->self;
    active->first_avail = pool->self_first_avail;

    APR_IF_VALGRIND(VALGRIND_MEMPOOL_TRIM(pool, pool, 1));

    if (active->next == active) {
        pool_concurrency_set_idle(pool);
        return;
    }

    *active->ref = NULL;
    allocator_free(pool->allocator, active->next);
    active->next = active;
    active->ref = &active->next;

    pool_concurrency_set_idle(pool);
}
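
/*
 * Usage sketch: the clear-and-reuse idiom this function enables, e.g.
 * one scratch pool per loop iteration (have_work()/do_work() are
 * placeholders, not APR functions).
 */
#if 0 /* example only, never compiled */
apr_pool_t *scratch;

apr_pool_create_ex(&scratch, parent, NULL, NULL);
while (have_work()) {
    do_work(scratch);
    apr_pool_clear(scratch);    /* recycle all memory, keep the pool */
}
apr_pool_destroy(scratch);
#endif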

APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
{
    apr_memnode_t *active;
    apr_allocator_t *allocator;

    /* Run pre-destroy cleanups */
    run_cleanups(&pool->pre_cleanups);

    pool_concurrency_set_used(pool);
    pool->pre_cleanups = NULL;
    pool_concurrency_set_idle(pool);

    /* Destroy the subpools.  The subpools will detach themselves from
     * this pool thus this loop is safe and easy.
     */
    while (pool->child)
        apr_pool_destroy(pool->child);

    /* Run cleanups */
    run_cleanups(&pool->cleanups);
    pool_concurrency_set_destroyed(pool);

    /* Free subprocesses */
    free_proc_chain(pool->subprocesses);

    /* Remove the pool from the parent's child list */
    if (pool->parent) {
#if APR_HAS_THREADS
        apr_thread_mutex_t *mutex;

        if ((mutex = apr_allocator_mutex_get(pool->parent->allocator)) != NULL)
            apr_thread_mutex_lock(mutex);
#endif /* APR_HAS_THREADS */

        if ((*pool->ref = pool->sibling) != NULL)
            pool->sibling->ref = pool->ref;

#if APR_HAS_THREADS
        if (mutex)
            apr_thread_mutex_unlock(mutex);
#endif /* APR_HAS_THREADS */
    }

    /* Find the block attached to the pool structure.  Save a copy of the
     * allocator pointer, because the pool struct soon will be no more.
     */
    allocator = pool->allocator;
    active = pool->self;
    *active->ref = NULL;

#if APR_HAS_THREADS
    if (apr_allocator_owner_get(allocator) == pool) {
        /* Make sure to remove the lock, since it is highly likely to
         * be invalid now.
         */
        apr_allocator_mutex_set(allocator, NULL);
    }
#endif /* APR_HAS_THREADS */

    /* Free all the nodes in the pool (including the node holding the
     * pool struct), by giving them back to the allocator.
     */
    allocator_free(allocator, active);

    /* If this pool happens to be the owner of the allocator, free
     * everything in the allocator (that includes the pool struct
     * and the allocator).  Don't worry about destroying the optional mutex
     * in the allocator, it will have been destroyed by the cleanup function.
     */
    if (apr_allocator_owner_get(allocator) == pool) {
        apr_allocator_destroy(allocator);
    }

    APR_IF_VALGRIND(VALGRIND_DESTROY_MEMPOOL(pool));
}

APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
                                             apr_pool_t *parent,
                                             apr_abortfunc_t abort_fn,
                                             apr_allocator_t *allocator)
{
    apr_pool_t *pool;
    apr_memnode_t *node;

    *newpool = NULL;

    if (!parent)
        parent = global_pool;

    /* parent will always be non-NULL here except the first time a
     * pool is created, in which case allocator is guaranteed to be
     * non-NULL.
     */

    if (!abort_fn && parent)
        abort_fn = parent->abort_fn;

    if (allocator == NULL)
        allocator = parent->allocator;

    if ((node = allocator_alloc(allocator,
                                MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
        if (abort_fn)
            abort_fn(APR_ENOMEM);

        return APR_ENOMEM;
    }

    node->next = node;
    node->ref = &node->next;

#if HAVE_VALGRIND
    if (!apr_running_on_valgrind) {
        pool = (apr_pool_t *)node->first_avail;
        pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
    }
    else {
        pool = (apr_pool_t *)(node->first_avail + REDZONE);
        pool->self_first_avail = (char *)pool + SIZEOF_POOL_T + 2 * REDZONE;
        VALGRIND_MAKE_MEM_NOACCESS(pool->self_first_avail,
                                   node->endp - pool->self_first_avail);
        VALGRIND_CREATE_MEMPOOL(pool, REDZONE, 0);
    }
#else
    pool = (apr_pool_t *)node->first_avail;
    pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;
#endif
    node->first_avail = pool->self_first_avail;

    pool->allocator = allocator;
    pool->active = pool->self = node;
    pool->abort_fn = abort_fn;
    pool->child = NULL;
    pool->cleanups = NULL;
    pool->free_cleanups = NULL;
    pool->pre_cleanups = NULL;
    pool->subprocesses = NULL;
    pool->user_data = NULL;
    pool->tag = NULL;

#if defined(NETWARE)
    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
#endif /* defined(NETWARE) */

    if ((pool->parent = parent) != NULL) {
#if APR_HAS_THREADS
        apr_thread_mutex_t *mutex;

        if ((mutex = apr_allocator_mutex_get(parent->allocator)) != NULL)
            apr_thread_mutex_lock(mutex);
#endif /* APR_HAS_THREADS */

        if ((pool->sibling = parent->child) != NULL)
            pool->sibling->ref = &pool->sibling;

        parent->child = pool;
        pool->ref = &parent->child;

#if APR_HAS_THREADS
        if (mutex)
            apr_thread_mutex_unlock(mutex);
#endif /* APR_HAS_THREADS */
    }
    else {
        pool->sibling = NULL;
        pool->ref = NULL;
    }

    pool_concurrency_init(pool);

    *newpool = pool;

    return APR_SUCCESS;
}

/* Deprecated.  Renamed to apr_pool_create_unmanaged_ex().
 */
APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator)
{
    return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
}

APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
                                                       apr_abortfunc_t abort_fn,
                                                       apr_allocator_t *allocator)
{
    apr_pool_t *pool;
    apr_memnode_t *node;
    apr_allocator_t *pool_allocator;

    *newpool = NULL;

    if (!apr_pools_initialized)
        return APR_ENOPOOL;

    if ((pool_allocator = allocator) == NULL) {
        if ((pool_allocator = malloc(SIZEOF_ALLOCATOR_T)) == NULL) {
            if (abort_fn)
                abort_fn(APR_ENOMEM);

            return APR_ENOMEM;
        }
        memset(pool_allocator, 0, SIZEOF_ALLOCATOR_T);
        pool_allocator->max_free_index = APR_ALLOCATOR_MAX_FREE_UNLIMITED;
    }
    if ((node = allocator_alloc(pool_allocator,
                                MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
        if (abort_fn)
            abort_fn(APR_ENOMEM);

        return APR_ENOMEM;
    }

    node->next = node;
    node->ref = &node->next;

    pool = (apr_pool_t *)node->first_avail;
    node->first_avail = pool->self_first_avail = (char *)pool + SIZEOF_POOL_T;

    pool->allocator = pool_allocator;
    pool->active = pool->self = node;
    pool->abort_fn = abort_fn;
    pool->child = NULL;
    pool->cleanups = NULL;
    pool->free_cleanups = NULL;
    pool->pre_cleanups = NULL;
    pool->subprocesses = NULL;
    pool->user_data = NULL;
    pool->tag = NULL;
    pool->parent = NULL;
    pool->sibling = NULL;
    pool->ref = NULL;

#if defined(NETWARE)
    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
#endif /* defined(NETWARE) */

    if (!allocator)
        pool_allocator->owner = pool;

    pool_concurrency_init(pool);

    *newpool = pool;

    return APR_SUCCESS;
}

/*
 * apr_psprintf is implemented by writing directly into the current
 * block of the pool, starting right at first_avail.  If there's
 * insufficient room, then a new block is allocated and the earlier
 * output is copied over.  The new block isn't linked into the pool
 * until all the output is done.
 *
 * Note that this is completely safe because nothing else can
 * allocate in this apr_pool_t while apr_psprintf is running.  alarms are
 * blocked, and the only thing outside of apr_pools.c that's invoked
 * is apr_vformatter -- which was purposefully written to be
 * self-contained with no callouts.
 */

struct psprintf_data {
    apr_vformatter_buff_t vbuff;
    apr_memnode_t        *node;
    apr_pool_t           *pool;
    apr_byte_t            got_a_new_node;
    apr_memnode_t        *free;
};

#define APR_PSPRINTF_MIN_STRINGSIZE 32
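
/*
 * Usage sketch: the resulting string lives in the pool, so there is
 * nothing to free and no buffer size to guess.
 */
#if 0 /* example only, never compiled */
char *msg = apr_psprintf(pool, "listener %s:%d (attempt %d)",
                         "127.0.0.1", 8080, 3);
#endif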

static int psprintf_flush(apr_vformatter_buff_t *vbuff)
{
    struct psprintf_data *ps = (struct psprintf_data *)vbuff;
    apr_memnode_t *node, *active;
    apr_size_t cur_len, size;
    char *strp;
    apr_pool_t *pool;
    apr_size_t free_index;

    pool = ps->pool;
    active = ps->node;
    strp = ps->vbuff.curpos;
    cur_len = strp - active->first_avail;
    size = cur_len << 1;

    /* Make sure that we don't try to use a block that has less
     * than APR_PSPRINTF_MIN_STRINGSIZE bytes left in it.  This
     * also catches the case where size == 0, which would result
     * in reusing a block that can't even hold the NUL byte.
     */
    if (size < APR_PSPRINTF_MIN_STRINGSIZE)
        size = APR_PSPRINTF_MIN_STRINGSIZE;

    node = active->next;
    if (!ps->got_a_new_node && size <= node_free_space(node)) {

        list_remove(node);
        list_insert(node, active);

        node->free_index = 0;

        pool->active = node;

        free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
                                BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;

        active->free_index = free_index;
        node = active->next;
        if (free_index < node->free_index) {
            do {
                node = node->next;
            }
            while (free_index < node->free_index);

            list_remove(active);
            list_insert(active, node);
        }

        node = pool->active;
    }
    else {
        if ((node = allocator_alloc(pool->allocator, size)) == NULL)
            return -1;

        if (ps->got_a_new_node) {
            active->next = ps->free;
            ps->free = active;
        }

        ps->got_a_new_node = 1;
    }

    APR_VALGRIND_UNDEFINED(node->first_avail,
                           node->endp - node->first_avail);
    memcpy(node->first_avail, active->first_avail, cur_len);
    APR_VALGRIND_NOACCESS(active->first_avail,
                          active->endp - active->first_avail);

    ps->node = node;
    ps->vbuff.curpos = node->first_avail + cur_len;
    ps->vbuff.endpos = node->endp - 1; /* Save a byte for NUL terminator */

    return 0;
}

#if HAVE_VALGRIND
static int add_redzone(int (*flush_func)(apr_vformatter_buff_t *b),
                       struct psprintf_data *ps)
{
    apr_size_t len = ps->vbuff.curpos - ps->node->first_avail + REDZONE;

    while (ps->vbuff.curpos - ps->node->first_avail < len) {
        if (ps->vbuff.endpos - ps->node->first_avail >= len)
            ps->vbuff.curpos = ps->node->first_avail + len;
        else
            ps->vbuff.curpos = ps->vbuff.endpos;

        /*
         * Prevent valgrind from complaining when psprintf_flush()
         * does a memcpy().  The VALGRIND_MEMPOOL_ALLOC() will reset
         * the redzone to NOACCESS.
         */
        if (ps->vbuff.curpos != ps->node->first_avail)
            VALGRIND_MAKE_MEM_DEFINED(ps->node->first_avail,
                                      ps->vbuff.curpos - ps->node->first_avail);

        if (ps->vbuff.curpos == ps->vbuff.endpos) {
            if (psprintf_flush(&ps->vbuff) == -1)
                return -1;
        }
    }

    return 0;
}
#endif /* HAVE_VALGRIND */

APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
{
    struct psprintf_data ps;
    char *strp;
    apr_size_t size;
    apr_memnode_t *active, *node;
    apr_size_t free_index;

    pool_concurrency_set_used(pool);
    ps.node = active = pool->active;
    ps.pool = pool;
    ps.vbuff.curpos = ps.node->first_avail;

    /* Save a byte for the NUL terminator */
    ps.vbuff.endpos = ps.node->endp - 1;
    ps.got_a_new_node = 0;
    ps.free = NULL;

    /* Make sure that the first node passed to apr_vformatter has at least
     * room to hold the NUL terminator.
     */
    if (ps.node->first_avail == ps.node->endp) {
        if (psprintf_flush(&ps.vbuff) == -1)
            goto error;
    }

#if HAVE_VALGRIND
    if (apr_running_on_valgrind) {
        if (add_redzone(psprintf_flush, &ps) == -1)
            goto error;

        if (!ps.got_a_new_node) {
            /* psprintf_flush() has not been called, allow access to our node */
            VALGRIND_MAKE_MEM_UNDEFINED(ps.vbuff.curpos,
                                        ps.node->endp - ps.vbuff.curpos);
        }
    }
#endif /* HAVE_VALGRIND */

    if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1)
        goto error;

    *ps.vbuff.curpos++ = '\0';

#if HAVE_VALGRIND
    if (!apr_running_on_valgrind) {
        strp = ps.node->first_avail;
    }
    else {
        if (add_redzone(psprintf_flush, &ps) == -1)
            goto error;

        if (ps.node->endp != ps.vbuff.curpos)
            APR_VALGRIND_NOACCESS(ps.vbuff.curpos,
                                  ps.node->endp - ps.vbuff.curpos);
        strp = ps.node->first_avail + REDZONE;
        size = ps.vbuff.curpos - strp;
        VALGRIND_MEMPOOL_ALLOC(pool, strp, size);
        VALGRIND_MAKE_MEM_DEFINED(strp, size);
    }
#else
    strp = ps.node->first_avail;
#endif

    size = ps.vbuff.curpos - ps.node->first_avail;
    size = APR_ALIGN_DEFAULT(size);
    ps.node->first_avail += size;

    if (ps.free)
        allocator_free(pool->allocator, ps.free);

    /*
     * Link the node in if it's a new one
     */
    if (!ps.got_a_new_node) {
        pool_concurrency_set_idle(pool);
        return strp;
    }

    active = pool->active;
    node = ps.node;

    node->free_index = 0;

    list_insert(node, active);

    pool->active = node;

    free_index = (APR_ALIGN(active->endp - active->first_avail + 1,
                            BOUNDARY_SIZE) - BOUNDARY_SIZE) >> BOUNDARY_INDEX;

    active->free_index = free_index;
    node = active->next;

    if (free_index >= node->free_index) {
        pool_concurrency_set_idle(pool);
        return strp;
    }

    do {
        node = node->next;
    }
    while (free_index < node->free_index);

    list_remove(active);
    list_insert(active, node);

    pool_concurrency_set_idle(pool);

    return strp;

error:
    pool_concurrency_set_idle(pool);
    if (pool->abort_fn)
        pool->abort_fn(APR_ENOMEM);
    if (ps.got_a_new_node) {
        ps.node->next = ps.free;
        allocator_free(pool->allocator, ps.node);
    }
    APR_VALGRIND_NOACCESS(pool->active->first_avail,
                          pool->active->endp - pool->active->first_avail);
    return NULL;
}

#else /* APR_POOL_DEBUG */
/*
 * Debug helper functions
 */

/*
 * Walk the pool tree rooted at pool, depth first.  When fn returns
 * anything other than 0, abort the traversal and return the value
 * returned by fn.
 */
static int apr_pool_walk_tree(apr_pool_t *pool,
                              int (*fn)(apr_pool_t *pool, void *data),
                              void *data)
{
    int rv;
    apr_pool_t *child;

    rv = fn(pool, data);
    if (rv)
        return rv;

#if APR_HAS_THREADS
    if (pool->mutex) {
        apr_thread_mutex_lock(pool->mutex);
    }
#endif /* APR_HAS_THREADS */

    child = pool->child;
    while (child) {
        rv = apr_pool_walk_tree(child, fn, data);
        if (rv)
            break;

        child = child->sibling;
    }

#if APR_HAS_THREADS
    if (pool->mutex) {
        apr_thread_mutex_unlock(pool->mutex);
    }
#endif /* APR_HAS_THREADS */

    return rv;
}

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
static void apr_pool_log_event(apr_pool_t *pool, const char *event,
                               const char *file_line, int deref)
{
    if (file_stderr) {
        if (deref) {
            apr_file_printf(file_stderr,
                "POOL DEBUG: "
                "[%lu"
#if APR_HAS_THREADS
                "/%lu"
#endif /* APR_HAS_THREADS */
                "] "
                "%7s "
                "(%10lu/%10lu/%10lu) "
                "0x%pp \"%s\" "
                "<%s> "
                "(%u/%u/%u) "
                "\n",
                (unsigned long)getpid(),
#if APR_HAS_THREADS
                (unsigned long)apr_os_thread_current(),
#endif /* APR_HAS_THREADS */
                event,
                (unsigned long)apr_pool_num_bytes(pool, 0),
                (unsigned long)apr_pool_num_bytes(pool, 1),
                (unsigned long)apr_pool_num_bytes(global_pool, 1),
                (void *)pool, pool->tag,
                file_line,
                pool->stat_alloc, pool->stat_total_alloc, pool->stat_clear);
        }
        else {
            apr_file_printf(file_stderr,
                "POOL DEBUG: "
                "[%lu"
#if APR_HAS_THREADS
                "/%lu"
#endif /* APR_HAS_THREADS */
                "] "
                "%7s "
                "0x%pp "
                "<%s> "
                "\n",
                (unsigned long)getpid(),
#if APR_HAS_THREADS
                (unsigned long)apr_os_thread_current(),
#endif /* APR_HAS_THREADS */
                event,
                (void *)pool,
                file_line);
        }
    }
}
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME)
static int pool_is_child_of(apr_pool_t *parent, void *data)
{
    apr_pool_t *pool = (apr_pool_t *)data;

    return (pool == parent);
}

static int apr_pool_is_child_of(apr_pool_t *pool, apr_pool_t *parent)
{
    if (parent == NULL)
        return 0;

    return apr_pool_walk_tree(parent, pool_is_child_of, pool);
}
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) */

static void apr_pool_check_integrity(apr_pool_t *pool)
{
    /* Rule of thumb: use of the global pool is always
     * ok, since the only user is apr_pools.c.  Unless
     * people have searched for the top level parent and
     * started to use that...
     */
    if (pool == global_pool || global_pool == NULL)
        return;

    /* Lifetime
     * This basically checks to see if the pool being used is still
     * a relative to the global pool.  If not it was previously
     * destroyed, in which case we abort().
     */
#if (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME)
    if (!apr_pool_is_child_of(pool, global_pool)) {
#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
        apr_pool_log_event(pool, "LIFE",
                           __FILE__ ":apr_pool_integrity check [lifetime]", 0);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
        abort();
    }
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_LIFETIME) */

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_OWNER)
#if APR_HAS_THREADS
    if (!apr_os_thread_equal(pool->owner, apr_os_thread_current())) {
#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
        apr_pool_log_event(pool, "THREAD",
                           __FILE__ ":apr_pool_integrity check [owner]", 0);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
        abort();
    }
#endif /* APR_HAS_THREADS */
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_OWNER) */
}

/*
 * Initialization (debug)
 */

APR_DECLARE(apr_status_t) apr_pool_initialize(void)
{
    apr_status_t rv = APR_SUCCESS;
#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
    char *logpath;
    apr_file_t *debug_log = NULL;
#endif

    if (apr_pools_initialized++)
        return APR_SUCCESS;

#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
    boundary_size = sysconf(_SC_PAGESIZE);
    boundary_index = 12;
    while ((1 << boundary_index) < boundary_size)
        boundary_index++;
    boundary_size = (1 << boundary_index);
#endif

    /* Since the debug code works a bit differently than the
     * regular pools code, we ask for a lock here.  The regular
     * pools code has got this lock embedded in the global
     * allocator, a concept unknown to debug mode.
     */
    if ((rv = apr_pool_create_ex(&global_pool, NULL, NULL,
                                 NULL)) != APR_SUCCESS) {
        return rv;
    }

    apr_pool_tag(global_pool, "APR global pool");

    apr_pools_initialized = 1;

    /* This has to happen here because mutexes might be backed by
     * atomics.  It used to be snug and safe in apr_initialize().
     */
    if ((rv = apr_atomic_init(global_pool)) != APR_SUCCESS) {
        return rv;
    }

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
    rv = apr_env_get(&logpath, "APR_POOL_DEBUG_LOG", global_pool);

    /* Don't pass file_stderr directly to apr_file_open() here, since
     * apr_file_open() can call back to apr_pool_log_event() and that
     * may attempt to use the then non-NULL but partially set up file
     * object.
     */
    if (rv == APR_SUCCESS) {
        apr_file_open(&debug_log, logpath, APR_APPEND|APR_WRITE|APR_CREATE,
                      APR_OS_DEFAULT, global_pool);
    }
    else {
        apr_file_open_stderr(&debug_log, global_pool);
    }

    /* debug_log is now a file handle. */
    file_stderr = debug_log;

    if (file_stderr) {
        apr_file_printf(file_stderr,
            "POOL DEBUG: [PID"
#if APR_HAS_THREADS
            "/TID"
#endif /* APR_HAS_THREADS */
            "] ACTION  (SIZE      /POOL SIZE /TOTAL SIZE) "
            "POOL       \"TAG\" <__FILE__:__LINE__> (ALLOCS/TOTAL ALLOCS/CLEARS)\n");

        apr_pool_log_event(global_pool, "GLOBAL", __FILE__ ":apr_pool_initialize", 0);
    }
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */

    return APR_SUCCESS;
}

APR_DECLARE(void) apr_pool_terminate(void)
{
    if (!apr_pools_initialized)
        return;

    if (--apr_pools_initialized)
        return;

    apr_pool_destroy(global_pool); /* This will also destroy the mutex */
    global_pool = NULL;

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
    file_stderr = NULL;
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */
}

/*
 * Memory allocation (debug)
 */

static void *pool_alloc(apr_pool_t *pool, apr_size_t size)
{
    debug_node_t *node;
    void *mem;

    if ((mem = malloc(size)) == NULL) {
        if (pool->abort_fn)
            pool->abort_fn(APR_ENOMEM);

        return NULL;
    }

    node = pool->nodes;
    if (node == NULL || node->index == 64) {
        if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL) {
            free(mem);
            if (pool->abort_fn)
                pool->abort_fn(APR_ENOMEM);

            return NULL;
        }

        memset(node, 0, SIZEOF_DEBUG_NODE_T);

        node->next = pool->nodes;
        pool->nodes = node;
        node->index = 0;
    }

    node->beginp[node->index] = mem;
    node->endp[node->index] = (char *)mem + size;
    node->index++;

    pool->stat_alloc++;
    pool->stat_total_alloc++;

    return mem;
}

APR_DECLARE(void *) apr_palloc_debug(apr_pool_t *pool, apr_size_t size,
                                     const char *file_line)
{
    void *mem;

    apr_pool_check_integrity(pool);

    mem = pool_alloc(pool, size);

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC)
    apr_pool_log_event(pool, "PALLOC", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) */

    return mem;
}

APR_DECLARE(void *) apr_pcalloc_debug(apr_pool_t *pool, apr_size_t size,
                                      const char *file_line)
{
    void *mem;

    apr_pool_check_integrity(pool);

    mem = pool_alloc(pool, size);
    memset(mem, 0, size);

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC)
    apr_pool_log_event(pool, "PCALLOC", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALLOC) */

    return mem;
}

/*
 * Pool creation/destruction (debug)
 */

#define POOL_POISON_BYTE 'A'

static void pool_clear_debug(apr_pool_t *pool, const char *file_line)
{
    debug_node_t *node;
    apr_size_t index;

    /* Run pre-destroy cleanups */
    run_cleanups(&pool->pre_cleanups);
    pool->pre_cleanups = NULL;

    /* Destroy the subpools.  The subpools will detach themselves from
     * this pool thus this loop is safe and easy.
     */
    while (pool->child)
        pool_destroy_debug(pool->child, file_line);

    /* Run cleanups */
    run_cleanups(&pool->cleanups);
    pool->free_cleanups = NULL;
    pool->cleanups = NULL;

    /* If new child pools showed up, this is a reason to raise a flag */
    if (pool->child)
        abort();

    /* Free subprocesses */
    free_proc_chain(pool->subprocesses);
    pool->subprocesses = NULL;

    /* Clear the user data. */
    pool->user_data = NULL;

    /* Free the blocks, scribbling over them first to help highlight
     * use-after-free issues. */
    while ((node = pool->nodes) != NULL) {
        pool->nodes = node->next;

        for (index = 0; index < node->index; index++) {
            memset(node->beginp[index], POOL_POISON_BYTE,
                   (char *)node->endp[index] - (char *)node->beginp[index]);
            free(node->beginp[index]);
        }

        memset(node, POOL_POISON_BYTE, SIZEOF_DEBUG_NODE_T);
        free(node);
    }

    pool->stat_alloc = 0;
    pool->stat_clear++;
}

APR_DECLARE(void) apr_pool_clear_debug(apr_pool_t *pool,
                                       const char *file_line)
{
#if APR_HAS_THREADS
    apr_thread_mutex_t *mutex = NULL;
#endif

    apr_pool_check_integrity(pool);

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
    apr_pool_log_event(pool, "CLEAR", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */

#if APR_HAS_THREADS
    if (pool->parent != NULL)
        mutex = pool->parent->mutex;

    /* Lock the parent mutex before clearing so that if we have our
     * own mutex it won't be accessed by apr_pool_walk_tree after
     * it has been destroyed.
     */
    if (mutex != NULL && mutex != pool->mutex) {
        apr_thread_mutex_lock(mutex);
    }
#endif

    pool_clear_debug(pool, file_line);

#if APR_HAS_THREADS
    /* If we had our own mutex, it will have been destroyed by
     * the registered cleanups.  Recreate the mutex.  Unlock
     * the mutex we obtained above.
     */
    if (mutex != pool->mutex) {
        (void)apr_thread_mutex_create(&pool->mutex,
                                      APR_THREAD_MUTEX_NESTED, pool);

        if (mutex != NULL)
            (void)apr_thread_mutex_unlock(mutex);
    }
#endif /* APR_HAS_THREADS */
}

static void pool_destroy_debug(apr_pool_t *pool, const char *file_line)
{
    apr_pool_check_integrity(pool);

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
    apr_pool_log_event(pool, "DESTROY", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */

    pool_clear_debug(pool, file_line);

    /* Remove the pool from the parent's child list */
    if (pool->parent != NULL) {
#if APR_HAS_THREADS
        apr_thread_mutex_t *mutex;

        if ((mutex = pool->parent->mutex) != NULL)
            apr_thread_mutex_lock(mutex);
#endif /* APR_HAS_THREADS */

        if ((*pool->ref = pool->sibling) != NULL)
            pool->sibling->ref = pool->ref;

#if APR_HAS_THREADS
        if (mutex != NULL)
            apr_thread_mutex_unlock(mutex);
#endif /* APR_HAS_THREADS */
    }

    if (pool->allocator != NULL
        && apr_allocator_owner_get(pool->allocator) == pool) {
        apr_allocator_destroy(pool->allocator);
    }

    /* Free the pool itself */
    free(pool);
}

APR_DECLARE(void) apr_pool_destroy_debug(apr_pool_t *pool,
                                         const char *file_line)
{
    if (pool->joined) {
        /* Joined pools must not be explicitly destroyed; the caller
         * has broken the guarantee. */
#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL)
        apr_pool_log_event(pool, "LIFE",
                           __FILE__ ":apr_pool_destroy abort on joined", 0);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE_ALL) */

        abort();
    }

    pool_destroy_debug(pool, file_line);
}

APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool,
                                                   apr_pool_t *parent,
                                                   apr_abortfunc_t abort_fn,
                                                   apr_allocator_t *allocator,
                                                   const char *file_line)
{
    apr_pool_t *pool;

    *newpool = NULL;

    if (!parent)
        parent = global_pool;
    else {
        apr_pool_check_integrity(parent);

        if (!allocator)
            allocator = parent->allocator;
    }

    if (!abort_fn && parent)
        abort_fn = parent->abort_fn;

    if ((pool = malloc(SIZEOF_POOL_T)) == NULL) {
        if (abort_fn)
            abort_fn(APR_ENOMEM);

        return APR_ENOMEM;
    }

    memset(pool, 0, SIZEOF_POOL_T);

    pool->allocator = allocator;
    pool->abort_fn = abort_fn;
    pool->tag = file_line;
    pool->file_line = file_line;

    if ((pool->parent = parent) != NULL) {
#if APR_HAS_THREADS
        if (parent->mutex)
            apr_thread_mutex_lock(parent->mutex);
#endif /* APR_HAS_THREADS */
        if ((pool->sibling = parent->child) != NULL)
            pool->sibling->ref = &pool->sibling;

        parent->child = pool;
        pool->ref = &parent->child;

#if APR_HAS_THREADS
        if (parent->mutex)
            apr_thread_mutex_unlock(parent->mutex);
#endif /* APR_HAS_THREADS */
    }
    else {
        pool->sibling = NULL;
        pool->ref = NULL;
    }

#if APR_HAS_THREADS
    pool->owner = apr_os_thread_current();
#endif /* APR_HAS_THREADS */
#if defined(NETWARE)
    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
#endif /* defined(NETWARE) */

    if (parent == NULL || parent->allocator != allocator) {
#if APR_HAS_THREADS
        apr_status_t rv;

        /* No matter what the creation flags say, always create
         * a lock.  Without it integrity_check and apr_pool_num_bytes
         * blow up (because they traverse pools child lists that
         * possibly belong to another thread, in combination with
         * the pool having no lock).  However, this might actually
         * hide problems like creating a child pool of a pool
         * belonging to another thread.
         */
        if ((rv = apr_thread_mutex_create(&pool->mutex,
                                          APR_THREAD_MUTEX_NESTED,
                                          pool)) != APR_SUCCESS) {
            free(pool);
            return rv;
        }
#endif /* APR_HAS_THREADS */
    }
    else {
#if APR_HAS_THREADS
        pool->mutex = parent->mutex;
#endif /* APR_HAS_THREADS */
    }

    *newpool = pool;

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
    apr_pool_log_event(pool, "CREATE", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */

    return APR_SUCCESS;
}

APR_DECLARE(apr_status_t) apr_pool_create_core_ex_debug(apr_pool_t **newpool,
                                                        apr_abortfunc_t abort_fn,
                                                        apr_allocator_t *allocator,
                                                        const char *file_line)
{
    return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn, allocator,
                                              file_line);
}

APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex_debug(apr_pool_t **newpool,
                                                             apr_abortfunc_t abort_fn,
                                                             apr_allocator_t *allocator,
                                                             const char *file_line)
{
    apr_pool_t *pool;
    apr_allocator_t *pool_allocator;

    *newpool = NULL;

    if ((pool = malloc(SIZEOF_POOL_T)) == NULL) {
        if (abort_fn)
            abort_fn(APR_ENOMEM);

        return APR_ENOMEM;
    }

    memset(pool, 0, SIZEOF_POOL_T);

    pool->abort_fn = abort_fn;
    pool->tag = file_line;
    pool->file_line = file_line;

#if APR_HAS_THREADS
    pool->owner = apr_os_thread_current();
#endif /* APR_HAS_THREADS */
#if defined(NETWARE)
    pool->owner_proc = (apr_os_proc_t)getnlmhandle();
#endif /* defined(NETWARE) */

    if ((pool_allocator = allocator) == NULL) {
        apr_status_t rv;

        if ((rv = apr_allocator_create(&pool_allocator)) != APR_SUCCESS) {
            if (abort_fn)
                abort_fn(rv);
            return rv;
        }
        pool_allocator->owner = pool;
    }
    pool->allocator = pool_allocator;

    if (pool->allocator != allocator) {
#if APR_HAS_THREADS
        apr_status_t rv;

        /* No matter what the creation flags say, always create
         * a lock.  Without it integrity_check and apr_pool_num_bytes
         * blow up (because they traverse pools child lists that
         * possibly belong to another thread, in combination with
         * the pool having no lock).  However, this might actually
         * hide problems like creating a child pool of a pool
         * belonging to another thread.
         */
        if ((rv = apr_thread_mutex_create(&pool->mutex,
                                          APR_THREAD_MUTEX_NESTED,
                                          pool)) != APR_SUCCESS) {
            free(pool);
            return rv;
        }
#endif /* APR_HAS_THREADS */
    }

    *newpool = pool;

#if (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE)
    apr_pool_log_event(pool, "CREATEU", file_line, 1);
#endif /* (APR_POOL_DEBUG & APR_POOL_DEBUG_VERBOSE) */

    return APR_SUCCESS;
}
2141 * "Print" functions (debug)
2144 struct psprintf_data {
2145 apr_vformatter_buff_t vbuff;
2150 static int psprintf_flush(apr_vformatter_buff_t *vbuff)
2152 struct psprintf_data *ps = (struct psprintf_data *)vbuff;
2155 size = ps->vbuff.curpos - ps->mem;
2158 if ((ps->mem = realloc(ps->mem, ps->size)) == NULL)
2161 ps->vbuff.curpos = ps->mem + size;
2162 ps->vbuff.endpos = ps->mem + ps->size - 1;
2167 APR_DECLARE(char *) apr_pvsprintf(apr_pool_t *pool, const char *fmt, va_list ap)
2169 struct psprintf_data ps;
2172 apr_pool_check_integrity(pool);
2175 ps.mem = malloc(ps.size);
2176 ps.vbuff.curpos = ps.mem;
2178 /* Save a byte for the NUL terminator */
2179 ps.vbuff.endpos = ps.mem + ps.size - 1;
2181 if (apr_vformatter(psprintf_flush, &ps.vbuff, fmt, ap) == -1) {
2183 pool->abort_fn(APR_ENOMEM);
2188 *ps.vbuff.curpos++ = '\0';
2194 if (node == NULL || node->index == 64) {
2195 if ((node = malloc(SIZEOF_DEBUG_NODE_T)) == NULL) {
2197 pool->abort_fn(APR_ENOMEM);
2202 node->next = pool->nodes;
2207 node->beginp[node->index] = ps.mem;
2208 node->endp[node->index] = ps.mem + ps.size;

APR_DECLARE(void) apr_pool_join(apr_pool_t *p, apr_pool_t *sub)
{
    if (sub->parent != p) {
        abort();
    }

    sub->joined = p;
}

static int pool_find(apr_pool_t *pool, void *data)
{
    void **pmem = (void **)data;
    debug_node_t *node;
    apr_size_t index;

    node = pool->nodes;

    while (node) {
        for (index = 0; index < node->index; index++) {
            if (node->beginp[index] <= *pmem
                && node->endp[index] > *pmem) {
                *pmem = pool;
                return 1;
            }
        }

        node = node->next;
    }

    return 0;
}

APR_DECLARE(apr_pool_t *) apr_pool_find(const void *mem)
{
    void *pool = (void *)mem;

    if (apr_pool_walk_tree(global_pool, pool_find, &pool))
        return pool;

    return NULL;
}

static int pool_num_bytes(apr_pool_t *pool, void *data)
{
    apr_size_t *psize = (apr_size_t *)data;
    debug_node_t *node;
    apr_size_t index;

    node = pool->nodes;

    while (node) {
        for (index = 0; index < node->index; index++) {
            *psize += (char *)node->endp[index] - (char *)node->beginp[index];
        }

        node = node->next;
    }

    return 0;
}

APR_DECLARE(apr_size_t) apr_pool_num_bytes(apr_pool_t *pool, int recurse)
{
    apr_size_t size = 0;

    if (!recurse) {
        pool_num_bytes(pool, &size);

        return size;
    }

    apr_pool_walk_tree(pool, pool_num_bytes, &size);

    return size;
}

APR_DECLARE(void) apr_pool_lock(apr_pool_t *pool, int flag)
{
}

#endif /* !APR_POOL_DEBUG */

#if defined(NETWARE)
void netware_pool_proc_cleanup(void)
{
    apr_pool_t *pool = global_pool->child;
    apr_os_proc_t owner_proc = (apr_os_proc_t)getnlmhandle();

    while (pool) {
        if (pool->owner_proc == owner_proc) {
            apr_pool_destroy(pool);
            pool = global_pool->child;
        }
        else {
            pool = pool->sibling;
        }
    }
    return;
}
#endif /* defined(NETWARE) */
2323 * "Print" functions (common)
2326 APR_DECLARE_NONSTD(char *) apr_psprintf(apr_pool_t *p, const char *fmt, ...)
2332 res = apr_pvsprintf(p, fmt, ap);
2341 APR_DECLARE(void) apr_pool_abort_set(apr_abortfunc_t abort_fn,
2344 pool->abort_fn = abort_fn;
2347 APR_DECLARE(apr_abortfunc_t) apr_pool_abort_get(apr_pool_t *pool)
2349 return pool->abort_fn;
2352 APR_DECLARE(apr_pool_t *) apr_pool_parent_get(apr_pool_t *pool)
2355 /* On NetWare, don't return the global_pool, return the application pool
2356 as the top most pool */
2357 if (pool->parent == global_pool)
2361 return pool->parent;
2364 APR_DECLARE(apr_allocator_t *) apr_pool_allocator_get(apr_pool_t *pool)
2366 return pool->allocator;
2369 /* return TRUE if a is an ancestor of b
2370 * NULL is considered an ancestor of all pools
2372 APR_DECLARE(int) apr_pool_is_ancestor(apr_pool_t *a, apr_pool_t *b)
2378 /* Find the pool with the longest lifetime guaranteed by the
2395 APR_DECLARE(void) apr_pool_tag(apr_pool_t *pool, const char *tag)

/*
 * User data management
 */

APR_DECLARE(apr_status_t) apr_pool_userdata_set(const void *data, const char *key,
                                                apr_status_t (*cleanup) (void *),
                                                apr_pool_t *pool)
{
#if APR_POOL_DEBUG
    apr_pool_check_integrity(pool);
#endif /* APR_POOL_DEBUG */

    if (pool->user_data == NULL)
        pool->user_data = apr_hash_make(pool);

    if (apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING) == NULL) {
        char *new_key = apr_pstrdup(pool, key);
        apr_hash_set(pool->user_data, new_key, APR_HASH_KEY_STRING, data);
    }
    else {
        apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING, data);
    }

    if (cleanup)
        apr_pool_cleanup_register(pool, data, cleanup, cleanup);

    return APR_SUCCESS;
}

APR_DECLARE(apr_status_t) apr_pool_userdata_setn(const void *data,
                                                 const char *key,
                                                 apr_status_t (*cleanup)(void *),
                                                 apr_pool_t *pool)
{
#if APR_POOL_DEBUG
    apr_pool_check_integrity(pool);
#endif /* APR_POOL_DEBUG */

    if (pool->user_data == NULL)
        pool->user_data = apr_hash_make(pool);

    apr_hash_set(pool->user_data, key, APR_HASH_KEY_STRING, data);

    if (cleanup)
        apr_pool_cleanup_register(pool, data, cleanup, cleanup);

    return APR_SUCCESS;
}

APR_DECLARE(apr_status_t) apr_pool_userdata_get(void **data, const char *key,
                                                apr_pool_t *pool)
{
#if APR_POOL_DEBUG
    apr_pool_check_integrity(pool);
#endif /* APR_POOL_DEBUG */

    if (pool->user_data == NULL) {
        *data = NULL;
    }
    else {
        *data = apr_hash_get(pool->user_data, key, APR_HASH_KEY_STRING);
    }

    return APR_SUCCESS;
}
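
/*
 * Usage sketch: stash per-pool data under a string key.  With
 * apr_pool_userdata_setn() the key is not copied, so it must outlive
 * the pool (a string literal is fine).
 */
#if 0 /* example only, never compiled */
void *val;

apr_pool_userdata_setn("on", "mymod:trace", apr_pool_cleanup_null, pool);
apr_pool_userdata_get(&val, "mymod:trace", pool);  /* val is now "on" */
#endif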

/*
 * Cleanup
 */

struct cleanup_t {
    struct cleanup_t *next;
    const void *data;
    apr_status_t (*plain_cleanup_fn)(void *data);
    apr_status_t (*child_cleanup_fn)(void *data);
};

APR_DECLARE(void) apr_pool_cleanup_register(apr_pool_t *p, const void *data,
                                            apr_status_t (*plain_cleanup_fn)(void *data),
                                            apr_status_t (*child_cleanup_fn)(void *data))
{
    cleanup_t *c;

#if APR_POOL_DEBUG
    apr_pool_check_integrity(p);
#endif /* APR_POOL_DEBUG */

    if (p != NULL) {
        if (p->free_cleanups) {
            /* reuse a cleanup structure */
            c = p->free_cleanups;
            p->free_cleanups = c->next;
        }
        else {
            c = apr_palloc(p, sizeof(cleanup_t));
        }
        c->data = data;
        c->plain_cleanup_fn = plain_cleanup_fn;
        c->child_cleanup_fn = child_cleanup_fn;
        c->next = p->cleanups;
        p->cleanups = c;
    }
}

APR_DECLARE(void) apr_pool_pre_cleanup_register(apr_pool_t *p, const void *data,
                                                apr_status_t (*plain_cleanup_fn)(void *data))
{
    cleanup_t *c;

#if APR_POOL_DEBUG
    apr_pool_check_integrity(p);
#endif /* APR_POOL_DEBUG */

    if (p != NULL) {
        if (p->free_cleanups) {
            /* reuse a cleanup structure */
            c = p->free_cleanups;
            p->free_cleanups = c->next;
        }
        else {
            c = apr_palloc(p, sizeof(cleanup_t));
        }
        c->data = data;
        c->plain_cleanup_fn = plain_cleanup_fn;
        c->next = p->pre_cleanups;
        p->pre_cleanups = c;
    }
}

APR_DECLARE(void) apr_pool_cleanup_kill(apr_pool_t *p, const void *data,
                                        apr_status_t (*cleanup_fn)(void *))
{
    cleanup_t *c, **lastp;

#if APR_POOL_DEBUG
    apr_pool_check_integrity(p);
#endif /* APR_POOL_DEBUG */

    if (p == NULL)
        return;

    c = p->cleanups;
    lastp = &p->cleanups;
    while (c) {
        /* Some cheap loop detection to catch a corrupt list: */
        if (c == c->next
            || (c->next && c == c->next->next)
            || (c->next && c->next->next && c == c->next->next->next)) {
            abort();
        }

        if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
            *lastp = c->next;
            /* move to freelist */
            c->next = p->free_cleanups;
            p->free_cleanups = c;
            break;
        }

        lastp = &c->next;
        c = c->next;
    }

    /* Remove any pre-cleanup as well */
    c = p->pre_cleanups;
    lastp = &p->pre_cleanups;
    while (c) {
        /* Some cheap loop detection to catch a corrupt list: */
        if (c == c->next
            || (c->next && c == c->next->next)
            || (c->next && c->next->next && c == c->next->next->next)) {
            abort();
        }

        if (c->data == data && c->plain_cleanup_fn == cleanup_fn) {
            *lastp = c->next;
            /* move to freelist */
            c->next = p->free_cleanups;
            p->free_cleanups = c;
            break;
        }

        lastp = &c->next;
        c = c->next;
    }
}

APR_DECLARE(void) apr_pool_child_cleanup_set(apr_pool_t *p, const void *data,
                                             apr_status_t (*plain_cleanup_fn)(void *),
                                             apr_status_t (*child_cleanup_fn)(void *))
{
    cleanup_t *c;

#if APR_POOL_DEBUG
    apr_pool_check_integrity(p);
#endif /* APR_POOL_DEBUG */

    if (p == NULL)
        return;

    c = p->cleanups;
    while (c) {
        if (c->data == data && c->plain_cleanup_fn == plain_cleanup_fn) {
            c->child_cleanup_fn = child_cleanup_fn;
            break;
        }

        c = c->next;
    }
}

APR_DECLARE(apr_status_t) apr_pool_cleanup_run(apr_pool_t *p, void *data,
                                               apr_status_t (*cleanup_fn)(void *))
{
    apr_pool_cleanup_kill(p, data, cleanup_fn);
    return (*cleanup_fn)(data);
}
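
/*
 * Usage sketch: tie a resource to the pool's lifetime.  close_fd is a
 * hypothetical cleanup matching apr_status_t (*)(void *);
 * apr_pool_cleanup_null is passed as the child cleanup so nothing runs
 * in forked-and-exec'd children.
 */
#if 0 /* example only, never compiled */
static apr_status_t close_fd(void *data)
{
    close(*(int *)data);
    return APR_SUCCESS;
}

apr_pool_cleanup_register(pool, &fd, close_fd, apr_pool_cleanup_null);
/* run it early and unregister in one step: */
apr_pool_cleanup_run(pool, &fd, close_fd);
#endif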

static void run_cleanups(cleanup_t **cref)
{
    cleanup_t *c = *cref;

    while (c) {
        *cref = c->next;
        (*c->plain_cleanup_fn)((void *)c->data);
        c = *cref;
    }
}

#if !defined(WIN32) && !defined(OS2)

static void run_child_cleanups(cleanup_t **cref)
{
    cleanup_t *c = *cref;

    while (c) {
        *cref = c->next;
        (*c->child_cleanup_fn)((void *)c->data);
        c = *cref;
    }
}

static void cleanup_pool_for_exec(apr_pool_t *p)
{
    run_child_cleanups(&p->cleanups);

    for (p = p->child; p; p = p->sibling)
        cleanup_pool_for_exec(p);
}

APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
{
    cleanup_pool_for_exec(global_pool);
}

#else /* !defined(WIN32) && !defined(OS2) */

APR_DECLARE(void) apr_pool_cleanup_for_exec(void)
{
    /*
     * Don't need to do anything on NT or OS/2, because
     * these platforms will spawn the new process - not
     * fork for exec.  All handles that are not inheritable
     * will be automatically closed.  The only problem is
     * with file handles that are open, but there isn't
     * much that can be done about that (except if the
     * child decides to go out and close them, or the
     * developer quits opening them shared)
     */
    return;
}

#endif /* !defined(WIN32) && !defined(OS2) */

APR_DECLARE_NONSTD(apr_status_t) apr_pool_cleanup_null(void *data)
{
    /* do nothing cleanup routine */
    return APR_SUCCESS;
}

/* Subprocesses don't use the generic cleanup interface because
 * we don't want multiple subprocesses to result in multiple
 * three-second pauses; the subprocesses have to be "freed" all
 * at once.  If other resources are introduced with the same property,
 * we might want to fold support for that into the generic interface.
 * For now, it's a special case.
 */
APR_DECLARE(void) apr_pool_note_subprocess(apr_pool_t *pool, apr_proc_t *proc,
                                           apr_kill_conditions_e how)
{
    struct process_chain *pc = apr_palloc(pool, sizeof(struct process_chain));

    pc->proc = proc;
    pc->kill_how = how;
    pc->next = pool->subprocesses;
    pool->subprocesses = pc;
}

static void free_proc_chain(struct process_chain *procs)
{
    /* Dispose of the subprocesses we've spawned off in the course of
     * whatever it was we're cleaning up now.  This may involve killing
     * some of them off...
     */
    struct process_chain *pc;
    int need_timeout = 0;
    apr_time_t timeout_interval;

    if (!procs)
        return; /* No work.  Whew! */

    /* First, check to see if we need to do the SIGTERM, sleep, SIGKILL
     * dance with any of the processes we're cleaning up.  If we've got
     * any kill-on-sight subprocesses, ditch them now as well, so they
     * don't waste any more cycles doing whatever it is that they shouldn't
     * be doing anyway.
     */

#ifndef NEED_WAITPID
    /* Pick up all defunct processes */
    for (pc = procs; pc; pc = pc->next) {
        if (apr_proc_wait(pc->proc, NULL, NULL, APR_NOWAIT) != APR_CHILD_NOTDONE)
            pc->kill_how = APR_KILL_NEVER;
    }
#endif /* !defined(NEED_WAITPID) */

    for (pc = procs; pc; pc = pc->next) {
#ifndef WIN32
        if ((pc->kill_how == APR_KILL_AFTER_TIMEOUT)
            || (pc->kill_how == APR_KILL_ONLY_ONCE)) {
            /*
             * Subprocess may be dead already.  Only need the timeout if not.
             * Note: apr_proc_kill on Windows is TerminateProcess(), which is
             * similar to a SIGKILL, so always give the process a timeout
             * under Windows before killing it.
             */
            if (apr_proc_kill(pc->proc, SIGTERM) == APR_SUCCESS)
                need_timeout = 1;
        }
        else if (pc->kill_how == APR_KILL_ALWAYS) {
#else /* WIN32 knows only one fast, clean method of killing processes today */
        if (pc->kill_how != APR_KILL_NEVER) {
            need_timeout = 1;
            pc->kill_how = APR_KILL_ALWAYS;
#endif
            apr_proc_kill(pc->proc, SIGKILL);
        }
    }

    /* Sleep only if we have to.  The sleep algorithm grows
     * by a factor of two on each iteration.  TIMEOUT_INTERVAL
     * is equal to TIMEOUT_USECS / 64.
     */
    if (need_timeout) {
        timeout_interval = TIMEOUT_INTERVAL;
        apr_sleep(timeout_interval);

        do {
            /* check the status of the subprocesses */
            need_timeout = 0;
            for (pc = procs; pc; pc = pc->next) {
                if (pc->kill_how == APR_KILL_AFTER_TIMEOUT) {
                    if (apr_proc_wait(pc->proc, NULL, NULL, APR_NOWAIT)
                            == APR_CHILD_NOTDONE)
                        need_timeout = 1; /* subprocess is still active */
                    else
                        pc->kill_how = APR_KILL_NEVER; /* subprocess has exited */
                }
            }
            if (need_timeout) {
                if (timeout_interval >= TIMEOUT_USECS) {
                    break;
                }
                apr_sleep(timeout_interval);
                timeout_interval *= 2;
            }
        } while (need_timeout);
    }

    /* OK, the scripts we just timed out for have had a chance to clean up
     * --- now, just get rid of them, and also clean up the system accounting
     * bits.
     */
    for (pc = procs; pc; pc = pc->next) {
        if (pc->kill_how == APR_KILL_AFTER_TIMEOUT)
            apr_proc_kill(pc->proc, SIGKILL);
    }

    /* Now wait for all the signaled processes to die */
    for (pc = procs; pc; pc = pc->next) {
        if (pc->kill_how != APR_KILL_NEVER)
            (void)apr_proc_wait(pc->proc, NULL, NULL, APR_WAIT);
    }
}

#if !APR_POOL_DEBUG
/*
 * Pool creation/destruction stubs, for people who are running
 * mixed release/debug environments.
 */
APR_DECLARE(void *) apr_palloc_debug(apr_pool_t *pool, apr_size_t size,
                                     const char *file_line)
{
    return apr_palloc(pool, size);
}

APR_DECLARE(void *) apr_pcalloc_debug(apr_pool_t *pool, apr_size_t size,
                                      const char *file_line)
{
    return apr_pcalloc(pool, size);
}

APR_DECLARE(void) apr_pool_clear_debug(apr_pool_t *pool,
                                       const char *file_line)
{
    apr_pool_clear(pool);
}

APR_DECLARE(void) apr_pool_destroy_debug(apr_pool_t *pool,
                                         const char *file_line)
{
    apr_pool_destroy(pool);
}

APR_DECLARE(apr_status_t) apr_pool_create_ex_debug(apr_pool_t **newpool,
                                                   apr_pool_t *parent,
                                                   apr_abortfunc_t abort_fn,
                                                   apr_allocator_t *allocator,
                                                   const char *file_line)
{
    return apr_pool_create_ex(newpool, parent, abort_fn, allocator);
}

APR_DECLARE(apr_status_t) apr_pool_create_core_ex_debug(apr_pool_t **newpool,
                                                        apr_abortfunc_t abort_fn,
                                                        apr_allocator_t *allocator,
                                                        const char *file_line)
{
    return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
}

APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex_debug(apr_pool_t **newpool,
                                                             apr_abortfunc_t abort_fn,
                                                             apr_allocator_t *allocator,
                                                             const char *file_line)
{
    return apr_pool_create_unmanaged_ex(newpool, abort_fn, allocator);
}

#else /* APR_POOL_DEBUG */

#undef apr_palloc
APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size);

APR_DECLARE(void *) apr_palloc(apr_pool_t *pool, apr_size_t size)
{
    return apr_palloc_debug(pool, size, "undefined");
}

#undef apr_pcalloc
APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size);

APR_DECLARE(void *) apr_pcalloc(apr_pool_t *pool, apr_size_t size)
{
    return apr_pcalloc_debug(pool, size, "undefined");
}

#undef apr_pool_clear
APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool);

APR_DECLARE(void) apr_pool_clear(apr_pool_t *pool)
{
    apr_pool_clear_debug(pool, "undefined");
}

#undef apr_pool_destroy
APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool);

APR_DECLARE(void) apr_pool_destroy(apr_pool_t *pool)
{
    apr_pool_destroy_debug(pool, "undefined");
}

#undef apr_pool_create_ex
APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
                                             apr_pool_t *parent,
                                             apr_abortfunc_t abort_fn,
                                             apr_allocator_t *allocator);

APR_DECLARE(apr_status_t) apr_pool_create_ex(apr_pool_t **newpool,
                                             apr_pool_t *parent,
                                             apr_abortfunc_t abort_fn,
                                             apr_allocator_t *allocator)
{
    return apr_pool_create_ex_debug(newpool, parent,
                                    abort_fn, allocator,
                                    "undefined");
}

#undef apr_pool_create_core_ex
APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator);

APR_DECLARE(apr_status_t) apr_pool_create_core_ex(apr_pool_t **newpool,
                                                  apr_abortfunc_t abort_fn,
                                                  apr_allocator_t *allocator)
{
    return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn,
                                              allocator, "undefined");
}

#undef apr_pool_create_unmanaged_ex
APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
                                                       apr_abortfunc_t abort_fn,
                                                       apr_allocator_t *allocator);

APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
                                                       apr_abortfunc_t abort_fn,
                                                       apr_allocator_t *allocator)
{
    return apr_pool_create_unmanaged_ex_debug(newpool, abort_fn,
                                              allocator, "undefined");
}

#endif /* APR_POOL_DEBUG */