1 /* Licensed to the Apache Software Foundation (ASF) under one or more
2 * contributor license agreements. See the NOTICE file distributed with
3 * this work for additional information regarding copyright ownership.
4 * The ASF licenses this file to You under the Apache License, Version 2.0
5 * (the "License"); you may not use this file except in compliance with
6 * the License. You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19 #include "apr_buckets.h"
20 #include "apr_allocator.h"
21 #include "apr_version.h"
23 #define ALLOC_AMT (8192 - APR_MEMNODE_T_SIZE)
25 typedef struct node_header_t {
27 apr_bucket_alloc_t *alloc;
28 apr_memnode_t *memnode;
29 struct node_header_t *next;
32 #define SIZEOF_NODE_HEADER_T APR_ALIGN_DEFAULT(sizeof(node_header_t))
33 #define SMALL_NODE_SIZE (APR_BUCKET_ALLOC_SIZE + SIZEOF_NODE_HEADER_T)
35 /** A list of free memory from which new buckets or private bucket
36 * structures can be allocated.
38 struct apr_bucket_alloc_t {
40 apr_allocator_t *allocator;
41 node_header_t *freelist;
42 apr_memnode_t *blocks;
45 static apr_status_t alloc_cleanup(void *data)
47 apr_bucket_alloc_t *list = data;
49 apr_allocator_free(list->allocator, list->blocks);
52 if (list->pool && list->allocator != apr_pool_allocator_get(list->pool)) {
53 apr_allocator_destroy(list->allocator);
60 APU_DECLARE_NONSTD(apr_bucket_alloc_t *) apr_bucket_alloc_create(apr_pool_t *p)
62 apr_allocator_t *allocator = apr_pool_allocator_get(p);
63 apr_bucket_alloc_t *list;
66 /* may be NULL for debug mode. */
67 if (allocator == NULL) {
68 if (apr_allocator_create(&allocator) != APR_SUCCESS) {
69 apr_abortfunc_t fn = apr_pool_abort_get(p);
76 list = apr_bucket_alloc_create_ex(allocator);
78 apr_abortfunc_t fn = apr_pool_abort_get(p);
84 apr_pool_cleanup_register(list->pool, list, alloc_cleanup,
85 apr_pool_cleanup_null);
90 APU_DECLARE_NONSTD(apr_bucket_alloc_t *) apr_bucket_alloc_create_ex(
91 apr_allocator_t *allocator)
93 apr_bucket_alloc_t *list;
96 block = apr_allocator_alloc(allocator, ALLOC_AMT);
100 list = (apr_bucket_alloc_t *)block->first_avail;
102 list->allocator = allocator;
103 list->freelist = NULL;
104 list->blocks = block;
105 block->first_avail += APR_ALIGN_DEFAULT(sizeof(*list));
110 APU_DECLARE_NONSTD(void) apr_bucket_alloc_destroy(apr_bucket_alloc_t *list)
113 apr_pool_cleanup_kill(list->pool, list, alloc_cleanup);
116 apr_allocator_free(list->allocator, list->blocks);
119 if (list->pool && list->allocator != apr_pool_allocator_get(list->pool)) {
120 apr_allocator_destroy(list->allocator);
125 APU_DECLARE_NONSTD(apr_size_t) apr_bucket_alloc_aligned_floor(apr_bucket_alloc_t *list,
128 if (size <= SMALL_NODE_SIZE) {
129 size = SMALL_NODE_SIZE;
132 #if APR_VERSION_AT_LEAST(1,6,0)
133 if (size < APR_MEMNODE_T_SIZE) {
134 size = apr_allocator_align(list->allocator, 0);
137 size = apr_allocator_align(list->allocator,
138 size - APR_MEMNODE_T_SIZE);
141 /* Assumes the minimum (default) allocator's boundary of 4K and
142 * minimum (immutable before APR-1.6.x) allocation size of 8K,
143 * hence possibly (yet unlikely) under-estimating the floor...
145 size = APR_ALIGN(size, 4096);
150 size -= APR_MEMNODE_T_SIZE;
152 size -= SIZEOF_NODE_HEADER_T;
156 APU_DECLARE_NONSTD(void *) apr_bucket_alloc(apr_size_t size,
157 apr_bucket_alloc_t *list)
160 apr_memnode_t *active = list->blocks;
163 size += SIZEOF_NODE_HEADER_T;
164 if (size <= SMALL_NODE_SIZE) {
165 if (list->freelist) {
166 node = list->freelist;
167 list->freelist = node->next;
170 endp = active->first_avail + SMALL_NODE_SIZE;
171 if (endp >= active->endp) {
172 list->blocks = apr_allocator_alloc(list->allocator, ALLOC_AMT);
174 list->blocks = active;
177 list->blocks->next = active;
178 active = list->blocks;
179 endp = active->first_avail + SMALL_NODE_SIZE;
181 node = (node_header_t *)active->first_avail;
183 node->memnode = active;
184 node->size = SMALL_NODE_SIZE;
185 active->first_avail = endp;
189 apr_memnode_t *memnode = apr_allocator_alloc(list->allocator, size);
193 node = (node_header_t *)memnode->first_avail;
195 node->memnode = memnode;
198 return ((char *)node) + SIZEOF_NODE_HEADER_T;
#ifdef APR_BUCKET_DEBUG
#if APR_HAVE_STDLIB_H
#include <stdlib.h>
#endif
/* Debug-only double-free detector: abort if the node being freed is
 * already on the freelist. */
static void check_not_already_free(node_header_t *node)
{
    apr_bucket_alloc_t *list = node->alloc;
    node_header_t *curr = list->freelist;

    while (curr) {
        if (node == curr) {
            abort();
        }
        curr = curr->next;
    }
}
#else
/* No-op outside debug builds. */
#define check_not_already_free(node)
#endif
221 APU_DECLARE_NONSTD(void) apr_bucket_free(void *mem)
223 node_header_t *node = (node_header_t *)((char *)mem - SIZEOF_NODE_HEADER_T);
224 apr_bucket_alloc_t *list = node->alloc;
226 if (node->size == SMALL_NODE_SIZE) {
227 check_not_already_free(node);
228 node->next = list->freelist;
229 list->freelist = node;
232 apr_allocator_free(list->allocator, node->memnode);