/* Copyright 2002-2004 Justin Erenkrantz and Greg Stein
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdlib.h>

#include <apr_pools.h>

#include "serf.h"
#include "serf_bucket_util.h"

typedef struct node_header_t {
    apr_size_t size;
    union {
        struct node_header_t *next;  /* if size == 0 (freed/inactive) */
        /* no data if size == STANDARD_NODE_SIZE */
        apr_memnode_t *memnode;      /* if size > STANDARD_NODE_SIZE */
    } u;
} node_header_t;

/* The size of a node_header_t, properly aligned. Note that (normally)
 * this macro will round the size to a multiple of 8 bytes. Keep this in
 * mind when altering the node_header_t structure. Also, keep in mind that
 * node_header_t is an overhead for every allocation performed through
 * the serf_bucket_mem_alloc() function.
 */
#define SIZEOF_NODE_HEADER_T  APR_ALIGN_DEFAULT(sizeof(node_header_t))

/* STANDARD_NODE_SIZE is manually set to an allocation size that will
 * capture most allocations performed via this API. It must be "large
 * enough" to avoid lots of spillage to allocating directly from the
 * apr_allocator associated with the bucket allocator. The apr_allocator
 * has a minimum size of 8k, which can be expensive if you missed the
 * STANDARD_NODE_SIZE by just a few bytes.
 */
/* ### we should define some rules or ways to determine how to derive
 * ### a "good" value for this. probably log some stats on allocs, then
 * ### analyze them for size "misses". then find the balance point between
 * ### wasted space due to min-size allocator, and wasted-space due to
 * ### size-spill to the 8k minimum.
 */
#define STANDARD_NODE_SIZE 128

/* When allocating a block of memory from the allocator, we should go for
 * an 8k block, minus the overhead that the allocator needs.
 */
#define ALLOC_AMT (8192 - APR_MEMNODE_T_SIZE)
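
/* Illustrative arithmetic (not from the original source): subtracting
 * APR_MEMNODE_T_SIZE means the request plus the apr_allocator's own header
 * comes out to exactly 8192 bytes, its minimum chunk size. Assuming a header
 * of roughly 40 bytes on a 64-bit build (an assumption; the real value is
 * platform-dependent), each block then subdivides into
 *
 *     (8192 - ~40) / STANDARD_NODE_SIZE  ~=  63 nodes of 128 bytes
 *
 * with a small tail left unused.
 */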
/* Define DEBUG_DOUBLE_FREE if you're interested in debugging double-free
 * calls to serf_bucket_mem_free().
 */
#define DEBUG_DOUBLE_FREE

typedef struct {
    const serf_bucket_t *bucket;
    apr_status_t last;
} read_status_t;

#define TRACK_BUCKET_COUNT 100  /* track N buckets' status */

typedef struct {
    int num_used;
    int next_index;  /* info[] is a ring. next bucket goes at this idx. */
    read_status_t info[TRACK_BUCKET_COUNT];
} track_state_t;

struct serf_bucket_alloc_t {
    apr_pool_t *pool;
    apr_allocator_t *allocator;
    int own_allocator;
    serf_unfreed_func_t unfreed;
    void *unfreed_baton;
    apr_uint32_t num_alloc;
    node_header_t *freelist;  /* free STANDARD_NODE_SIZE blocks */
    apr_memnode_t *blocks;    /* blocks we allocated for subdividing */
    track_state_t *track;
};
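
/* Structure sketch (illustrative, not from the original source): "blocks" is
 * a singly linked list of ~8k apr_memnode_t chunks that get carved into
 * STANDARD_NODE_SIZE nodes; "freelist" threads through nodes handed back via
 * serf_bucket_mem_free() so they can be reissued without touching "blocks".
 *
 *     blocks   -> [8k block] -> [8k block] -> NULL
 *     freelist -> node -> node -> NULL      (each node lives inside a block)
 */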
/* ==================================================================== */

static apr_status_t allocator_cleanup(void *data)
{
    serf_bucket_alloc_t *allocator = data;

    /* If we allocated anything, give it back. */
    if (allocator->blocks) {
        apr_allocator_free(allocator->allocator, allocator->blocks);
    }

    /* If we allocated our own allocator (?!), destroy it here. */
    if (allocator->own_allocator) {
        apr_allocator_destroy(allocator->allocator);
    }

    return APR_SUCCESS;
}
serf_bucket_alloc_t *serf_bucket_allocator_create(
    apr_pool_t *pool,
    serf_unfreed_func_t unfreed,
    void *unfreed_baton)
{
    serf_bucket_alloc_t *allocator = apr_pcalloc(pool, sizeof(*allocator));

    allocator->pool = pool;
    allocator->allocator = apr_pool_allocator_get(pool);
    if (allocator->allocator == NULL) {
        /* This most likely means pools are running in debug mode; create
         * our own allocator to manage memory ourselves. */
        apr_allocator_create(&allocator->allocator);
        allocator->own_allocator = 1;
    }
    allocator->unfreed = unfreed;
    allocator->unfreed_baton = unfreed_baton;

#ifdef SERF_DEBUG_BUCKET_USE
    {
        track_state_t *track;

        track = allocator->track = apr_palloc(pool, sizeof(*allocator->track));
        track->next_index = 0;
        track->num_used = 0;
    }
#endif

    /* NOTE: On a fork/exec, the child won't bother cleaning up memory.
             This is just fine... the memory will go away at exec.

       NOTE: If the child will NOT perform an exec, then the parent or
             the child will need to decide who will clean up any
             outstanding connections/buckets (as appropriate). */
    apr_pool_cleanup_register(pool, allocator,
                              allocator_cleanup, apr_pool_cleanup_null);

    return allocator;
}
apr_pool_t *serf_bucket_allocator_get_pool(
    const serf_bucket_alloc_t *allocator)
{
    return allocator->pool;
}
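
/* Illustrative usage sketch (not part of this file): typical lifetime of a
 * bucket allocator as seen by a caller. The pool variable and the 16-byte
 * request are arbitrary; passing NULL for the unfreed callback and baton is
 * a common choice.
 */
#if 0
{
    apr_pool_t *pool;
    serf_bucket_alloc_t *alloc;
    void *mem;

    apr_pool_create(&pool, NULL);
    alloc = serf_bucket_allocator_create(pool, NULL, NULL);

    mem = serf_bucket_mem_alloc(alloc, 16);   /* served from a 128-byte node */
    /* ... use mem ... */
    serf_bucket_mem_free(alloc, mem);         /* node goes onto the freelist */

    apr_pool_destroy(pool);                   /* allocator_cleanup runs here */
}
#endif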
void *serf_bucket_mem_alloc(
    serf_bucket_alloc_t *allocator,
    apr_size_t size)
{
    node_header_t *node;

    ++allocator->num_alloc;

    size += SIZEOF_NODE_HEADER_T;
    if (size <= STANDARD_NODE_SIZE) {
        if (allocator->freelist) {
            /* just pull a node off our freelist */
            node = allocator->freelist;
            allocator->freelist = node->u.next;
#ifdef DEBUG_DOUBLE_FREE
            /* When we free an item, we set its size to zero. Thus, when
             * we return it to the caller, we must ensure the size is set
             * properly.
             */
            node->size = STANDARD_NODE_SIZE;
#endif
        }
        else {
            apr_memnode_t *active = allocator->blocks;

            if (active == NULL
                || active->first_avail + STANDARD_NODE_SIZE >= active->endp) {
                apr_memnode_t *head = allocator->blocks;

                /* ran out of room. grab another block. */
                active = apr_allocator_alloc(allocator->allocator, ALLOC_AMT);
                /* System couldn't provide us with memory. */
                if (active == NULL)
                    return NULL;

                /* link the block into our tracking list */
                allocator->blocks = active;
                active->next = head;
            }
            node = (node_header_t *)active->first_avail;
            node->size = STANDARD_NODE_SIZE;
            active->first_avail += STANDARD_NODE_SIZE;
        }
    }
    else {
        apr_memnode_t *memnode = apr_allocator_alloc(allocator->allocator,
                                                     size);
        if (memnode == NULL)
            return NULL;
        node = (node_header_t *)memnode->first_avail;
        node->u.memnode = memnode;
        node->size = size;
    }

    return ((char *)node) + SIZEOF_NODE_HEADER_T;
}
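
/* Layout sketch (illustrative): each allocation is preceded by its aligned
 * node_header_t; the caller only ever sees the pointer just past the header,
 * and serf_bucket_mem_free() steps back over it to recover the node.
 *
 *     node                        node + SIZEOF_NODE_HEADER_T
 *      |                           |
 *      v                           v
 *      +---------------------------+--------------------------------+
 *      | node_header_t (aligned)   | caller data ...                |
 *      +---------------------------+--------------------------------+
 *      |<-- STANDARD_NODE_SIZE (or the full size, for large requests) -->|
 */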
void *serf_bucket_mem_calloc(
    serf_bucket_alloc_t *allocator,
    apr_size_t size)
{
    void *mem;
    mem = serf_bucket_mem_alloc(allocator, size);
    if (mem == NULL)
        return NULL;
    memset(mem, 0, size);
    return mem;
}
void serf_bucket_mem_free(
    serf_bucket_alloc_t *allocator,
    void *block)
{
    node_header_t *node;

    --allocator->num_alloc;
    node = (node_header_t *)((char *)block - SIZEOF_NODE_HEADER_T);

    if (node->size == STANDARD_NODE_SIZE) {
        /* put the node onto our free list */
        node->u.next = allocator->freelist;
        allocator->freelist = node;
#ifdef DEBUG_DOUBLE_FREE
        /* note that this thing was freed. */
        node->size = 0;
    }
    else if (node->size == 0) {
        /* damn thing was freed already. */
        abort();
#endif
    }
    else {
#ifdef DEBUG_DOUBLE_FREE
        /* note that this thing was freed. */
        node->size = 0;
#endif
        /* now free it */
        apr_allocator_free(allocator->allocator, node->u.memnode);
    }
}
/* ==================================================================== */


#ifdef SERF_DEBUG_BUCKET_USE

static read_status_t *find_read_status(
    track_state_t *track,
    const serf_bucket_t *bucket,
    int create_rs)
{
    read_status_t *rs;
    if (track->num_used) {
        int count = track->num_used;
        int idx = track->next_index;
        /* Search backwards. In all likelihood, the bucket which just got
         * read was read very recently.
         */
        while (count-- > 0) {
            if (!idx--) {
                /* assert: track->num_used == TRACK_BUCKET_COUNT */
                idx = track->num_used - 1;
            }
            if ((rs = &track->info[idx])->bucket == bucket) {
                return rs;
            }
        }
    }

    /* Only create a new read_status_t when asked. */
    if (!create_rs)
        return NULL;

    if (track->num_used < TRACK_BUCKET_COUNT) {
        /* We're still filling up the ring. */
        ++track->num_used;
    }
    rs = &track->info[track->next_index];
    rs->bucket = bucket;
    rs->last = APR_SUCCESS;  /* ### the right initial value? */
    if (++track->next_index == TRACK_BUCKET_COUNT)
        track->next_index = 0;

    return rs;
}
#endif /* SERF_DEBUG_BUCKET_USE */

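
/* Illustrative call pattern (an assumption about how these hooks are meant
 * to be used, not taken from this file): a bucket's read function reports
 * each result through serf_debug__record_read(), and the event loop announces
 * each iteration via serf_debug__entered_loop(), e.g.
 *
 *     status = do_the_actual_read(bucket, ...);   (hypothetical helper)
 *     return serf_debug__record_read(bucket, status);
 *
 * With SERF_DEBUG_BUCKET_USE undefined, record_read is a pass-through.
 */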
apr_status_t serf_debug__record_read(
    const serf_bucket_t *bucket,
    apr_status_t status)
{
#ifndef SERF_DEBUG_BUCKET_USE
    return status;
#else
    track_state_t *track = bucket->allocator->track;
    read_status_t *rs = find_read_status(track, bucket, 1);

    /* Validate that the previous status value allowed for another read. */
    if (APR_STATUS_IS_EAGAIN(rs->last) /* ### or APR_EOF? */) {
        /* Somebody read when they weren't supposed to. Bail. */
        abort();
    }

    /* Save the current status for later. */
    rs->last = status;
    return status;
#endif
}
void serf_debug__entered_loop(serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE
    track_state_t *track = allocator->track;
    read_status_t *rs = &track->info[0];

    for ( ; track->num_used; --track->num_used, ++rs ) {
        if (rs->last == APR_SUCCESS) {
            /* Somebody should have read this bucket again. */
            abort();
        }

        /* ### other status values? */
    }

    /* num_used was reset. also need to reset the next index. */
    track->next_index = 0;
#endif
}
void serf_debug__closed_conn(serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE
    /* Just reset the number used so that we don't examine the info[] */
    allocator->track->num_used = 0;
    allocator->track->next_index = 0;
#endif
}
void serf_debug__bucket_destroy(const serf_bucket_t *bucket)
{
#ifdef SERF_DEBUG_BUCKET_USE
    track_state_t *track = bucket->allocator->track;
    read_status_t *rs = find_read_status(track, bucket, 0);

    if (rs != NULL && rs->last != APR_EOF) {
        /* The bucket was destroyed before it was read to completion. */

        /* Special exception for socket buckets. If a connection remains
         * open, they are not read to completion.
         */
        if (SERF_BUCKET_IS_SOCKET(bucket))
            return;
        /* Ditto for SSL Decrypt buckets. */
        if (SERF_BUCKET_IS_SSL_DECRYPT(bucket))
            return;
        /* Ditto for SSL Encrypt buckets. */
        if (SERF_BUCKET_IS_SSL_ENCRYPT(bucket))
            return;
        /* Ditto for barrier buckets. */
        if (SERF_BUCKET_IS_BARRIER(bucket))
            return;

        abort();
    }
#endif
}
void serf_debug__bucket_alloc_check(
    serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE
    if (allocator->num_alloc != 0) {
        abort();
    }
#endif
}