/* ====================================================================
 *    Licensed to the Apache Software Foundation (ASF) under one
 *    or more contributor license agreements.  See the NOTICE file
 *    distributed with this work for additional information
 *    regarding copyright ownership.  The ASF licenses this file
 *    to you under the Apache License, Version 2.0 (the
 *    "License"); you may not use this file except in compliance
 *    with the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing,
 *    software distributed under the License is distributed on an
 *    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 *    KIND, either express or implied.  See the License for the
 *    specific language governing permissions and limitations
 *    under the License.
 * ====================================================================
 */
#include <stdlib.h>

#include <apr_pools.h>

#include "serf.h"
#include "serf_bucket_util.h"

typedef struct node_header_t {
    apr_size_t size;
    union {
        struct node_header_t *next;      /* if size == 0 (freed/inactive) */
        /* no data if size == STANDARD_NODE_SIZE */
        apr_memnode_t *memnode;          /* if size > STANDARD_NODE_SIZE */
    } u;
} node_header_t;

/* The size of a node_header_t, properly aligned. Note that (normally)
 * this macro will round the size to a multiple of 8 bytes. Keep this in
 * mind when altering the node_header_t structure. Also, keep in mind that
 * node_header_t is an overhead for every allocation performed through
 * the serf_bucket_mem_alloc() function.
 */
#define SIZEOF_NODE_HEADER_T  APR_ALIGN_DEFAULT(sizeof(node_header_t))

/* STANDARD_NODE_SIZE is manually set to an allocation size that will
 * capture most allocations performed via this API. It must be "large
 * enough" to avoid lots of spillage to allocating directly from the
 * apr_allocator associated with the bucket allocator. The apr_allocator
 * has a minimum size of 8k, which can be expensive if you missed the
 * STANDARD_NODE_SIZE by just a few bytes.
 */
/* ### we should define some rules or ways to determine how to derive
 * ### a "good" value for this. probably log some stats on allocs, then
 * ### analyze them for size "misses". then find the balance point between
 * ### wasted space due to min-size allocator, and wasted-space due to
 * ### size-spill to the 8k minimum.
 */
#define STANDARD_NODE_SIZE 128

/* When allocating a block of memory from the allocator, we should go for
 * an 8k block, minus the overhead that the allocator needs.
 */
#define ALLOC_AMT (8192 - APR_MEMNODE_T_SIZE)
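
/* A rough worked example (the concrete figures are assumptions for a
 * typical 64-bit build, not guaranteed by this code): node_header_t is
 * 16 bytes and already 8-byte aligned, so SIZEOF_NODE_HEADER_T stays 16.
 * A serf_bucket_mem_alloc() request of up to 128 - 16 = 112 bytes is
 * served from a 128-byte node carved out of an ALLOC_AMT block, while a
 * 113-byte request spills to the apr_allocator, which hands out no less
 * than 8k.
 */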

/* Define DEBUG_DOUBLE_FREE if you're interested in debugging double-free
 * calls to serf_bucket_mem_free().
 */
#define DEBUG_DOUBLE_FREE
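
/* How the check works (see serf_bucket_mem_free() below): freeing a
 * standard-size node zeroes its size field before the node goes back on
 * the freelist, so a second free of the same block sees size == 0 and
 * abort()s instead of corrupting the freelist.
 */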

typedef struct {
    const serf_bucket_t *bucket;
    apr_status_t last;
} read_status_t;

#define TRACK_BUCKET_COUNT 100  /* track N buckets' status */

typedef struct {
    int next_index;    /* info[] is a ring. next bucket goes at this idx. */
    int num_used;
    read_status_t info[TRACK_BUCKET_COUNT];
} track_state_t;
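
/* Note (an observation about find_read_status() below, not a documented
 * guarantee): once the ring is full, new buckets overwrite the oldest
 * slots as next_index wraps, so the tracking is best-effort.
 */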

struct serf_bucket_alloc_t {
    apr_pool_t *pool;
    apr_allocator_t *allocator;
    int own_allocator;

    serf_unfreed_func_t unfreed;
    void *unfreed_baton;

    apr_uint32_t num_alloc;

    node_header_t *freelist;    /* free STANDARD_NODE_SIZE blocks */
    apr_memnode_t *blocks;      /* blocks we allocated for subdividing */

    track_state_t *track;
};

/* ==================================================================== */

static apr_status_t allocator_cleanup(void *data)
{
    serf_bucket_alloc_t *allocator = data;

    /* If we allocated anything, give it back. */
    if (allocator->blocks) {
        apr_allocator_free(allocator->allocator, allocator->blocks);
    }

    /* If we allocated our own allocator (?!), destroy it here. */
    if (allocator->own_allocator) {
        apr_allocator_destroy(allocator->allocator);
    }

    return APR_SUCCESS;
}

serf_bucket_alloc_t *serf_bucket_allocator_create(
    apr_pool_t *pool,
    serf_unfreed_func_t unfreed,
    void *unfreed_baton)
{
    serf_bucket_alloc_t *allocator = apr_pcalloc(pool, sizeof(*allocator));

    allocator->pool = pool;
    allocator->allocator = apr_pool_allocator_get(pool);
    if (allocator->allocator == NULL) {
        /* This most likely means pools are running in debug mode; create our
         * own allocator to deal with memory ourselves. */
        apr_allocator_create(&allocator->allocator);
        allocator->own_allocator = 1;
    }
    allocator->unfreed = unfreed;
    allocator->unfreed_baton = unfreed_baton;

#ifdef SERF_DEBUG_BUCKET_USE
    {
        track_state_t *track;

        track = allocator->track = apr_palloc(pool, sizeof(*allocator->track));
        track->next_index = 0;
        track->num_used = 0;
    }
#endif

    /* NOTE: On a fork/exec, the child won't bother cleaning up memory.
             This is just fine... the memory will go away at exec.

       NOTE: If the child will NOT perform an exec, then the parent or
             the child will need to decide who will clean up any
             outstanding connections/buckets (as appropriate). */
    apr_pool_cleanup_register(pool, allocator,
                              allocator_cleanup, apr_pool_cleanup_null);

    return allocator;
}
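
/* A minimal usage sketch (not part of this file; "pool" is assumed to be
 * an existing apr_pool_t, and NULL/NULL means "no unfreed callback"):
 *
 *     serf_bucket_alloc_t *alloc = serf_bucket_allocator_create(pool,
 *                                                               NULL, NULL);
 *     void *buf = serf_bucket_mem_alloc(alloc, 64);
 *     ...
 *     serf_bucket_mem_free(alloc, buf);
 *
 * The allocator itself is released along with "pool" via the
 * allocator_cleanup() registration above.
 */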

apr_pool_t *serf_bucket_allocator_get_pool(
    const serf_bucket_alloc_t *allocator)
{
    return allocator->pool;
}

void *serf_bucket_mem_alloc(
    serf_bucket_alloc_t *allocator,
    apr_size_t size)
{
    node_header_t *node;

    ++allocator->num_alloc;

    size += SIZEOF_NODE_HEADER_T;
    if (size <= STANDARD_NODE_SIZE) {
        if (allocator->freelist) {
            /* just pull a node off our freelist */
            node = allocator->freelist;
            allocator->freelist = node->u.next;
#ifdef DEBUG_DOUBLE_FREE
            /* When we free an item, we set its size to zero. Thus, when
             * we return it to the caller, we must ensure the size is set
             * properly.
             */
            node->size = STANDARD_NODE_SIZE;
#endif
        }
        else {
            apr_memnode_t *active = allocator->blocks;

            if (active == NULL
                || active->first_avail + STANDARD_NODE_SIZE >= active->endp) {
                apr_memnode_t *head = allocator->blocks;

                /* ran out of room. grab another block. */
                active = apr_allocator_alloc(allocator->allocator, ALLOC_AMT);

                /* System couldn't provide us with memory. */
                if (active == NULL)
                    return NULL;

                /* link the block into our tracking list */
                allocator->blocks = active;
                active->next = head;
            }

            node = (node_header_t *)active->first_avail;
            node->size = STANDARD_NODE_SIZE;
            active->first_avail += STANDARD_NODE_SIZE;
        }
    }
    else {
        apr_memnode_t *memnode = apr_allocator_alloc(allocator->allocator,
                                                     size);

        if (memnode == NULL)
            return NULL;

        node = (node_header_t *)memnode->first_avail;
        node->u.memnode = memnode;
        node->size = size;
    }

    return ((char *)node) + SIZEOF_NODE_HEADER_T;
}
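
/* Layout note: the node_header_t sits immediately in front of the pointer
 * handed back to the caller, which is how serf_bucket_mem_free() below can
 * recover the bookkeeping data by stepping back SIZEOF_NODE_HEADER_T bytes.
 */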

void *serf_bucket_mem_calloc(
    serf_bucket_alloc_t *allocator,
    apr_size_t size)
{
    void *mem;
    mem = serf_bucket_mem_alloc(allocator, size);
    if (mem == NULL)
        return NULL;
    memset(mem, 0, size);
    return mem;
}

void serf_bucket_mem_free(
    serf_bucket_alloc_t *allocator,
    void *block)
{
    node_header_t *node;

    --allocator->num_alloc;

    node = (node_header_t *)((char *)block - SIZEOF_NODE_HEADER_T);

    if (node->size == STANDARD_NODE_SIZE) {
        /* put the node onto our free list */
        node->u.next = allocator->freelist;
        allocator->freelist = node;

#ifdef DEBUG_DOUBLE_FREE
        /* note that this thing was freed. */
        node->size = 0;
    }
    else if (node->size == 0) {
        /* damn thing was freed already. */
        abort();
#endif
    }
    else {
#ifdef DEBUG_DOUBLE_FREE
        /* note that this thing was freed. */
        node->size = 0;
#endif

        apr_allocator_free(allocator->allocator, node->u.memnode);
    }
}
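
/* Free-path note: standard-size nodes are never returned to the
 * apr_allocator here; they stay on the allocator's freelist for reuse and
 * only go back when allocator_cleanup() releases the whole block list.
 * Oversized nodes, by contrast, are handed straight back above.
 */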

/* ==================================================================== */


#ifdef SERF_DEBUG_BUCKET_USE

static read_status_t *find_read_status(
    track_state_t *track,
    const serf_bucket_t *bucket,
    int create_rs)
{
    read_status_t *rs;

    if (track->num_used) {
        int count = track->num_used;
        int idx = track->next_index;

        /* Search backwards. In all likelihood, the bucket which just got
         * read was read very recently.
         */
        while (count-- > 0) {
            if (!idx--) {
                /* assert: track->num_used == TRACK_BUCKET_COUNT */
                idx = track->num_used - 1;
            }
            if ((rs = &track->info[idx])->bucket == bucket) {
                return rs;
            }
        }
    }

    /* Only create a new read_status_t when asked. */
    if (!create_rs)
        return NULL;

    if (track->num_used < TRACK_BUCKET_COUNT) {
        /* We're still filling up the ring. */
        ++track->num_used;
    }

    rs = &track->info[track->next_index];
    rs->bucket = bucket;
    rs->last = APR_SUCCESS;     /* ### the right initial value? */

    if (++track->next_index == TRACK_BUCKET_COUNT)
        track->next_index = 0;

    return rs;
}

#endif /* SERF_DEBUG_BUCKET_USE */
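
/* Overview of the SERF_DEBUG_BUCKET_USE checks below (a summary of this
 * file's own behavior, not an external contract):
 * serf_debug__record_read() remembers the last status returned by each
 * tracked bucket and aborts if a bucket is read again after reporting
 * EAGAIN; serf_debug__entered_loop() aborts if any tracked bucket still
 * shows APR_SUCCESS, i.e. it should have been read again before
 * re-entering the event loop; serf_debug__bucket_destroy() flags buckets
 * destroyed before reaching EOF, with exceptions for socket, SSL, and
 * barrier buckets.
 */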

apr_status_t serf_debug__record_read(
    const serf_bucket_t *bucket,
    apr_status_t status)
{
#ifndef SERF_DEBUG_BUCKET_USE
    return status;
#else

    track_state_t *track = bucket->allocator->track;
    read_status_t *rs = find_read_status(track, bucket, 1);

    /* Validate that the previous status value allowed for another read. */
    if (APR_STATUS_IS_EAGAIN(rs->last) /* ### or APR_EOF? */) {
        /* Somebody read when they weren't supposed to. Bail. */
        abort();
    }

    /* Save the current status for later. */
    rs->last = status;

    return status;
#endif
}

void serf_debug__entered_loop(serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE

    track_state_t *track = allocator->track;
    read_status_t *rs = &track->info[0];

    for ( ; track->num_used; --track->num_used, ++rs ) {
        if (rs->last == APR_SUCCESS) {
            /* Somebody should have read this bucket again. */
            abort();
        }

        /* ### other status values? */
    }

    /* num_used was reset. also need to reset the next index. */
    track->next_index = 0;

#endif
}

void serf_debug__closed_conn(serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE

    /* Just reset the number used so that we don't examine the info[]. */
    allocator->track->num_used = 0;
    allocator->track->next_index = 0;

#endif
}

void serf_debug__bucket_destroy(const serf_bucket_t *bucket)
{
#ifdef SERF_DEBUG_BUCKET_USE

    track_state_t *track = bucket->allocator->track;
    read_status_t *rs = find_read_status(track, bucket, 0);

    if (rs != NULL && rs->last != APR_EOF) {
        /* The bucket was destroyed before it was read to completion. */

        /* Special exception for socket buckets. If a connection remains
         * open, they are not read to completion.
         */
        if (SERF_BUCKET_IS_SOCKET(bucket))
            return;

        /* Ditto for SSL Decrypt buckets. */
        if (SERF_BUCKET_IS_SSL_DECRYPT(bucket))
            return;

        /* Ditto for SSL Encrypt buckets. */
        if (SERF_BUCKET_IS_SSL_ENCRYPT(bucket))
            return;

        /* Ditto for barrier buckets. */
        if (SERF_BUCKET_IS_BARRIER(bucket))
            return;

        abort();
    }

#endif
}

void serf_debug__bucket_alloc_check(
    serf_bucket_alloc_t *allocator)
{
#ifdef SERF_DEBUG_BUCKET_USE
    if (allocator->num_alloc != 0) {
        abort();
    }
#endif
}