2 * Copyright (c) 2000-2004 Sendmail, Inc. and its suppliers.
5 * By using this file, you agree to the terms and conditions set
6 * forth in the LICENSE file which can be found at the top level of
7 * the sendmail distribution.
11 SM_RCSID("@(#)$Id: rpool.c,v 1.28 2004/08/03 20:44:04 ca Exp $")
15 ** For documentation, see rpool.html
21 #include <sm/varargs.h>
25 #endif /* _FFR_PERF_RPOOL */
27 const char SmRpoolMagic[] = "sm_rpool";
32 char align[SM_ALIGN_SIZE];
35 static char *sm_rpool_allocblock_x __P((SM_RPOOL_T *, size_t));
36 static char *sm_rpool_allocblock __P((SM_RPOOL_T *, size_t));
43 #define BIG_OBJECT_RATIO 10
46 ** SM_RPOOL_ALLOCBLOCK_X -- allocate a new block for an rpool.
49 ** rpool -- rpool to which the block should be added.
50 ** size -- size of block.
56 ** F:sm_heap -- out of memory
/*
**  Allocate one raw block for an rpool: a single heap allocation that
**  holds an SM_POOLHDR_T header followed by `size' usable bytes, linked
**  onto the rpool's block list.  The *_x suffix means out-of-memory is
**  reported by raising the F:sm_heap exception (sm_malloc_x never
**  returns NULL).
**
**  NOTE(review): this extract is missing interior lines (return type,
**  parameter declarations, braces, and the expected
**  `rpool->sm_pools = p;` store that completes the list push) --
**  confirm against the full source file.
*/
60 sm_rpool_allocblock_x(rpool, size)
66 p = sm_malloc_x(sizeof(SM_POOLHDR_T) + size);
67 p->sm_pnext = rpool->sm_pools;
/* Caller receives the usable region just past the pool header. */
69 return (char*) p + sizeof(SM_POOLHDR_T);
73 ** SM_RPOOL_ALLOCBLOCK -- allocate a new block for an rpool.
76 ** rpool -- rpool to which the block should be added.
77 ** size -- size of block.
80 ** Pointer to block, NULL on failure.
/*
**  Non-aborting variant of sm_rpool_allocblock_x: same layout (pool
**  header + `size' bytes in one allocation, pushed onto the rpool's
**  block list), but uses plain sm_malloc, whose failure is reported by
**  returning NULL rather than raising an exception.
**
**  NOTE(review): the `if (p == NULL) return NULL;` check that must
**  follow the sm_malloc call is not visible in this extract (nor are
**  the declarations/braces or the `rpool->sm_pools = p;` store) --
**  confirm against the full source before relying on this fragment.
*/
84 sm_rpool_allocblock(rpool, size)
90 p = sm_malloc(sizeof(SM_POOLHDR_T) + size);
93 p->sm_pnext = rpool->sm_pools;
/* Caller receives the usable region just past the pool header. */
95 return (char*) p + sizeof(SM_POOLHDR_T);
99 ** SM_RPOOL_MALLOC_TAGGED_X -- allocate memory from rpool
102 ** rpool -- rpool from which memory should be allocated;
103 ** can be NULL, use sm_malloc() then.
104 ** size -- size of block.
106 ** line -- line number in file.
107 ** group -- heap group for debugging.
113 ** F:sm_heap -- out of memory
116 ** if size == 0 and the rpool is new (no memory
117 ** allocated yet) NULL is returned!
118 ** We could solve this by
119 ** - wasting 1 byte (size < avail)
120 ** - checking for rpool->sm_poolptr != NULL
121 ** - not asking for 0 sized buffer
/*
**  Allocate `size' bytes from an rpool (exception-raising variant).
**  Fast path: bump-allocate out of the current pool block.  Slow path:
**  grab a dedicated block for "big" objects, or start a fresh pool
**  block and carve the request from its front.  With SM_HEAP_CHECK the
**  file/line/group tagging arguments are kept for heap debugging.
**
**  NOTE(review): this extract is missing interior lines (braces, local
**  declarations such as `ptr', the `return ptr;' statements, and the
**  `if (rpool == NULL)' guard implied by line 141) -- confirm against
**  the full source.
*/
126 sm_rpool_malloc_tagged_x(rpool, size, file, line, group)
132 #else /* SM_HEAP_CHECK */
133 sm_rpool_malloc_x(rpool, size)
136 #endif /* SM_HEAP_CHECK */
/* A NULL rpool degenerates to a plain (tagged) heap allocation. */
141 return sm_malloc_tagged_x(size, file, line, group);
143 /* Ensure that size is properly aligned. */
144 if (size & SM_ALIGN_BITS)
145 size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;
147 /* The common case. This is optimized for speed. */
148 if (size <= rpool->sm_poolavail)
/* Bump-pointer allocation: hand out the front of the current block. */
150 ptr = rpool->sm_poolptr;
151 rpool->sm_poolptr += size;
152 rpool->sm_poolavail -= size;
157 ** The slow case: we need to call malloc.
158 ** The SM_REQUIRE assertion is deferred until now, for speed.
159 ** That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
160 ** so the common case code won't be triggered on a dangling pointer.
163 SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);
166 ** If size > sm_poolsize, then malloc a new block especially for
167 ** this request. Future requests will be allocated from the
170 ** What if the current pool is mostly unallocated, and the current
171 ** request is larger than the available space, but < sm_poolsize?
172 ** If we discard the current pool, and start allocating from a new
173 ** pool, then we will be wasting a lot of space. For this reason,
174 ** we malloc a block just for the current request if size >
175 ** sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
176 ** Thus, the most space that we will waste at the end of a pool
177 ** is sm_bigobjectsize - 1.
/* Big object: dedicated block, current pool block left untouched. */
180 if (size > rpool->sm_bigobjectsize)
182 ++rpool->sm_nbigblocks;
184 #endif /* _FFR_PERF_RPOOL */
185 return sm_rpool_allocblock_x(rpool, size);
/* Normal object: start a fresh pool block and carve from its front. */
187 SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
188 ptr = sm_rpool_allocblock_x(rpool, rpool->sm_poolsize);
189 rpool->sm_poolptr = ptr + size;
190 rpool->sm_poolavail = rpool->sm_poolsize - size;
193 #endif /* _FFR_PERF_RPOOL */
198 ** SM_RPOOL_MALLOC_TAGGED -- allocate memory from rpool
201 ** rpool -- rpool from which memory should be allocated;
202 ** can be NULL, use sm_malloc() then.
203 ** size -- size of block.
205 ** line -- line number in file.
206 ** group -- heap group for debugging.
209 ** Pointer to block, NULL on failure.
212 ** if size == 0 and the rpool is new (no memory
213 ** allocated yet) NULL is returned!
214 ** We could solve this by
215 ** - wasting 1 byte (size < avail)
216 ** - checking for rpool->sm_poolptr != NULL
217 ** - not asking for 0 sized buffer
/*
**  Allocate `size' bytes from an rpool, non-aborting variant: failure
**  is reported by returning NULL instead of raising an exception.
**  Mirrors sm_rpool_malloc_tagged_x line for line, but calls
**  sm_malloc_tagged / sm_rpool_allocblock (the NULL-returning
**  counterparts) on the fallback paths.
**
**  NOTE(review): this extract is missing interior lines -- braces,
**  local declarations, the `return ptr;' statements, and the
**  `if (ptr == NULL) return NULL;' check expected after the
**  sm_rpool_allocblock call at line 284.  Confirm against the full
**  source.
*/
222 sm_rpool_malloc_tagged(rpool, size, file, line, group)
228 #else /* SM_HEAP_CHECK */
229 sm_rpool_malloc(rpool, size)
232 #endif /* SM_HEAP_CHECK */
/* A NULL rpool degenerates to a plain (tagged) heap allocation. */
237 return sm_malloc_tagged(size, file, line, group);
239 /* Ensure that size is properly aligned. */
240 if (size & SM_ALIGN_BITS)
241 size = (size & ~SM_ALIGN_BITS) + SM_ALIGN_SIZE;
243 /* The common case. This is optimized for speed. */
244 if (size <= rpool->sm_poolavail)
/* Bump-pointer allocation: hand out the front of the current block. */
246 ptr = rpool->sm_poolptr;
247 rpool->sm_poolptr += size;
248 rpool->sm_poolavail -= size;
253 ** The slow case: we need to call malloc.
254 ** The SM_REQUIRE assertion is deferred until now, for speed.
255 ** That's okay: we set rpool->sm_poolavail to 0 when we free an rpool,
256 ** so the common case code won't be triggered on a dangling pointer.
259 SM_REQUIRE(rpool->sm_magic == SmRpoolMagic);
262 ** If size > sm_poolsize, then malloc a new block especially for
263 ** this request. Future requests will be allocated from the
266 ** What if the current pool is mostly unallocated, and the current
267 ** request is larger than the available space, but < sm_poolsize?
268 ** If we discard the current pool, and start allocating from a new
269 ** pool, then we will be wasting a lot of space. For this reason,
270 ** we malloc a block just for the current request if size >
271 ** sm_bigobjectsize, where sm_bigobjectsize <= sm_poolsize.
272 ** Thus, the most space that we will waste at the end of a pool
273 ** is sm_bigobjectsize - 1.
/* Big object: dedicated block, current pool block left untouched. */
276 if (size > rpool->sm_bigobjectsize)
279 ++rpool->sm_nbigblocks;
280 #endif /* _FFR_PERF_RPOOL */
281 return sm_rpool_allocblock(rpool, size);
/* Normal object: start a fresh pool block and carve from its front. */
283 SM_ASSERT(rpool->sm_bigobjectsize <= rpool->sm_poolsize);
284 ptr = sm_rpool_allocblock(rpool, rpool->sm_poolsize);
287 rpool->sm_poolptr = ptr + size;
288 rpool->sm_poolavail = rpool->sm_poolsize - size;
291 #endif /* _FFR_PERF_RPOOL */
296 ** SM_RPOOL_NEW_X -- create a new rpool.
299 ** parent -- pointer to parent rpool, can be NULL.
302 ** Pointer to new rpool.
/*
**  Create a new rpool (exception-raising on OOM).  If `parent' is
**  non-NULL the new rpool is attached to it as a resource, so freeing
**  the parent also frees this rpool; sm_parentlink records the slot so
**  the link can be cleared when this rpool is freed first.  All pool
**  and resource-list fields start empty; the first allocation triggers
**  block creation.  Default sizes: one POOLSIZE block minus its header,
**  with the big-object threshold at 1/BIG_OBJECT_RATIO of that.
**
**  NOTE(review): interior lines are missing from this extract (local
**  declaration of `rpool', the `if (parent != NULL)' guard implied by
**  line 313 vs 317, the attach arguments after line 318, and the final
**  `return rpool;') -- confirm against the full source.
*/
306 sm_rpool_new_x(parent)
311 rpool = sm_malloc_x(sizeof(SM_RPOOL_T));
313 rpool->sm_parentlink = NULL;
/* Register ourselves with the parent so parent teardown frees us. */
317 rpool->sm_parentlink = sm_rpool_attach_x(parent,
318 (SM_RPOOL_RFREE_T) sm_rpool_free,
/* Stamp the magic last-initialized fields; checked by SM_REQUIRE_ISA. */
325 rpool->sm_magic = SmRpoolMagic;
327 rpool->sm_poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
328 rpool->sm_bigobjectsize = rpool->sm_poolsize / BIG_OBJECT_RATIO;
329 rpool->sm_poolptr = NULL;
330 rpool->sm_poolavail = 0;
331 rpool->sm_pools = NULL;
/* Resource list starts empty; sm_rpool_attach_x allocates on demand. */
333 rpool->sm_rptr = NULL;
334 rpool->sm_ravail = 0;
335 rpool->sm_rlists = NULL;
/* Performance counters, only present under _FFR_PERF_RPOOL. */
337 rpool->sm_nbigblocks = 0;
338 rpool->sm_npools = 0;
339 #endif /* _FFR_PERF_RPOOL */
345 ** SM_RPOOL_SETSIZES -- set sizes for rpool.
348 ** poolsize -- size of a single rpool block.
349 ** bigobjectsize -- if this size is exceeded, an individual
350 ** block is allocated (must be less or equal poolsize).
/*
**  Tune the rpool's block size and big-object threshold.  A
**  bigobjectsize of 0 selects the default ratio (poolsize /
**  BIG_OBJECT_RATIO).  SM_REQUIRE enforces the invariant
**  bigobjectsize <= poolsize relied on by the allocators.
**
**  NOTE(review): as visible here, line 364 unconditionally overwrites
**  the caller-supplied poolsize with the default; upstream sendmail
**  guards this with `if (poolsize == 0)' on the (missing) line 363,
**  and the SM_REQUIRE also admits poolsize == 0.  This extract is
**  missing those lines plus the parameter declarations and braces --
**  confirm against the full source.
*/
357 sm_rpool_setsizes(rpool, poolsize, bigobjectsize)
360 size_t bigobjectsize;
362 SM_REQUIRE(poolsize >= bigobjectsize);
364 poolsize = POOLSIZE - sizeof(SM_POOLHDR_T);
365 if (bigobjectsize == 0)
366 bigobjectsize = poolsize / BIG_OBJECT_RATIO;
367 rpool->sm_poolsize = poolsize;
368 rpool->sm_bigobjectsize = bigobjectsize;
372 ** SM_RPOOL_FREE -- free an rpool and release all of its resources.
375 ** rpool -- rpool to free.
/*
**  Body of sm_rpool_free: release every attached resource (newest
**  list first), then every memory pool block, unlink from the parent
**  rpool, and poison the header so later use trips an assertion.
**
**  NOTE(review): the function header itself and many interior lines
**  (loop braces, the frees of `rl' and `pp', the sm_syslog call that
**  lines 447-448 are arguments to, and the final free of the rpool
**  struct) are missing from this extract -- confirm against the full
**  source.
*/
385 SM_RLIST_T *rl, *rnext;
386 SM_RESOURCE_T *r, *rmax;
387 SM_POOLLINK_T *pp, *pnext;
393 ** It's important to free the resources before the memory pools,
394 ** because the resource free functions might modify the contents
395 ** of the memory pools.
398 rl = rpool->sm_rlists;
/* First list is partially filled: only entries below sm_rptr are live. */
401 rmax = rpool->sm_rptr;
404 for (r = rl->sm_rvec; r < rmax; ++r)
/* A NULL sm_rfree marks a detached/cancelled resource: skip it. */
406 if (r->sm_rfree != NULL)
407 r->sm_rfree(r->sm_rcontext);
409 rnext = rl->sm_rnext;
/* Subsequent lists are full: scan the whole SM_RLIST_MAX vector. */
414 rmax = &rl->sm_rvec[SM_RLIST_MAX];
419 ** Now free the memory pools.
/* Save sm_pnext before freeing each link: classic safe list walk. */
422 for (pp = rpool->sm_pools; pp != NULL; pp = pnext)
424 pnext = pp->sm_pnext;
429 ** Disconnect rpool from its parent.
432 if (rpool->sm_parentlink != NULL)
433 *rpool->sm_parentlink = NULL;
436 ** Setting these fields to zero means that any future attempt
437 ** to use the rpool after it is freed will cause an assertion failure.
440 rpool->sm_magic = NULL;
441 rpool->sm_poolavail = 0;
442 rpool->sm_ravail = 0;
/* _FFR_PERF_RPOOL: report pools that spilled past a single block. */
445 if (rpool->sm_nbigblocks > 0 || rpool->sm_npools > 1)
447 "perf: rpool=%lx, sm_nbigblocks=%d, sm_npools=%d",
448 (long) rpool, rpool->sm_nbigblocks, rpool->sm_npools);
449 rpool->sm_nbigblocks = 0;
450 rpool->sm_npools = 0;
451 #endif /* _FFR_PERF_RPOOL */
456 ** SM_RPOOL_ATTACH_X -- attach a resource to an rpool.
459 ** rpool -- rpool to which resource should be attached.
460 ** rfree -- function to call when rpool is freed.
461 ** rcontext -- argument for function to call when rpool is freed.
464 ** Pointer to allocated function.
467 ** F:sm_heap -- out of memory
/*
**  Attach a resource to an rpool: record (rfree, rcontext) so that
**  sm_rpool_free will invoke rfree(rcontext).  Resource records live
**  in a chain of fixed-size SM_RLIST_T vectors; a new vector is
**  malloc'd (exception-raising) when the current one is full.  Returns
**  a pointer to the stored sm_rfree slot, which the caller may later
**  NULL out to cancel the callback (see the skip in sm_rpool_free).
**
**  NOTE(review): this extract is missing interior lines -- parameter
**  and local declarations, braces, the expected post-store
**  `rpool->sm_ravail--; rpool->sm_rptr++;' bookkeeping, and the
**  `return a;' -- confirm against the full source.
*/
471 sm_rpool_attach_x(rpool, rfree, rcontext)
473 SM_RPOOL_RFREE_T rfree;
479 SM_REQUIRE_ISA(rpool, SmRpoolMagic);
/* Current vector exhausted (or none yet): push a fresh SM_RLIST_T. */
481 if (rpool->sm_ravail == 0)
483 rl = sm_malloc_x(sizeof(SM_RLIST_T));
484 rl->sm_rnext = rpool->sm_rlists;
485 rpool->sm_rlists = rl;
486 rpool->sm_rptr = rl->sm_rvec;
487 rpool->sm_ravail = SM_RLIST_MAX;
/* Store the callback pair in the next free slot; keep its address. */
490 a = &rpool->sm_rptr->sm_rfree;
491 rpool->sm_rptr->sm_rfree = rfree;
492 rpool->sm_rptr->sm_rcontext = rcontext;
498 #if DO_NOT_USE_STRCPY
500 ** SM_RPOOL_STRDUP_X -- Create a copy of a C string
503 ** rpool -- rpool to use.
504 ** s -- the string to copy.
507 ** pointer to newly allocated string.
/*
**  Duplicate the NUL-terminated string `s' into rpool-owned memory
**  (exception-raising on OOM).  The copy's lifetime is that of the
**  rpool; there is no matching free.  The SM_ASSERT guards against
**  size_t overflow in `l + 1' for a pathologically long string.
**
**  NOTE(review): this extract is missing interior lines -- the
**  declarations of `l' and `n', the `l = strlen(s);' that line 519's
**  assertion presupposes, braces, and the final `return n;' --
**  confirm against the full source.
*/
511 sm_rpool_strdup_x(rpool, s)
519 SM_ASSERT(l + 1 > l);
520 n = sm_rpool_malloc_x(rpool, l + 1);
/* sm_strlcpy bounds the copy and guarantees NUL termination. */
521 sm_strlcpy(n, s, l + 1);
524 #endif /* DO_NOT_USE_STRCPY */