/* $OpenBSD: monitor_mm.c,v 1.17 2013/05/17 00:13:13 djm Exp $ */
/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "includes.h"

#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <sys/param.h>
#include "openbsd-compat/sys-tree.h"

#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "xmalloc.h"
#include "ssh.h"
#include "log.h"
#include "monitor_mm.h"
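
/*
 * This file implements a simple allocator for an mmap(2)-backed region
 * that the privilege-separation monitor shares with its child.  Free and
 * allocated chunks are tracked in two red-black trees of struct mm_share
 * entries, ordered by start address (mm_compare() below provides the
 * ordering used by both trees).
 */
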
static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	long diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	else if (diff < 0)
		return (-1);
	else
		return (1);
}

RB_GENERATE(mmtree, mm_share, next, mm_compare)
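
/*
 * Creates a new share entry covering [address, address + size) and inserts
 * it into the given tree; calls fatal() if an entry with the same start
 * address is already present.
 */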
static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xmalloc(sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%lu)",
		    mm, tmp2, address, (u_long)size);

	return (tmp);
}
/* Creates a shared memory area of a certain size */
struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xmalloc(sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If the memory map has a mm_master it can be completely
	 * shared including authentication between the child
	 * and the client.
	 */
	mm->mmalloc = mmalloc;

	address = xmmap(size);
	if (address == (void *)MAP_FAILED)
		fatal("mmap(%lu): %s", (u_long)size, strerror(errno));

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}
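
/*
 * Usage sketch (illustrative only, not part of this file's callers): a
 * typical sequence creates one area up front and sub-allocates from it,
 * e.g.
 *
 *	struct mm_master *mm = mm_create(NULL, 65536);
 *	void *p = mm_xmalloc(mm, 128);	(zeroed, fatal() on failure)
 *	mm_free(mm, p);
 *	mm_destroy(mm);
 *
 * The region size given to mm_create() is fixed; mm_malloc() simply
 * returns NULL once no free chunk is large enough.
 */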
/* Frees either the allocated or the free list */
static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	for (mms = RB_ROOT(head); mms; mms = next) {
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			free(mms);
		else
			mm_free(mmalloc, mms);
	}
}
/* Destroys a memory mapped area */
void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
		    strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	if (mm->mmalloc == NULL)
		free(mm);
	else
		mm_free(mm->mmalloc, mm);
}
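
/*
 * Like mm_malloc(), but zeroes the returned memory and calls fatal()
 * instead of returning NULL on failure.
 */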
void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
	memset(address, 0, size);
	return (address);
}
/* Allocates data from a memory mapped area */
void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");

	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/* Does not change order in RB tree */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			free(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}
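
/*
 * Note: mm_malloc() rounds every request up to a multiple of MM_MINSIZE
 * (defined in monitor_mm.h).  For example, assuming MM_MINSIZE is 128,
 * a 100-byte request consumes one 128-byte chunk and a 200-byte request
 * consumes 256 bytes.
 */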
/* Frees memory in a memory mapped area */
void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/* Find previous entry */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			free(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		free(mms);
	else
		mm_free(mm->mmalloc, mms);
}
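
/*
 * Copies every share entry from oldtree into newtree, validating each
 * entry against the old master and allocating the copies from mm's own
 * allocator.
 */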
static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	RB_FOREACH(mms, mmtree, oldtree) {
		/* Check the values */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}
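
/*
 * Re-creates the bookkeeping of *pmm inside a freshly created private
 * allocator: both trees are rebuilt with entries owned by the new
 * allocator, the old allocator is destroyed, and the new pointers are
 * handed back through pmm and pmmalloc.
 */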
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}
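
/*
 * Calls fatal() unless [address, address + size) lies entirely within
 * the area mapped by mm.
 */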
void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (u_char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > (void *)((u_char *)mm->address + mm->size))
		fatal("mm_memvalid: address too large: %p", address);
}