/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "namespace.h"
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <pthread.h>
#include "un-namespace.h"

#include "pthread_private.h"
/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};
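/*
 * Note (summarizing _thread_stack_free() below): a struct stack header is
 * never allocated separately; when a stack is cached, the header is written
 * into the top sizeof(struct stack) bytes of the stack region itself.
 */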
/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached to
 * avoid additional complexity managing mmap()ed stack regions.  Spare stacks
 * are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	_dstackq = LIST_HEAD_INITIALIZER(_dstackq);
/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed stack
 * regions.  This list is unordered, since ordering on both stack size and
 * guard size would be more trouble than it's worth.  Stacks are allocated
 * from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	_mstackq = LIST_HEAD_INITIALIZER(_mstackq);
/*
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is simply left unmapped) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    +-----------------------------------+
 *    | stack 2 - _pthread_stack_default  | top of 2nd thread stack
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |       Red Zone                    | red zone for 1st thread
 *    +-----------------------------------+
 *    | stack 1 - _pthread_stack_default  | top of 1st thread stack
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   | (initial value of last_stack)
 *    |       Red Zone                    | red zone for main thread
 *    +-----------------------------------+
 *    | USRSTACK - _pthread_stack_initial | top of main thread stack
 *    |                                   |
 *    |            main stack             |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 */
static void	*last_stack;
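/*
 * Worked example (assumed values, for illustration only): with
 * _usrstack = 0xc0000000, _pthread_stack_initial = 0x100000 and
 * _pthread_guard_default = 0x1000, the first allocation below initializes
 * last_stack to 0xc0000000 - 0x100000 - 0x1000 = 0xbfeff000; the first
 * thread stack then occupies the stack_size bytes ending at that address.
 */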
void *
_thread_stack_alloc(size_t stacksize, size_t guardsize)
{
	void		*stack = NULL;
	struct stack	*spare_stack;
	size_t		stack_size;
	/*
	 * Round up stack size to nearest multiple of _pthread_page_size, so
	 * that mmap() will work.  If the stack size is not an even multiple,
	 * we end up initializing things such that there is unused space above
	 * the beginning of the stack, so the stack sits snugly against its
	 * guard.
	 */
	if (stacksize % _pthread_page_size != 0)
		stack_size = ((stacksize / _pthread_page_size) + 1) *
		    _pthread_page_size;
	else
		stack_size = stacksize;
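	/*
	 * For example (assuming a 4096-byte page size, for illustration
	 * only): a request for 10000 bytes rounds up to 12288 bytes
	 * (3 pages), while a request for 8192 bytes is already an even
	 * multiple and is used unchanged.
	 */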
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if (stack_size == _pthread_stack_default &&
	    guardsize == _pthread_guard_default) {
		/*
		 * Use the garbage collector mutex for synchronization of the
		 * spare stack list.
		 */
		if (_pthread_mutex_lock(&_gc_mutex) != 0)
			PANIC("Cannot lock gc mutex");
		if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			stack = spare_stack->stackaddr;
		}
		/* Unlock the garbage collector mutex. */
		if (_pthread_mutex_unlock(&_gc_mutex) != 0)
			PANIC("Cannot unlock gc mutex");
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stack_size) in the search:
	 */
	else {
		/*
		 * Use the garbage collector mutex for synchronization of the
		 * spare stack list.
		 */
		if (_pthread_mutex_lock(&_gc_mutex) != 0)
			PANIC("Cannot lock gc mutex");
		LIST_FOREACH(spare_stack, &_mstackq, qe) {
			if (spare_stack->stacksize == stack_size &&
			    spare_stack->guardsize == guardsize) {
				/* Use the first matching spare stack. */
				LIST_REMOVE(spare_stack, qe);
				stack = spare_stack->stackaddr;
				break;
			}
		}

		/* Unlock the garbage collector mutex. */
		if (_pthread_mutex_unlock(&_gc_mutex) != 0)
			PANIC("Cannot unlock gc mutex");
	}
	/* Check if a stack was not allocated from a stack cache: */
	if (stack == NULL) {
		if (last_stack == NULL)
			last_stack = _usrstack - _pthread_stack_initial -
			    _pthread_guard_default;
		/* Allocate a new stack. */
		stack = last_stack - stack_size;
		/*
		 * Even if stack allocation fails, we don't want to try to use
		 * this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of
		 * the adjacent thread stack.
		 */
		last_stack -= (stack_size + guardsize);
		/* Map the stack; the guard region above it is left unmapped. */
		if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
		    -1, 0) == MAP_FAILED)
			stack = NULL;
	}

	return (stack);
}
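/*
 * Usage sketch (hypothetical caller, for illustration only; the variable
 * below is not part of this file): a thread creation path would be expected
 * to obtain a stack roughly as follows, treating NULL as resource
 * exhaustion:
 *
 *	void *stackaddr;
 *
 *	if ((stackaddr = _thread_stack_alloc(_pthread_stack_default,
 *	    _pthread_guard_default)) == NULL)
 *		return (EAGAIN);
 */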
/* This function must be called with _gc_mutex held. */
void
_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
{
	struct stack	*spare_stack;
	/* Store the spare stack header inside the stack region itself. */
	spare_stack = (stack + stacksize - sizeof(struct stack));
	/* Round stacksize up to nearest multiple of _pthread_page_size. */
	if (stacksize % _pthread_page_size != 0) {
		spare_stack->stacksize =
		    ((stacksize / _pthread_page_size) + 1) *
		    _pthread_page_size;
	} else
		spare_stack->stacksize = stacksize;
	spare_stack->guardsize = guardsize;
	spare_stack->stackaddr = stack;
	if (spare_stack->stacksize == _pthread_stack_default &&
	    spare_stack->guardsize == _pthread_guard_default) {
		/* Default stack/guard size. */
		LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
	} else {
		/* Non-default stack/guard size. */
		LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
	}
}
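/*
 * Usage sketch (hypothetical, for illustration only; the thread fields are
 * assumed names): because _thread_stack_free() requires _gc_mutex, a caller
 * recycling a dead thread's stack would look roughly like:
 *
 *	if (_pthread_mutex_lock(&_gc_mutex) != 0)
 *		PANIC("Cannot lock gc mutex");
 *	_thread_stack_free(thread->stackaddr, thread->stacksize,
 *	    thread->guardsize);
 *	if (_pthread_mutex_unlock(&_gc_mutex) != 0)
 *		PANIC("Cannot unlock gc mutex");
 */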