/*
 * Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
 * Copyright (c) 2000-2001 Jason Evans <jasone@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "namespace.h"
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"

/* Spare thread stack. */
struct stack {
	LIST_ENTRY(stack)	qe;		/* Stack queue linkage. */
	size_t			stacksize;	/* Stack size (rounded up). */
	size_t			guardsize;	/* Guard size. */
	void			*stackaddr;	/* Stack address. */
};

/*
 * Default sized (stack and guard) spare stack queue.  Stacks are cached
 * to avoid additional complexity managing mmap()ed stack regions.  Spare
 * stacks are used in LIFO order to increase cache locality.
 */
static LIST_HEAD(, stack)	dstackq = LIST_HEAD_INITIALIZER(dstackq);
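
/*
 * (The LIFO order falls out of the list operations: _thr_stack_free()
 * below inserts spare stacks with LIST_INSERT_HEAD(), and
 * _thr_stack_alloc() reuses the LIST_FIRST() entry.)
 */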

/*
 * Miscellaneous sized (non-default stack and/or guard) spare stack queue.
 * Stacks are cached to avoid additional complexity managing mmap()ed
 * stack regions.  This list is unordered, since ordering on both stack
 * size and guard size would be more trouble than it's worth.  Stacks are
 * allocated from this cache on a first size match basis.
 */
static LIST_HEAD(, stack)	mstackq = LIST_HEAD_INITIALIZER(mstackq);

/*
 * Base address of the last stack allocated (including its red zone, if
 * there is one).  Stacks are allocated contiguously, starting beyond the
 * top of the main stack.  When a new stack is created, a red zone is
 * typically created (actually, the red zone is mapped with PROT_NONE) above
 * the top of the stack, such that the stack will not be able to grow all
 * the way to the bottom of the next stack.  This isn't fool-proof.  It is
 * possible for a stack to grow by a large amount, such that it grows into
 * the next stack, and as long as the memory within the red zone is never
 * accessed, nothing will prevent one thread stack from trouncing all over
 * the next.
 *
 * low memory
 *     . . . . . . . . . . . . . . . . . .
 *    |                                   |
 *    |             stack 3               | start of 3rd thread stack
 *    +-----------------------------------+
 *    |                                   |
 *    |       Red Zone (guard page)       | red zone for 2nd thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 2 - PTHREAD_STACK_DEFAULT  | top of 2nd thread stack
 *    |                                   |
 *    |             stack 2               |
 *    +-----------------------------------+ <-- start of 2nd thread stack
 *    |                                   |
 *    |       Red Zone                    | red zone for 1st thread
 *    |                                   |
 *    +-----------------------------------+
 *    |  stack 1 - PTHREAD_STACK_DEFAULT  | top of 1st thread stack
 *    |                                   |
 *    |             stack 1               |
 *    +-----------------------------------+ <-- start of 1st thread stack
 *    |                                   | (initial value of last_stack)
 *    |       Red Zone                    | red zone for main thread
 *    +-----------------------------------+
 *    | USRSTACK - PTHREAD_STACK_INITIAL  | top of main thread stack
 *    |                                   |
 *    |            main stack             |
 *    +-----------------------------------+ <-- start of main thread stack
 *                                              (USRSTACK)
 * high memory
 */
static void *last_stack = NULL;
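
/*
 * Worked example (hypothetical sizes, not the build-time defaults):
 * with a 1 MB initial stack and a single 4 KB guard page, last_stack
 * starts out at _usrstack - 0x100000 - 0x1000, and each subsequent
 * default-sized stack is carved out of the address space just below it.
 */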

/* Round size up to the nearest multiple of _thr_page_size. */
static inline size_t
round_up(size_t size)
{
	if (size % _thr_page_size != 0)
		size = ((size / _thr_page_size) + 1) * _thr_page_size;
	return (size);
}
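
/*
 * Example (assuming a hypothetical 4 KB page size): round_up(70000)
 * returns 73728 (18 pages), while round_up(65536) is returned unchanged.
 */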

int
_thr_stack_alloc(struct pthread_attr *attr)
{
	struct stack *spare_stack;
	struct kse *curkse;
	kse_critical_t crit;
	size_t stacksize;
	size_t guardsize;
	char *stackaddr;

	/*
	 * Round up stack size to nearest multiple of _thr_page_size so
	 * that mmap() will work.  If the stack size is not an even
	 * multiple, we end up initializing things such that there is
	 * unused space above the beginning of the stack, so the stack
	 * sits snugly against its guard.
	 */
	stacksize = round_up(attr->stacksize_attr);
	guardsize = round_up(attr->guardsize_attr);

	attr->stackaddr_attr = NULL;
	attr->flags &= ~THR_STACK_USER;

	/*
	 * Use the garbage collector lock for synchronization of the
	 * spare stack lists and allocations from usrstack.
	 */
	crit = _kse_critical_enter();
	curkse = _get_curkse();
	KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
	/*
	 * If the stack and guard sizes are default, try to allocate a stack
	 * from the default-size stack cache:
	 */
	if ((stacksize == _thr_stack_default) &&
	    (guardsize == _thr_guard_default)) {
		if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
			/* Use the spare stack. */
			LIST_REMOVE(spare_stack, qe);
			attr->stackaddr_attr = spare_stack->stackaddr;
		}
	}
	/*
	 * The user specified a non-default stack and/or guard size, so try to
	 * allocate a stack from the non-default size stack cache, using the
	 * rounded up stack size (stacksize) in the search:
	 */
	else {
		LIST_FOREACH(spare_stack, &mstackq, qe) {
			if (spare_stack->stacksize == stacksize &&
			    spare_stack->guardsize == guardsize) {
				LIST_REMOVE(spare_stack, qe);
				attr->stackaddr_attr = spare_stack->stackaddr;
				break;
			}
		}
	}
	if (attr->stackaddr_attr != NULL) {
		/* A cached stack was found.  Release the lock. */
		KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
		_kse_critical_leave(crit);
	}
	else {
		/* Allocate a stack from usrstack. */
		if (last_stack == NULL)
			last_stack = (void *)((uintptr_t)_usrstack -
			    (uintptr_t)_thr_stack_initial -
			    (uintptr_t)_thr_guard_default);

		/* Allocate a new stack. */
		stackaddr = (void *)((uintptr_t)last_stack -
		    (uintptr_t)stacksize - (uintptr_t)guardsize);

		/*
		 * Even if stack allocation fails, we don't want to try to
		 * use this location again, so unconditionally decrement
		 * last_stack.  Under normal operating conditions, the most
		 * likely reason for an mmap() error is a stack overflow of
		 * the adjacent thread stack.
		 */
		last_stack = (void *)((uintptr_t)last_stack -
		    (uintptr_t)(stacksize + guardsize));

		/* Release the lock before mmap'ing it. */
		KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
		_kse_critical_leave(crit);

		/*
		 * Map the stack and guard page together, then split the
		 * guard page from the allocated space:
		 */
		if ((stackaddr = mmap(stackaddr, stacksize + guardsize,
		    PROT_READ | PROT_WRITE, MAP_STACK, -1, 0)) != MAP_FAILED &&
		    (guardsize == 0 ||
		    mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
			stackaddr += guardsize;
		} else {
			if (stackaddr != MAP_FAILED)
				munmap(stackaddr, stacksize + guardsize);
			stackaddr = NULL;
		}
		attr->stackaddr_attr = stackaddr;
	}
	if (attr->stackaddr_attr != NULL)
		return (0);
	else
		return (-1);
}
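
/*
 * Usage sketch (hypothetical caller; the attribute fields shown are the
 * ones this allocator actually reads and writes):
 *
 *	struct pthread_attr attr;
 *
 *	attr.flags = 0;
 *	attr.stacksize_attr = _thr_stack_default;
 *	attr.guardsize_attr = _thr_guard_default;
 *	if (_thr_stack_alloc(&attr) != 0)
 *		return (EAGAIN);	(hypothetical error path)
 *	attr.stackaddr_attr now points at the usable stack memory.
 */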

/* This function must be called with _thread_list_lock held. */
void
_thr_stack_free(struct pthread_attr *attr)
{
	struct stack *spare_stack;

	if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
	    && (attr->stackaddr_attr != NULL)) {
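		/*
		 * The cache record lives in the top sizeof(struct stack)
		 * bytes of the stack region itself; that memory is unused
		 * while the stack sits on a free list, so caching a stack
		 * costs no extra allocation.
		 */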
		spare_stack = (struct stack *)((uintptr_t)attr->stackaddr_attr
		    + (uintptr_t)attr->stacksize_attr - sizeof(struct stack));
		spare_stack->stacksize = round_up(attr->stacksize_attr);
		spare_stack->guardsize = round_up(attr->guardsize_attr);
		spare_stack->stackaddr = attr->stackaddr_attr;

		if (spare_stack->stacksize == _thr_stack_default &&
		    spare_stack->guardsize == _thr_guard_default) {
			/* Default stack/guard size. */
			LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
		}
		else {
			/* Non-default stack/guard size. */
			LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
		}
		attr->stackaddr_attr = NULL;
	}
}