/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/queue.h>

#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#include "libc_private.h"
#include "thr_private.h"

/*#define DEBUG_THREAD_LIST */
#ifdef DEBUG_THREAD_LIST
#define DBG_MSG         stdout_debug
#else
#define DBG_MSG(x...)
#endif

#define MAX_THREADS             100000

/*
 * Define a high water mark for the maximum number of threads that
 * will be cached.  Once this level is reached, any extra threads
 * will be free()'d.
 */
#define MAX_CACHED_THREADS      100

/*
 * We've got to keep track of everything that is allocated, not only
 * to have a speedy free list, but also so that thread structures can
 * be deallocated after a fork().
 */
static TAILQ_HEAD(, pthread)    free_threadq;
static struct umutex            free_thread_lock = DEFAULT_UMUTEX;
static struct umutex            tcb_lock = DEFAULT_UMUTEX;
static int                      free_thread_count = 0;
static int                      inited = 0;
static int                      total_threads;

LIST_HEAD(thread_hash_head, pthread);
#define HASH_QUEUES     128
static struct thread_hash_head  thr_hashtable[HASH_QUEUES];
#define THREAD_HASH(thrd)       (((unsigned long)thrd >> 8) % HASH_QUEUES)

static void thr_destroy(struct pthread *curthread, struct pthread *thread);

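/*
 * Initialize (or reinitialize) the thread list, the free-thread queue,
 * the thread hash table, and their locks.
 */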
void
_thr_list_init(void)
{
        int i;

        _gc_count = 0;
        total_threads = 1;
        _thr_urwlock_init(&_thr_list_lock);
        TAILQ_INIT(&_thread_list);
        TAILQ_INIT(&free_threadq);
        _thr_umutex_init(&free_thread_lock);
        _thr_umutex_init(&tcb_lock);
        if (inited) {
                for (i = 0; i < HASH_QUEUES; ++i)
                        LIST_INIT(&thr_hashtable[i]);
        }
        inited = 1;
}

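/*
 * Walk the list of threads waiting for garbage collection, free the
 * stacks of those that have fully terminated, and release their thread
 * structures (the initial thread is never freed).
 */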
void
_thr_gc(struct pthread *curthread)
{
        struct pthread *td, *td_next;
        TAILQ_HEAD(, pthread) worklist;

        TAILQ_INIT(&worklist);
        THREAD_LIST_WRLOCK(curthread);

        /* Check the threads waiting for GC. */
        TAILQ_FOREACH_SAFE(td, &_thread_gc_list, gcle, td_next) {
                if (td->tid != TID_TERMINATED) {
                        /*
                         * The thread may still be running in userland;
                         * don't free its stack yet.
                         */
                        continue;
                }
                _thr_stack_free(&td->attr);
                THR_GCLIST_REMOVE(td);
                TAILQ_INSERT_HEAD(&worklist, td, gcle);
        }
        THREAD_LIST_UNLOCK(curthread);

        while ((td = TAILQ_FIRST(&worklist)) != NULL) {
                TAILQ_REMOVE(&worklist, td, gcle);
                /*
                 * XXX The initial thread is never freed, because some
                 * code may still be referencing it.
                 */
                if (td == _thr_initial) {
                        DBG_MSG("Initial thread won't be freed\n");
                        continue;
                }

                _thr_free(curthread, td);
        }
}

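/*
 * Allocate a thread structure, preferring the cache of free threads;
 * otherwise allocate a new one along with its sleep queue and wake
 * address, then construct its TCB.  Returns NULL on failure.
 */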
struct pthread *
_thr_alloc(struct pthread *curthread)
{
        struct pthread  *thread = NULL;
        struct tcb      *tcb;

        if (curthread != NULL) {
                if (GC_NEEDED())
                        _thr_gc(curthread);
                if (free_thread_count > 0) {
                        THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
                        if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
                                TAILQ_REMOVE(&free_threadq, thread, tle);
                                free_thread_count--;
                        }
                        THR_LOCK_RELEASE(curthread, &free_thread_lock);
                }
        }
        if (thread == NULL) {
                if (total_threads > MAX_THREADS)
                        return (NULL);
                atomic_fetchadd_int(&total_threads, 1);
                thread = calloc(1, sizeof(struct pthread));
                if (thread == NULL) {
                        atomic_fetchadd_int(&total_threads, -1);
                        return (NULL);
                }
                if ((thread->sleepqueue = _sleepq_alloc()) == NULL ||
                    (thread->wake_addr = _thr_alloc_wake_addr()) == NULL) {
                        thr_destroy(curthread, thread);
                        atomic_fetchadd_int(&total_threads, -1);
                        return (NULL);
                }
        } else {
                bzero(&thread->_pthread_startzero,
                        __rangeof(struct pthread, _pthread_startzero, _pthread_endzero));
        }
        if (curthread != NULL) {
                THR_LOCK_ACQUIRE(curthread, &tcb_lock);
                tcb = _tcb_ctor(thread, 0 /* not initial tls */);
                THR_LOCK_RELEASE(curthread, &tcb_lock);
        } else {
                tcb = _tcb_ctor(thread, 1 /* initial tls */);
        }
        if (tcb != NULL) {
                thread->tcb = tcb;
        } else {
                thr_destroy(curthread, thread);
                atomic_fetchadd_int(&total_threads, -1);
                thread = NULL;
        }
        return (thread);
}

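/*
 * Release a thread structure: destroy its TCB, then either return the
 * structure to the free-thread cache or destroy it outright.
 */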
void
_thr_free(struct pthread *curthread, struct pthread *thread)
{
        DBG_MSG("Freeing thread %p\n", thread);

        /*
         * Always free the tcb.  We only know that it is part of the
         * RTLD TLS block; we don't know its details and cannot assume
         * how it works, so it is better not to cache it here.
         */
        if (curthread != NULL) {
                THR_LOCK_ACQUIRE(curthread, &tcb_lock);
                _tcb_dtor(thread->tcb);
                THR_LOCK_RELEASE(curthread, &tcb_lock);
        } else {
                _tcb_dtor(thread->tcb);
        }
        thread->tcb = NULL;
        if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
                thr_destroy(curthread, thread);
                atomic_fetchadd_int(&total_threads, -1);
        } else {
                /*
                 * Add the thread to the free thread list.  This also
                 * keeps pthread ids from being reused too quickly,
                 * which may help some buggy applications.
                 */
                THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
                TAILQ_INSERT_TAIL(&free_threadq, thread, tle);
                free_thread_count++;
                THR_LOCK_RELEASE(curthread, &free_thread_lock);
        }
}

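/*
 * Release a thread's sleep queue and wake address and free the thread
 * structure itself.
 */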
static void
thr_destroy(struct pthread *curthread __unused, struct pthread *thread)
{
        if (thread->sleepqueue != NULL)
                _sleepq_free(thread->sleepqueue);
        if (thread->wake_addr != NULL)
                _thr_release_wake_addr(thread->wake_addr);
        free(thread);
}

/*
 * Add the thread to the list of all threads and increment
 * number of active threads.
 */
void
_thr_link(struct pthread *curthread, struct pthread *thread)
{
        THREAD_LIST_WRLOCK(curthread);
        THR_LIST_ADD(thread);
        THREAD_LIST_UNLOCK(curthread);
        atomic_add_int(&_thread_active_threads, 1);
}

/*
 * Remove an active thread.
 */
void
_thr_unlink(struct pthread *curthread, struct pthread *thread)
{
        THREAD_LIST_WRLOCK(curthread);
        THR_LIST_REMOVE(thread);
        THREAD_LIST_UNLOCK(curthread);
        atomic_add_int(&_thread_active_threads, -1);
}

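/* Insert a thread into the thread hash table. */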
void
_thr_hash_add(struct pthread *thread)
{
        struct thread_hash_head *head;

        head = &thr_hashtable[THREAD_HASH(thread)];
        LIST_INSERT_HEAD(head, thread, hle);
}

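/* Remove a thread from the thread hash table. */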
void
_thr_hash_remove(struct pthread *thread)
{
        LIST_REMOVE(thread, hle);
}

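/*
 * Look up a thread pointer in the hash table; return it if it is a
 * known thread, or NULL otherwise.
 */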
struct pthread *
_thr_hash_find(struct pthread *thread)
{
        struct pthread *td;
        struct thread_hash_head *head;

        head = &thr_hashtable[THREAD_HASH(thread)];
        LIST_FOREACH(td, head, hle) {
                if (td == thread)
                        return (thread);
        }
        return (NULL);
}

/*
 * Find a thread in the linked list of active threads and add a reference
 * to it.  Threads with positive reference counts will not be deallocated
 * until all references are released.
 */
int
_thr_ref_add(struct pthread *curthread, struct pthread *thread,
    int include_dead)
{
        int ret;

        if (thread == NULL)
                /* Invalid thread: */
                return (EINVAL);

        if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) {
                thread->refcount++;
                THR_CRITICAL_ENTER(curthread);
                THR_THREAD_UNLOCK(curthread, thread);
        }

        /* Return zero if the thread exists: */
        return (ret);
}

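/*
 * Drop a reference taken by _thr_ref_add() and try to garbage-collect
 * the thread.
 */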
void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
        THR_THREAD_LOCK(curthread, thread);
        thread->refcount--;
        _thr_try_gc(curthread, thread);
        THR_CRITICAL_LEAVE(curthread);
}

/* Entered with the thread lock held; exits with the thread lock released. */
void
_thr_try_gc(struct pthread *curthread, struct pthread *thread)
{
        if (THR_SHOULD_GC(thread)) {
                THR_REF_ADD(curthread, thread);
                THR_THREAD_UNLOCK(curthread, thread);
                THREAD_LIST_WRLOCK(curthread);
                THR_THREAD_LOCK(curthread, thread);
                THR_REF_DEL(curthread, thread);
                if (THR_SHOULD_GC(thread)) {
                        THR_LIST_REMOVE(thread);
                        THR_GCLIST_ADD(thread);
                }
                THR_THREAD_UNLOCK(curthread, thread);
                THREAD_LIST_UNLOCK(curthread);
        } else {
                THR_THREAD_UNLOCK(curthread, thread);
        }
}

/* Return with the thread lock held if the thread is found. */
int
_thr_find_thread(struct pthread *curthread, struct pthread *thread,
    int include_dead)
{
        struct pthread *pthread;
        int ret;

        if (thread == NULL)
                return (EINVAL);

        ret = 0;
        THREAD_LIST_RDLOCK(curthread);
        pthread = _thr_hash_find(thread);
        if (pthread) {
                THR_THREAD_LOCK(curthread, pthread);
                if (include_dead == 0 && pthread->state == PS_DEAD) {
                        THR_THREAD_UNLOCK(curthread, pthread);
                        ret = ESRCH;
                }
        } else {
                ret = ESRCH;
        }
        THREAD_LIST_UNLOCK(curthread);
        return (ret);
}