/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/queue.h>

#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#include "libc_private.h"
#include "thr_private.h"
#include "static_tls.h"

/* #define DEBUG_THREAD_LIST */
#ifdef DEBUG_THREAD_LIST
#define DBG_MSG         stdout_debug
#else
#define DBG_MSG(x...)
#endif

#define MAX_THREADS             100000

/*
 * Define a high water mark for the maximum number of threads that
 * will be cached.  Once this level is reached, any extra threads
 * will be free()'d.
 */
#define MAX_CACHED_THREADS      100

/*
 * We've got to keep track of everything that is allocated, not only
 * to have a speedy free list, but also so they can be deallocated
 * after a fork().
 */
static TAILQ_HEAD(, pthread)    free_threadq;
static struct umutex            free_thread_lock = DEFAULT_UMUTEX;
static struct umutex            tcb_lock = DEFAULT_UMUTEX;
static int                      free_thread_count = 0;
static int                      inited = 0;
static int                      total_threads;

LIST_HEAD(thread_hash_head, pthread);
#define HASH_QUEUES     128
static struct thread_hash_head  thr_hashtable[HASH_QUEUES];
#define THREAD_HASH(thrd)       (((unsigned long)thrd >> 8) % HASH_QUEUES)

static void thr_destroy(struct pthread *curthread, struct pthread *thread);

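/*
 * Initialize the thread list, the free-thread cache, and the thread
 * hash table.  Called when the library starts up, and again in the
 * child process after a fork().
 */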
void
_thr_list_init(void)
{
        int i;

        _gc_count = 0;
        total_threads = 1;
        _thr_urwlock_init(&_thr_list_lock);
        TAILQ_INIT(&_thread_list);
        TAILQ_INIT(&free_threadq);
        _thr_umutex_init(&free_thread_lock);
        _thr_umutex_init(&tcb_lock);
        /*
         * The static hash buckets are already zeroed on the first
         * call; on reinitialization (after fork()) reset them
         * explicitly.
         */
        if (inited) {
                for (i = 0; i < HASH_QUEUES; ++i)
                        LIST_INIT(&thr_hashtable[i]);
        }
        inited = 1;
}

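/*
 * Reclaim threads that have terminated: free the stack of each thread
 * on the GC list whose kernel tid has been set to TID_TERMINATED, then
 * release the thread structures themselves.
 */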
void
_thr_gc(struct pthread *curthread)
{
        struct pthread *td, *td_next;
        TAILQ_HEAD(, pthread) worklist;

        TAILQ_INIT(&worklist);
        THREAD_LIST_WRLOCK(curthread);

        /* Check the threads waiting for GC. */
        TAILQ_FOREACH_SAFE(td, &_thread_gc_list, gcle, td_next) {
                if (td->tid != TID_TERMINATED) {
                        /*
                         * The thread has not finished exiting in the
                         * kernel yet, so its stack cannot be freed.
                         */
                        continue;
                }
                _thr_stack_free(&td->attr);
                THR_GCLIST_REMOVE(td);
                TAILQ_INSERT_HEAD(&worklist, td, gcle);
        }
        THREAD_LIST_UNLOCK(curthread);

        while ((td = TAILQ_FIRST(&worklist)) != NULL) {
                TAILQ_REMOVE(&worklist, td, gcle);
                /*
                 * XXX The initial thread is never freed, because some
                 * code may still be referencing it.
                 */
                if (td == _thr_initial) {
                        DBG_MSG("Initial thread won't be freed\n");
                        continue;
                }

                _thr_free(curthread, td);
        }
}

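/*
 * Allocate a thread structure, taking one from the free-thread cache
 * when possible, and construct a new TCB for it.  Returns NULL on
 * allocation failure or once MAX_THREADS is exceeded.
 */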
struct pthread *
_thr_alloc(struct pthread *curthread)
{
        struct pthread  *thread = NULL;
        struct tcb      *tcb;

        if (curthread != NULL) {
                if (GC_NEEDED())
                        _thr_gc(curthread);
                if (free_thread_count > 0) {
                        THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
                        if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
                                TAILQ_REMOVE(&free_threadq, thread, tle);
                                free_thread_count--;
                        }
                        THR_LOCK_RELEASE(curthread, &free_thread_lock);
                }
        }
        if (thread == NULL) {
                if (total_threads > MAX_THREADS)
                        return (NULL);
                atomic_fetchadd_int(&total_threads, 1);
                thread = calloc(1, sizeof(struct pthread));
                if (thread == NULL) {
                        atomic_fetchadd_int(&total_threads, -1);
                        return (NULL);
                }
                if ((thread->sleepqueue = _sleepq_alloc()) == NULL ||
                    (thread->wake_addr = _thr_alloc_wake_addr()) == NULL) {
                        thr_destroy(curthread, thread);
                        atomic_fetchadd_int(&total_threads, -1);
                        return (NULL);
                }
        } else {
                /* Re-zero the reusable part of a cached thread. */
                bzero(&thread->_pthread_startzero,
                    __rangeof(struct pthread, _pthread_startzero, _pthread_endzero));
        }
        if (curthread != NULL) {
                THR_LOCK_ACQUIRE(curthread, &tcb_lock);
                tcb = _tcb_ctor(thread, 0 /* not initial tls */);
                THR_LOCK_RELEASE(curthread, &tcb_lock);
        } else {
                tcb = _tcb_ctor(thread, 1 /* initial tls */);
        }
        if (tcb != NULL) {
                thread->tcb = tcb;
        } else {
                thr_destroy(curthread, thread);
                atomic_fetchadd_int(&total_threads, -1);
                thread = NULL;
        }
        return (thread);
}

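/*
 * Release a thread structure: destroy its TCB, then either cache the
 * structure on the free thread list or destroy it outright when the
 * cache is full or no current thread exists.
 */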
void
_thr_free(struct pthread *curthread, struct pthread *thread)
{
        DBG_MSG("Freeing thread %p\n", thread);

        /*
         * Always free the tcb: we only know that it is part of the
         * RTLD TLS block, not how it works internally, so it is
         * safer not to cache it here.
         */
        if (curthread != NULL) {
                THR_LOCK_ACQUIRE(curthread, &tcb_lock);
                _tcb_dtor(thread->tcb);
                THR_LOCK_RELEASE(curthread, &tcb_lock);
        } else {
                _tcb_dtor(thread->tcb);
        }
        thread->tcb = NULL;
        if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
                thr_destroy(curthread, thread);
                atomic_fetchadd_int(&total_threads, -1);
        } else {
                /*
                 * Add the thread to the free thread list.  This also
                 * keeps pthread ids from being reused too quickly,
                 * which may help some buggy applications.
                 */
                THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
                TAILQ_INSERT_TAIL(&free_threadq, thread, tle);
                free_thread_count++;
                THR_LOCK_RELEASE(curthread, &free_thread_lock);
        }
}

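/* Free the resources attached to the thread structure, then the structure. */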
static void
thr_destroy(struct pthread *curthread __unused, struct pthread *thread)
{
        if (thread->sleepqueue != NULL)
                _sleepq_free(thread->sleepqueue);
        if (thread->wake_addr != NULL)
                _thr_release_wake_addr(thread->wake_addr);
        free(thread);
}

/*
 * Add the thread to the list of all threads and increment the
 * number of active threads.
 */
void
_thr_link(struct pthread *curthread, struct pthread *thread)
{
        THREAD_LIST_WRLOCK(curthread);
        THR_LIST_ADD(thread);
        THREAD_LIST_UNLOCK(curthread);
        atomic_add_int(&_thread_active_threads, 1);
}

/*
 * Remove an active thread.
 */
void
_thr_unlink(struct pthread *curthread, struct pthread *thread)
{
        THREAD_LIST_WRLOCK(curthread);
        THR_LIST_REMOVE(thread);
        THREAD_LIST_UNLOCK(curthread);
        atomic_add_int(&_thread_active_threads, -1);
}

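/* Insert the thread into its bucket of the thread hash table. */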
void
_thr_hash_add(struct pthread *thread)
{
        struct thread_hash_head *head;

        head = &thr_hashtable[THREAD_HASH(thread)];
        LIST_INSERT_HEAD(head, thread, hle);
}

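/* Remove the thread from the thread hash table. */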
void
_thr_hash_remove(struct pthread *thread)
{
        LIST_REMOVE(thread, hle);
}

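/*
 * Look a thread pointer up in the hash table, returning it if it is a
 * known thread and NULL otherwise; this is how pthread_t values passed
 * in from applications are validated.
 */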
struct pthread *
_thr_hash_find(struct pthread *thread)
{
        struct pthread *td;
        struct thread_hash_head *head;

        head = &thr_hashtable[THREAD_HASH(thread)];
        LIST_FOREACH(td, head, hle) {
                if (td == thread)
                        return (thread);
        }
        return (NULL);
}

/*
 * Find a thread in the linked list of active threads and add a reference
 * to it.  Threads with positive reference counts will not be deallocated
 * until all references are released.
 */
int
_thr_ref_add(struct pthread *curthread, struct pthread *thread,
    int include_dead)
{
        int ret;

        if (thread == NULL)
                /* Invalid thread: */
                return (EINVAL);

        if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) {
                thread->refcount++;
                THR_CRITICAL_ENTER(curthread);
                THR_THREAD_UNLOCK(curthread, thread);
        }

        /* Return zero if the thread exists: */
        return (ret);
}

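/*
 * Release a reference taken with _thr_ref_add() and garbage collect
 * the thread if it has become eligible.
 */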
void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
        THR_THREAD_LOCK(curthread, thread);
        thread->refcount--;
        _thr_try_gc(curthread, thread);
        THR_CRITICAL_LEAVE(curthread);
}

/* Entered with the thread lock held; returns with it released. */
void
_thr_try_gc(struct pthread *curthread, struct pthread *thread)
{
        if (THR_SHOULD_GC(thread)) {
                THR_REF_ADD(curthread, thread);
                THR_THREAD_UNLOCK(curthread, thread);
                THREAD_LIST_WRLOCK(curthread);
                THR_THREAD_LOCK(curthread, thread);
                THR_REF_DEL(curthread, thread);
                if (THR_SHOULD_GC(thread)) {
                        THR_LIST_REMOVE(thread);
                        THR_GCLIST_ADD(thread);
                }
                THR_THREAD_UNLOCK(curthread, thread);
                THREAD_LIST_UNLOCK(curthread);
        } else {
                THR_THREAD_UNLOCK(curthread, thread);
        }
}

/* On success, returns with the thread lock held. */
int
_thr_find_thread(struct pthread *curthread, struct pthread *thread,
    int include_dead)
{
        struct pthread *pthread;
        int ret;

        if (thread == NULL)
                return (EINVAL);

        ret = 0;
        THREAD_LIST_RDLOCK(curthread);
        pthread = _thr_hash_find(thread);
        if (pthread) {
                THR_THREAD_LOCK(curthread, pthread);
                if (include_dead == 0 && pthread->state == PS_DEAD) {
                        THR_THREAD_UNLOCK(curthread, pthread);
                        ret = ESRCH;
                }
        } else {
                ret = ESRCH;
        }
        THREAD_LIST_UNLOCK(curthread);
        return (ret);
}

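/*
 * Copy a static TLS initialization image into one thread's static TLS
 * block at the given base address, zeroing the rest of the block.
 */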
static void
thr_distribute_static_tls(uintptr_t tlsbase, void *src, size_t len,
    size_t total_len)
{

        memcpy((void *)tlsbase, src, len);
        memset((char *)tlsbase + len, 0, total_len - len);
}

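/*
 * Distribute a new static TLS initialization image to every thread,
 * used when a dynamically loaded object needs static TLS storage.  If
 * libthr is not initialized yet, only the single bootstrap thread's
 * TLS block has to be written.
 */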
void
__pthread_distribute_static_tls(size_t offset, void *src, size_t len,
    size_t total_len)
{
        struct pthread *curthread, *thrd;
        uintptr_t tlsbase;

        if (!_thr_is_inited()) {
                tlsbase = _libc_get_static_tls_base(offset);
                thr_distribute_static_tls(tlsbase, src, len, total_len);
                return;
        }
        curthread = _get_curthread();
        THREAD_LIST_RDLOCK(curthread);
        TAILQ_FOREACH(thrd, &_thread_list, tle) {
                tlsbase = _get_static_tls_base(thrd, offset);
                thr_distribute_static_tls(tlsbase, src, len, total_len);
        }
        THREAD_LIST_UNLOCK(curthread);
}