1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
5  *  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice(s), this list of conditions and the following disclaimer as
12  *    the first lines of this file unmodified other than the possible
13  *    addition of one or more copyright notices.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice(s), this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28  * DAMAGE.
29  */
30
31 #include "opt_witness.h"
32 #include "opt_hwpmc_hooks.h"
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include <sys/param.h>
38 #include <sys/systm.h>
39 #include <sys/kernel.h>
40 #include <sys/lock.h>
41 #include <sys/mutex.h>
42 #include <sys/proc.h>
43 #include <sys/bitstring.h>
44 #include <sys/epoch.h>
45 #include <sys/rangelock.h>
46 #include <sys/resourcevar.h>
47 #include <sys/sdt.h>
48 #include <sys/smp.h>
49 #include <sys/sched.h>
50 #include <sys/sleepqueue.h>
51 #include <sys/selinfo.h>
52 #include <sys/syscallsubr.h>
53 #include <sys/dtrace_bsd.h>
54 #include <sys/sysent.h>
55 #include <sys/turnstile.h>
56 #include <sys/taskqueue.h>
57 #include <sys/ktr.h>
58 #include <sys/rwlock.h>
59 #include <sys/umtx.h>
60 #include <sys/vmmeter.h>
61 #include <sys/cpuset.h>
62 #ifdef  HWPMC_HOOKS
63 #include <sys/pmckern.h>
64 #endif
65 #include <sys/priv.h>
66
67 #include <security/audit/audit.h>
68
69 #include <vm/pmap.h>
70 #include <vm/vm.h>
71 #include <vm/vm_extern.h>
72 #include <vm/uma.h>
73 #include <vm/vm_phys.h>
74 #include <sys/eventhandler.h>
75
76 /*
77  * Asserts below verify the stability of struct thread and struct proc
78  * layout, as exposed by KBI to modules.  On head, the KBI is allowed
79  * to drift; changes to the structures must be accompanied by updates
80  * to the asserts.
81  *
82  * On the stable branches after KBI freeze, conditions must not be
83  * violated.  Typically new fields are moved to the end of the
84  * structures.
85  */
86 #ifdef __amd64__
87 _Static_assert(offsetof(struct thread, td_flags) == 0xfc,
88     "struct thread KBI td_flags");
89 _Static_assert(offsetof(struct thread, td_pflags) == 0x104,
90     "struct thread KBI td_pflags");
91 _Static_assert(offsetof(struct thread, td_frame) == 0x4a0,
92     "struct thread KBI td_frame");
93 _Static_assert(offsetof(struct thread, td_emuldata) == 0x6b0,
94     "struct thread KBI td_emuldata");
95 _Static_assert(offsetof(struct proc, p_flag) == 0xb8,
96     "struct proc KBI p_flag");
97 _Static_assert(offsetof(struct proc, p_pid) == 0xc4,
98     "struct proc KBI p_pid");
99 _Static_assert(offsetof(struct proc, p_filemon) == 0x3c0,
100     "struct proc KBI p_filemon");
101 _Static_assert(offsetof(struct proc, p_comm) == 0x3d8,
102     "struct proc KBI p_comm");
103 _Static_assert(offsetof(struct proc, p_emuldata) == 0x4b8,
104     "struct proc KBI p_emuldata");
105 #endif
106 #ifdef __i386__
107 _Static_assert(offsetof(struct thread, td_flags) == 0x98,
108     "struct thread KBI td_flags");
109 _Static_assert(offsetof(struct thread, td_pflags) == 0xa0,
110     "struct thread KBI td_pflags");
111 _Static_assert(offsetof(struct thread, td_frame) == 0x300,
112     "struct thread KBI td_frame");
113 _Static_assert(offsetof(struct thread, td_emuldata) == 0x344,
114     "struct thread KBI td_emuldata");
115 _Static_assert(offsetof(struct proc, p_flag) == 0x6c,
116     "struct proc KBI p_flag");
117 _Static_assert(offsetof(struct proc, p_pid) == 0x78,
118     "struct proc KBI p_pid");
119 _Static_assert(offsetof(struct proc, p_filemon) == 0x26c,
120     "struct proc KBI p_filemon");
121 _Static_assert(offsetof(struct proc, p_comm) == 0x280,
122     "struct proc KBI p_comm");
123 _Static_assert(offsetof(struct proc, p_emuldata) == 0x30c,
124     "struct proc KBI p_emuldata");
125 #endif
126
127 SDT_PROVIDER_DECLARE(proc);
128 SDT_PROBE_DEFINE(proc, , , lwp__exit);
129
130 /*
131  * Thread-related storage.
132  */
133 static uma_zone_t thread_zone;
134
135 struct thread_domain_data {
136         struct thread   *tdd_zombies;
137         int             tdd_reapticks;
138 } __aligned(CACHE_LINE_SIZE);
139
140 static struct thread_domain_data thread_domain_data[MAXMEMDOM];
141
142 static struct task      thread_reap_task;
143 static struct callout   thread_reap_callout;
144
145 static void thread_zombie(struct thread *);
146 static void thread_reap_all(void);
147 static void thread_reap_task_cb(void *, int);
148 static void thread_reap_callout_cb(void *);
149 static int thread_unsuspend_one(struct thread *td, struct proc *p,
150     bool boundary);
151 static void thread_free_batched(struct thread *td);
152
153 static __exclusive_cache_line struct mtx tid_lock;
154 static bitstr_t *tid_bitmap;
155
156 static MALLOC_DEFINE(M_TIDHASH, "tidhash", "thread hash");
157
158 static int maxthread;
159 SYSCTL_INT(_kern, OID_AUTO, maxthread, CTLFLAG_RDTUN,
160     &maxthread, 0, "Maximum number of threads");
161
162 static __exclusive_cache_line int nthreads;
163
164 static LIST_HEAD(tidhashhead, thread) *tidhashtbl;
165 static u_long   tidhash;
166 static u_long   tidhashlock;
167 static struct   rwlock *tidhashtbl_lock;
168 #define TIDHASH(tid)            (&tidhashtbl[(tid) & tidhash])
169 #define TIDHASHLOCK(tid)        (&tidhashtbl_lock[(tid) & tidhashlock])
170
171 EVENTHANDLER_LIST_DEFINE(thread_ctor);
172 EVENTHANDLER_LIST_DEFINE(thread_dtor);
173 EVENTHANDLER_LIST_DEFINE(thread_init);
174 EVENTHANDLER_LIST_DEFINE(thread_fini);
175
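/*
 * Speculatively bump the global thread count.  The last 100 slots below
 * maxthread are reserved for privileged (PRIV_MAXPROC) callers; requests
 * that would exceed the limit, or unprivileged requests in the reserved
 * range, are backed out.
 */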
176 static bool
177 thread_count_inc_try(void)
178 {
179         int nthreads_new;
180
181         nthreads_new = atomic_fetchadd_int(&nthreads, 1) + 1;
182         if (nthreads_new >= maxthread - 100) {
183                 if (priv_check_cred(curthread->td_ucred, PRIV_MAXPROC) != 0 ||
184                     nthreads_new >= maxthread) {
185                         atomic_subtract_int(&nthreads, 1);
186                         return (false);
187                 }
188         }
189         return (true);
190 }
191
192 static bool
193 thread_count_inc(void)
194 {
195         static struct timeval lastfail;
196         static int curfail;
197
198         thread_reap();
199         if (thread_count_inc_try()) {
200                 return (true);
201         }
202
203         thread_reap_all();
204         if (thread_count_inc_try()) {
205                 return (true);
206         }
207
208         if (ppsratecheck(&lastfail, &curfail, 1)) {
209                 printf("maxthread limit exceeded by uid %u "
210                     "(pid %d); consider increasing kern.maxthread\n",
211                     curthread->td_ucred->cr_ruid, curproc->p_pid);
212         }
213         return (false);
214 }
215
216 static void
217 thread_count_sub(int n)
218 {
219
220         atomic_subtract_int(&nthreads, n);
221 }
222
223 static void
224 thread_count_dec(void)
225 {
226
227         thread_count_sub(1);
228 }
229
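/*
 * Allocate a thread ID.  The bitmap is scanned for a free bit starting
 * at the last successful position, wrapping around at most once; the
 * result is biased by NO_PID so that thread IDs stay out of the PID range.
 */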
230 static lwpid_t
231 tid_alloc(void)
232 {
233         static lwpid_t trytid;
234         lwpid_t tid;
235
236         mtx_lock(&tid_lock);
237         /*
238          * It is an invariant that the bitmap is big enough to hold maxthread
239          * IDs. If we got to this point there has to be at least one free.
240          */
241         if (trytid >= maxthread)
242                 trytid = 0;
243         bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
244         if (tid == -1) {
245                 KASSERT(trytid != 0, ("unexpectedly ran out of IDs"));
246                 trytid = 0;
247                 bit_ffc_at(tid_bitmap, trytid, maxthread, &tid);
248                 KASSERT(tid != -1, ("unexpectedly ran out of IDs"));
249         }
250         bit_set(tid_bitmap, tid);
251         trytid = tid + 1;
252         mtx_unlock(&tid_lock);
253         return (tid + NO_PID);
254 }
255
256 static void
257 tid_free_locked(lwpid_t rtid)
258 {
259         lwpid_t tid;
260
261         mtx_assert(&tid_lock, MA_OWNED);
262         KASSERT(rtid >= NO_PID,
263             ("%s: invalid tid %d\n", __func__, rtid));
264         tid = rtid - NO_PID;
265         KASSERT(bit_test(tid_bitmap, tid) != 0,
266             ("thread ID %d not allocated\n", rtid));
267         bit_clear(tid_bitmap, tid);
268 }
269
270 static void
271 tid_free(lwpid_t rtid)
272 {
273
274         mtx_lock(&tid_lock);
275         tid_free_locked(rtid);
276         mtx_unlock(&tid_lock);
277 }
278
279 static void
280 tid_free_batch(lwpid_t *batch, int n)
281 {
282         int i;
283
284         mtx_lock(&tid_lock);
285         for (i = 0; i < n; i++) {
286                 tid_free_locked(batch[i]);
287         }
288         mtx_unlock(&tid_lock);
289 }
290
291 /*
292  * Batching for thread reaping.
293  */
294 struct tidbatch {
295         lwpid_t tab[16];
296         int n;
297 };
298
299 static void
300 tidbatch_prep(struct tidbatch *tb)
301 {
302
303         tb->n = 0;
304 }
305
306 static void
307 tidbatch_add(struct tidbatch *tb, struct thread *td)
308 {
309
310         KASSERT(tb->n < nitems(tb->tab),
311             ("%s: count too high %d", __func__, tb->n));
312         tb->tab[tb->n] = td->td_tid;
313         tb->n++;
314 }
315
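/*
 * Return the accumulated IDs under a single tid_lock acquisition, but
 * only once the batch is full; tidbatch_final() below flushes whatever
 * remains when the reap loop is done.
 */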
316 static void
317 tidbatch_process(struct tidbatch *tb)
318 {
319
320         KASSERT(tb->n <= nitems(tb->tab),
321             ("%s: count too high %d", __func__, tb->n));
322         if (tb->n == nitems(tb->tab)) {
323                 tid_free_batch(tb->tab, tb->n);
324                 tb->n = 0;
325         }
326 }
327
328 static void
329 tidbatch_final(struct tidbatch *tb)
330 {
331
332         KASSERT(tb->n <= nitems(tb->tab),
333             ("%s: count too high %d", __func__, tb->n));
334         if (tb->n != 0) {
335                 tid_free_batch(tb->tab, tb->n);
336         }
337 }
338
339 /*
340  * Prepare a thread for use.
341  */
342 static int
343 thread_ctor(void *mem, int size, void *arg, int flags)
344 {
345         struct thread   *td;
346
347         td = (struct thread *)mem;
348         td->td_state = TDS_INACTIVE;
349         td->td_lastcpu = td->td_oncpu = NOCPU;
350         td->td_allocdomain = vm_phys_domain(vtophys(td));
351
352         /*
353          * Note that td_critnest begins life as 1 because the thread is not
354          * running and is thereby implicitly waiting to be on the receiving
355          * end of a context switch.
356          */
357         td->td_critnest = 1;
358         td->td_lend_user_pri = PRI_MAX;
359 #ifdef AUDIT
360         audit_thread_alloc(td);
361 #endif
362 #ifdef KDTRACE_HOOKS
363         kdtrace_thread_ctor(td);
364 #endif
365         umtx_thread_alloc(td);
366         MPASS(td->td_sel == NULL);
367         return (0);
368 }
369
370 /*
371  * Reclaim a thread after use.
372  */
373 static void
374 thread_dtor(void *mem, int size, void *arg)
375 {
376         struct thread *td;
377
378         td = (struct thread *)mem;
379
380 #ifdef INVARIANTS
381         /* Verify that this thread is in a safe state to free. */
382         switch (td->td_state) {
383         case TDS_INHIBITED:
384         case TDS_RUNNING:
385         case TDS_CAN_RUN:
386         case TDS_RUNQ:
387                 /*
388                  * We must never unlink a thread that is in one of
389                  * these states, because it is currently active.
390                  */
391                 panic("bad state for thread unlinking");
392                 /* NOTREACHED */
393         case TDS_INACTIVE:
394                 break;
395         default:
396                 panic("bad thread state");
397                 /* NOTREACHED */
398         }
399 #endif
400 #ifdef AUDIT
401         audit_thread_free(td);
402 #endif
403 #ifdef KDTRACE_HOOKS
404         kdtrace_thread_dtor(td);
405 #endif
406         /* Free all OSD associated to this thread. */
407         osd_thread_exit(td);
408         td_softdep_cleanup(td);
409         MPASS(td->td_su == NULL);
410         seltdfini(td);
411 }
412
413 /*
414  * Initialize type-stable parts of a thread (when newly created).
415  */
416 static int
417 thread_init(void *mem, int size, int flags)
418 {
419         struct thread *td;
420
421         td = (struct thread *)mem;
422
423         td->td_sleepqueue = sleepq_alloc();
424         td->td_turnstile = turnstile_alloc();
425         td->td_rlqe = NULL;
426         EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
427         umtx_thread_init(td);
428         td->td_kstack = 0;
429         td->td_sel = NULL;
430         return (0);
431 }
432
433 /*
434  * Tear down type-stable parts of a thread (just before being discarded).
435  */
436 static void
437 thread_fini(void *mem, int size)
438 {
439         struct thread *td;
440
441         td = (struct thread *)mem;
442         EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
443         rlqentry_free(td->td_rlqe);
444         turnstile_free(td->td_turnstile);
445         sleepq_free(td->td_sleepqueue);
446         umtx_thread_fini(td);
447         MPASS(td->td_sel == NULL);
448 }
449
450 /*
451  * For a newly created process,
452  * link up all the structures and its initial thread, etc.
453  * Called from:
454  * {arch}/{arch}/machdep.c   {arch}_init(), init386() etc.
455  * proc_dtor() (should go away)
456  * proc_init()
457  */
458 void
459 proc_linkup0(struct proc *p, struct thread *td)
460 {
461         TAILQ_INIT(&p->p_threads);           /* all threads in proc */
462         proc_linkup(p, td);
463 }
464
465 void
466 proc_linkup(struct proc *p, struct thread *td)
467 {
468
469         sigqueue_init(&p->p_sigqueue, p);
470         p->p_ksi = ksiginfo_alloc(1);
471         if (p->p_ksi != NULL) {
472                 /* XXX p_ksi may be null if ksiginfo zone is not ready */
473                 p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
474         }
475         LIST_INIT(&p->p_mqnotifier);
476         p->p_numthreads = 0;
477         thread_link(td, p);
478 }
479
480 extern int max_threads_per_proc;
481
482 /*
483  * Initialize global thread allocation resources.
484  */
485 void
486 threadinit(void)
487 {
488         u_long i;
489         lwpid_t tid0;
490         uint32_t flags;
491
492         /*
493          * Place an upper limit on threads which can be allocated.
494          *
495          * Note that other factors may make the de facto limit much lower.
496          *
497          * Platform limits are somewhat arbitrary but deemed "more than good
498          * enough" for the foreseable future.
499          */
500         if (maxthread == 0) {
501 #ifdef _LP64
502                 maxthread = MIN(maxproc * max_threads_per_proc, 1000000);
503 #else
504                 maxthread = MIN(maxproc * max_threads_per_proc, 100000);
505 #endif
506         }
507
508         mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
509         tid_bitmap = bit_alloc(maxthread, M_TIDHASH, M_WAITOK);
510         /*
511          * Handle thread0.
512          */
513         thread_count_inc();
514         tid0 = tid_alloc();
515         if (tid0 != THREAD0_TID)
516                 panic("tid0 %d != %d\n", tid0, THREAD0_TID);
517
518         flags = UMA_ZONE_NOFREE;
519 #ifdef __aarch64__
520         /*
521          * Force thread structures to be allocated from the direct map.
522          * Otherwise, superpage promotions and demotions may temporarily
523          * invalidate thread structure mappings.  For most dynamically allocated
524          * structures this is not a problem, but translation faults cannot be
525          * handled without accessing curthread.
526          */
527         flags |= UMA_ZONE_CONTIG;
528 #endif
529         thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
530             thread_ctor, thread_dtor, thread_init, thread_fini,
531             32 - 1, flags);
532         tidhashtbl = hashinit(maxproc / 2, M_TIDHASH, &tidhash);
533         tidhashlock = (tidhash + 1) / 64;
534         if (tidhashlock > 0)
535                 tidhashlock--;
536         tidhashtbl_lock = malloc(sizeof(*tidhashtbl_lock) * (tidhashlock + 1),
537             M_TIDHASH, M_WAITOK | M_ZERO);
538         for (i = 0; i < tidhashlock + 1; i++)
539                 rw_init(&tidhashtbl_lock[i], "tidhash");
540
541         TASK_INIT(&thread_reap_task, 0, thread_reap_task_cb, NULL);
542         callout_init(&thread_reap_callout, 1);
543         callout_reset(&thread_reap_callout, 5 * hz, thread_reap_callout_cb, NULL);
544 }
545
546 /*
547  * Place an unused thread on the zombie list.
548  */
549 void
550 thread_zombie(struct thread *td)
551 {
552         struct thread_domain_data *tdd;
553         struct thread *ztd;
554
555         tdd = &thread_domain_data[td->td_allocdomain];
556         ztd = atomic_load_ptr(&tdd->tdd_zombies);
557         for (;;) {
558                 td->td_zombie = ztd;
559                 if (atomic_fcmpset_rel_ptr((uintptr_t *)&tdd->tdd_zombies,
560                     (uintptr_t *)&ztd, (uintptr_t)td))
561                         break;
562                 continue;
563         }
564 }
565
566 /*
567  * Release a thread that has exited after cpu_throw().
568  */
569 void
570 thread_stash(struct thread *td)
571 {
572         atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
573         thread_zombie(td);
574 }
575
576 /*
577  * Reap zombies from passed domain.
578  */
579 static void
580 thread_reap_domain(struct thread_domain_data *tdd)
581 {
582         struct thread *itd, *ntd;
583         struct tidbatch tidbatch;
584         struct credbatch credbatch;
585         int tdcount;
586         struct plimit *lim;
587         int limcount;
588
589         /*
590          * Reading upfront is pessimal if followed by concurrent atomic_swap,
591          * but most of the time the list is empty.
592          */
593         if (tdd->tdd_zombies == NULL)
594                 return;
595
596         itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&tdd->tdd_zombies,
597             (uintptr_t)NULL);
598         if (itd == NULL)
599                 return;
600
601         /*
602          * Multiple CPUs can get here; the race is fine as ticks is only
603          * advisory.
604          */
605         tdd->tdd_reapticks = ticks;
606
607         tidbatch_prep(&tidbatch);
608         credbatch_prep(&credbatch);
609         tdcount = 0;
610         lim = NULL;
611         limcount = 0;
612
613         while (itd != NULL) {
614                 ntd = itd->td_zombie;
615                 EVENTHANDLER_DIRECT_INVOKE(thread_dtor, itd);
616                 tidbatch_add(&tidbatch, itd);
617                 credbatch_add(&credbatch, itd);
618                 MPASS(itd->td_limit != NULL);
619                 if (lim != itd->td_limit) {
620                         if (limcount != 0) {
621                                 lim_freen(lim, limcount);
622                                 limcount = 0;
623                         }
624                 }
625                 lim = itd->td_limit;
626                 limcount++;
627                 thread_free_batched(itd);
628                 tidbatch_process(&tidbatch);
629                 credbatch_process(&credbatch);
630                 tdcount++;
631                 if (tdcount == 32) {
632                         thread_count_sub(tdcount);
633                         tdcount = 0;
634                 }
635                 itd = ntd;
636         }
637
638         tidbatch_final(&tidbatch);
639         credbatch_final(&credbatch);
640         if (tdcount != 0) {
641                 thread_count_sub(tdcount);
642         }
643         MPASS(limcount != 0);
644         lim_freen(lim, limcount);
645 }
646
647 /*
648  * Reap zombies from all domains.
649  */
650 static void
651 thread_reap_all(void)
652 {
653         struct thread_domain_data *tdd;
654         int i, domain;
655
656         domain = PCPU_GET(domain);
657         for (i = 0; i < vm_ndomains; i++) {
658                 tdd = &thread_domain_data[(i + domain) % vm_ndomains];
659                 thread_reap_domain(tdd);
660         }
661 }
662
663 /*
664  * Reap zombies from local domain.
665  */
666 void
667 thread_reap(void)
668 {
669         struct thread_domain_data *tdd;
670         int domain;
671
672         domain = PCPU_GET(domain);
673         tdd = &thread_domain_data[domain];
674
675         thread_reap_domain(tdd);
676 }
677
678 static void
679 thread_reap_task_cb(void *arg __unused, int pending __unused)
680 {
681
682         thread_reap_all();
683 }
684
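/*
 * Periodic callout: if any domain has had zombies pending for more than
 * 5 seconds worth of ticks, defer a full reap to taskqueue context, then
 * re-arm the callout.
 */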
685 static void
686 thread_reap_callout_cb(void *arg __unused)
687 {
688         struct thread_domain_data *tdd;
689         int i, cticks, lticks;
690         bool wantreap;
691
692         wantreap = false;
693         cticks = atomic_load_int(&ticks);
694         for (i = 0; i < vm_ndomains; i++) {
695                 tdd = &thread_domain_data[i];
696                 lticks = tdd->tdd_reapticks;
697                 if (tdd->tdd_zombies != NULL &&
698                     (u_int)(cticks - lticks) > 5 * hz) {
699                         wantreap = true;
700                         break;
701                 }
702         }
703
704         if (wantreap)
705                 taskqueue_enqueue(taskqueue_thread, &thread_reap_task);
706         callout_reset(&thread_reap_callout, 5 * hz, thread_reap_callout_cb, NULL);
707 }
708
709 /*
710  * Allocate a thread.
711  */
712 struct thread *
713 thread_alloc(int pages)
714 {
715         struct thread *td;
716         lwpid_t tid;
717
718         if (!thread_count_inc()) {
719                 return (NULL);
720         }
721
722         tid = tid_alloc();
723         td = uma_zalloc(thread_zone, M_WAITOK);
724         KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
725         if (!vm_thread_new(td, pages)) {
726                 uma_zfree(thread_zone, td);
727                 tid_free(tid);
728                 thread_count_dec();
729                 return (NULL);
730         }
731         td->td_tid = tid;
732         cpu_thread_alloc(td);
733         EVENTHANDLER_DIRECT_INVOKE(thread_ctor, td);
734         return (td);
735 }
736
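/*
 * Attach a kernel stack to a thread that was allocated without one.
 * Returns 1 on success and 0 if the stack could not be allocated.
 */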
737 int
738 thread_alloc_stack(struct thread *td, int pages)
739 {
740
741         KASSERT(td->td_kstack == 0,
742             ("thread_alloc_stack called on a thread with kstack"));
743         if (!vm_thread_new(td, pages))
744                 return (0);
745         cpu_thread_alloc(td);
746         return (1);
747 }
748
749 /*
750  * Deallocate a thread.
751  */
752 static void
753 thread_free_batched(struct thread *td)
754 {
755
756         lock_profile_thread_exit(td);
757         if (td->td_cpuset)
758                 cpuset_rel(td->td_cpuset);
759         td->td_cpuset = NULL;
760         cpu_thread_free(td);
761         if (td->td_kstack != 0)
762                 vm_thread_dispose(td);
763         callout_drain(&td->td_slpcallout);
764         /*
765          * Freeing of the tid is handled by the caller.
766          */
767         td->td_tid = -1;
768         uma_zfree(thread_zone, td);
769 }
770
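/*
 * Free a single thread, also releasing its thread ID and dropping the
 * global thread count (the batched reaper does those parts separately).
 */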
771 void
772 thread_free(struct thread *td)
773 {
774         lwpid_t tid;
775
776         EVENTHANDLER_DIRECT_INVOKE(thread_dtor, td);
777         tid = td->td_tid;
778         thread_free_batched(td);
779         tid_free(tid);
780         thread_count_dec();
781 }
782
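/*
 * Copy-on-write snapshot helpers: each thread caches references to the
 * process credentials and resource limits along with the p_cowgen value
 * they were taken at; thread_cow_update() re-synchronizes the cached
 * references with the process and records the current generation.
 */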
783 void
784 thread_cow_get_proc(struct thread *newtd, struct proc *p)
785 {
786
787         PROC_LOCK_ASSERT(p, MA_OWNED);
788         newtd->td_realucred = crcowget(p->p_ucred);
789         newtd->td_ucred = newtd->td_realucred;
790         newtd->td_limit = lim_hold(p->p_limit);
791         newtd->td_cowgen = p->p_cowgen;
792 }
793
794 void
795 thread_cow_get(struct thread *newtd, struct thread *td)
796 {
797
798         MPASS(td->td_realucred == td->td_ucred);
799         newtd->td_realucred = crcowget(td->td_realucred);
800         newtd->td_ucred = newtd->td_realucred;
801         newtd->td_limit = lim_hold(td->td_limit);
802         newtd->td_cowgen = td->td_cowgen;
803 }
804
805 void
806 thread_cow_free(struct thread *td)
807 {
808
809         if (td->td_realucred != NULL)
810                 crcowfree(td);
811         if (td->td_limit != NULL)
812                 lim_free(td->td_limit);
813 }
814
815 void
816 thread_cow_update(struct thread *td)
817 {
818         struct proc *p;
819         struct ucred *oldcred;
820         struct plimit *oldlimit;
821
822         p = td->td_proc;
823         oldlimit = NULL;
824         PROC_LOCK(p);
825         oldcred = crcowsync();
826         if (td->td_limit != p->p_limit) {
827                 oldlimit = td->td_limit;
828                 td->td_limit = lim_hold(p->p_limit);
829         }
830         td->td_cowgen = p->p_cowgen;
831         PROC_UNLOCK(p);
832         if (oldcred != NULL)
833                 crfree(oldcred);
834         if (oldlimit != NULL)
835                 lim_free(oldlimit);
836 }
837
838 /*
839  * Discard the current thread and exit from its context.
840  * Always called with scheduler locked.
841  *
842  * Because we can't free a thread while we're operating under its context,
843  * push the current thread into our CPU's deadthread holder. This means
844  * we needn't worry about someone else grabbing our context before we
845  * do a cpu_throw().
846  */
847 void
848 thread_exit(void)
849 {
850         uint64_t runtime, new_switchtime;
851         struct thread *td;
852         struct thread *td2;
853         struct proc *p;
854         int wakeup_swapper;
855
856         td = curthread;
857         p = td->td_proc;
858
859         PROC_SLOCK_ASSERT(p, MA_OWNED);
860         mtx_assert(&Giant, MA_NOTOWNED);
861
862         PROC_LOCK_ASSERT(p, MA_OWNED);
863         KASSERT(p != NULL, ("thread exiting without a process"));
864         CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
865             (long)p->p_pid, td->td_name);
866         SDT_PROBE0(proc, , , lwp__exit);
867         KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
868         MPASS(td->td_realucred == td->td_ucred);
869
870         /*
871          * Drop FPU & debug register state storage, or any other
872          * architecture-specific resources that would not be present
873          * in a new, untouched process.
874          */
875         cpu_thread_exit(td);
876
877         /*
878          * The last thread is left attached to the process
879          * so that the whole bundle gets recycled. Skip
880          * all this stuff if we never had threads.
881          * EXIT clears all signs of other threads when
882          * it goes to single threading, so the last thread always
883          * takes the short path.
884          */
885         if (p->p_flag & P_HADTHREADS) {
886                 if (p->p_numthreads > 1) {
887                         atomic_add_int(&td->td_proc->p_exitthreads, 1);
888                         thread_unlink(td);
889                         td2 = FIRST_THREAD_IN_PROC(p);
890                         sched_exit_thread(td2, td);
891
892                         /*
893                          * The test below is NOT true if we are the
894                          * sole exiting thread. P_STOPPED_SINGLE is unset
895                          * in exit1() after it is the only survivor.
896                          */
897                         if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
898                                 if (p->p_numthreads == p->p_suspcount) {
899                                         thread_lock(p->p_singlethread);
900                                         wakeup_swapper = thread_unsuspend_one(
901                                                 p->p_singlethread, p, false);
902                                         if (wakeup_swapper)
903                                                 kick_proc0();
904                                 }
905                         }
906
907                         PCPU_SET(deadthread, td);
908                 } else {
909                         /*
910                          * The last thread is exiting, but not through exit().
911                          */
912                         panic ("thread_exit: Last thread exiting on its own");
913                 }
914         } 
915 #ifdef  HWPMC_HOOKS
916         /*
917          * If this thread is part of a process that is being tracked by hwpmc(4),
918          * inform the module of the thread's impending exit.
919          */
920         if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
921                 PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
922                 PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
923         } else if (PMC_SYSTEM_SAMPLING_ACTIVE())
924                 PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
925 #endif
926         PROC_UNLOCK(p);
927         PROC_STATLOCK(p);
928         thread_lock(td);
929         PROC_SUNLOCK(p);
930
931         /* Do the same timestamp bookkeeping that mi_switch() would do. */
932         new_switchtime = cpu_ticks();
933         runtime = new_switchtime - PCPU_GET(switchtime);
934         td->td_runtime += runtime;
935         td->td_incruntime += runtime;
936         PCPU_SET(switchtime, new_switchtime);
937         PCPU_SET(switchticks, ticks);
938         VM_CNT_INC(v_swtch);
939
940         /* Save our resource usage in our process. */
941         td->td_ru.ru_nvcsw++;
942         ruxagg_locked(p, td);
943         rucollect(&p->p_ru, &td->td_ru);
944         PROC_STATUNLOCK(p);
945
946         td->td_state = TDS_INACTIVE;
947 #ifdef WITNESS
948         witness_thread_exit(td);
949 #endif
950         CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
951         sched_throw(td);
952         panic("I'm a teapot!");
953         /* NOTREACHED */
954 }
955
956 /*
957  * Do any thread-specific cleanups that may be needed in wait().
958  * Called with Giant, proc and schedlock not held.
959  */
960 void
961 thread_wait(struct proc *p)
962 {
963         struct thread *td;
964
965         mtx_assert(&Giant, MA_NOTOWNED);
966         KASSERT(p->p_numthreads == 1, ("multiple threads in thread_wait()"));
967         KASSERT(p->p_exitthreads == 0, ("p_exitthreads leaking"));
968         td = FIRST_THREAD_IN_PROC(p);
969         /* Lock the last thread so we spin until it exits cpu_throw(). */
970         thread_lock(td);
971         thread_unlock(td);
972         lock_profile_thread_exit(td);
973         cpuset_rel(td->td_cpuset);
974         td->td_cpuset = NULL;
975         cpu_thread_clean(td);
976         thread_cow_free(td);
977         callout_drain(&td->td_slpcallout);
978         thread_reap();  /* check for zombie threads etc. */
979 }
980
981 /*
982  * Link a thread to a process.
983  * Set up anything that needs to be initialized for it to
984  * be used by the process.
985  */
986 void
987 thread_link(struct thread *td, struct proc *p)
988 {
989
990         /*
991          * XXX This can't be enabled because it's called for proc0 before
992          * its lock has been created.
993          * PROC_LOCK_ASSERT(p, MA_OWNED);
994          */
995         td->td_state    = TDS_INACTIVE;
996         td->td_proc     = p;
997         td->td_flags    = TDF_INMEM;
998
999         LIST_INIT(&td->td_contested);
1000         LIST_INIT(&td->td_lprof[0]);
1001         LIST_INIT(&td->td_lprof[1]);
1002 #ifdef EPOCH_TRACE
1003         SLIST_INIT(&td->td_epochs);
1004 #endif
1005         sigqueue_init(&td->td_sigqueue, p);
1006         callout_init(&td->td_slpcallout, 1);
1007         TAILQ_INSERT_TAIL(&p->p_threads, td, td_plist);
1008         p->p_numthreads++;
1009 }
1010
1011 /*
1012  * Called from:
1013  *  thread_exit()
1014  */
1015 void
1016 thread_unlink(struct thread *td)
1017 {
1018         struct proc *p = td->td_proc;
1019
1020         PROC_LOCK_ASSERT(p, MA_OWNED);
1021 #ifdef EPOCH_TRACE
1022         MPASS(SLIST_EMPTY(&td->td_epochs));
1023 #endif
1024
1025         TAILQ_REMOVE(&p->p_threads, td, td_plist);
1026         p->p_numthreads--;
1027         /* could clear a few other things here */
1028         /* Must NOT clear links to proc! */
1029 }
1030
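/*
 * Compute how many threads still stand in the way of the requested
 * single-threading mode: every thread for SINGLE_EXIT, every thread not
 * parked at the user boundary for SINGLE_BOUNDARY, and every thread not
 * yet suspended for SINGLE_NO_EXIT and SINGLE_ALLPROC.
 */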
1031 static int
1032 calc_remaining(struct proc *p, int mode)
1033 {
1034         int remaining;
1035
1036         PROC_LOCK_ASSERT(p, MA_OWNED);
1037         PROC_SLOCK_ASSERT(p, MA_OWNED);
1038         if (mode == SINGLE_EXIT)
1039                 remaining = p->p_numthreads;
1040         else if (mode == SINGLE_BOUNDARY)
1041                 remaining = p->p_numthreads - p->p_boundary_count;
1042         else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
1043                 remaining = p->p_numthreads - p->p_suspcount;
1044         else
1045                 panic("calc_remaining: wrong mode %d", mode);
1046         return (remaining);
1047 }
1048
1049 static int
1050 remain_for_mode(int mode)
1051 {
1052
1053         return (mode == SINGLE_ALLPROC ? 0 : 1);
1054 }
1055
1056 static int
1057 weed_inhib(int mode, struct thread *td2, struct proc *p)
1058 {
1059         int wakeup_swapper;
1060
1061         PROC_LOCK_ASSERT(p, MA_OWNED);
1062         PROC_SLOCK_ASSERT(p, MA_OWNED);
1063         THREAD_LOCK_ASSERT(td2, MA_OWNED);
1064
1065         wakeup_swapper = 0;
1066
1067         /*
1068          * Since the thread lock is dropped by the scheduler we have
1069          * to retry to check for races.
1070          */
1071 restart:
1072         switch (mode) {
1073         case SINGLE_EXIT:
1074                 if (TD_IS_SUSPENDED(td2)) {
1075                         wakeup_swapper |= thread_unsuspend_one(td2, p, true);
1076                         thread_lock(td2);
1077                         goto restart;
1078                 }
1079                 if (TD_CAN_ABORT(td2)) {
1080                         wakeup_swapper |= sleepq_abort(td2, EINTR);
1081                         return (wakeup_swapper);
1082                 }
1083                 break;
1084         case SINGLE_BOUNDARY:
1085         case SINGLE_NO_EXIT:
1086                 if (TD_IS_SUSPENDED(td2) &&
1087                     (td2->td_flags & TDF_BOUNDARY) == 0) {
1088                         wakeup_swapper |= thread_unsuspend_one(td2, p, false);
1089                         thread_lock(td2);
1090                         goto restart;
1091                 }
1092                 if (TD_CAN_ABORT(td2)) {
1093                         wakeup_swapper |= sleepq_abort(td2, ERESTART);
1094                         return (wakeup_swapper);
1095                 }
1096                 break;
1097         case SINGLE_ALLPROC:
1098                 /*
1099                  * ALLPROC suspend tries to avoid spurious EINTR for
1100                  * threads sleeping interruptibly, by suspending the
1101                  * thread directly, similarly to sig_suspend_threads().
1102                  * Since such sleep is not performed at the user
1103                  * boundary, TDF_BOUNDARY flag is not set, and TDF_ALLPROCSUSP
1104                  * is used to avoid immediate un-suspend.
1105                  */
1106                 if (TD_IS_SUSPENDED(td2) && (td2->td_flags & (TDF_BOUNDARY |
1107                     TDF_ALLPROCSUSP)) == 0) {
1108                         wakeup_swapper |= thread_unsuspend_one(td2, p, false);
1109                         thread_lock(td2);
1110                         goto restart;
1111                 }
1112                 if (TD_CAN_ABORT(td2)) {
1113                         if ((td2->td_flags & TDF_SBDRY) == 0) {
1114                                 thread_suspend_one(td2);
1115                                 td2->td_flags |= TDF_ALLPROCSUSP;
1116                         } else {
1117                                 wakeup_swapper |= sleepq_abort(td2, ERESTART);
1118                                 return (wakeup_swapper);
1119                         }
1120                 }
1121                 break;
1122         default:
1123                 break;
1124         }
1125         thread_unlock(td2);
1126         return (wakeup_swapper);
1127 }
1128
1129 /*
1130  * Enforce single-threading.
1131  *
1132  * Returns 1 if the caller must abort (another thread is waiting to
1133  * exit the process or similar). Process is locked!
1134  * Returns 0 when you are successfully the only thread running.
1135  * A process has successfully single-threaded in the suspend mode when
1136  * there are no threads in user mode. Threads in the kernel must be
1137  * allowed to continue until they get to the user boundary. They may even
1138  * copy out their return values and data before suspending. They may,
1139  * however, be accelerated in reaching the user boundary as we will wake
1140  * up any sleeping threads that are interruptible (PCATCH).
1141  */
1142 int
1143 thread_single(struct proc *p, int mode)
1144 {
1145         struct thread *td;
1146         struct thread *td2;
1147         int remaining, wakeup_swapper;
1148
1149         td = curthread;
1150         KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
1151             mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
1152             ("invalid mode %d", mode));
1153         /*
1154          * If allowing non-ALLPROC singlethreading for non-curproc
1155          * callers, calc_remaining() and remain_for_mode() should be
1156          * adjusted to also account for td->td_proc != p.  For now
1157          * this is not implemented because it is not used.
1158          */
1159         KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
1160             (mode != SINGLE_ALLPROC && td->td_proc == p),
1161             ("mode %d proc %p curproc %p", mode, p, td->td_proc));
1162         mtx_assert(&Giant, MA_NOTOWNED);
1163         PROC_LOCK_ASSERT(p, MA_OWNED);
1164
1165         if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
1166                 return (0);
1167
1168         /* Is someone already single threading? */
1169         if (p->p_singlethread != NULL && p->p_singlethread != td)
1170                 return (1);
1171
1172         if (mode == SINGLE_EXIT) {
1173                 p->p_flag |= P_SINGLE_EXIT;
1174                 p->p_flag &= ~P_SINGLE_BOUNDARY;
1175         } else {
1176                 p->p_flag &= ~P_SINGLE_EXIT;
1177                 if (mode == SINGLE_BOUNDARY)
1178                         p->p_flag |= P_SINGLE_BOUNDARY;
1179                 else
1180                         p->p_flag &= ~P_SINGLE_BOUNDARY;
1181         }
1182         if (mode == SINGLE_ALLPROC)
1183                 p->p_flag |= P_TOTAL_STOP;
1184         p->p_flag |= P_STOPPED_SINGLE;
1185         PROC_SLOCK(p);
1186         p->p_singlethread = td;
1187         remaining = calc_remaining(p, mode);
1188         while (remaining != remain_for_mode(mode)) {
1189                 if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
1190                         goto stopme;
1191                 wakeup_swapper = 0;
1192                 FOREACH_THREAD_IN_PROC(p, td2) {
1193                         if (td2 == td)
1194                                 continue;
1195                         thread_lock(td2);
1196                         td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
1197                         if (TD_IS_INHIBITED(td2)) {
1198                                 wakeup_swapper |= weed_inhib(mode, td2, p);
1199 #ifdef SMP
1200                         } else if (TD_IS_RUNNING(td2) && td != td2) {
1201                                 forward_signal(td2);
1202                                 thread_unlock(td2);
1203 #endif
1204                         } else
1205                                 thread_unlock(td2);
1206                 }
1207                 if (wakeup_swapper)
1208                         kick_proc0();
1209                 remaining = calc_remaining(p, mode);
1210
1211                 /*
1212                  * Maybe we suspended some threads; was it enough?
1213                  */
1214                 if (remaining == remain_for_mode(mode))
1215                         break;
1216
1217 stopme:
1218                 /*
1219                  * Wake us up when everyone else has suspended.
1220                  * In the meantime we suspend as well.
1221                  */
1222                 thread_suspend_switch(td, p);
1223                 remaining = calc_remaining(p, mode);
1224         }
1225         if (mode == SINGLE_EXIT) {
1226                 /*
1227                  * Convert the process to an unthreaded process.  The
1228                  * SINGLE_EXIT is called by exit1() or execve(), in
1229                  * both cases other threads must be retired.
1230                  */
1231                 KASSERT(p->p_numthreads == 1, ("Unthreading with >1 threads"));
1232                 p->p_singlethread = NULL;
1233                 p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_HADTHREADS);
1234
1235                 /*
1236                  * Wait for any remaining threads to exit cpu_throw().
1237                  */
1238                 while (p->p_exitthreads != 0) {
1239                         PROC_SUNLOCK(p);
1240                         PROC_UNLOCK(p);
1241                         sched_relinquish(td);
1242                         PROC_LOCK(p);
1243                         PROC_SLOCK(p);
1244                 }
1245         } else if (mode == SINGLE_BOUNDARY) {
1246                 /*
1247                  * Wait until all suspended threads are removed from
1248                  * the processors.  The thread_suspend_check()
1249                  * increments p_boundary_count while it is still
1250                  * running, which makes it possible for the execve()
1251                  * to destroy vmspace while our other threads are
1252                  * still using the address space.
1253                  *
1254                  * We lock the thread, which is only allowed to
1255                  * succeed after the context switch code has finished using
1256                  * the address space.
1257                  */
1258                 FOREACH_THREAD_IN_PROC(p, td2) {
1259                         if (td2 == td)
1260                                 continue;
1261                         thread_lock(td2);
1262                         KASSERT((td2->td_flags & TDF_BOUNDARY) != 0,
1263                             ("td %p not on boundary", td2));
1264                         KASSERT(TD_IS_SUSPENDED(td2),
1265                             ("td %p is not suspended", td2));
1266                         thread_unlock(td2);
1267                 }
1268         }
1269         PROC_SUNLOCK(p);
1270         return (0);
1271 }
1272
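/*
 * Report whether the current thread must pass through
 * thread_suspend_check(): either the process is stopping, or the
 * debugger has asked this particular thread to suspend.
 */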
1273 bool
1274 thread_suspend_check_needed(void)
1275 {
1276         struct proc *p;
1277         struct thread *td;
1278
1279         td = curthread;
1280         p = td->td_proc;
1281         PROC_LOCK_ASSERT(p, MA_OWNED);
1282         return (P_SHOULDSTOP(p) || ((p->p_flag & P_TRACED) != 0 &&
1283             (td->td_dbgflags & TDB_SUSPEND) != 0));
1284 }
1285
1286 /*
1287  * Called in from locations that can safely check to see
1288  * whether we have to suspend or at least throttle for a
1289  * single-thread event (e.g. fork).
1290  *
1291  * Such locations include userret().
1292  * If the "return_instead" argument is non-zero, the thread must be able to
1293  * accept 0 (caller may continue), or 1 (caller must abort) as a result.
1294  *
1295  * The 'return_instead' argument tells the function if it may do a
1296  * thread_exit() or suspend, or whether the caller must abort and back
1297  * out instead.
1298  *
1299  * If the thread that set the single_threading request has set the
1300  * P_SINGLE_EXIT bit in the process flags then this call will never return
1301  * if 'return_instead' is false, but will exit.
1302  *
1303  * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
1304  *---------------+--------------------+---------------------
1305  *       0       | returns 0          |   returns 0 or 1
1306  *               | when ST ends       |   immediately
1307  *---------------+--------------------+---------------------
1308  *       1       | thread exits       |   returns 1
1309  *               |                    |  immediately
1310  * 0 = thread_exit() or suspension ok,
1311  * other = return error instead of stopping the thread.
1312  *
1313  * While a full suspension is under effect, even a single threading
1314  * thread would be suspended if it made this call (but it shouldn't).
1315  * This call should only be made from places where
1316  * thread_exit() would be safe as that may be the outcome unless
1317  * return_instead is set.
1318  */
1319 int
1320 thread_suspend_check(int return_instead)
1321 {
1322         struct thread *td;
1323         struct proc *p;
1324         int wakeup_swapper;
1325
1326         td = curthread;
1327         p = td->td_proc;
1328         mtx_assert(&Giant, MA_NOTOWNED);
1329         PROC_LOCK_ASSERT(p, MA_OWNED);
1330         while (thread_suspend_check_needed()) {
1331                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1332                         KASSERT(p->p_singlethread != NULL,
1333                             ("singlethread not set"));
1334                         /*
1335                          * The only suspension in action is a
1336                          * single-threading. The single threader need not stop.
1337                          * It is safe to access p->p_singlethread unlocked
1338                          * because it can only be set to our address by us.
1339                          */
1340                         if (p->p_singlethread == td)
1341                                 return (0);     /* Exempt from stopping. */
1342                 }
1343                 if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
1344                         return (EINTR);
1345
1346                 /* Should we go to the user boundary if we didn't come from there? */
1347                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
1348                     (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
1349                         return (ERESTART);
1350
1351                 /*
1352                  * Ignore suspend requests if they are deferred.
1353                  */
1354                 if ((td->td_flags & TDF_SBDRY) != 0) {
1355                         KASSERT(return_instead,
1356                             ("TDF_SBDRY set for unsafe thread_suspend_check"));
1357                         KASSERT((td->td_flags & (TDF_SEINTR | TDF_SERESTART)) !=
1358                             (TDF_SEINTR | TDF_SERESTART),
1359                             ("both TDF_SEINTR and TDF_SERESTART"));
1360                         return (TD_SBDRY_INTR(td) ? TD_SBDRY_ERRNO(td) : 0);
1361                 }
1362
1363                 /*
1364                  * If the process is waiting for us to exit,
1365                  * this thread should just suicide.
1366                  * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
1367                  */
1368                 if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
1369                         PROC_UNLOCK(p);
1370
1371                         /*
1372                          * Allow Linux emulation layer to do some work
1373                          * before thread suicide.
1374                          */
1375                         if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
1376                                 (p->p_sysent->sv_thread_detach)(td);
1377                         umtx_thread_exit(td);
1378                         kern_thr_exit(td);
1379                         panic("stopped thread did not exit");
1380                 }
1381
1382                 PROC_SLOCK(p);
1383                 thread_stopped(p);
1384                 if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
1385                         if (p->p_numthreads == p->p_suspcount + 1) {
1386                                 thread_lock(p->p_singlethread);
1387                                 wakeup_swapper = thread_unsuspend_one(
1388                                     p->p_singlethread, p, false);
1389                                 if (wakeup_swapper)
1390                                         kick_proc0();
1391                         }
1392                 }
1393                 PROC_UNLOCK(p);
1394                 thread_lock(td);
1395                 /*
1396                  * When a thread suspends, it just
1397                  * gets taken off all queues.
1398                  */
1399                 thread_suspend_one(td);
1400                 if (return_instead == 0) {
1401                         p->p_boundary_count++;
1402                         td->td_flags |= TDF_BOUNDARY;
1403                 }
1404                 PROC_SUNLOCK(p);
1405                 mi_switch(SW_INVOL | SWT_SUSPEND);
1406                 PROC_LOCK(p);
1407         }
1408         return (0);
1409 }
1410
1411 /*
1412  * Check for possible stops and suspensions while executing a
1413  * casueword or similar transiently failing operation.
1414  *
1415  * The sleep argument controls whether the function can handle a stop
1416  * request itself or it should return ERESTART so that the request is
1417  * processed at the kernel/user boundary in ast.
1418  *
1419  * Typically, when retrying due to casueword(9) failure (rv == 1), we
1420  * should handle the stop requests there, with the exception of cases
1421  * when the thread owns a kernel resource, for instance has busied the
1422  * umtx key, or when functions return immediately if thread_check_susp()
1423  * returned non-zero.  On the other hand, when retrying the whole lock
1424  * operation, we had better not stop there but delegate the handling to
1425  * ast.
1426  *
1427  * If the request is for thread termination P_SINGLE_EXIT, we cannot
1428  * handle it at all, and simply return EINTR.
1429  */
1430 int
1431 thread_check_susp(struct thread *td, bool sleep)
1432 {
1433         struct proc *p;
1434         int error;
1435
1436         /*
1437          * The check for TDF_NEEDSUSPCHK is racy, but it is enough to
1438          * eventually break the lockstep loop.
1439          */
1440         if ((td->td_flags & TDF_NEEDSUSPCHK) == 0)
1441                 return (0);
1442         error = 0;
1443         p = td->td_proc;
1444         PROC_LOCK(p);
1445         if (p->p_flag & P_SINGLE_EXIT)
1446                 error = EINTR;
1447         else if (P_SHOULDSTOP(p) ||
1448             ((p->p_flag & P_TRACED) && (td->td_dbgflags & TDB_SUSPEND)))
1449                 error = sleep ? thread_suspend_check(0) : ERESTART;
1450         PROC_UNLOCK(p);
1451         return (error);
1452 }
1453
1454 void
1455 thread_suspend_switch(struct thread *td, struct proc *p)
1456 {
1457
1458         KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1459         PROC_LOCK_ASSERT(p, MA_OWNED);
1460         PROC_SLOCK_ASSERT(p, MA_OWNED);
1461         /*
1462          * We implement thread_suspend_one in stages here to avoid
1463          * dropping the proc lock while the thread lock is owned.
1464          */
1465         if (p == td->td_proc) {
1466                 thread_stopped(p);
1467                 p->p_suspcount++;
1468         }
1469         PROC_UNLOCK(p);
1470         thread_lock(td);
1471         td->td_flags &= ~TDF_NEEDSUSPCHK;
1472         TD_SET_SUSPENDED(td);
1473         sched_sleep(td, 0);
1474         PROC_SUNLOCK(p);
1475         DROP_GIANT();
1476         mi_switch(SW_VOL | SWT_SUSPEND);
1477         PICKUP_GIANT();
1478         PROC_LOCK(p);
1479         PROC_SLOCK(p);
1480 }
1481
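/*
 * Mark a thread as suspended and account for it in p_suspcount.  Unlike
 * thread_suspend_switch(), this does not drop the proc lock or switch
 * out, so it may be applied to threads other than curthread.
 */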
1482 void
1483 thread_suspend_one(struct thread *td)
1484 {
1485         struct proc *p;
1486
1487         p = td->td_proc;
1488         PROC_SLOCK_ASSERT(p, MA_OWNED);
1489         THREAD_LOCK_ASSERT(td, MA_OWNED);
1490         KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
1491         p->p_suspcount++;
1492         td->td_flags &= ~TDF_NEEDSUSPCHK;
1493         TD_SET_SUSPENDED(td);
1494         sched_sleep(td, 0);
1495 }
1496
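/*
 * Clear a thread's suspension and make it runnable again, updating the
 * suspend and boundary counts when the thread belongs to the process
 * being operated on.  Returns non-zero if the swapper must be woken up.
 */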
1497 static int
1498 thread_unsuspend_one(struct thread *td, struct proc *p, bool boundary)
1499 {
1500
1501         THREAD_LOCK_ASSERT(td, MA_OWNED);
1502         KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
1503         TD_CLR_SUSPENDED(td);
1504         td->td_flags &= ~TDF_ALLPROCSUSP;
1505         if (td->td_proc == p) {
1506                 PROC_SLOCK_ASSERT(p, MA_OWNED);
1507                 p->p_suspcount--;
1508                 if (boundary && (td->td_flags & TDF_BOUNDARY) != 0) {
1509                         td->td_flags &= ~TDF_BOUNDARY;
1510                         p->p_boundary_count--;
1511                 }
1512         }
1513         return (setrunnable(td, 0));
1514 }
1515
1516 /*
1517  * Allow all threads blocked by single threading to continue running.
1518  */
1519 void
1520 thread_unsuspend(struct proc *p)
1521 {
1522         struct thread *td;
1523         int wakeup_swapper;
1524
1525         PROC_LOCK_ASSERT(p, MA_OWNED);
1526         PROC_SLOCK_ASSERT(p, MA_OWNED);
1527         wakeup_swapper = 0;
1528         if (!P_SHOULDSTOP(p)) {
1529                 FOREACH_THREAD_IN_PROC(p, td) {
1530                         thread_lock(td);
1531                         if (TD_IS_SUSPENDED(td)) {
1532                                 wakeup_swapper |= thread_unsuspend_one(td, p,
1533                                     true);
1534                         } else
1535                                 thread_unlock(td);
1536                 }
1537         } else if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
1538             p->p_numthreads == p->p_suspcount) {
1539                 /*
1540                  * Stopping everything also did the job for the single
1541                  * threading request. Now that we've downgraded to
1542                  * single-threaded, let it continue.
1543                  */
1544                 if (p->p_singlethread->td_proc == p) {
1545                         thread_lock(p->p_singlethread);
1546                         wakeup_swapper = thread_unsuspend_one(
1547                             p->p_singlethread, p, false);
1548                 }
1549         }
1550         if (wakeup_swapper)
1551                 kick_proc0();
1552 }
1553
1554 /*
1554  * End the single-threading mode.
1556  */
1557 void
1558 thread_single_end(struct proc *p, int mode)
1559 {
1560         struct thread *td;
1561         int wakeup_swapper;
1562
1563         KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
1564             mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
1565             ("invalid mode %d", mode));
1566         PROC_LOCK_ASSERT(p, MA_OWNED);
1567         KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
1568             (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
1569             ("mode %d does not match P_TOTAL_STOP", mode));
1570         KASSERT(mode == SINGLE_ALLPROC || p->p_singlethread == curthread,
1571             ("thread_single_end from other thread %p %p",
1572             curthread, p->p_singlethread));
1573         KASSERT(mode != SINGLE_BOUNDARY ||
1574             (p->p_flag & P_SINGLE_BOUNDARY) != 0,
1575             ("mis-matched SINGLE_BOUNDARY flags %x", p->p_flag));
1576         p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
1577             P_TOTAL_STOP);
1578         PROC_SLOCK(p);
1579         p->p_singlethread = NULL;
1580         wakeup_swapper = 0;
1581         /*
1582          * If there are other threads, they may now run,
1583          * unless of course there is a blanket 'stop order'
1584          * on the process. The single threader must be allowed
1585          * to continue, however, as this is a bad place to stop.
1586          */
1587         if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
1588                 FOREACH_THREAD_IN_PROC(p, td) {
1589                         thread_lock(td);
1590                         if (TD_IS_SUSPENDED(td)) {
1591                                 wakeup_swapper |= thread_unsuspend_one(td, p,
1592                                     mode == SINGLE_BOUNDARY);
1593                         } else
1594                                 thread_unlock(td);
1595                 }
1596         }
1597         KASSERT(mode != SINGLE_BOUNDARY || p->p_boundary_count == 0,
1598             ("inconsistent boundary count %d", p->p_boundary_count));
1599         PROC_SUNLOCK(p);
1600         if (wakeup_swapper)
1601                 kick_proc0();
1602 }
1603
1604 /*
1605  * Locate a thread by number and return with proc lock held.
1606  *
1607  * thread exit establishes proc -> tidhash lock ordering, but lookup
1608  * takes tidhash first and needs to return locked proc.
1609  *
1610  * The problem is worked around by relying on type-safety of both
1611  * structures and doing the work in 2 steps:
1612  * - tidhash-locked lookup which saves both thread and proc pointers
1613  * - proc-locked verification that the found thread still matches
1614  */
1615 static bool
1616 tdfind_hash(lwpid_t tid, pid_t pid, struct proc **pp, struct thread **tdp)
1617 {
1618 #define RUN_THRESH      16
1619         struct proc *p;
1620         struct thread *td;
1621         int run;
1622         bool locked;
1623
1624         run = 0;
1625         rw_rlock(TIDHASHLOCK(tid));
1626         locked = true;
1627         LIST_FOREACH(td, TIDHASH(tid), td_hash) {
1628                 if (td->td_tid != tid) {
1629                         run++;
1630                         continue;
1631                 }
1632                 p = td->td_proc;
1633                 if (pid != -1 && p->p_pid != pid) {
1634                         td = NULL;
1635                         break;
1636                 }
1637                 if (run > RUN_THRESH) {
1638                         if (rw_try_upgrade(TIDHASHLOCK(tid))) {
1639                                 LIST_REMOVE(td, td_hash);
1640                                 LIST_INSERT_HEAD(TIDHASH(td->td_tid),
1641                                         td, td_hash);
1642                                 rw_wunlock(TIDHASHLOCK(tid));
1643                                 locked = false;
1644                                 break;
1645                         }
1646                 }
1647                 break;
1648         }
1649         if (locked)
1650                 rw_runlock(TIDHASHLOCK(tid));
1651         if (td == NULL)
1652                 return (false);
1653         *pp = p;
1654         *tdp = td;
1655         return (true);
1656 }
1657
1658 struct thread *
1659 tdfind(lwpid_t tid, pid_t pid)
1660 {
1661         struct proc *p;
1662         struct thread *td;
1663
1664         td = curthread;
1665         if (td->td_tid == tid) {
1666                 if (pid != -1 && td->td_proc->p_pid != pid)
1667                         return (NULL);
1668                 PROC_LOCK(td->td_proc);
1669                 return (td);
1670         }
1671
1672         for (;;) {
1673                 if (!tdfind_hash(tid, pid, &p, &td))
1674                         return (NULL);
1675                 PROC_LOCK(p);
1676                 if (td->td_tid != tid) {
1677                         PROC_UNLOCK(p);
1678                         continue;
1679                 }
1680                 if (td->td_proc != p) {
1681                         PROC_UNLOCK(p);
1682                         continue;
1683                 }
1684                 if (p->p_state == PRS_NEW) {
1685                         PROC_UNLOCK(p);
1686                         return (NULL);
1687                 }
1688                 return (td);
1689         }
1690 }
1691
1692 void
1693 tidhash_add(struct thread *td)
1694 {
1695         rw_wlock(TIDHASHLOCK(td->td_tid));
1696         LIST_INSERT_HEAD(TIDHASH(td->td_tid), td, td_hash);
1697         rw_wunlock(TIDHASHLOCK(td->td_tid));
1698 }
1699
1700 void
1701 tidhash_remove(struct thread *td)
1702 {
1703
1704         rw_wlock(TIDHASHLOCK(td->td_tid));
1705         LIST_REMOVE(td, td_hash);
1706         rw_wunlock(TIDHASHLOCK(td->td_tid));
1707 }