/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/kse.h>
#include <sys/ktr.h>

#include <vm/uma.h>
static uma_zone_t upcall_zone;

extern int virtual_cpu;
extern int thread_debug;

extern int max_threads_per_proc;
extern int max_groups_per_proc;
extern int max_threads_hits;
extern struct mtx kse_zombie_lock;

TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);

static int thread_update_usr_ticks(struct thread *td);
static void thread_alloc_spare(struct thread *td);
struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK | M_ZERO);
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}
void
upcall_link(struct kse_upcall *ku, struct proc *p)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&p->p_upcalls, ku, ku_link);
	ku->ku_proc = p;
}

void
upcall_unlink(struct kse_upcall *ku)
{
	struct proc *p = ku->ku_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&p->p_upcalls, ku, ku_link);
	upcall_stash(ku);
}
void
upcall_remove(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_upcall != NULL) {
		/*
		 * If we are not a bound thread then decrement the count of
		 * possible upcall sources.
		 */
		if (td->td_pflags & TDP_SA)
			td->td_proc->p_numupcalls--;
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = NULL;
	}
}
#ifndef _SYS_SYSPROTO_H_
struct kse_switchin_args {
	struct kse_thr_mailbox *tmbx;
	int flags;
};
#endif

int
kse_switchin(struct thread *td, struct kse_switchin_args *uap)
{
	struct kse_thr_mailbox tmbx;
	struct kse_upcall *ku;
	int error;

	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	error = (uap->tmbx == NULL) ? EINVAL : 0;
	if (!error)
		error = copyin(uap->tmbx, &tmbx, sizeof(tmbx));
	if (!error && (uap->flags & KSE_SWITCHIN_SETTMBX))
		error = (suword(&ku->ku_mailbox->km_curthread,
		    (long)uap->tmbx) != 0 ? EINVAL : 0);
	if (!error)
		error = set_mcontext(td, &tmbx.tm_context.uc_mcontext);
	if (!error) {
		suword32(&uap->tmbx->tm_lwp, td->td_tid);
		if (uap->flags & KSE_SWITCHIN_SETTMBX) {
			td->td_mailbox = uap->tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
		}
		PROC_LOCK(td->td_proc);
		if (td->td_proc->p_flag & P_TRACED) {
			_PHOLD(td->td_proc);
			if (tmbx.tm_dflags & TMDF_SSTEP)
				ptrace_single_step(td);
			else
				ptrace_clear_single_step(td);
			if (tmbx.tm_dflags & TMDF_SUSPEND) {
				mtx_lock_spin(&sched_lock);
				/* fuword can block, check again */
				if (td->td_upcall)
					ku->ku_flags |= KUF_DOUPCALL;
				mtx_unlock_spin(&sched_lock);
			}
			_PRELE(td->td_proc);
		}
		PROC_UNLOCK(td->td_proc);
	}
	return ((error == 0) ? EJUSTRETURN : error);
}
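
/*
 * Illustrative sketch, not part of the kernel source: how a userland
 * UTS might use the kse_switchin(2) entry point above to resume a
 * previously saved user thread.  The uts_resume() helper name is
 * hypothetical; kse_switchin() and KSE_SWITCHIN_SETTMBX are real.
 */
#if 0
static void
uts_resume(struct kse_thr_mailbox *tmbx)
{
	/*
	 * Load tmbx's saved context and publish tmbx as km_curthread.
	 * On success this does not return here (EJUSTRETURN above);
	 * control continues in the resumed thread's context.
	 */
	if (kse_switchin(tmbx, KSE_SWITCHIN_SETTMBX) != 0)
		abort();		/* bad mailbox or not an upcall */
}
#endif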
#ifndef _SYS_SYSPROTO_H_
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox *tmbx;
	int cmd;
	long data;
};
#endif

int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct kse_execve_args args;
	struct image_args iargs;
	struct proc *p;
	struct thread *td2;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;
	int error;

	p = td->td_proc;
	if (!(p->p_flag & P_SA))
		return (EINVAL);

	switch (uap->cmd) {
	case KSE_INTR_SENDSIG:
		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		/* FALLTHROUGH */
	case KSE_INTR_INTERRUPT:
	case KSE_INTR_RESTART:
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2->td_mailbox == uap->tmbx)
				break;
		}
		if (td2 == NULL) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (ESRCH);
		}
		if (uap->cmd == KSE_INTR_SENDSIG) {
			if (uap->data > 0) {
				td2->td_flags &= ~TDF_INTERRUPT;
				mtx_unlock_spin(&sched_lock);
				tdsignal(p, td2, (int)uap->data, NULL);
			} else {
				mtx_unlock_spin(&sched_lock);
			}
		} else {
			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
			if (TD_CAN_UNBIND(td2))
				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
			if (uap->cmd == KSE_INTR_INTERRUPT)
				td2->td_intrval = EINTR;
			else
				td2->td_intrval = ERESTART;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR))
				sleepq_abort(td2, td2->td_intrval);
			mtx_unlock_spin(&sched_lock);
		}
		PROC_UNLOCK(p);
		break;

	case KSE_INTR_SIGEXIT:
		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		PROC_LOCK(p);
		sigexit(td, (int)uap->data);
		/* NOTREACHED */

	case KSE_INTR_DBSUSPEND:
		/* This sub-command is only for bound threads. */
		if (td->td_pflags & TDP_SA)
			return (EINVAL);
		ku = td->td_upcall;
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if (tmbx == NULL || tmbx == (void *)-1)
			return (EINVAL);
		while ((p->p_flag & P_TRACED) && !(p->p_flag & P_SINGLE_EXIT)) {
			flags = fuword32(&tmbx->tm_dflags);
			if (!(flags & TMDF_SUSPEND))
				break;
			PROC_LOCK(p);
			mtx_lock_spin(&sched_lock);
			thread_stopped(p);
			thread_suspend_one(td);
			PROC_UNLOCK(p);
			mi_switch(SW_VOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
		return (0);

	case KSE_INTR_EXECVE:
		error = copyin((void *)uap->data, &args, sizeof(args));
		if (error)
			return (error);
		error = exec_copyin_args(&iargs, args.path, UIO_USERSPACE,
		    args.argv, args.envp);
		if (error == 0)
			error = kern_execve(td, &iargs, NULL);
		if (error == 0) {
			PROC_LOCK(p);
			SIGSETOR(td->td_siglist, args.sigpend);
			PROC_UNLOCK(p);
			kern_sigprocmask(td, SIG_SETMASK, &args.sigmask, NULL,
			    0);
		}
		return (error);

	default:
		return (EINVAL);
	}
	return (0);
}
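
/*
 * Illustrative sketch, not part of the kernel source: the two most
 * common uses of kse_thr_interrupt(2) from a threading library.
 * The helper names are hypothetical; the command constants are real.
 */
#if 0
static void
uts_post_signal(struct kse_thr_mailbox *tmbx, int sig)
{
	/* Deliver sig to the user thread owning tmbx. */
	kse_thr_interrupt(tmbx, KSE_INTR_SENDSIG, (long)sig);
}

static void
uts_cancel_syscall(struct kse_thr_mailbox *tmbx)
{
	/* Knock the thread out of an interruptible sleep with EINTR. */
	kse_thr_interrupt(tmbx, KSE_INTR_INTERRUPT, 0);
}
#endif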
#ifndef _SYS_SYSPROTO_H_
struct kse_exit_args {
	register_t dummy;
};
#endif

int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct kse_upcall *ku, *ku2;
	int error, count;

	p = td->td_proc;
	/*
	 * Ensure that this is only called from the UTS.
	 */
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);

	/*
	 * Count the existing non-exiting upcalls in this process.
	 * If we are the last upcall but there are still other threads,
	 * then do not exit. We need the other threads to be able to
	 * complete whatever they are doing.
	 * XXX This relies on the userland knowing what to do if we return.
	 * It may be a better choice to convert ourselves into a kse_release
	 * (or similar) and wait in the kernel to be needed.
	 * XXX Where are those other threads? I suppose they are waiting in
	 * the kernel. We should wait for them all at the user boundary after
	 * turning into an exit.
	 */
	count = 0;
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	FOREACH_UPCALL_IN_PROC(p, ku2) {
		if ((ku2->ku_flags & KUF_EXITING) == 0)
			count++;
	}
	if (count == 1 && (p->p_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);

	/*
	 * Mark the UTS mailbox as having been finished with.
	 * If that fails then just go for a segfault.
	 * XXX need to check whether the signal can be delivered
	 * without a mailbox.
	 */
	error = suword32(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	if (!(td->td_pflags & TDP_SA))
		if (suword32(&td->td_mailbox->tm_lwp, 0))
			error = EFAULT;
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	sigqueue_flush(&td->td_sigqueue);
	mtx_lock_spin(&sched_lock);
	upcall_remove(td);
	if (p->p_numthreads != 1) {
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	/*
	 * This is the last thread. Just return to the user.
	 * Effectively we have left threading mode.
	 * The only real thing left to do is ensure that the
	 * scheduler sets our concurrency back to 1 as that may be a
	 * resource leak otherwise.
	 * This is an A[PB]I issue.. what SHOULD we do?
	 * One possibility is to return to the user. It may not cope well.
	 * The other possibility would be to let the process exit.
	 */
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	printf("kse_exit: called on last thread. Calling exit1()\n");
	exit1(td, 0);
	/* NOTREACHED */
}
/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
#ifndef _SYS_SYSPROTO_H_
struct kse_release_args {
	struct timespec *timeout;
};
#endif

int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct timeval tv;
	sigset_t sigset;
	struct kse_upcall *ku;
	struct timespec timeout;
	int error;

	p = td->td_proc;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) {
		printf("kse_release: called outside of threading. exiting\n");
		exit1(td, 0);
	}
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	if (td->td_pflags & TDP_SA)
		td->td_pflags |= TDP_UPCALLING;
	else {
		ku->ku_mflags = fuword32(&ku->ku_mailbox->km_flags);
		if (ku->ku_mflags == -1) {
			PROC_LOCK(p);
			sigexit(td, SIGSEGV);
		}
	}
	PROC_LOCK(p);
	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
		/* UTS wants to wait for signal event */
		if (!(p->p_flag & P_SIGEVENT) &&
		    !(ku->ku_flags & KUF_DOUPCALL)) {
			td->td_kflags |= TDK_KSERELSIG;
			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSERELSIG | TDK_WAKEUP);
		}
		p->p_flag &= ~P_SIGEVENT;
		sigset = p->p_siglist;
		PROC_UNLOCK(p);
		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
		    sizeof(sigset));
	} else {
		if ((ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    ((ku->ku_mflags & KMF_NOCOMPLETED) ||
		    (p->p_completed == NULL))) {
			p->p_upsleeps++;
			td->td_kflags |= TDK_KSEREL;
			error = msleep(&p->p_completed, &p->p_mtx,
			    PPAUSE|PCATCH, "kserel",
			    (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP);
			p->p_upsleeps--;
		}
		PROC_UNLOCK(p);
	}
	if (ku->ku_flags & KUF_DOUPCALL) {
		mtx_lock_spin(&sched_lock);
		ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}
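
/*
 * Illustrative sketch, not part of the kernel source: the UTS idle
 * path that kse_release(2) exists to serve.  With no runnable user
 * threads, an upcall thread parks itself in the kernel.  The helper
 * name and timeout value are hypothetical.
 */
#if 0
static void
uts_idle(void)
{
	struct timespec ts = { 1, 0 };	/* 1-second timeout, arbitrary */

	/*
	 * Park this KSE.  A kse_wakeup() or a newly completed thread
	 * normally restarts us via the upcall entry point (km_func);
	 * a plain return here means timeout or error.
	 */
	kse_release(&ts);
}
#endif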
/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_SA))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_UPCALL_IN_PROC(p, ku) {
			if (ku->ku_mailbox == uap->mbx)
				break;
		}
	} else {
		if (p->p_upsleeps) {
			mtx_unlock_spin(&sched_lock);
			wakeup(&p->p_completed);
			PROC_UNLOCK(p);
			td->td_retval[0] = 0;
			return (0);
		}
		ku = TAILQ_FIRST(&p->p_upcalls);
	}
	if (ku == NULL) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((td2 = ku->ku_owner) == NULL) {
		mtx_unlock_spin(&sched_lock);
		panic("%s: no owner", __func__);
	} else if (td2->td_kflags & (TDK_KSEREL | TDK_KSERELSIG)) {
		mtx_unlock_spin(&sched_lock);
		if (!(td2->td_kflags & TDK_WAKEUP)) {
			td2->td_kflags |= TDK_WAKEUP;
			if (td2->td_kflags & TDK_KSEREL)
				sleepq_remove(td2, &p->p_completed);
			else
				sleepq_remove(td2, &p->p_siglist);
		}
	} else {
		ku->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	PROC_UNLOCK(p);
	td->td_retval[0] = 0;
	return (0);
}
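
/*
 * Illustrative sketch, not part of the kernel source: the counterpart
 * to kse_release(2).  Any thread that makes new work available (for
 * example by unlocking a mutex with waiters) can nudge a parked KSE.
 * Passing NULL targets any sleeping upcall in the process; the helper
 * name is hypothetical.
 */
#if 0
static void
uts_kick(struct kse_mailbox *kmbx)
{
	kse_wakeup(kmbx);	/* or kse_wakeup(NULL) for "any KSE" */
}
#endif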
/*
 * newgroup == 0: first call: use current KSE, don't schedule an upcall
 * All other situations, do allocate max new KSEs and schedule an upcall.
 *
 * XXX should be changed so that 'first' behaviour lasts for as long
 * as you have not made a thread in this proc. i.e. as long as we do not
 * have a mailbox..
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus, sa = 0, first = 0;
	struct thread *newtd;

	p = td->td_proc;

	/*
	 * Processes using the other threading model can't
	 * suddenly start calling this one.
	 */
	PROC_LOCK(p);
	if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	if (!(p->p_flag & P_SA)) {
		first = 1;
		p->p_flag |= P_SA|P_HADTHREADS;
	}
	PROC_UNLOCK(p);

	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	ncpus = mp_ncpus;
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	/*
	 * If the new UTS mailbox says that this
	 * will be a BOUND lwp, then it had better
	 * have its thread mailbox already there.
	 */
	if ((mbx.km_flags & KMF_BOUND) || uap->newgroup) {
		/* It's a bound thread (1:1) */
		if (mbx.km_curthread == NULL)
			return (EINVAL);
		if (!(uap->newgroup || first))
			return (EINVAL);
	} else {
		/* It's an upcall capable thread */
		sa = TDP_SA;
		PROC_LOCK(p);
		/*
		 * Limit it to NCPU upcall contexts per proc in any case.
		 * numupcalls will soon be numkse or something
		 * as it will represent the number of
		 * non-bound upcalls available. (i.e. ones that can
		 * actually call up).
		 */
		if (p->p_numupcalls >= ncpus) {
			PROC_UNLOCK(p);
			return (EPROCLIM);
		}
		p->p_numupcalls++;
		PROC_UNLOCK(p);
	}

	/*
	 * Even bound LWPs get a mailbox and an upcall to hold it.
	 * XXX This should change.
	 */
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/*
	 * For the first call this may not have been set.
	 * Of course nor may it actually be needed.
	 * thread_schedule_upcall() will look for it.
	 */
	if (td->td_standin == NULL)
		thread_alloc_spare(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	/*
	 * If this is the first time, and we are a normal thread,
	 * then transfer all the signals back to the 'process'.
	 * SA threading will make a special thread to handle them.
	 */
	if (first && sa) {
		sigqueue_move_set(&td->td_sigqueue, &p->p_sigqueue,
		    &td->td_sigqueue.sq_signals);
		SIGFILLSET(td->td_sigmask);
		SIG_CANTMASK(td->td_sigmask);
	}

	/*
	 * Make the new upcall available to the process.
	 * It may or may not use it, but it's available.
	 */
	upcall_link(newku, p);
	if (mbx.km_quantum)
		/* XXX should this be in the thread? */
		p->p_upquantum = max(1, mbx.km_quantum / tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * The newgroup parameter now means
		 * "bound, non-SA, system scope".
		 * It is only used for the interrupt thread at the
		 * moment, I think (or for system scope threads).
		 * We'll rename it later.
		 */
		newtd = thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread hasn't an upcall structure,
		 * just assign the upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
			newtd = td;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			newtd = thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);

	/*
	 * Let the UTS instance know its LWPID.
	 * It doesn't really care. But the debugger will.
	 * XXX warning.. remember that this moves.
	 */
	suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid);

	/*
	 * In the same manner, if the UTS has a current user thread,
	 * then it is also running on this LWP so set it as well.
	 * The library could do that of course.. but why not..
	 * XXX I'm not sure this can ever happen but ...
	 * XXX does the UTS ever set this in the mailbox before calling this?
	 */
	if (mbx.km_curthread)
		suword32(&mbx.km_curthread->tm_lwp, newtd->td_tid);

	if (sa) {
		newtd->td_pflags |= TDP_SA;
		/*
		 * If we are starting a new thread, kick it off.
		 */
		if (newtd != td) {
			mtx_lock_spin(&sched_lock);
			sched_add(newtd, SRQ_BORING);
			mtx_unlock_spin(&sched_lock);
		}
	} else {
		newtd->td_pflags &= ~TDP_SA;

		/*
		 * Since a library will use the mailbox pointer to
		 * identify even a bound thread, and the mailbox pointer
		 * will never be allowed to change after this syscall
		 * for a bound thread, set it here so the library can
		 * find the thread after the syscall returns.
		 */
		newtd->td_mailbox = mbx.km_curthread;

		if (newtd != td) {
			/*
			 * If we did create a new thread then
			 * make sure it goes to the right place
			 * when it starts up, and make sure that it runs
			 * at full speed when it gets there.
			 * thread_schedule_upcall() copies all cpu state
			 * to the new thread, so we should clear single step
			 * flag here.
			 */
			cpu_set_upcall_kse(newtd, newku->ku_func,
			    newku->ku_mailbox, &newku->ku_stack);
			PROC_LOCK(p);
			if (p->p_flag & P_TRACED) {
				_PHOLD(p);
				ptrace_clear_single_step(newtd);
				_PRELE(p);
			}
			PROC_UNLOCK(p);
			mtx_lock_spin(&sched_lock);
			sched_add(newtd, SRQ_BORING);
			mtx_unlock_spin(&sched_lock);
		}
	}
	return (0);
}
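
/*
 * Illustrative sketch, not part of the kernel source: minimal UTS
 * bootstrap against kse_create(2) above.  The mailbox fields are the
 * real ones; uts_upcall_entry, uts_stack and UTS_STACK_SIZE are
 * hypothetical names.
 */
#if 0
static struct kse_mailbox uts_kmbx;

static void
uts_start(void)
{
	uts_kmbx.km_version = KSE_VER_0;
	uts_kmbx.km_func = uts_upcall_entry;	/* where upcalls land */
	uts_kmbx.km_stack.ss_sp = uts_stack;
	uts_kmbx.km_stack.ss_size = UTS_STACK_SIZE;
	uts_kmbx.km_quantum = 20000;		/* upcall quantum, usec */
	if (kse_create(&uts_kmbx, 0) != 0)	/* 0: SA, not newgroup */
		abort();
}
#endif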
/*
 * Initialize global thread allocation resources.
 */
void
kseinit(void)
{

	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{

	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}
/*
 * Reap zombie kse resources.
 */
void
kse_GC(void)
{
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_upcalls)) {
		mtx_lock_spin(&kse_zombie_lock);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}
/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the proc structure.
 */
int
thread_export_context(struct thread *td, int willexit)
{
	struct proc *p;
	uintptr_t mbx;
	void *addr;
	int error = 0, sig;
	mcontext_t mc;

	p = td->td_proc;

	/*
	 * Post sync signal, or process SIGKILL and SIGSTOP.
	 * For sync signal, it is only possible when the signal is not
	 * caught by userland or process is being debugged.
	 */
	PROC_LOCK(p);
	if (td->td_flags & TDF_NEEDSIGCHK) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_NEEDSIGCHK;
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
	}
	if (willexit)
		SIGFILLSET(td->td_sigmask);
	PROC_UNLOCK(p);

	/* Export the user/machine context. */
	get_mcontext(td, &mc, 0);
	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
	error = copyout(&mc, addr, sizeof(mcontext_t));
	if (error)
		goto bad;

	addr = (caddr_t)(&td->td_mailbox->tm_lwp);
	if (suword32(addr, 0)) {
		error = EFAULT;
		goto bad;
	}

	/* Get address in latest mbox of list pointer */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one.
	 */
	for (;;) {
		mbx = (uintptr_t)p->p_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)p->p_completed) {
			p->p_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock; it is no longer valid to
			 * use it again anywhere else.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (error);
}
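
/*
 * Illustrative sketch, not part of the kernel source: the userland
 * view of the list built above.  Completed mailboxes are pushed LIFO
 * through tm_next and later handed up via km_completed (see
 * thread_link_mboxes() below), so the UTS just walks the chain.
 * Helper names are hypothetical.
 */
#if 0
static void
uts_collect_completed(struct kse_mailbox *kmbx)
{
	struct kse_thr_mailbox *tmbx, *next;

	tmbx = kmbx->km_completed;	/* detached for us by the kernel */
	kmbx->km_completed = NULL;
	for (; tmbx != NULL; tmbx = next) {
		next = tmbx->tm_next;
		uts_make_runnable(tmbx);
	}
}
#endif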
/*
 * Take the list of completed mailboxes for this Process and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct proc *p, struct kse_upcall *ku)
{
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)p->p_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)p->p_completed) {
			p->p_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}
/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (!(td->td_pflags & TDP_SA))
		return (0);
	if (user) {
		/* Currently, this is always done via ast(). */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_ASTPENDING;
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else if (td->td_mailbox != NULL)
		td->td_usticks++;
	return (0);
}
/*
 * Export state clock ticks for userland.
 */
static int
thread_update_usr_ticks(struct thread *td)
{
	struct proc *p = td->td_proc;
	caddr_t addr;
	u_int uticks;

	if (td->td_mailbox == NULL)
		return (-1);

	if ((uticks = td->td_uuticks) != 0) {
		td->td_uuticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_uticks;
		if (suword32(addr, uticks + fuword32(addr)))
			goto error;
	}
	if ((uticks = td->td_usticks) != 0) {
		td->td_usticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_sticks;
		if (suword32(addr, uticks + fuword32(addr)))
			goto error;
	}
	return (0);

error:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (-2);
}
/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize thread's large data area outside sched_lock
 * for thread_schedule_upcall(). The crhold is also here to get it out
 * from the schedlock as it has a mutex op itself.
 * XXX BUG.. we need to get the cr ref after the thread has
 * checked and changed its own, not 6 months before...
 */
static void
thread_alloc_spare(struct thread *td)
{
	struct thread *spare;

	if (td->td_standin)
		return;
	spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
}
/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on specified kse_upcall,
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	/*
	 * Bzero already done in thread_alloc_spare() because we can't
	 * do the crhold here because we are in schedlock already.
	 */
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_proc);
	/* inherit parts of blocked thread's context as a good template */
	cpu_set_upcall(td2, td);
	/* Let the new thread become owner of the upcall */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_pflags = TDP_SA|TDP_UPCALLING;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	SIGFILLSET(td2->td_sigmask);
	SIG_CANTMASK(td2->td_sigmask);
	sched_fork_thread(td, td2);
	return (td2);	/* bogus.. should be a void function */
}
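
/*
 * Illustrative sketch, not part of the kernel source: the calling
 * convention the in-kernel callers follow (compare thread_switchout()
 * below).  The caller holds sched_lock, frees its upcall, and hands
 * it to the standin thread.
 */
#if 0
	mtx_lock_spin(&sched_lock);
	ku = td->td_upcall;
	ku->ku_owner = NULL;			/* upcall must be free */
	td->td_upcall = NULL;
	td2 = thread_schedule_upcall(td, ku);	/* standin takes it over */
	sched_add(td2, SRQ_YIELDING);
	mtx_unlock_spin(&sched_lock);
#endif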
/*
 * This is only used when a thread has generated a trap and the process
 * is being debugged.
 */
void
thread_signal_add(struct thread *td, ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int error;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);

	mtx_unlock(&ps->ps_mtx);
	SIGADDSET(td->td_sigmask, ksi->ksi_signo);
	PROC_UNLOCK(p);
	error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig,
	    sizeof(siginfo_t));
	if (error) {
		PROC_LOCK(p);
		sigexit(td, SIGSEGV);
	}
	PROC_LOCK(p);
	mtx_lock(&ps->ps_mtx);
}
1065 #include "opt_sched.h"
1067 thread_switchout(struct thread *td, int flags, struct thread *nextthread)
1069 struct kse_upcall *ku;
1072 mtx_assert(&sched_lock, MA_OWNED);
1075 * If the outgoing thread is in threaded group and has never
1076 * scheduled an upcall, decide whether this is a short
1077 * or long term event and thus whether or not to schedule
1079 * If it is a short term event, just suspend it in
1080 * a way that takes its KSE with it.
1081 * Select the events for which we want to schedule upcalls.
1082 * For now it's just sleep or if thread is suspended but
1083 * process wide suspending flag is not set (debugger
1085 * XXXKSE eventually almost any inhibition could do.
1087 if (TD_CAN_UNBIND(td) && (td->td_standin) &&
1088 (TD_ON_SLEEPQ(td) || (TD_IS_SUSPENDED(td) &&
1089 !P_SHOULDSTOP(td->td_proc)))) {
1091 * Release ownership of upcall, and schedule an upcall
1092 * thread, this new upcall thread becomes the owner of
1093 * the upcall structure. It will be ahead of us in the
1094 * run queue, so as we are stopping, it should either
1095 * start up immediatly, or at least before us if
1096 * we release our slot.
1099 ku->ku_owner = NULL;
1100 td->td_upcall = NULL;
1101 td->td_pflags &= ~TDP_CAN_UNBIND;
1102 td2 = thread_schedule_upcall(td, ku);
1103 if (flags & SW_INVOL || nextthread) {
1104 sched_add(td2, SRQ_YIELDING);
1106 /* Keep up with reality.. we have one extra thread
1107 * in the picture.. and it's 'running'.
1112 return (nextthread);
/*
 * Setup done on the thread when it enters the kernel.
 */
void
thread_user_enter(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;

	/*
	 * First check that we shouldn't just abort.
	 * We can suspend it here or just exit.
	 */
	if (__predict_false(P_SHOULDSTOP(p))) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (!(td->td_pflags & TDP_SA))
		return;

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is.
	 */
	ku = td->td_upcall;
	KASSERT(ku != NULL, ("no upcall owned"));
	KASSERT(ku->ku_owner == td, ("wrong owner"));
	KASSERT(!TD_CAN_UNBIND(td), ("can unbind"));

	if (td->td_standin == NULL)
		thread_alloc_spare(td);
	ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
	    (ku->ku_mflags & KMF_NOUPCALL)) {
		td->td_mailbox = NULL;
	} else {
		flags = fuword32(&tmbx->tm_flags);
		/*
		 * On some architectures the TP register points to the
		 * thread mailbox but not to the kse mailbox, and userland
		 * cannot atomically clear km_curthread; it can, however,
		 * use the TP register and set TMF_NOUPCALL in the thread
		 * flags to indicate a critical region.
		 */
		if (flags & TMF_NOUPCALL) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
			if (__predict_false(p->p_flag & P_TRACED)) {
				flags = fuword32(&tmbx->tm_dflags);
				if (flags & TMDF_SUSPEND) {
					mtx_lock_spin(&sched_lock);
					/* fuword can block, check again */
					if (td->td_upcall)
						ku->ku_flags |= KUF_DOUPCALL;
					mtx_unlock_spin(&sched_lock);
				}
			}
		}
	}
}
/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	struct kse_upcall *ku;
	struct proc *p;
	struct timespec ts;
	int error = 0, uts_crit;

	/* Nothing to do with bound thread */
	if (!(td->td_pflags & TDP_SA))
		return (0);

	/*
	 * Update stat clock count for userland.
	 */
	if (td->td_mailbox != NULL) {
		thread_update_usr_ticks(td);
		uts_crit = 0;
	} else {
		uts_crit = 1;
	}

	p = td->td_proc;
	ku = td->td_upcall;

	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		td->td_pflags &= ~TDP_CAN_UNBIND;
		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
		    (p->p_completed == NULL) &&
		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    (p->p_upquantum && ticks < p->p_nextupcall)) {
			nanotime(&ts);
			error = copyout(&ts,
			    (caddr_t)&ku->ku_mailbox->km_timeofday,
			    sizeof(ts));
			td->td_mailbox = 0;
			ku->ku_mflags = 0;
			if (error)
				goto out;
			return (0);
		}
		thread_export_context(td, 0);
		/*
		 * There is something to report, and we own an upcall
		 * structure, we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		td->td_pflags |= TDP_UPCALLING;
	} else if (td->td_mailbox && (ku == NULL)) {
		thread_export_context(td, 1);
		PROC_LOCK(p);
		if (p->p_upsleeps)
			wakeup(&p->p_completed);
		WITNESS_WARN(WARN_PANIC, &p->p_mtx.lock_object,
		    "thread exiting in userret");
		sigqueue_flush(&td->td_sigqueue);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	KASSERT(ku != NULL, ("upcall is NULL"));
	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));

	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_maxthrwaits++;
		while (p->p_numthreads > max_threads_per_proc) {
			if (p->p_numupcalls >= max_threads_per_proc)
				break;
			mtx_unlock_spin(&sched_lock);
			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", hz/10) != EWOULDBLOCK) {
				mtx_lock_spin(&sched_lock);
				break;
			} else {
				mtx_lock_spin(&sched_lock);
			}
		}
		p->p_maxthrwaits--;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}
	if (td->td_pflags & TDP_UPCALLING) {
		uts_crit = 0;
		p->p_nextupcall = ticks + p->p_upquantum;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		td->td_pflags &= ~TDP_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL) {
			mtx_lock_spin(&sched_lock);
			ku->ku_flags &= ~KUF_DOUPCALL;
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * Set user context to the UTS.
		 */
		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
			cpu_set_upcall_kse(td, ku->ku_func, ku->ku_mailbox,
			    &ku->ku_stack);
			if (p->p_flag & P_TRACED) {
				_PHOLD(p);
				ptrace_clear_single_step(td);
				_PRELE(p);
			}
			error = suword32(&ku->ku_mailbox->km_lwp,
			    td->td_tid);
			if (error)
				goto out;
			error = suword(&ku->ku_mailbox->km_curthread, 0);
			if (error)
				goto out;
		}

		/*
		 * Unhook the list of completed threads.
		 * Anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
		    (error = thread_link_mboxes(p, ku)) != 0)
			goto out;
	}
	if (!uts_crit) {
		nanotime(&ts);
		error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * XXX how do we do that?
		 */
		PROC_LOCK(p);
		psignal(p, SIGSEGV);
		PROC_UNLOCK(p);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td);
	}

	ku->ku_mflags = 0;
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}
/*
 * Called after ptrace has resumed a process; force all
 * virtual CPUs to schedule an upcall for the SA process,
 * because the debugger may have changed something in userland
 * and we should notify the UTS as soon as possible.
 */
void
thread_continued(struct proc *p)
{
	struct kse_upcall *ku;
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(P_SHOULDSTOP(p), ("process not stopped"));

	if (!(p->p_flag & P_SA))
		return;

	if (p->p_flag & P_TRACED) {
		td = TAILQ_FIRST(&p->p_threads);
		if (td && (td->td_pflags & TDP_SA)) {
			FOREACH_UPCALL_IN_PROC(p, ku) {
				mtx_lock_spin(&sched_lock);
				ku->ku_flags |= KUF_DOUPCALL;
				mtx_unlock_spin(&sched_lock);
				wakeup(&p->p_completed);
			}
		}
	}
}