/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <vm/uma.h>
static uma_zone_t upcall_zone;

extern int virtual_cpu;
extern int thread_debug;
extern int max_threads_per_proc;
extern int max_groups_per_proc;
extern int max_threads_hits;
extern struct mtx kse_zombie_lock;
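
/*
 * The zombie list holds upcall structures that were released in a
 * context where calling uma_zfree() is not safe (for example while
 * sched_lock is held); the GC routine below reaps them later.
 */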
TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);

static int thread_update_usr_ticks(struct thread *td);
static void thread_alloc_spare(struct thread *td);
static struct thread *thread_schedule_upcall(struct thread *td,
	    struct kse_upcall *ku);
struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK | M_ZERO);
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}
void
upcall_link(struct kse_upcall *ku, struct proc *p)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&p->p_upcalls, ku, ku_link);
	ku->ku_proc = p;
	p->p_numupcalls++;
}
void
upcall_unlink(struct kse_upcall *ku)
{
	struct proc *p = ku->ku_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&p->p_upcalls, ku, ku_link);
	p->p_numupcalls--;
	upcall_stash(ku);
}
void
upcall_remove(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_upcall != NULL) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = NULL;
	}
}
#ifndef _SYS_SYSPROTO_H_
struct kse_switchin_args {
	struct kse_thr_mailbox *tmbx;
	int flags;
};
#endif
int
kse_switchin(struct thread *td, struct kse_switchin_args *uap)
{
	struct kse_thr_mailbox tmbx;
	struct kse_upcall *ku;
	int error;

	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	error = (uap->tmbx == NULL) ? EINVAL : 0;
	if (!error)
		error = copyin(uap->tmbx, &tmbx, sizeof(tmbx));
	if (!error && (uap->flags & KSE_SWITCHIN_SETTMBX))
		error = (suword(&ku->ku_mailbox->km_curthread,
		    (long)uap->tmbx) != 0 ? EINVAL : 0);
	if (!error)
		error = set_mcontext(td, &tmbx.tm_context.uc_mcontext);
	if (!error) {
		suword32(&uap->tmbx->tm_lwp, td->td_tid);
		if (uap->flags & KSE_SWITCHIN_SETTMBX) {
			td->td_mailbox = uap->tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
		}
		PROC_LOCK(td->td_proc);
		if (td->td_proc->p_flag & P_TRACED) {
			if (tmbx.tm_dflags & TMDF_SSTEP)
				ptrace_single_step(td);
			else
				ptrace_clear_single_step(td);
			if (tmbx.tm_dflags & TMDF_SUSPEND) {
				mtx_lock_spin(&sched_lock);
				/* fuword can block, check again */
				if (td->td_upcall)
					ku->ku_flags |= KUF_DOUPCALL;
				mtx_unlock_spin(&sched_lock);
			}
		}
		PROC_UNLOCK(td->td_proc);
	}
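	/*
	 * EJUSTRETURN tells the syscall return path to leave the register
	 * state alone: set_mcontext() has already installed the context
	 * being switched to.
	 */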
	return ((error == 0) ? EJUSTRETURN : error);
}
#ifndef _SYS_SYSPROTO_H_
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox *tmbx;
	int cmd;
	long data;
};
#endif
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct kse_execve_args args;
	struct image_args iargs;
	struct proc *p;
	struct thread *td2;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;
	int error;

	p = td->td_proc;
	if (!(p->p_flag & P_SA))
		return (EINVAL);

	switch (uap->cmd) {
	case KSE_INTR_SENDSIG:
		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
	case KSE_INTR_INTERRUPT:
	case KSE_INTR_RESTART:
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2->td_mailbox == uap->tmbx)
				break;
		}
		if (td2 == NULL) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (ESRCH);
		}
		if (uap->cmd == KSE_INTR_SENDSIG) {
			if (uap->data > 0) {
				td2->td_flags &= ~TDF_INTERRUPT;
				mtx_unlock_spin(&sched_lock);
				tdsignal(p, td2, (int)uap->data, NULL);
			} else {
				mtx_unlock_spin(&sched_lock);
			}
		} else {
			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
			if (TD_CAN_UNBIND(td2))
				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
			if (uap->cmd == KSE_INTR_INTERRUPT)
				td2->td_intrval = EINTR;
			else
				td2->td_intrval = ERESTART;
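			/*
			 * If the target is in an interruptible sleep, kick
			 * it off the sleep queue with the chosen errno so
			 * the interruption takes effect immediately.
			 */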
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR))
				sleepq_abort(td2, td2->td_intrval);
			mtx_unlock_spin(&sched_lock);
		}
		PROC_UNLOCK(p);
		break;

	case KSE_INTR_SIGEXIT:
		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		PROC_LOCK(p);
		sigexit(td, (int)uap->data);
		/* NOTREACHED */

	case KSE_INTR_DBSUSPEND:
		/* This sub-command is only for bound (non-SA) threads. */
		if (td->td_pflags & TDP_SA)
			return (EINVAL);
		ku = td->td_upcall;
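		/*
		 * fuword() returns -1 on a fault, so the check below
		 * rejects both an unset and an unreadable km_curthread.
		 */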
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if (tmbx == NULL || tmbx == (void *)-1)
			return (EINVAL);
		PROC_LOCK(p);
		while ((p->p_flag & P_TRACED) && !(p->p_flag & P_SINGLE_EXIT)) {
			flags = fuword32(&tmbx->tm_dflags);
			if (!(flags & TMDF_SUSPEND))
				break;
			mtx_lock_spin(&sched_lock);
			thread_stopped(p);
			thread_suspend_one(td);
			PROC_UNLOCK(p);
			mi_switch(SW_VOL, NULL);
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p);
		}
		PROC_UNLOCK(p);
		return (0);

	case KSE_INTR_EXECVE:
		error = copyin((void *)uap->data, &args, sizeof(args));
		if (error)
			return (error);
		error = exec_copyin_args(&iargs, args.path, UIO_USERSPACE,
		    args.argv, args.envp);
		if (error == 0)
			error = kern_execve(td, &iargs, NULL);
		if (error == 0) {
			PROC_LOCK(p);
			SIGSETOR(td->td_siglist, args.sigpend);
			PROC_UNLOCK(p);
			kern_sigprocmask(td, SIG_SETMASK, &args.sigmask, NULL,
			    0);
		}
		return (error);

	default:
		return (EINVAL);
	}
	return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct kse_exit_args {
	register_t dummy;
};
#endif
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct kse_upcall *ku, *ku2;
	int error, count;

	p = td->td_proc;

	/*
	 * Ensure that this is only called from the UTS.
	 */
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);

	/*
	 * Calculate the existing non-exiting upcalls in this process.
	 * If we are the last upcall but there are still other threads,
	 * then do not exit. We need the other threads to be able to
	 * complete whatever they are doing.
	 * XXX This relies on userland knowing what to do if we return.
	 * It may be a better choice to convert ourselves into a kse_release
	 * (or similar) and wait in the kernel to be needed.
	 */
	count = 0;
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	FOREACH_UPCALL_IN_PROC(p, ku2) {
		if (ku2->ku_flags & KUF_EXITING)
			count++;
	}
	if ((p->p_numupcalls - count) == 1 &&
	    (p->p_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);

	/*
	 * Mark the UTS mailbox as having been finished with.
	 * If that fails then just go for a segfault.
	 * XXX need to check whether the signal can be delivered without
	 * a mailbox.
	 */
	error = suword32(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	if (!(td->td_pflags & TDP_SA))
		if (suword32(&td->td_mailbox->tm_lwp, 0))
			error = EFAULT;
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	sigqueue_flush(&td->td_sigqueue);
	mtx_lock_spin(&sched_lock);
	upcall_remove(td);
	if (p->p_numthreads != 1) {
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	/*
	 * This is the last thread. Just return to the user.
	 * Effectively we have left threading mode.
	 * The only real thing left to do is ensure that the
	 * scheduler sets our concurrency back to 1 as that may be a
	 * resource leak otherwise.
	 * This is an A[PB]I issue.. what SHOULD we do?
	 * One possibility is to return to the user. It may not cope well.
	 * The other possibility would be to let the process exit.
	 */
	thread_unthread(td);
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	printf("kse_exit: called on last thread. Calling exit1()\n");
	exit1(td, 0);
	/* NOTREACHED */
}
/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
#ifndef _SYS_SYSPROTO_H_
struct kse_release_args {
	struct timespec *timeout;
};
#endif
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct kse_upcall *ku;
	struct timespec timeout;
	struct timeval tv;
	sigset_t sigset;
	int error;

	p = td->td_proc;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td)) {
		printf("kse_release: called outside of threading. exiting\n");
		exit1(td, 0);
	}
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
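	/*
	 * An SA thread just becomes an upcall; a bound thread instead
	 * reads its mailbox flags to see which event the UTS wants to
	 * wait for.
	 */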
	if (td->td_pflags & TDP_SA)
		td->td_pflags |= TDP_UPCALLING;
	else {
		ku->ku_mflags = fuword32(&ku->ku_mailbox->km_flags);
		if (ku->ku_mflags == -1) {
			PROC_LOCK(p);
			sigexit(td, SIGSEGV);
		}
	}
	PROC_LOCK(p);
	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
		/* UTS wants to wait for signal event */
		if (!(p->p_flag & P_SIGEVENT) &&
		    !(ku->ku_flags & KUF_DOUPCALL)) {
			td->td_kflags |= TDK_KSERELSIG;
			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSERELSIG | TDK_WAKEUP);
		}
		p->p_flag &= ~P_SIGEVENT;
		sigset = p->p_siglist;
		PROC_UNLOCK(p);
		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
		    sizeof(sigset));
	} else {
		if ((ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    ((ku->ku_mflags & KMF_NOCOMPLETED) ||
		    (p->p_completed == NULL))) {
			p->p_upsleeps++;
			td->td_kflags |= TDK_KSEREL;
			error = msleep(&p->p_completed, &p->p_mtx,
			    PPAUSE|PCATCH, "kserel",
			    (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP);
			p->p_upsleeps--;
		}
		PROC_UNLOCK(p);
	}
	if (ku->ku_flags & KUF_DOUPCALL) {
		mtx_lock_spin(&sched_lock);
		ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}
/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_SA))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_UPCALL_IN_PROC(p, ku) {
			if (ku->ku_mailbox == uap->mbx)
				break;
		}
	} else {
		if (p->p_upsleeps) {
			mtx_unlock_spin(&sched_lock);
			wakeup(&p->p_completed);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&p->p_upcalls);
	}
	if (ku == NULL) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((td2 = ku->ku_owner) == NULL) {
		mtx_unlock_spin(&sched_lock);
		panic("%s: no owner", __func__);
	} else if (td2->td_kflags & (TDK_KSEREL | TDK_KSERELSIG)) {
		mtx_unlock_spin(&sched_lock);
		if (!(td2->td_kflags & TDK_WAKEUP)) {
			td2->td_kflags |= TDK_WAKEUP;
			if (td2->td_kflags & TDK_KSEREL)
				sleepq_remove(td2, &p->p_completed);
			else
				sleepq_remove(td2, &p->p_siglist);
		}
	} else {
		ku->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	PROC_UNLOCK(p);
	return (0);
}
/*
 * newgroup == 0: first call: use current KSE, don't schedule an upcall
 * All other situations, do allocate max new KSEs and schedule an upcall.
 *
 * XXX should be changed so that 'first' behaviour lasts for as long
 * as you have not made a thread in this proc. i.e. as long as we do not have
 * a mailbox..
 */
/* struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus, sa = 0, first = 0;
	struct thread *newtd;

	p = td->td_proc;

	/*
	 * Processes using the other threading model can't
	 * suddenly start calling this one.
	 */
	PROC_LOCK(p);
	if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	if (!(p->p_flag & P_SA)) {
		first = 1;
		p->p_flag |= P_SA|P_HADTHREADS;
	}
	PROC_UNLOCK(p);

	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	ncpus = mp_ncpus;
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	/*
	 * If the new UTS mailbox says that this
	 * will be a BOUND lwp, then it had better
	 * have its thread mailbox already there.
	 */
	if ((mbx.km_flags & KMF_BOUND) || uap->newgroup) {
		/* It's a bound thread (1:1). */
		if (mbx.km_curthread == NULL)
			return (EINVAL);
		ncpus = 1;
		if (!(uap->newgroup || first))
			return (EINVAL);
	} else {
		/* It's an upcall-capable (M:N) thread. */
		sa = TDP_SA;
		/*
		 * Limit it to NCPU upcall contexts per proc in any case.
		 */
		if (p->p_numupcalls >= ncpus) {
			return (EPROCLIM);
		}
	}

	/*
	 * We want to make a thread (bound or unbound).
	 * If we are just the first call, either kind
	 * is ok, but if not then either we must be
	 * already an upcallable thread to make another,
	 * or a bound thread to make one of those.
	 * Once again, not quite right but good enough for now.. XXXKSE
	 */
	if (!first && ((td->td_pflags & TDP_SA) != sa))
		return (EINVAL);

	if (p->p_numupcalls == 0) {
		sched_set_concurrency(p, ncpus);
	}

	/*
	 * Even bound LWPs get a mailbox and an upcall to hold it.
	 */
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
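	/*
	 * ku_func and ku_stack now hold the UTS entry point and stack
	 * taken from the mailbox; cpu_set_upcall_kse() uses them to
	 * build the user context each time an upcall is delivered.
	 */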
	/*
	 * For the first call this may not have been set.
	 * Of course nor may it actually be needed.
	 */
	if (td->td_standin == NULL)
		thread_alloc_spare(td);

	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (p->p_numupcalls >= ncpus) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		upcall_free(newku);
		return (EPROCLIM);
	}

	/*
	 * If we are the first time, and a normal thread,
	 * then transfer all the signals back to the 'process'.
	 * SA threading will make a special thread to handle them.
	 */
	if (first && sa) {
		sigqueue_move_set(&td->td_sigqueue, &p->p_sigqueue,
		    &td->td_sigqueue.sq_signals);
		SIGFILLSET(td->td_sigmask);
		SIG_CANTMASK(td->td_sigmask);
	}
	/* should subtract from process count (later) */

	/*
	 * Make the new upcall available to the process.
	 * It may or may not use it, but it's available.
	 */
	upcall_link(newku, p);
	if (mbx.km_quantum)
		/* XXX should this be in the thread? */
		p->p_upquantum = max(1, mbx.km_quantum / tick);

	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * The newgroup parameter now means
		 * "bound, non SA, system scope".
		 * It is only used for the interrupt thread at the
		 * moment. We'll rename it later.
		 */
		newtd = thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread hasn't an upcall structure,
		 * just assign the upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
			newtd = td;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			newtd = thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);

	/*
	 * Let the UTS instance know its LWPID.
	 * It doesn't really care. But the debugger will.
	 * XXX warning.. remember that this moves.
	 */
	suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid);

	/*
	 * In the same manner, if the UTS has a current user thread,
	 * then it is also running on this LWP so set it as well.
	 * The library could do that of course.. but why not..
	 */
	if (mbx.km_curthread)
		suword32(&mbx.km_curthread->tm_lwp, newtd->td_tid);

	if (sa) {
		newtd->td_pflags |= TDP_SA;
		/*
		 * If we are starting a new thread, kick it off.
		 */
		if (newtd != td) {
			mtx_lock_spin(&sched_lock);
			setrunqueue(newtd, SRQ_BORING);
			mtx_unlock_spin(&sched_lock);
		}
	} else {
		newtd->td_pflags &= ~TDP_SA;

		/*
		 * Since a library will use the mailbox pointer to
		 * identify even a bound thread, and the mailbox pointer
		 * will never be allowed to change after this syscall
		 * for a bound thread, set it here so the library can
		 * find the thread after the syscall returns.
		 */
		newtd->td_mailbox = mbx.km_curthread;

		if (newtd != td) {
			/*
			 * If we did create a new thread then
			 * make sure it goes to the right place
			 * when it starts up, and make sure that it runs
			 * at full speed when it gets there.
			 * thread_schedule_upcall() copies all cpu state
			 * to the new thread, so we should clear single step
			 * flag here.
			 */
			cpu_set_upcall_kse(newtd, newku->ku_func,
			    newku->ku_mailbox, &newku->ku_stack);
			PROC_LOCK(p);
			if (p->p_flag & P_TRACED)
				ptrace_clear_single_step(newtd);
			PROC_UNLOCK(p);
			mtx_lock_spin(&sched_lock);
			setrunqueue(newtd, SRQ_BORING);
			mtx_unlock_spin(&sched_lock);
		}
	}
	return (0);
}
/*
 * Initialize global thread allocation resources.
 */
void
kseinit(void)
{

	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}
/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{

	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}
/*
 * Reap zombie KSE resources.
 */
void
kse_GC(void)
{
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_upcalls)) {
		mtx_lock_spin(&kse_zombie_lock);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}
/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the proc structure.
 */
int
thread_export_context(struct thread *td, int willexit)
{
	struct proc *p;
	uintptr_t mbx;
	void *addr;
	int error = 0, sig;
	mcontext_t mc;

	p = td->td_proc;

	/*
	 * Post sync signal, or process SIGKILL and SIGSTOP.
	 * For sync signal, it is only possible when the signal is not
	 * caught by userland or process is being debugged.
	 */
	PROC_LOCK(p);
	if (td->td_flags & TDF_NEEDSIGCHK) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_NEEDSIGCHK;
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
	}
	if (willexit)
		SIGFILLSET(td->td_sigmask);
	PROC_UNLOCK(p);

	/* Export the user/machine context. */
	get_mcontext(td, &mc, 0);
	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
	error = copyout(&mc, addr, sizeof(mcontext_t));
	if (error)
		goto bad;

	if (willexit) {
		addr = (caddr_t)(&td->td_mailbox->tm_lwp);
		if (suword32(addr, 0)) {
			error = EFAULT;
			goto bad;
		}
	}

	/* Get address in latest mbox of list pointer */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one.
	 */
	for (;;) {
		mbx = (uintptr_t)p->p_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)p->p_completed) {
			p->p_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock; it is no longer valid to
			 * use it again in any other places.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);

bad:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (error);
}
/*
 * Take the list of completed mailboxes for this process and put them on
 * this upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct proc *p, struct kse_upcall *ku)
{
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)p->p_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)p->p_completed) {
			p->p_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}
/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (!(td->td_pflags & TDP_SA))
		return (0);
	if (user) {
		/* Currently, this is always done via ast(). */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_ASTPENDING;
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else if (td->td_mailbox != NULL)
		td->td_usticks++;
	return (0);
}
/*
 * Export statclock ticks to userland.
 */
static int
thread_update_usr_ticks(struct thread *td)
{
	struct proc *p = td->td_proc;
	caddr_t addr;
	u_int uticks;

	if (td->td_mailbox == NULL)
		return (-1);
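
	/*
	 * Fold the ticks accumulated since the last return to userland
	 * into the mailbox counters with a userland read-modify-write
	 * (fuword32() plus suword32()).
	 */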
	if ((uticks = td->td_uuticks) != 0) {
		td->td_uuticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_uticks;
		if (suword32(addr, uticks+fuword32(addr)))
			goto error;
	}
	if ((uticks = td->td_usticks) != 0) {
		td->td_usticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_sticks;
		if (suword32(addr, uticks+fuword32(addr)))
			goto error;
	}
	return (0);

error:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (-2);
}
/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize the thread's large data area outside sched_lock
 * for thread_schedule_upcall(). The crhold is also here to get it out
 * from the schedlock as it has a mutex op itself.
 * XXX BUG.. we need to get the cr ref after the thread has
 * checked and changed its own, not 6 months before...
 */
static void
thread_alloc_spare(struct thread *td)
{
	struct thread *spare;

	if (td->td_standin)
		return;
	spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
}
/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
static struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on the specified kse_upcall;
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	/*
	 * Bzero already done in thread_alloc_spare() because we can't
	 * do the crhold here because we are in schedlock already.
	 */
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_proc);
	/* inherit parts of blocked thread's context as a good template */
	cpu_set_upcall(td2, td);
	/* Let the new thread become owner of the upcall */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_flags = 0;
	td2->td_pflags = TDP_SA|TDP_UPCALLING;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	SIGFILLSET(td2->td_sigmask);
	SIG_CANTMASK(td2->td_sigmask);
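	/*
	 * sched_fork_thread() seeds the new thread's scheduler state
	 * from ours, so the upcall thread competes as our continuation.
	 */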
	sched_fork_thread(td, td2);
	return (td2);	/* bogus.. should be a void function */
}
/*
 * This is only used when a thread has generated a trap and the process
 * is being debugged.
 */
void
thread_signal_add(struct thread *td, ksiginfo_t *ksi)
{
	struct proc *p;
	struct sigacts *ps;
	int error;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);

	mtx_unlock(&ps->ps_mtx);
	SIGADDSET(td->td_sigmask, ksi->ksi_signo);
	PROC_UNLOCK(p);
	error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig,
	    sizeof(siginfo_t));
	if (error) {
		PROC_LOCK(p);
		sigexit(td, SIGSEGV);
	}
	PROC_LOCK(p);
	mtx_lock(&ps->ps_mtx);
}
#include "opt_sched.h"
struct thread *
thread_switchout(struct thread *td, int flags, struct thread *nextthread)
{
	struct kse_upcall *ku;
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If the outgoing thread is in threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep or if thread is suspended but
	 * process wide suspending flag is not set (debugger
	 * requested thread suspension).
	 * XXXKSE eventually almost any inhibition could do.
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) &&
	    (TD_ON_SLEEPQ(td) || (TD_IS_SUSPENDED(td) &&
	    !P_SHOULDSTOP(td->td_proc)))) {
		/*
		 * Release ownership of upcall, and schedule an upcall
		 * thread; this new upcall thread becomes the owner of
		 * the upcall structure. It will be ahead of us in the
		 * run queue, so as we are stopping, it should either
		 * start up immediately, or at least before us if
		 * we release our slot.
		 */
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_pflags &= ~TDP_CAN_UNBIND;
		td2 = thread_schedule_upcall(td, ku);
		if (flags & SW_INVOL || nextthread) {
			setrunqueue(td2, SRQ_YIELDING);
		} else {
			/*
			 * Keep up with reality.. we have one extra thread
			 * in the picture.. and it's 'running'.
			 */
			return (td2);
		}
	}
	return (nextthread);
}
/*
 * Setup done on the thread when it enters the kernel.
 */
void
thread_user_enter(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;

	/*
	 * First check that we shouldn't just abort.
	 * We can suspend it here or just exit.
	 */
	if (__predict_false(P_SHOULDSTOP(p))) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (!(td->td_pflags & TDP_SA))
		return;

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is.
	 */
	ku = td->td_upcall;

	KASSERT(ku != NULL, ("no upcall owned"));
	KASSERT(ku->ku_owner == td, ("wrong owner"));
	KASSERT(!TD_CAN_UNBIND(td), ("can unbind"));

	if (td->td_standin == NULL)
		thread_alloc_spare(td);
	ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
	    (ku->ku_mflags & KMF_NOUPCALL)) {
		td->td_mailbox = NULL;
	} else {
		flags = fuword32(&tmbx->tm_flags);
		/*
		 * On some architectures the TP register points to the
		 * thread mailbox but not to the KSE mailbox; there userland
		 * cannot atomically clear km_curthread, but it can use the
		 * TP register and set TMF_NOUPCALL in the thread flags to
		 * indicate a critical region.
		 */
		if (flags & TMF_NOUPCALL) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
			if (__predict_false(p->p_flag & P_TRACED)) {
				flags = fuword32(&tmbx->tm_dflags);
				if (flags & TMDF_SUSPEND) {
					mtx_lock_spin(&sched_lock);
					/* fuword can block, check again */
					if (td->td_upcall)
						ku->ku_flags |= KUF_DOUPCALL;
					mtx_unlock_spin(&sched_lock);
				}
			}
		}
	}
}
/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	struct kse_upcall *ku;
	struct proc *p;
	struct timespec ts;
	int error = 0, uts_crit;

	/* Nothing to do with bound thread */
	if (!(td->td_pflags & TDP_SA))
		return (0);

	/*
	 * Update stat clock count for userland
	 */
	p = td->td_proc;
	if (td->td_mailbox != NULL) {
		thread_update_usr_ticks(td);
		uts_crit = 0;
	} else {
		uts_crit = 1;
	}

	ku = td->td_upcall;

	/*
	 * Optimisation:
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		td->td_pflags &= ~TDP_CAN_UNBIND;
		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
		    (p->p_completed == NULL) &&
		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    (p->p_upquantum && ticks < p->p_nextupcall)) {
			nanotime(&ts);
			error = copyout(&ts,
			    (caddr_t)&ku->ku_mailbox->km_timeofday,
			    sizeof(ts));
			td->td_mailbox = NULL;
			ku->ku_mflags = 0;
			if (error)
				goto out;
			return (0);
		}
		thread_export_context(td, 0);
		/*
		 * There is something to report, and we own an upcall
		 * structure, so we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		td->td_pflags |= TDP_UPCALLING;
	} else if (td->td_mailbox && (ku == NULL)) {
		thread_export_context(td, 1);
		PROC_LOCK(p);
		wakeup(&p->p_completed);
		WITNESS_WARN(WARN_PANIC, &p->p_mtx.mtx_object,
		    "thread exiting in userret");
		sigqueue_flush(&td->td_sigqueue);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}

	KASSERT(ku != NULL, ("upcall is NULL"));
	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
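
	/*
	 * Throttle excessive thread creation: while the process is over
	 * its thread limit, upcall threads nap briefly to give userland
	 * a chance to consume the completed contexts.
	 */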
	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_maxthrwaits++;
		while (p->p_numthreads > max_threads_per_proc) {
			if (p->p_numupcalls >= max_threads_per_proc)
				break;
			mtx_unlock_spin(&sched_lock);
			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", hz/10) != EWOULDBLOCK) {
				mtx_lock_spin(&sched_lock);
				break;
			}
			mtx_lock_spin(&sched_lock);
		}
		p->p_maxthrwaits--;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_UPCALLING) {
		uts_crit = 0;
		p->p_nextupcall = ticks + p->p_upquantum;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		td->td_pflags &= ~TDP_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL) {
			mtx_lock_spin(&sched_lock);
			ku->ku_flags &= ~KUF_DOUPCALL;
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * Set user context to the UTS
		 */
		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
			cpu_set_upcall_kse(td, ku->ku_func, ku->ku_mailbox,
			    &ku->ku_stack);
			if (p->p_flag & P_TRACED) {
				PROC_LOCK(p);
				ptrace_clear_single_step(td);
				PROC_UNLOCK(p);
			}
			error = suword32(&ku->ku_mailbox->km_lwp,
			    td->td_tid);
			if (error)
				goto out;
			error = suword(&ku->ku_mailbox->km_curthread, 0);
			if (error)
				goto out;
		}

		/*
		 * Unhook the list of completed threads.
		 * Anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
		    (error = thread_link_mboxes(p, ku)) != 0)
			goto out;
	}
	if (!uts_crit) {
		nanotime(&ts);
		error = copyout(&ts, &ku->ku_mailbox->km_timeofday,
		    sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * how do we do that?
		 */
		PROC_LOCK(p);
		psignal(p, SIGSEGV);
		PROC_UNLOCK(p);
	} else {
		/*
		 * Optimisation:
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td);
	}

	ku->ku_mflags = 0;
	td->td_mailbox = NULL;
	return (error);	/* go sync */
}
/*
 * This is called after ptrace resumed a process; force all
 * virtual CPUs to schedule an upcall for the SA process,
 * because the debugger may have changed something in userland
 * and we should notify the UTS as soon as possible.
 */
void
thread_continued(struct proc *p)
{
	struct kse_upcall *ku;
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(P_SHOULDSTOP(p), ("process not stopped"));

	if (!(p->p_flag & P_SA))
		return;

	if (p->p_flag & P_TRACED) {
		td = TAILQ_FIRST(&p->p_threads);
		if (td && (td->td_pflags & TDP_SA)) {
			FOREACH_UPCALL_IN_PROC(p, ku) {
				mtx_lock_spin(&sched_lock);
				ku->ku_flags |= KUF_DOUPCALL;
				mtx_unlock_spin(&sched_lock);
				wakeup(&p->p_completed);
			}
		}
	}
}