/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/imgact.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/kse.h>
#include <sys/ktr.h>

#include <vm/uma.h>
/*
 * KSEGRP related storage.
 */
static uma_zone_t upcall_zone;

extern int virtual_cpu;
extern int thread_debug;

extern int max_threads_per_proc;
extern int max_groups_per_proc;
extern int max_threads_hits;
extern struct mtx kse_zombie_lock;

TAILQ_HEAD(, kse_upcall) zombie_upcalls =
	TAILQ_HEAD_INITIALIZER(zombie_upcalls);
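/*
 * Note: upcalls released while sched_lock (a spin lock) is held cannot
 * be handed straight back to UMA, so they are parked on zombie_upcalls
 * under kse_zombie_lock and reclaimed later by kse_GC() below.
 */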
static int thread_update_usr_ticks(struct thread *td);
static void thread_alloc_spare(struct thread *td);
struct kse_upcall *
upcall_alloc(void)
{
	struct kse_upcall *ku;

	ku = uma_zalloc(upcall_zone, M_WAITOK | M_ZERO);
	return (ku);
}

void
upcall_free(struct kse_upcall *ku)
{

	uma_zfree(upcall_zone, ku);
}
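/*
 * Link an upcall into its ksegrp's list and account for it; the list
 * and the kg_numupcalls counter are protected by sched_lock.
 */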
void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
	ku->ku_ksegrp = kg;
	kg->kg_numupcalls++;
}
void
upcall_unlink(struct kse_upcall *ku)
{
	struct ksegrp *kg = ku->ku_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
	TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
	kg->kg_numupcalls--;
	upcall_stash(ku);
}
void
upcall_remove(struct thread *td)
{

	mtx_assert(&sched_lock, MA_OWNED);
	if (td->td_upcall != NULL) {
		td->td_upcall->ku_owner = NULL;
		upcall_unlink(td->td_upcall);
		td->td_upcall = NULL;
	}
}
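/*
 * KSE syscall: switch the calling thread to the user context saved in
 * the given thread mailbox, optionally installing that mailbox as the
 * thread's current one.
 */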
#ifndef _SYS_SYSPROTO_H_
struct kse_switchin_args {
	struct kse_thr_mailbox *tmbx;
	int flags;
};
#endif
int
kse_switchin(struct thread *td, struct kse_switchin_args *uap)
{
	struct kse_thr_mailbox tmbx;
	struct kse_upcall *ku;
	int error;

	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	error = (uap->tmbx == NULL) ? EINVAL : 0;
	if (!error)
		error = copyin(uap->tmbx, &tmbx, sizeof(tmbx));
	if (!error && (uap->flags & KSE_SWITCHIN_SETTMBX))
		error = (suword(&ku->ku_mailbox->km_curthread,
		    (long)uap->tmbx) != 0 ? EINVAL : 0);
	if (!error)
		error = set_mcontext(td, &tmbx.tm_context.uc_mcontext);
	if (!error) {
		suword32(&uap->tmbx->tm_lwp, td->td_tid);
		if (uap->flags & KSE_SWITCHIN_SETTMBX) {
			td->td_mailbox = uap->tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
		}
		if (td->td_proc->p_flag & P_TRACED) {
			if (tmbx.tm_dflags & TMDF_SSTEP)
				ptrace_single_step(td);
			else
				ptrace_clear_single_step(td);
			if (tmbx.tm_dflags & TMDF_SUSPEND) {
				mtx_lock_spin(&sched_lock);
				/* fuword can block, check again */
				if (td->td_upcall)
					ku->ku_flags |= KUF_DOUPCALL;
				mtx_unlock_spin(&sched_lock);
			}
		}
	}
	return ((error == 0) ? EJUSTRETURN : error);
}
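/*
 * KSE syscall: deliver a signal or an interrupt/restart indication to
 * the thread identified by its thread mailbox pointer, or perform one
 * of the other sub-commands (sigexit, debugger suspend, execve).
 */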
/*
struct kse_thr_interrupt_args {
	struct kse_thr_mailbox * tmbx;
	int cmd;
	long data;
};
*/
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
	struct kse_execve_args args;
	struct image_args iargs;
	struct proc *p;
	struct thread *td2;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;
	int error;

	p = td->td_proc;

	if (!(p->p_flag & P_SA))
		return (EINVAL);

	switch (uap->cmd) {
	case KSE_INTR_SENDSIG:
		if (uap->data < 0 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
	case KSE_INTR_INTERRUPT:
	case KSE_INTR_RESTART:
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2->td_mailbox == uap->tmbx)
				break;
		}
		if (td2 == NULL) {
			mtx_unlock_spin(&sched_lock);
			PROC_UNLOCK(p);
			return (ESRCH);
		}
		if (uap->cmd == KSE_INTR_SENDSIG) {
			if (uap->data > 0) {
				td2->td_flags &= ~TDF_INTERRUPT;
				mtx_unlock_spin(&sched_lock);
				tdsignal(td2, (int)uap->data, SIGTARGET_TD);
			} else {
				mtx_unlock_spin(&sched_lock);
			}
		} else {
			td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
			if (TD_CAN_UNBIND(td2))
				td2->td_upcall->ku_flags |= KUF_DOUPCALL;
			if (uap->cmd == KSE_INTR_INTERRUPT)
				td2->td_intrval = EINTR;
			else
				td2->td_intrval = ERESTART;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR))
				sleepq_abort(td2);
			mtx_unlock_spin(&sched_lock);
		}
		PROC_UNLOCK(p);
		break;
	case KSE_INTR_SIGEXIT:
		if (uap->data < 1 || uap->data > _SIG_MAXSIG)
			return (EINVAL);
		PROC_LOCK(p);
		sigexit(td, (int)uap->data);
		/* NOTREACHED */

	case KSE_INTR_DBSUSPEND:
		/* This sub-command is only for a bound thread. */
		if (td->td_pflags & TDP_SA)
			return (EINVAL);
		ku = td->td_upcall;
		tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
		if (tmbx == NULL || tmbx == (void *)-1)
			return (EINVAL);
		flags = 0;
		while ((p->p_flag & P_TRACED) && !(p->p_flag & P_SINGLE_EXIT)) {
			flags = fuword32(&tmbx->tm_dflags);
			if (!(flags & TMDF_SUSPEND))
				break;
			PROC_LOCK(p);
			mtx_lock_spin(&sched_lock);
			thread_stopped(p);
			thread_suspend_one(td);
			PROC_UNLOCK(p);
			mi_switch(SW_VOL, NULL);
			mtx_unlock_spin(&sched_lock);
		}
		return (0);

	case KSE_INTR_EXECVE:
		error = copyin((void *)uap->data, &args, sizeof(args));
		if (error)
			return (error);
		error = exec_copyin_args(&iargs, args.path, UIO_USERSPACE,
		    args.argv, args.envp);
		if (error == 0)
			error = kern_execve(td, &iargs, NULL);
		exec_free_args(&iargs);
		if (error == 0) {
			PROC_LOCK(p);
			SIGSETOR(td->td_siglist, args.sigpend);
			PROC_UNLOCK(p);
			kern_sigprocmask(td, SIG_SETMASK, &args.sigmask,
			    NULL, 0);
		}
		return (error);

	default:
		return (EINVAL);
	}
	return (0);
}
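/*
 * KSE syscall: called by the UTS when an upcall context is done with;
 * the final exiting upcall also takes the process out of threaded mode.
 */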
/*
struct kse_exit_args {
	register_t dummy;
};
*/
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku, *ku2;
	int error, count;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/*
	 * Ensure that this is only called from the UTS.
	 */
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	/*
	 * Calculate the existing non-exiting upcalls in this ksegrp.
	 * If we are the last upcall but there are still other threads,
	 * then do not exit. We need the other threads to be able to
	 * complete whatever they are doing.
	 * XXX This relies on the userland knowing what to do if we return.
	 * It may be a better choice to convert ourselves into a kse_release
	 * (or similar) and wait in the kernel to be needed.
	 */
	count = 0;
	mtx_lock_spin(&sched_lock);
	FOREACH_UPCALL_IN_GROUP(kg, ku2) {
		if (ku2->ku_flags & KUF_EXITING)
			count++;
	}
	if ((kg->kg_numupcalls - count) == 1 &&
	    (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
		return (EDEADLK);
	}
	ku->ku_flags |= KUF_EXITING;
	mtx_unlock_spin(&sched_lock);
	/*
	 * Mark the UTS mailbox as having been finished with.
	 * If that fails then just go for a segfault.
	 * XXX need to check that it can be delivered without a mailbox.
	 */
	error = suword32(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
	if (!(td->td_pflags & TDP_SA))
		if (suword32(&td->td_mailbox->tm_lwp, 0))
			error = EFAULT;
	PROC_LOCK(p);
	if (error)
		psignal(p, SIGSEGV);
	mtx_lock_spin(&sched_lock);
	upcall_remove(td);
	if (p->p_numthreads != 1) {
		/*
		 * If we are not the last thread, but we are the last
		 * thread in this ksegrp, then by definition this is not
		 * the last group and we need to clean it up as well.
		 * thread_exit will clean up the kseg as needed.
		 */
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	/*
	 * This is the last thread. Just return to the user.
	 * We know that there is only one ksegrp too, as any others
	 * would have been discarded in previous calls to thread_exit().
	 * Effectively we have left threading mode..
	 * The only real thing left to do is ensure that the
	 * scheduler sets our concurrency back to 1 as that may be a
	 * resource leak otherwise.
	 * This is an A[PB]I issue.. what SHOULD we do?
	 * One possibility is to return to the user. It may not cope well.
	 * The other possibility would be to let the process exit.
	 */
	td->td_pflags &= ~TDP_SA;
	p->p_flag &= ~P_SA;
	sched_set_concurrency(td->td_ksegrp, 1);
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (0);
}
/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall. Only error cases return.
 */
/*
struct kse_release_args {
	struct timespec *timeout;
};
*/
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct timespec timeout;
	struct timeval tv;
	sigset_t sigset;
	int error;

	p = td->td_proc;
	kg = td->td_ksegrp;
	if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
		return (EINVAL);
	if (uap->timeout != NULL) {
		if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
			return (error);
		TIMESPEC_TO_TIMEVAL(&tv, &timeout);
	}
	if (td->td_pflags & TDP_SA)
		td->td_pflags |= TDP_UPCALLING;
	else {
		ku->ku_mflags = fuword32(&ku->ku_mailbox->km_flags);
		if (ku->ku_mflags == -1) {
			PROC_LOCK(p);
			sigexit(td, SIGSEGV);
		}
	}
	PROC_LOCK(p);
	if (ku->ku_mflags & KMF_WAITSIGEVENT) {
		/* UTS wants to wait for signal event. */
		if (!(p->p_flag & P_SIGEVENT) &&
		    !(ku->ku_flags & KUF_DOUPCALL)) {
			td->td_kflags |= TDK_KSERELSIG;
			error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
			    "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSERELSIG | TDK_WAKEUP);
		}
		p->p_flag &= ~P_SIGEVENT;
		sigset = p->p_siglist;
		PROC_UNLOCK(p);
		error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
		    sizeof(sigset));
	} else {
		if ((ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    ((ku->ku_mflags & KMF_NOCOMPLETED) ||
		    (kg->kg_completed == NULL))) {
			kg->kg_upsleeps++;
			td->td_kflags |= TDK_KSEREL;
			error = msleep(&kg->kg_completed, &p->p_mtx,
			    PPAUSE|PCATCH, "kserel",
			    (uap->timeout ? tvtohz(&tv) : 0));
			td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP);
			kg->kg_upsleeps--;
		}
		PROC_UNLOCK(p);
	}
	if (ku->ku_flags & KUF_DOUPCALL) {
		mtx_lock_spin(&sched_lock);
		ku->ku_flags &= ~KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}
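/*
 * KSE syscall: wake a UTS sleeping in kse_release(); either the upcall
 * that owns a specific mailbox, or any sleeper in our own ksegrp.
 */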
/*
struct kse_wakeup_args {
	struct kse_mailbox *mbx;
};
*/
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
	struct proc *p;
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td2;

	p = td->td_proc;
	td2 = NULL;
	ku = NULL;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_SA))
		return (EINVAL);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (uap->mbx) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				if (ku->ku_mailbox == uap->mbx)
					break;
			}
			if (ku)
				break;
		}
	} else {
		kg = td->td_ksegrp;
		if (kg->kg_upsleeps) {
			mtx_unlock_spin(&sched_lock);
			wakeup(&kg->kg_completed);
			PROC_UNLOCK(p);
			return (0);
		}
		ku = TAILQ_FIRST(&kg->kg_upcalls);
	}
	if (ku == NULL) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		return (ESRCH);
	}
	if ((td2 = ku->ku_owner) == NULL) {
		mtx_unlock_spin(&sched_lock);
		panic("%s: no owner", __func__);
	} else if (td2->td_kflags & (TDK_KSEREL | TDK_KSERELSIG)) {
		mtx_unlock_spin(&sched_lock);
		if (!(td2->td_kflags & TDK_WAKEUP)) {
			td2->td_kflags |= TDK_WAKEUP;
			if (td2->td_kflags & TDK_KSEREL)
				sleepq_remove(td2, &kg->kg_completed);
			else
				sleepq_remove(td2, &p->p_siglist);
		}
	} else {
		ku->ku_flags |= KUF_DOUPCALL;
		mtx_unlock_spin(&sched_lock);
	}
	PROC_UNLOCK(p);
	return (0);
}
/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall.
 * All other situations, do allocate max new KSEs and schedule an upcall.
 *
 * XXX should be changed so that 'first' behaviour lasts for as long
 * as you have not made a kse in this ksegrp. i.e. as long as we do not have
 * a mailbox..
 */
/*
struct kse_create_args {
	struct kse_mailbox *mbx;
	int newgroup;
};
*/
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
	struct ksegrp *newkg;
	struct ksegrp *kg;
	struct proc *p;
	struct kse_mailbox mbx;
	struct kse_upcall *newku;
	int err, ncpus, sa = 0, first = 0;
	struct thread *newtd;

	p = td->td_proc;
	kg = td->td_ksegrp;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
		return (err);

	ncpus = mp_ncpus;
	if (virtual_cpu != 0)
		ncpus = virtual_cpu;
	/*
	 * If the new UTS mailbox says that this
	 * will be a BOUND lwp, then it had better
	 * have its thread mailbox already there.
	 * In addition, this ksegrp will be limited to
	 * a concurrency of 1. There is more on this later.
	 */
	if (mbx.km_flags & KMF_BOUND) {
		if (mbx.km_curthread == NULL)
			return (EINVAL);
		ncpus = 1;
	} else {
		sa = TDP_SA;
	}

	PROC_LOCK(p);
	/*
	 * Processes using the other threading model can't
	 * suddenly start calling this one.
	 */
	if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
		PROC_UNLOCK(p);
		return (EINVAL);
	}
	/*
	 * Limit it to NCPU upcall contexts per ksegrp in any case.
	 * There is a small race here as we don't hold proclock
	 * until we inc the ksegrp count, but it's not really a big problem
	 * if we get one too many, but we save a proc lock.
	 */
	if ((!uap->newgroup) && (kg->kg_numupcalls >= ncpus)) {
		PROC_UNLOCK(p);
		return (EPROCLIM);
	}
	if (!(p->p_flag & P_SA)) {
		first = 1;
		p->p_flag |= P_SA|P_HADTHREADS;
	}
	PROC_UNLOCK(p);
	/*
	 * If we are going to be bound, then we need to be either
	 * a new group, or the first call ever. In either
	 * case we will be creating (or be) the only thread in a group,
	 * and the concurrency will be set to 1.
	 * This is not quite right, as we may still make ourself
	 * bound after making other ksegrps but it will do for now.
	 * The library will only try to do this much.
	 */
	if (!sa && !(uap->newgroup || first))
		return (EINVAL);

	if (uap->newgroup) {
		newkg = ksegrp_alloc();
		bzero(&newkg->kg_startzero,
		    __rangeof(struct ksegrp, kg_startzero, kg_endzero));
		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
		    __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
		sched_init_concurrency(newkg);
		PROC_LOCK(p);
		if (p->p_numksegrps >= max_groups_per_proc) {
			PROC_UNLOCK(p);
			ksegrp_free(newkg);
			return (EPROCLIM);
		}
		ksegrp_link(newkg, p);
		mtx_lock_spin(&sched_lock);
		sched_fork_ksegrp(td, newkg);
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	} else {
		/*
		 * We want to make a thread in our own ksegrp.
		 * If we are just the first call, either kind
		 * is ok, but if not then either we must be
		 * already an upcallable thread to make another,
		 * or a bound thread to make one of those.
		 * Once again, not quite right but good enough for now.. XXXKSE
		 */
		if (!first && ((td->td_pflags & TDP_SA) != sa))
			return (EINVAL);
		newkg = kg;
	}

	/*
	 * This test is a bit "indirect".
	 * It might simplify things if we made a direct way of testing
	 * if a ksegrp has been worked on before.
	 * In the case of a bound request and the concurrency being set to
	 * one, the concurrency will already be 1 so it's just inefficient
	 * but not dangerous to call this again. XXX
	 */
	if (newkg->kg_numupcalls == 0) {
		/*
		 * Initialize KSE group with the appropriate
		 * concurrency.
		 *
		 * For a multiplexed group, create as much concurrency
		 * as the number of physical cpus.
		 * This increases concurrency in the kernel even if the
		 * userland is not MP safe and can only run on a single CPU.
		 * In an ideal world, every physical cpu should execute a
		 * thread. If there is enough concurrency, threads in the
		 * kernel can be executed in parallel on different cpus at
		 * full speed without being restricted by the number of
		 * upcalls the userland provides.
		 * Adding more upcall structures only increases concurrency
		 * in the kernel.
		 *
		 * For a bound thread group, because there is only one thread
		 * in the group, we only set the concurrency for the group
		 * to 1. A thread in this kind of group will never schedule
		 * an upcall when blocked. This simulates pthread system
		 * scope thread behaviour.
		 */
		sched_set_concurrency(newkg, ncpus);
	}
	/*
	 * Even bound LWPs get a mailbox and an upcall to hold it.
	 */
	newku = upcall_alloc();
	newku->ku_mailbox = uap->mbx;
	newku->ku_func = mbx.km_func;
	bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

	/*
	 * For the first call this may not have been set.
	 * Of course nor may it actually be needed.
	 */
	if (td->td_standin == NULL)
		thread_alloc_spare(td);
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	if (newkg->kg_numupcalls >= ncpus) {
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
		upcall_free(newku);
		return (EPROCLIM);
	}

	/*
	 * If this is the first call, and we are a normal thread,
	 * then transfer all the signals back to the 'process'.
	 * SA threading will make a special thread to handle them.
	 */
	if (first && sa) {
		SIGSETOR(p->p_siglist, td->td_siglist);
		SIGEMPTYSET(td->td_siglist);
		SIGFILLSET(td->td_sigmask);
		SIG_CANTMASK(td->td_sigmask);
	}

	/*
	 * Make the new upcall available to the ksegrp.
	 * It may or may not use it, but it's available.
	 */
	upcall_link(newku, newkg);
	PROC_UNLOCK(p);
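	/*
	 * km_quantum is expressed in microseconds and 'tick' is the
	 * number of microseconds per clock tick, so this converts the
	 * upcall quantum to clock ticks with a floor of one tick.
	 */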
	if (mbx.km_quantum)
		newkg->kg_upquantum = max(1, mbx.km_quantum / tick);
	/*
	 * Each upcall structure has an owner thread, find which
	 * one owns it.
	 */
	if (uap->newgroup) {
		/*
		 * Because the new ksegrp has no thread,
		 * create an initial upcall thread to own it.
		 */
		newtd = thread_schedule_upcall(td, newku);
	} else {
		/*
		 * If the current thread doesn't own an upcall structure,
		 * just assign the upcall to it.
		 */
		if (td->td_upcall == NULL) {
			newku->ku_owner = td;
			td->td_upcall = newku;
			newtd = td;
		} else {
			/*
			 * Create a new upcall thread to own it.
			 */
			newtd = thread_schedule_upcall(td, newku);
		}
	}
	mtx_unlock_spin(&sched_lock);
	/*
	 * Let the UTS instance know its LWPID.
	 * It doesn't really care. But the debugger will.
	 */
	suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid);

	/*
	 * In the same manner, if the UTS has a current user thread,
	 * then it is also running on this LWP so set it as well.
	 * The library could do that of course.. but why not..
	 */
	if (mbx.km_curthread)
		suword32(&mbx.km_curthread->tm_lwp, newtd->td_tid);

	if (sa) {
		newtd->td_pflags |= TDP_SA;
	} else {
		newtd->td_pflags &= ~TDP_SA;

		/*
		 * Since a library will use the mailbox pointer to
		 * identify even a bound thread, and the mailbox pointer
		 * will never be allowed to change after this syscall
		 * for a bound thread, set it here so the library can
		 * find the thread after the syscall returns.
		 */
		newtd->td_mailbox = mbx.km_curthread;
		if (newtd != td) {
			/*
			 * If we did create a new thread then
			 * make sure it goes to the right place
			 * when it starts up, and make sure that it runs
			 * at full speed when it gets there.
			 * thread_schedule_upcall() copies all cpu state
			 * to the new thread, so we should clear the single
			 * step flag here.
			 */
			cpu_set_upcall_kse(newtd, newku->ku_func,
			    newku->ku_mailbox, &newku->ku_stack);
			if (p->p_flag & P_TRACED)
				ptrace_clear_single_step(newtd);
		}
	}

	/*
	 * If we are starting a new thread, kick it off.
	 */
	if (newtd != td) {
		mtx_lock_spin(&sched_lock);
		setrunqueue(newtd, SRQ_BORING);
		mtx_unlock_spin(&sched_lock);
	}
	return (0);
}
/*
 * Initialize global thread allocation resources.
 */
void
kseinit(void)
{

	upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}
/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{

	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
	mtx_unlock_spin(&kse_zombie_lock);
}
/*
 * Reap zombie kse resources.
 */
void
kse_GC(void)
{
	struct kse_upcall *ku_first, *ku_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant..
	 */
	if (!TAILQ_EMPTY(&zombie_upcalls)) {
		mtx_lock_spin(&kse_zombie_lock);
		ku_first = TAILQ_FIRST(&zombie_upcalls);
		if (ku_first)
			TAILQ_INIT(&zombie_upcalls);
		mtx_unlock_spin(&kse_zombie_lock);
		while (ku_first) {
			ku_next = TAILQ_NEXT(ku_first, ku_link);
			upcall_free(ku_first);
			ku_first = ku_next;
		}
	}
}
/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td, int willexit)
{
	struct proc *p;
	struct ksegrp *kg;
	uintptr_t mbx;
	void *addr;
	int error = 0, sig;
	mcontext_t mc;

	p = td->td_proc;
	kg = td->td_ksegrp;

	/*
	 * Post sync signal, or process SIGKILL and SIGSTOP.
	 * For sync signal, it is only possible when the signal is not
	 * caught by userland or process is being debugged.
	 */
	PROC_LOCK(p);
	if (td->td_flags & TDF_NEEDSIGCHK) {
		mtx_lock_spin(&sched_lock);
		td->td_flags &= ~TDF_NEEDSIGCHK;
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
	}
	if (willexit)
		SIGFILLSET(td->td_sigmask);
	PROC_UNLOCK(p);

	/* Export the user/machine context. */
	get_mcontext(td, &mc, 0);
	addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
	error = copyout(&mc, addr, sizeof(mcontext_t));
	if (error)
		goto bad;

	addr = (caddr_t)(&td->td_mailbox->tm_lwp);
	if (suword32(addr, 0)) {
		error = EFAULT;
		goto bad;
	}
	/* Get address in latest mbox of list pointer */
	addr = (void *)(&td->td_mailbox->tm_next);
	/*
	 * Put the saved address of the previous first
	 * entry into this one.
	 */
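	/*
	 * The loop below pushes the mailbox onto the front of the
	 * user-space list with a store-then-recheck dance: write the old
	 * list head into tm_next, then re-read kg_completed under the
	 * proc lock and retry if another thread changed it meanwhile.
	 */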
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			error = EFAULT;
			goto bad;
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			/*
			 * The thread context may be taken away by
			 * other upcall threads when we unlock the
			 * process lock. It's no longer valid to
			 * use it again in any other places.
			 */
			td->td_mailbox = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	td->td_usticks = 0;
	return (0);

bad:
	PROC_LOCK(p);
	sigexit(td, SIGSEGV);
	return (error);
}
/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

	addr = (void *)(&ku->ku_mailbox->km_completed);
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx)) {
			PROC_LOCK(p);
			psignal(p, SIGSEGV);
			PROC_UNLOCK(p);
			return (EFAULT);
		}
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			break;
		}
		PROC_UNLOCK(p);
	}
	return (0);
}
/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
	struct thread *td = curthread;

	if (!(td->td_pflags & TDP_SA))
		return (0);
	if (user) {
		/* Current always do via ast() */
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_ASTPENDING;
		mtx_unlock_spin(&sched_lock);
		td->td_uuticks++;
	} else if (td->td_mailbox != NULL)
		td->td_usticks++;
	return (0);
}
/*
 * Export state clock ticks for userland.
 */
static int
thread_update_usr_ticks(struct thread *td)
{
	struct proc *p = td->td_proc;
	caddr_t addr;
	u_int uticks;

	if (td->td_mailbox == NULL)
		return (-1);

	if ((uticks = td->td_uuticks) != 0) {
		td->td_uuticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_uticks;
		if (suword32(addr, uticks+fuword32(addr)))
			goto error;
	}
	if ((uticks = td->td_usticks) != 0) {
		td->td_usticks = 0;
		addr = (caddr_t)&td->td_mailbox->tm_sticks;
		if (suword32(addr, uticks+fuword32(addr)))
			goto error;
	}
	return (0);

error:
	PROC_LOCK(p);
	psignal(p, SIGSEGV);
	PROC_UNLOCK(p);
	return (-2);
}
/*
 * This function is intended to be used to initialize a spare thread
 * for upcall. Initialize thread's large data area outside sched_lock
 * for thread_schedule_upcall(). The crhold is also here to get it out
 * from the schedlock as it has a mutex op itself.
 * XXX BUG.. we need to get the cr ref after the thread has
 * checked and changed its own, not 6 months before...
 */
static void
thread_alloc_spare(struct thread *td)
{
	struct thread *spare;

	if (td->td_standin)
		return;
	spare = thread_alloc();
	td->td_standin = spare;
	bzero(&spare->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	spare->td_proc = td->td_proc;
	spare->td_ucred = crhold(td->td_ucred);
}
/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * Schedule an upcall thread on specified kse_upcall,
	 * the kse_upcall must be free.
	 * td must have a spare thread.
	 */
	KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
	} else {
		panic("no reserve thread when scheduling an upcall");
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	/*
	 * Bzero already done in thread_alloc_spare() because we can't
	 * do the crhold here because we are in schedlock already.
	 */
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ku->ku_ksegrp);
	/* Inherit parts of blocked thread's context as a good template. */
	cpu_set_upcall(td2, td);
	/* Let the new thread become owner of the upcall. */
	ku->ku_owner = td2;
	td2->td_upcall = ku;
	td2->td_flags = 0;
	td2->td_pflags = TDP_SA|TDP_UPCALLING;
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	SIGFILLSET(td2->td_sigmask);
	SIG_CANTMASK(td2->td_sigmask);
	sched_fork_thread(td, td2);
	return (td2);	/* bogus.. should be a void function */
}
/*
 * It is only used when the thread generated a trap and the process is
 * being debugged.
 */
void
thread_signal_add(struct thread *td, int sig)
{
	struct proc *p;
	struct sigacts *ps;
	siginfo_t siginfo;
	int error;

	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	ps = p->p_sigacts;
	mtx_assert(&ps->ps_mtx, MA_OWNED);

	cpu_thread_siginfo(sig, 0, &siginfo);
	mtx_unlock(&ps->ps_mtx);
	SIGADDSET(td->td_sigmask, sig);
	PROC_UNLOCK(p);
	error = copyout(&siginfo, &td->td_mailbox->tm_syncsig,
	    sizeof(siginfo));
	if (error) {
		PROC_LOCK(p);
		sigexit(td, SIGSEGV);
	}
	PROC_LOCK(p);
	mtx_lock(&ps->ps_mtx);
}
#include "opt_sched.h"
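/*
 * Called at context-switch time: if the outgoing thread can unbind
 * from its upcall, hand the upcall to a freshly scheduled upcall
 * thread so the UTS can keep this virtual CPU busy while we block.
 */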
struct thread *
thread_switchout(struct thread *td, int flags, struct thread *nextthread)
{
	struct kse_upcall *ku;
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If the outgoing thread is in threaded group and has never
	 * scheduled an upcall, decide whether this is a short
	 * or long term event and thus whether or not to schedule
	 * an upcall.
	 * If it is a short term event, just suspend it in
	 * a way that takes its KSE with it.
	 * Select the events for which we want to schedule upcalls.
	 * For now it's just sleep, or if the thread is suspended but
	 * the process wide suspending flag is not set (debugger
	 * insists or process suspend requested).
	 * XXXKSE eventually almost any inhibition could do.
	 */
	if (TD_CAN_UNBIND(td) && (td->td_standin) &&
	    (TD_ON_SLEEPQ(td) || (TD_IS_SUSPENDED(td) &&
	    !P_SHOULDSTOP(td->td_proc)))) {
		/*
		 * Release ownership of upcall, and schedule an upcall
		 * thread; this new upcall thread becomes the owner of
		 * the upcall structure. It will be ahead of us in the
		 * run queue, so as we are stopping, it should either
		 * start up immediately, or at least before us if
		 * we release our slot.
		 */
		ku = td->td_upcall;
		ku->ku_owner = NULL;
		td->td_upcall = NULL;
		td->td_pflags &= ~TDP_CAN_UNBIND;
		td2 = thread_schedule_upcall(td, ku);
		if (flags & SW_INVOL || nextthread) {
			setrunqueue(td2, SRQ_YIELDING);
		} else {
			/*
			 * Keep up with reality.. we have one extra thread
			 * in the picture.. and it's 'running'.
			 */
			return (td2);
		}
	}
	return (nextthread);
}
/*
 * Setup done on the thread when it enters the kernel.
 */
void
thread_user_enter(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct kse_upcall *ku;
	struct kse_thr_mailbox *tmbx;
	uint32_t flags;

	/*
	 * First check that we shouldn't just abort. We
	 * can suspend it here or just exit.
	 */
	if (__predict_false(P_SHOULDSTOP(p))) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (!(td->td_pflags & TDP_SA))
		return;

	/*
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is.
	 */
	ku = td->td_upcall;

	KASSERT(ku != NULL, ("no upcall owned"));
	KASSERT(ku->ku_owner == td, ("wrong owner"));
	KASSERT(!TD_CAN_UNBIND(td), ("can unbind"));
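	/*
	 * Make sure a spare thread exists before touching the (pageable)
	 * mailbox: later code may need to schedule an upcall while
	 * holding sched_lock, where allocation is not possible.
	 */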
	if (td->td_standin == NULL)
		thread_alloc_spare(td);
	ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
	tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
	if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
	    (ku->ku_mflags & KMF_NOUPCALL)) {
		td->td_mailbox = NULL;
	} else {
		flags = fuword32(&tmbx->tm_flags);
		/*
		 * On some architectures, the TP register points to the
		 * thread mailbox but not to the kse mailbox; userland
		 * can not atomically clear km_curthread, but it can
		 * use the TP register and set TMF_NOUPCALL in the
		 * thread flags to indicate a critical region.
		 */
		if (flags & TMF_NOUPCALL) {
			td->td_mailbox = NULL;
		} else {
			td->td_mailbox = tmbx;
			td->td_pflags |= TDP_CAN_UNBIND;
			if (__predict_false(p->p_flag & P_TRACED)) {
				flags = fuword32(&tmbx->tm_dflags);
				if (flags & TMDF_SUSPEND) {
					mtx_lock_spin(&sched_lock);
					/* fuword can block, check again */
					if (td->td_upcall)
						ku->ku_flags |= KUF_DOUPCALL;
					mtx_unlock_spin(&sched_lock);
				}
			}
		}
	}
}
/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	struct kse_upcall *ku;
	struct ksegrp *kg, *kg2;
	struct proc *p;
	struct timespec ts;
	int error = 0, upcalls, uts_crit;

	/* Nothing to do with a bound thread. */
	if (!(td->td_pflags & TDP_SA))
		return (0);

	/*
	 * Update stat clock count for userland.
	 */
	if (td->td_mailbox != NULL) {
		thread_update_usr_ticks(td);
		uts_crit = 0;
	} else {
		uts_crit = 1;
	}

	p = td->td_proc;
	kg = td->td_ksegrp;
	ku = td->td_upcall;

	/*
	 * This thread has not started any upcall.
	 * If there is no work to report other than ourself,
	 * then it can return direct to userland.
	 */
	if (TD_CAN_UNBIND(td)) {
		td->td_pflags &= ~TDP_CAN_UNBIND;
		if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
		    (kg->kg_completed == NULL) &&
		    (ku->ku_flags & KUF_DOUPCALL) == 0 &&
		    (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
			nanotime(&ts);
			error = copyout(&ts,
			    (caddr_t)&ku->ku_mailbox->km_timeofday,
			    sizeof(ts));
			td->td_mailbox = 0;
			ku->ku_mflags = 0;
			if (error)
				goto out;
			return (0);
		}
		thread_export_context(td, 0);
		/*
		 * There is something to report, and we own an upcall
		 * structure, so we can go to userland.
		 * Turn ourself into an upcall thread.
		 */
		td->td_pflags |= TDP_UPCALLING;
	} else if (td->td_mailbox && (ku == NULL)) {
		thread_export_context(td, 1);
		PROC_LOCK(p);
		if (kg->kg_upsleeps)
			wakeup(&kg->kg_completed);
		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		thread_exit();
		/* NOTREACHED */
	}
	KASSERT(ku != NULL, ("upcall is NULL"));
	KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
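	/*
	 * Throttle: if the process has accumulated more kernel threads
	 * than the limit allows, sleep until the number of upcall
	 * contexts covers them again (or until woken or interrupted).
	 */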
	if (p->p_numthreads > max_threads_per_proc) {
		max_threads_hits++;
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		p->p_maxthrwaits++;
		while (p->p_numthreads > max_threads_per_proc) {
			upcalls = 0;
			FOREACH_KSEGRP_IN_PROC(p, kg2) {
				if (kg2->kg_numupcalls == 0)
					upcalls++;
				else
					upcalls += kg2->kg_numupcalls;
			}
			if (upcalls >= max_threads_per_proc)
				break;
			mtx_unlock_spin(&sched_lock);
			if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
			    "maxthreads", hz/10) != EWOULDBLOCK) {
				mtx_lock_spin(&sched_lock);
				break;
			} else {
				mtx_lock_spin(&sched_lock);
			}
		}
		p->p_maxthrwaits--;
		mtx_unlock_spin(&sched_lock);
		PROC_UNLOCK(p);
	}
	if (td->td_pflags & TDP_UPCALLING) {
		uts_crit = 0;
		kg->kg_nextupcall = ticks + kg->kg_upquantum;
		/*
		 * There is no more work to do and we are going to ride
		 * this thread up to userland as an upcall.
		 * Do the last parts of the setup needed for the upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		td->td_pflags &= ~TDP_UPCALLING;
		if (ku->ku_flags & KUF_DOUPCALL) {
			mtx_lock_spin(&sched_lock);
			ku->ku_flags &= ~KUF_DOUPCALL;
			mtx_unlock_spin(&sched_lock);
		}
		/*
		 * Set user context to the UTS.
		 */
		if (!(ku->ku_mflags & KMF_NOUPCALL)) {
			cpu_set_upcall_kse(td, ku->ku_func, ku->ku_mailbox,
			    &ku->ku_stack);
			if (p->p_flag & P_TRACED)
				ptrace_clear_single_step(td);
			error = suword32(&ku->ku_mailbox->km_lwp,
			    td->td_tid);
			if (error)
				goto out;
			error = suword(&ku->ku_mailbox->km_curthread, 0);
			if (error)
				goto out;
		}
		/*
		 * Unhook the list of completed threads.
		 * Anything that completes after this gets to
		 * come in next time.
		 * Put the list of completed thread mailboxes on
		 * this KSE's mailbox.
		 */
		if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
		    (error = thread_link_mboxes(kg, ku)) != 0)
			goto out;
	}
	if (!uts_crit) {
		nanotime(&ts);
		error = copyout(&ts, &ku->ku_mailbox->km_timeofday,
		    sizeof(ts));
	}

out:
	if (error) {
		/*
		 * Things are going to be so screwed we should just kill
		 * the process.
		 * How do we do that?
		 */
		PROC_LOCK(p);
		psignal(p, SIGSEGV);
		PROC_UNLOCK(p);
	} else {
		/*
		 * Ensure that we have a spare thread available,
		 * for when we re-enter the kernel.
		 */
		if (td->td_standin == NULL)
			thread_alloc_spare(td);
	}

	ku->ku_mflags = 0;
	td->td_mailbox = NULL;
	td->td_usticks = 0;
	return (error);	/* go sync */
}
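/*
 * Called under the proc lock to check whether a thread sleeping in
 * kse_release() has already been told to wake up by kse_wakeup().
 */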
int
thread_upcall_check(struct thread *td)
{

	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
	if (td->td_kflags & TDK_WAKEUP)
		return (1);
	else
		return (0);
}
/*
 * Called after ptrace resumed a process; force all
 * virtual CPUs to schedule an upcall for the SA process,
 * because the debugger may have changed something in userland,
 * and we should notify the UTS as soon as possible.
 */
void
thread_continued(struct proc *p)
{
	struct ksegrp *kg;
	struct kse_upcall *ku;
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	if (!(p->p_flag & P_SA))
		return;

	if (p->p_flag & P_TRACED) {
		FOREACH_KSEGRP_IN_PROC(p, kg) {
			td = TAILQ_FIRST(&kg->kg_threads);
			if (td == NULL)
				continue;
			/* Not a SA group, nothing to do. */
			if (!(td->td_pflags & TDP_SA))
				continue;
			FOREACH_UPCALL_IN_GROUP(kg, ku) {
				ku->ku_flags |= KUF_DOUPCALL;
				wakeup(&kg->kg_completed);
				if (TD_IS_SUSPENDED(ku->ku_owner))
					thread_unsuspend_one(ku->ku_owner);
			}
		}
	}
}