/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/imgact.h>
#include <sys/mutex.h>
#include <sys/ptrace.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
/*
 * KSEGRP related storage.
 */
static uma_zone_t upcall_zone;

extern int virtual_cpu;
extern int thread_debug;
extern int max_threads_per_proc;
extern int max_groups_per_proc;
extern int max_threads_hits;
extern struct mtx kse_zombie_lock;

TAILQ_HEAD(, kse_upcall) zombie_upcalls =
    TAILQ_HEAD_INITIALIZER(zombie_upcalls);

static int thread_update_usr_ticks(struct thread *td);
static void thread_alloc_spare(struct thread *td);
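/*
 * Allocate a new zeroed upcall structure from the UMA zone.
 * May sleep (M_WAITOK).
 */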
struct kse_upcall *
upcall_alloc(void)
{
    struct kse_upcall *ku;

    ku = uma_zalloc(upcall_zone, M_WAITOK | M_ZERO);
    return (ku);
}
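/*
 * Release an upcall structure back to the UMA zone.
 */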
void
upcall_free(struct kse_upcall *ku)
{

    uma_zfree(upcall_zone, ku);
}
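/*
 * Link an upcall into a ksegrp's list of upcalls.
 * The caller must hold sched_lock.
 */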
void
upcall_link(struct kse_upcall *ku, struct ksegrp *kg)
{

    mtx_assert(&sched_lock, MA_OWNED);
    TAILQ_INSERT_TAIL(&kg->kg_upcalls, ku, ku_link);
    ku->ku_ksegrp = kg;
    kg->kg_numupcalls++;
}
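/*
 * Unlink an upcall from its ksegrp's list and stash it for reaping.
 * The upcall must no longer have an owner thread, and the caller
 * must hold sched_lock.
 */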
void
upcall_unlink(struct kse_upcall *ku)
{
    struct ksegrp *kg = ku->ku_ksegrp;

    mtx_assert(&sched_lock, MA_OWNED);
    KASSERT(ku->ku_owner == NULL, ("%s: have owner", __func__));
    TAILQ_REMOVE(&kg->kg_upcalls, ku, ku_link);
    kg->kg_numupcalls--;
    upcall_stash(ku);
}
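/*
 * Release the thread's hold on its upcall structure, if it has one,
 * and unlink the upcall.  The caller must hold sched_lock.
 */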
void
upcall_remove(struct thread *td)
{

    mtx_assert(&sched_lock, MA_OWNED);
    if (td->td_upcall != NULL) {
        td->td_upcall->ku_owner = NULL;
        upcall_unlink(td->td_upcall);
        td->td_upcall = NULL;
    }
}
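/*
 * System call: load the user context from the given thread mailbox
 * into the current thread and return to userland running that thread.
 * On success this does not return normally (EJUSTRETURN).
 */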
#ifndef _SYS_SYSPROTO_H_
struct kse_switchin_args {
    struct kse_thr_mailbox *tmbx;
    int flags;
};
#endif

int
kse_switchin(struct thread *td, struct kse_switchin_args *uap)
{
    struct kse_thr_mailbox tmbx;
    struct kse_upcall *ku;
    int error;

    if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
        return (EINVAL);
    error = (uap->tmbx == NULL) ? EINVAL : 0;
    if (!error)
        error = copyin(uap->tmbx, &tmbx, sizeof(tmbx));
    if (!error && (uap->flags & KSE_SWITCHIN_SETTMBX))
        error = (suword(&ku->ku_mailbox->km_curthread,
            (long)uap->tmbx) != 0 ? EINVAL : 0);
    if (!error)
        error = set_mcontext(td, &tmbx.tm_context.uc_mcontext);
    if (!error) {
        suword32(&uap->tmbx->tm_lwp, td->td_tid);
        if (uap->flags & KSE_SWITCHIN_SETTMBX) {
            td->td_mailbox = uap->tmbx;
            td->td_pflags |= TDP_CAN_UNBIND;
        }
        PROC_LOCK(td->td_proc);
        if (td->td_proc->p_flag & P_TRACED) {
            if (tmbx.tm_dflags & TMDF_SSTEP)
                ptrace_single_step(td);
            else
                ptrace_clear_single_step(td);
            if (tmbx.tm_dflags & TMDF_SUSPEND) {
                mtx_lock_spin(&sched_lock);
                /* fuword can block, check again */
                ku->ku_flags |= KUF_DOUPCALL;
                mtx_unlock_spin(&sched_lock);
            }
        }
        PROC_UNLOCK(td->td_proc);
    }
    return ((error == 0) ? EJUSTRETURN : error);
}
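/*
 * System call: direct an action at a user thread in this process,
 * identified by its thread mailbox: deliver a signal, interrupt or
 * restart a sleep, raise a process-exiting signal, suspend for the
 * debugger, or perform an execve.
 */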
#ifndef _SYS_SYSPROTO_H_
struct kse_thr_interrupt_args {
    struct kse_thr_mailbox *tmbx;
    int cmd;
    long data;
};
#endif

int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
    struct kse_execve_args args;
    struct image_args iargs;
    struct proc *p;
    struct thread *td2;
    struct kse_upcall *ku;
    struct kse_thr_mailbox *tmbx;
    uint32_t flags;
    int error;

    p = td->td_proc;
    if (!(p->p_flag & P_SA))
        return (EINVAL);
    switch (uap->cmd) {
    case KSE_INTR_SENDSIG:
        if (uap->data < 0 || uap->data > _SIG_MAXSIG)
            return (EINVAL);
    case KSE_INTR_INTERRUPT:
    case KSE_INTR_RESTART:
        mtx_lock_spin(&sched_lock);
        FOREACH_THREAD_IN_PROC(p, td2) {
            if (td2->td_mailbox == uap->tmbx)
                break;
        }
        if (td2 == NULL) {
            mtx_unlock_spin(&sched_lock);
            return (ESRCH);
        }
        if (uap->cmd == KSE_INTR_SENDSIG) {
            if (uap->data > 0) {
                td2->td_flags &= ~TDF_INTERRUPT;
                mtx_unlock_spin(&sched_lock);
                tdsignal(td2, (int)uap->data, SIGTARGET_TD);
            } else {
                mtx_unlock_spin(&sched_lock);
            }
        } else {
            td2->td_flags |= TDF_INTERRUPT | TDF_ASTPENDING;
            if (TD_CAN_UNBIND(td2))
                td2->td_upcall->ku_flags |= KUF_DOUPCALL;
            if (uap->cmd == KSE_INTR_INTERRUPT)
                td2->td_intrval = EINTR;
            else
                td2->td_intrval = ERESTART;
            if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR))
                sleepq_abort(td2, td2->td_intrval);
            mtx_unlock_spin(&sched_lock);
        }
        break;
    case KSE_INTR_SIGEXIT:
        if (uap->data < 1 || uap->data > _SIG_MAXSIG)
            return (EINVAL);
        sigexit(td, (int)uap->data);
        break;
    case KSE_INTR_DBSUSPEND:
        /* this sub-function is only for bound threads */
        if (td->td_pflags & TDP_SA)
            return (EINVAL);
        ku = td->td_upcall;
        tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
        if (tmbx == NULL || tmbx == (void *)-1)
            return (EINVAL);
        while ((p->p_flag & P_TRACED) && !(p->p_flag & P_SINGLE_EXIT)) {
            flags = fuword32(&tmbx->tm_dflags);
            if (!(flags & TMDF_SUSPEND))
                break;
            mtx_lock_spin(&sched_lock);
            thread_suspend_one(td);
            mi_switch(SW_VOL, NULL);
            mtx_unlock_spin(&sched_lock);
        }
        return (0);
    case KSE_INTR_EXECVE:
        error = copyin((void *)uap->data, &args, sizeof(args));
        if (error)
            return (error);
        error = exec_copyin_args(&iargs, args.path, UIO_USERSPACE,
            args.argv, args.envp);
        if (error == 0)
            error = kern_execve(td, &iargs, NULL);
        exec_free_args(&iargs);
        if (error == 0) {
            SIGSETOR(td->td_siglist, args.sigpend);
            kern_sigprocmask(td, SIG_SETMASK, &args.sigmask, NULL,
                0);
        }
        return (error);
    }
    return (0);
}
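/*
 * System call: called by the UTS to indicate that this KSE (upcall
 * context) is finished with.  The last thread of a process may not
 * exit this way; see below.
 */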
#ifndef _SYS_SYSPROTO_H_
struct kse_exit_args {
    register_t dummy;
};
#endif

int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
    struct proc *p;
    struct ksegrp *kg;
    struct kse_upcall *ku, *ku2;
    int error, count;

    p = td->td_proc;
    /*
     * Ensure that this is only called from the UTS.
     */
    if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
        return (EINVAL);

    kg = td->td_ksegrp;
    count = 0;

    /*
     * Count the existing non-exiting upcalls in this ksegrp.
     * If we are the last upcall but there are still other threads,
     * then do not exit.  We need the other threads to be able to
     * complete whatever they are doing.
     * XXX This relies on the userland knowing what to do if we return.
     * It may be a better choice to convert ourselves into a kse_release
     * (or similar) and wait in the kernel to be needed.
     */
    mtx_lock_spin(&sched_lock);
    FOREACH_UPCALL_IN_GROUP(kg, ku2) {
        if (ku2->ku_flags & KUF_EXITING)
            count++;
    }
    if ((kg->kg_numupcalls - count) == 1 &&
        (kg->kg_numthreads > 1)) {
        mtx_unlock_spin(&sched_lock);
        return (EDEADLK);
    }
    ku->ku_flags |= KUF_EXITING;
    mtx_unlock_spin(&sched_lock);

    /*
     * Mark the UTS mailbox as having been finished with.
     * If that fails then just go for a segfault.
     * XXX need to check that it can be delivered without a mailbox.
     */
    error = suword32(&ku->ku_mailbox->km_flags, ku->ku_mflags|KMF_DONE);
    if (!(td->td_pflags & TDP_SA))
        if (suword32(&td->td_mailbox->tm_lwp, 0))
            error = EFAULT;
    mtx_lock_spin(&sched_lock);
    upcall_remove(td);
    if (p->p_numthreads != 1) {
        /*
         * If we are not the last thread, but we are the last
         * thread in this ksegrp, then by definition this is not
         * the last group and we need to clean it up as well.
         * thread_exit will clean up the ksegrp as needed.
         */
        thread_exit();
        /* NOTREACHED */
    }
    /*
     * This is the last thread.  Just return to the user.
     * We know that there is only one ksegrp too, as any others
     * would have been discarded in previous calls to thread_exit().
     * Effectively we have left threading mode..
     * The only real thing left to do is ensure that the
     * scheduler sets our concurrency back to 1 as that may be a
     * resource leak otherwise.
     * This is an A[PB]I issue.. what SHOULD we do?
     * One possibility is to return to the user.  It may not cope well.
     * The other possibility would be to let the process exit.
     */
    mtx_unlock_spin(&sched_lock);
    return (0);
}
/*
 * Either becomes an upcall or waits for an awakening event and
 * then becomes an upcall.  Only error cases return.
 */
#ifndef _SYS_SYSPROTO_H_
struct kse_release_args {
    struct timespec *timeout;
};
#endif

int
kse_release(struct thread *td, struct kse_release_args *uap)
{
    struct proc *p;
    struct ksegrp *kg;
    struct kse_upcall *ku;
    struct timespec timeout;
    struct timeval tv;
    sigset_t sigset;
    int error;

    p = td->td_proc;
    kg = td->td_ksegrp;
    if ((ku = td->td_upcall) == NULL || TD_CAN_UNBIND(td))
        return (EINVAL);
    if (uap->timeout != NULL) {
        if ((error = copyin(uap->timeout, &timeout, sizeof(timeout))))
            return (error);
        TIMESPEC_TO_TIMEVAL(&tv, &timeout);
    }
    if (td->td_pflags & TDP_SA)
        td->td_pflags |= TDP_UPCALLING;
    else {
        ku->ku_mflags = fuword32(&ku->ku_mailbox->km_flags);
        if (ku->ku_mflags == -1) {
            sigexit(td, SIGSEGV);
        }
    }
    if (ku->ku_mflags & KMF_WAITSIGEVENT) {
        /* UTS wants to wait for signal event */
        if (!(p->p_flag & P_SIGEVENT) &&
            !(ku->ku_flags & KUF_DOUPCALL)) {
            td->td_kflags |= TDK_KSERELSIG;
            error = msleep(&p->p_siglist, &p->p_mtx, PPAUSE|PCATCH,
                "ksesigwait", (uap->timeout ? tvtohz(&tv) : 0));
            td->td_kflags &= ~(TDK_KSERELSIG | TDK_WAKEUP);
        }
        p->p_flag &= ~P_SIGEVENT;
        sigset = p->p_siglist;
        error = copyout(&sigset, &ku->ku_mailbox->km_sigscaught,
            sizeof(sigset));
    } else {
        if ((ku->ku_flags & KUF_DOUPCALL) == 0 &&
            ((ku->ku_mflags & KMF_NOCOMPLETED) ||
            (kg->kg_completed == NULL))) {
            kg->kg_upsleeps++;
            td->td_kflags |= TDK_KSEREL;
            error = msleep(&kg->kg_completed, &p->p_mtx,
                PPAUSE|PCATCH, "kserel",
                (uap->timeout ? tvtohz(&tv) : 0));
            td->td_kflags &= ~(TDK_KSEREL | TDK_WAKEUP);
            kg->kg_upsleeps--;
        }
    }
    if (ku->ku_flags & KUF_DOUPCALL) {
        mtx_lock_spin(&sched_lock);
        ku->ku_flags &= ~KUF_DOUPCALL;
        mtx_unlock_spin(&sched_lock);
    }
    return (0);
}
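/*
 * System call: wake the UTS sleeping in kse_release().  If a mailbox
 * is given, wake the upcall owning that mailbox; otherwise wake any
 * sleeper in the caller's ksegrp, or mark an upcall to be run.
 */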
/* struct kse_wakeup_args {
    struct kse_mailbox *mbx;
}; */
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
    struct proc *p = td->td_proc;
    struct ksegrp *kg;
    struct kse_upcall *ku = NULL;
    struct thread *td2;

    /* KSE-enabled processes only, please. */
    if (!(p->p_flag & P_SA))
        return (EINVAL);
    mtx_lock_spin(&sched_lock);
    if (uap->mbx) {
        FOREACH_KSEGRP_IN_PROC(p, kg) {
            FOREACH_UPCALL_IN_GROUP(kg, ku) {
                if (ku->ku_mailbox == uap->mbx)
                    break;
            }
            if (ku)
                break;
        }
    } else {
        kg = td->td_ksegrp;
        if (kg->kg_upsleeps) {
            mtx_unlock_spin(&sched_lock);
            wakeup(&kg->kg_completed);
            return (0);
        }
        ku = TAILQ_FIRST(&kg->kg_upcalls);
    }
    if (ku == NULL) {
        mtx_unlock_spin(&sched_lock);
        return (ESRCH);
    }
    if ((td2 = ku->ku_owner) == NULL) {
        mtx_unlock_spin(&sched_lock);
        panic("%s: no owner", __func__);
    } else if (td2->td_kflags & (TDK_KSEREL | TDK_KSERELSIG)) {
        mtx_unlock_spin(&sched_lock);
        if (!(td2->td_kflags & TDK_WAKEUP)) {
            td2->td_kflags |= TDK_WAKEUP;
            if (td2->td_kflags & TDK_KSEREL)
                sleepq_remove(td2, &kg->kg_completed);
            else
                sleepq_remove(td2, &p->p_siglist);
        }
    } else {
        ku->ku_flags |= KUF_DOUPCALL;
        mtx_unlock_spin(&sched_lock);
    }
    return (0);
}
/*
 * No new KSEG: first call: use current KSE, don't schedule an upcall
 * All other situations, do allocate max new KSEs and schedule an upcall.
 *
 * XXX should be changed so that 'first' behaviour lasts for as long
 * as you have not made a kse in this ksegrp. i.e. as long as we do not have
 * a mailbox..
 */
/* struct kse_create_args {
    struct kse_mailbox *mbx;
    int newgroup;
}; */
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
    struct ksegrp *newkg;
    struct ksegrp *kg;
    struct proc *p;
    struct kse_mailbox mbx;
    struct kse_upcall *newku;
    int err, ncpus, sa = 0, first = 0;
    struct thread *newtd;

    p = td->td_proc;
    kg = td->td_ksegrp;
    if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
        return (err);
    ncpus = mp_ncpus;
    if (virtual_cpu != 0)
        ncpus = virtual_cpu;
    /*
     * If the new UTS mailbox says that this
     * will be a BOUND lwp, then it had better
     * have its thread mailbox already there.
     * In addition, this ksegrp will be limited to
     * a concurrency of 1.  There is more on this later.
     */
    if (mbx.km_flags & KMF_BOUND) {
        if (mbx.km_curthread == NULL)
            return (EINVAL);
        ncpus = 1;
    } else {
        sa = TRUE;
    }

    /*
     * Processes using the other threading model can't
     * suddenly start calling this one.
     */
    if ((p->p_flag & (P_SA|P_HADTHREADS)) == P_HADTHREADS) {
        return (EINVAL);
    }
    /*
     * Limit it to NCPU upcall contexts per ksegrp in any case.
     * There is a small race here, as we don't hold the proc lock
     * until we increment the ksegrp count, but it's not really a big
     * problem if we get one too many, and we save a proc lock.
     */
    if ((!uap->newgroup) && (kg->kg_numupcalls >= ncpus)) {
        return (EPROCLIM);
    }
    if (!(p->p_flag & P_SA)) {
        first = 1;
        p->p_flag |= P_SA|P_HADTHREADS;
    }
    /*
     * If we are going to be bound, then we need to be either
     * a new group, or the first call ever.  In either
     * case we will be creating (or be) the only thread in a group,
     * and the concurrency will be set to 1.
     * This is not quite right, as we may still make ourselves
     * bound after making other ksegrps, but it will do for now.
     * The library will only try to do this much.
     */
    if (!sa && !(uap->newgroup || first))
        return (EINVAL);

    if (uap->newgroup) {
        newkg = ksegrp_alloc();
        bzero(&newkg->kg_startzero,
            __rangeof(struct ksegrp, kg_startzero, kg_endzero));
        bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
            __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
        sched_init_concurrency(newkg);
        if (p->p_numksegrps >= max_groups_per_proc) {
            ksegrp_free(newkg);
            return (EPROCLIM);
        }
        ksegrp_link(newkg, p);
        mtx_lock_spin(&sched_lock);
        sched_fork_ksegrp(td, newkg);
        mtx_unlock_spin(&sched_lock);
    } else {
        newkg = kg;
    }
    /*
     * We want to make a thread in our own ksegrp.
     * If we are just the first call, either kind
     * is ok, but if not then either we must be
     * already an upcallable thread to make another,
     * or a bound thread to make one of those.
     * Once again, not quite right but good enough for now.. XXXKSE
     */
    if (!first && ((td->td_pflags & TDP_SA) != sa))
        return (EINVAL);

    /*
     * This test is a bit "indirect".
     * It might simplify things if we made a direct way of testing
     * if a ksegrp has been worked on before.
     * In the case of a bound request and the concurrency being set to
     * one, the concurrency will already be 1 so it's just inefficient
     * but not dangerous to call this again. XXX
     */
    if (newkg->kg_numupcalls == 0) {
        /*
         * Initialize the KSE group with the appropriate
         * concurrency.
         *
         * For a multiplexed group, create as much concurrency
         * as the number of physical cpus.
         * This increases concurrency in the kernel even if the
         * userland is not MP safe and can only run on a single CPU.
         * In an ideal world, every physical cpu should execute a
         * thread.  If there is enough concurrency, threads in the
         * kernel can be executed in parallel on different cpus at
         * full speed without being restricted by the number of
         * upcalls the userland provides.
         * Adding more upcall structures only increases concurrency
         * in userland.
         *
         * For a bound thread group, because there is only one thread
         * in the group, we only set the concurrency for the group
         * to 1.  A thread in this kind of group will never schedule
         * an upcall when blocked.  This simulates pthread system
         * scope thread behaviour.
         */
        sched_set_concurrency(newkg, ncpus);
    }
    /*
     * Even bound LWPs get a mailbox and an upcall to hold it.
     */
    newku = upcall_alloc();
    newku->ku_mailbox = uap->mbx;
    newku->ku_func = mbx.km_func;
    bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));

    /*
     * For the first call this may not have been set.
     * Of course it may not actually be needed either.
     */
    if (td->td_standin == NULL)
        thread_alloc_spare(td);

    mtx_lock_spin(&sched_lock);
    if (newkg->kg_numupcalls >= ncpus) {
        mtx_unlock_spin(&sched_lock);
        upcall_free(newku);
        return (EPROCLIM);
    }
    /*
     * If this is the first call, and we are a normal thread,
     * then transfer all the signals back to the 'process'.
     * SA threading will make a special thread to handle them.
     */
    if (first && sa) {
        SIGSETOR(p->p_siglist, td->td_siglist);
        SIGEMPTYSET(td->td_siglist);
        SIGFILLSET(td->td_sigmask);
        SIG_CANTMASK(td->td_sigmask);
    }
    /*
     * Make the new upcall available to the ksegrp.
     * It may or may not use it, but it's available.
     */
    upcall_link(newku, newkg);
    if (mbx.km_quantum)
        newkg->kg_upquantum = max(1, mbx.km_quantum / tick);

    /*
     * Each upcall structure has an owner thread, find which
     * one owns it.
     */
    if (uap->newgroup) {
        /*
         * Because the new ksegrp has no thread,
         * create an initial upcall thread to own it.
         */
        newtd = thread_schedule_upcall(td, newku);
    } else {
        /*
         * If the current thread doesn't have an upcall structure,
         * just assign the upcall to it.
         */
        if (td->td_upcall == NULL) {
            newku->ku_owner = td;
            td->td_upcall = newku;
            newtd = td;
        } else {
            /*
             * Create a new upcall thread to own it.
             */
            newtd = thread_schedule_upcall(td, newku);
        }
    }
    mtx_unlock_spin(&sched_lock);
    /*
     * Let the UTS instance know its LWPID.
     * It doesn't really care.  But the debugger will.
     */
    suword32(&newku->ku_mailbox->km_lwp, newtd->td_tid);

    /*
     * In the same manner, if the UTS has a current user thread,
     * then it is also running on this LWP so set it as well.
     * The library could do that of course.. but why not..
     */
    if (mbx.km_curthread)
        suword32(&mbx.km_curthread->tm_lwp, newtd->td_tid);

    if (sa)
        newtd->td_pflags |= TDP_SA;
    else
        newtd->td_pflags &= ~TDP_SA;

    /*
     * Since a library will use the mailbox pointer to
     * identify even a bound thread, and the mailbox pointer
     * will never be allowed to change after this syscall
     * for a bound thread, set it here so the library can
     * find the thread after the syscall returns.
     */
    newtd->td_mailbox = mbx.km_curthread;

    if (newtd != td) {
        /*
         * If we did create a new thread then
         * make sure it goes to the right place
         * when it starts up, and make sure that it runs
         * at full speed when it gets there.
         * thread_schedule_upcall() copies all cpu state
         * to the new thread, so we should clear single step
         * flag here.
         */
        cpu_set_upcall_kse(newtd, newku->ku_func,
            newku->ku_mailbox, &newku->ku_stack);
        if (p->p_flag & P_TRACED) {
            ptrace_clear_single_step(newtd);
        }
    }

    /*
     * If we are starting a new thread, kick it off.
     */
    if (newtd != td) {
        mtx_lock_spin(&sched_lock);
        setrunqueue(newtd, SRQ_BORING);
        mtx_unlock_spin(&sched_lock);
    }
    return (0);
}
/*
 * Initialize global thread allocation resources.
 */
void
kseinit(void)
{

    upcall_zone = uma_zcreate("UPCALL", sizeof(struct kse_upcall),
        NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
}
/*
 * Stash an embarrassingly extra upcall into the zombie upcall queue.
 */
void
upcall_stash(struct kse_upcall *ku)
{

    mtx_lock_spin(&kse_zombie_lock);
    TAILQ_INSERT_HEAD(&zombie_upcalls, ku, ku_link);
    mtx_unlock_spin(&kse_zombie_lock);
}
/*
 * Reap zombie kse resources.
 */
void
kse_GC(void)
{
    struct kse_upcall *ku_first, *ku_next;

    /*
     * Don't even bother to lock if none at this instant,
     * we really don't care about the next instant..
     */
    if (!TAILQ_EMPTY(&zombie_upcalls)) {
        mtx_lock_spin(&kse_zombie_lock);
        ku_first = TAILQ_FIRST(&zombie_upcalls);
        if (ku_first)
            TAILQ_INIT(&zombie_upcalls);
        mtx_unlock_spin(&kse_zombie_lock);
        while (ku_first) {
            ku_next = TAILQ_NEXT(ku_first, ku_link);
            upcall_free(ku_first);
            ku_first = ku_next;
        }
    }
}
/*
 * Store the thread context in the UTS's mailbox.
 * Then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
static int
thread_export_context(struct thread *td, int willexit)
{
    struct proc *p;
    struct ksegrp *kg;
    uintptr_t mbx;
    void *addr;
    int error = 0, sig;
    mcontext_t mc;

    p = td->td_proc;
    kg = td->td_ksegrp;

    /*
     * Post sync signal, or process SIGKILL and SIGSTOP.
     * For a sync signal, it is only possible when the signal is not
     * caught by userland or the process is being debugged.
     */
    if (td->td_flags & TDF_NEEDSIGCHK) {
        mtx_lock_spin(&sched_lock);
        td->td_flags &= ~TDF_NEEDSIGCHK;
        mtx_unlock_spin(&sched_lock);
        mtx_lock(&p->p_sigacts->ps_mtx);
        while ((sig = cursig(td)) != 0)
            postsig(sig);
        mtx_unlock(&p->p_sigacts->ps_mtx);
    }
    if (willexit)
        SIGFILLSET(td->td_sigmask);
    /* Export the user/machine context. */
    get_mcontext(td, &mc, 0);
    addr = (void *)(&td->td_mailbox->tm_context.uc_mcontext);
    error = copyout(&mc, addr, sizeof(mcontext_t));

    addr = (caddr_t)(&td->td_mailbox->tm_lwp);
    if (suword32(addr, 0)) {
        error = EFAULT;
    }

    /* Get the address of the list pointer in the latest mailbox. */
    addr = (void *)(&td->td_mailbox->tm_next);
    /*
     * Put the saved address of the previous first
     * entry into this one.
     */
    mbx = (uintptr_t)kg->kg_completed;
    if (suword(addr, mbx)) {
        error = EFAULT;
    }
    if (mbx == (uintptr_t)kg->kg_completed) {
        kg->kg_completed = td->td_mailbox;
        /*
         * The thread context may be taken away by
         * other upcall threads when we unlock the
         * process lock.  It's no longer valid to
         * use it again in any other places.
         */
        td->td_mailbox = NULL;
    }
/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * upcall's mailbox as it's the next one going up.
 */
static int
thread_link_mboxes(struct ksegrp *kg, struct kse_upcall *ku)
{
    struct proc *p = kg->kg_proc;
    void *addr;
    uintptr_t mbx;

    addr = (void *)(&ku->ku_mailbox->km_completed);
    for (;;) {
        mbx = (uintptr_t)kg->kg_completed;
        if (suword(addr, mbx)) {
            psignal(p, SIGSEGV);
            return (EFAULT);
        }
        if (mbx == (uintptr_t)kg->kg_completed) {
            kg->kg_completed = NULL;
            break;
        }
    }
    return (0);
}
/*
 * This function should be called at statclock interrupt time.
 */
int
thread_statclock(int user)
{
    struct thread *td = curthread;

    if (!(td->td_pflags & TDP_SA))
        return (0);
    if (user) {
        /* Currently, this is always done via ast() */
        mtx_lock_spin(&sched_lock);
        td->td_flags |= TDF_ASTPENDING;
        mtx_unlock_spin(&sched_lock);
        td->td_uuticks++;
    } else if (td->td_mailbox != NULL)
        td->td_usticks++;
    return (0);
}
/*
 * Export stat clock ticks for userland.
 */
static int
thread_update_usr_ticks(struct thread *td)
{
    struct proc *p = td->td_proc;
    caddr_t addr;
    u_int uticks;

    if (td->td_mailbox == NULL)
        return (-1);

    if ((uticks = td->td_uuticks) != 0) {
        td->td_uuticks = 0;
        addr = (caddr_t)&td->td_mailbox->tm_uticks;
        if (suword32(addr, uticks+fuword32(addr)))
            goto error;
    }
    if ((uticks = td->td_usticks) != 0) {
        td->td_usticks = 0;
        addr = (caddr_t)&td->td_mailbox->tm_sticks;
        if (suword32(addr, uticks+fuword32(addr)))
            goto error;
    }
    return (0);

error:
    psignal(p, SIGSEGV);
    return (-1);
}
/*
 * This function is intended to be used to initialize a spare thread
 * for upcall.  Initialize the thread's large data area outside sched_lock
 * for thread_schedule_upcall().  The crhold is also here to get it out
 * from the schedlock, as it has a mutex op itself.
 * XXX BUG.. we need to get the cr ref after the thread has
 * checked and changed its own, not 6 months before...
 */
static void
thread_alloc_spare(struct thread *td)
{
    struct thread *spare;

    if (td->td_standin)
        return;
    spare = thread_alloc();
    td->td_standin = spare;
    bzero(&spare->td_startzero,
        __rangeof(struct thread, td_startzero, td_endzero));
    spare->td_proc = td->td_proc;
    spare->td_ucred = crhold(td->td_ucred);
}
/*
 * Create a thread and schedule it for upcall on the KSE given.
 * Use our thread's standin so that we don't have to allocate one.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
{
    struct thread *td2;

    mtx_assert(&sched_lock, MA_OWNED);

    /*
     * Schedule an upcall thread on the specified kse_upcall;
     * the kse_upcall must be free.
     * td must have a spare thread.
     */
    KASSERT(ku->ku_owner == NULL, ("%s: upcall has owner", __func__));
    if ((td2 = td->td_standin) != NULL) {
        td->td_standin = NULL;
    } else {
        panic("no reserve thread when scheduling an upcall");
    }
    CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
        td2, td->td_proc->p_pid, td->td_proc->p_comm);
    /*
     * Bzero already done in thread_alloc_spare() because we can't
     * do the crhold here, as we are in schedlock already.
     */
    bcopy(&td->td_startcopy, &td2->td_startcopy,
        __rangeof(struct thread, td_startcopy, td_endcopy));
    thread_link(td2, ku->ku_ksegrp);
    /* inherit parts of blocked thread's context as a good template */
    cpu_set_upcall(td2, td);
    /* Let the new thread become owner of the upcall */
    ku->ku_owner = td2;
    td2->td_upcall = ku;
    td2->td_pflags = TDP_SA|TDP_UPCALLING;
    td2->td_state = TDS_CAN_RUN;
    td2->td_inhibitors = 0;
    SIGFILLSET(td2->td_sigmask);
    SIG_CANTMASK(td2->td_sigmask);
    sched_fork_thread(td, td2);
    return (td2);    /* bogus.. should be a void function */
}
/*
 * It is only used when a thread has generated a trap and the process is
 * being debugged.
 */
void
thread_signal_add(struct thread *td, int sig)
{
    struct proc *p;
    struct sigacts *ps;
    siginfo_t siginfo;
    int error;

    p = td->td_proc;
    PROC_LOCK_ASSERT(p, MA_OWNED);
    ps = p->p_sigacts;
    mtx_assert(&ps->ps_mtx, MA_OWNED);

    cpu_thread_siginfo(sig, 0, &siginfo);
    mtx_unlock(&ps->ps_mtx);
    SIGADDSET(td->td_sigmask, sig);
    error = copyout(&siginfo, &td->td_mailbox->tm_syncsig, sizeof(siginfo));
    if (error) {
        sigexit(td, SIGSEGV);
    }
    mtx_lock(&ps->ps_mtx);
}
#include "opt_sched.h"
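/*
 * Called when the current thread is switching out: if the thread can
 * unbind, hand its upcall off to a newly scheduled upcall thread so
 * the UTS learns that this thread has blocked.
 */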
struct thread *
thread_switchout(struct thread *td, int flags, struct thread *nextthread)
{
    struct kse_upcall *ku;
    struct thread *td2;

    mtx_assert(&sched_lock, MA_OWNED);

    /*
     * If the outgoing thread is in a threaded group and has never
     * scheduled an upcall, decide whether this is a short
     * or long term event and thus whether or not to schedule
     * an upcall.
     * If it is a short term event, just suspend it in
     * a way that takes its KSE with it.
     * Select the events for which we want to schedule upcalls.
     * For now it's just sleep, or if the thread is suspended but
     * the process wide suspending flag is not set (debugger
     * suspends the thread).
     * XXXKSE eventually almost any inhibition could do.
     */
    if (TD_CAN_UNBIND(td) && (td->td_standin) &&
        (TD_ON_SLEEPQ(td) || (TD_IS_SUSPENDED(td) &&
        !P_SHOULDSTOP(td->td_proc)))) {
        /*
         * Release ownership of the upcall, and schedule an upcall
         * thread; this new upcall thread becomes the owner of
         * the upcall structure.  It will be ahead of us in the
         * run queue, so as we are stopping, it should either
         * start up immediately, or at least before us if
         * we release our slot.
         */
        ku = td->td_upcall;
        ku->ku_owner = NULL;
        td->td_upcall = NULL;
        td->td_pflags &= ~TDP_CAN_UNBIND;
        td2 = thread_schedule_upcall(td, ku);
        if (flags & SW_INVOL || nextthread) {
            setrunqueue(td2, SRQ_YIELDING);
        } else {
            /* Keep up with reality.. we have one extra thread
             * in the picture.. and it's 'running'.
             */
            nextthread = td2;
        }
    }
    return (nextthread);
}
/*
 * Setup done on the thread when it enters the kernel.
 */
void
thread_user_enter(struct thread *td)
{
    struct proc *p = td->td_proc;
    struct kse_upcall *ku;
    struct kse_thr_mailbox *tmbx;
    uint32_t flags;

    /*
     * First check that we shouldn't just abort.  We
     * can suspend it here or just exit.
     */
    if (__predict_false(P_SHOULDSTOP(p))) {
        PROC_LOCK(p);
        thread_suspend_check(0);
        PROC_UNLOCK(p);
    }

    if (!(td->td_pflags & TDP_SA))
        return;

    /*
     * If we are doing a syscall in a KSE environment,
     * note where our mailbox is.
     */
    ku = td->td_upcall;

    KASSERT(ku != NULL, ("no upcall owned"));
    KASSERT(ku->ku_owner == td, ("wrong owner"));
    KASSERT(!TD_CAN_UNBIND(td), ("can unbind"));

    if (td->td_standin == NULL)
        thread_alloc_spare(td);
    ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
    tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
    if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
        (ku->ku_mflags & KMF_NOUPCALL)) {
        td->td_mailbox = NULL;
    } else {
        flags = fuword32(&tmbx->tm_flags);
        /*
         * On some architectures, the TP register points to the thread
         * mailbox but not to the kse mailbox, and userland cannot
         * atomically clear km_curthread; it can, however, use the TP
         * register and set TMF_NOUPCALL in the thread flags to
         * indicate a critical region.
         */
        if (flags & TMF_NOUPCALL) {
            td->td_mailbox = NULL;
        } else {
            td->td_mailbox = tmbx;
            td->td_pflags |= TDP_CAN_UNBIND;
            if (__predict_false(p->p_flag & P_TRACED)) {
                flags = fuword32(&tmbx->tm_dflags);
                if (flags & TMDF_SUSPEND) {
                    mtx_lock_spin(&sched_lock);
                    /* fuword can block, check again */
                    ku->ku_flags |= KUF_DOUPCALL;
                    mtx_unlock_spin(&sched_lock);
                }
            }
        }
    }
}
/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first).  If we were in a critical section, we should
 * just return to let it finish.  Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
    struct kse_upcall *ku;
    struct ksegrp *kg, *kg2;
    struct proc *p;
    struct timespec ts;
    int error = 0, upcalls, uts_crit;

    /* Nothing to do with a bound thread */
    if (!(td->td_pflags & TDP_SA))
        return (0);

    p = td->td_proc;
    kg = td->td_ksegrp;
    ku = td->td_upcall;

    /*
     * Update the stat clock count for userland.
     */
    if (td->td_mailbox != NULL) {
        thread_update_usr_ticks(td);
    }

    /*
     * This thread has not started any upcall.
     * If there is no work to report other than ourselves,
     * then it can return directly to userland.
     */
    if (TD_CAN_UNBIND(td)) {
        td->td_pflags &= ~TDP_CAN_UNBIND;
        if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
            (kg->kg_completed == NULL) &&
            (ku->ku_flags & KUF_DOUPCALL) == 0 &&
            (kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
            nanotime(&ts);
            error = copyout(&ts,
                (caddr_t)&ku->ku_mailbox->km_timeofday,
                sizeof(ts));
            return (0);
        }
        thread_export_context(td, 0);
        /*
         * There is something to report, and we own an upcall
         * structure, so we can go to userland.
         * Turn ourselves into an upcall thread.
         */
        td->td_pflags |= TDP_UPCALLING;
    } else if (td->td_mailbox && (ku == NULL)) {
        thread_export_context(td, 1);
        if (kg->kg_upsleeps)
            wakeup(&kg->kg_completed);
        WITNESS_WARN(WARN_PANIC, &p->p_mtx.mtx_object,
            "thread exiting in userret");
        mtx_lock_spin(&sched_lock);
        thread_exit();
        /* NOTREACHED */
    }

    KASSERT(ku != NULL, ("upcall is NULL"));
    KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1334 KASSERT(TD_CAN_UNBIND(td) == 0, ("can unbind"));
1336 if (p->p_numthreads > max_threads_per_proc) {
1339 mtx_lock_spin(&sched_lock);
1341 while (p->p_numthreads > max_threads_per_proc) {
1343 FOREACH_KSEGRP_IN_PROC(p, kg2) {
1344 if (kg2->kg_numupcalls == 0)
1347 upcalls += kg2->kg_numupcalls;
1349 if (upcalls >= max_threads_per_proc)
1351 mtx_unlock_spin(&sched_lock);
1352 if (msleep(&p->p_numthreads, &p->p_mtx, PPAUSE|PCATCH,
1353 "maxthreads", hz/10) != EWOULDBLOCK) {
1354 mtx_lock_spin(&sched_lock);
1357 mtx_lock_spin(&sched_lock);
1361 mtx_unlock_spin(&sched_lock);
    if (td->td_pflags & TDP_UPCALLING) {
        uts_crit = 0;
        kg->kg_nextupcall = ticks + kg->kg_upquantum;
        /*
         * There is no more work to do and we are going to ride
         * this thread up to userland as an upcall.
         * Do the last parts of the setup needed for the upcall.
         */
        CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
            td, td->td_proc->p_pid, td->td_proc->p_comm);

        td->td_pflags &= ~TDP_UPCALLING;
        if (ku->ku_flags & KUF_DOUPCALL) {
            mtx_lock_spin(&sched_lock);
            ku->ku_flags &= ~KUF_DOUPCALL;
            mtx_unlock_spin(&sched_lock);
        }
        /*
         * Set user context to the UTS.
         */
        if (!(ku->ku_mflags & KMF_NOUPCALL)) {
            cpu_set_upcall_kse(td, ku->ku_func, ku->ku_mailbox,
                &ku->ku_stack);
            if (p->p_flag & P_TRACED) {
                ptrace_clear_single_step(td);
            }
            error = suword32(&ku->ku_mailbox->km_lwp,
                td->td_tid);
            if (error)
                goto out;
            error = suword(&ku->ku_mailbox->km_curthread, 0);
            if (error)
                goto out;
        }

        /*
         * Unhook the list of completed threads.
         * Anything that completes after this gets to
         * come in next time.
         * Put the list of completed thread mailboxes on
         * this KSE's mailbox.
         */
        if (!(ku->ku_mflags & KMF_NOCOMPLETED) &&
            (error = thread_link_mboxes(kg, ku)) != 0)
            goto out;
    }
    if (!uts_crit) {
        nanotime(&ts);
        error = copyout(&ts, &ku->ku_mailbox->km_timeofday, sizeof(ts));
    }

out:
    if (error) {
        /*
         * Things are going to be so screwed we should just kill
         * the process.
         * But how do we do that?
         */
        psignal(p, SIGSEGV);
    }

    /*
     * Ensure that we have a spare thread available,
     * for when we re-enter the kernel.
     */
    if (td->td_standin == NULL)
        thread_alloc_spare(td);

    ku->ku_mflags = 0;
    td->td_mailbox = NULL;
    return (error);    /* go sync */
}
/*
 * Called after ptrace has resumed a process: force all virtual CPUs
 * to schedule an upcall for the SA process.  The debugger may have
 * changed something in userland, so the UTS should be notified as
 * soon as possible.
 */
void
thread_continued(struct proc *p)
{
    struct ksegrp *kg;
    struct kse_upcall *ku;
    struct thread *td;

    PROC_LOCK_ASSERT(p, MA_OWNED);
    KASSERT(P_SHOULDSTOP(p), ("process not stopped"));

    if (!(p->p_flag & P_SA))
        return;

    if (p->p_flag & P_TRACED) {
        FOREACH_KSEGRP_IN_PROC(p, kg) {
            td = TAILQ_FIRST(&kg->kg_threads);
            if (td == NULL)
                continue;
            /* not an SA group, nothing to do */
            if (!(td->td_pflags & TDP_SA))
                continue;
            FOREACH_UPCALL_IN_GROUP(kg, ku) {
                mtx_lock_spin(&sched_lock);
                ku->ku_flags |= KUF_DOUPCALL;
                mtx_unlock_spin(&sched_lock);
                wakeup(&kg->kg_completed);
            }
        }
    }
}