2 * Copyright (C) 1994, David Greenman
3 * Copyright (c) 1990, 1993
4 * The Regents of the University of California. All rights reserved.
6 * This code is derived from software contributed to Berkeley by
7 * the University of Utah, and William Jolitz.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by the University of
20 * California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 * may be used to endorse or promote products derived from this software
23 * without specific prior written permission.
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
37 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91
45 #include <sys/param.h>
47 #include <sys/kernel.h>
49 #include <sys/mutex.h>
51 #include <sys/resourcevar.h>
52 #include <sys/signalvar.h>
53 #include <sys/systm.h>
54 #include <sys/vmmeter.h>
55 #include <machine/cpu.h>
56 #include <machine/pcb.h>
59 * Define the code needed before returning to user mode, for
/*
 * NOTE(review): this span is an elided source listing — the leading
 * numbers (65, 67, ...) are the original file's embedded line numbers,
 * and several lines between them (parameter declarations, braces,
 * loop/if bodies, the closing brace) are missing.  The comments below
 * describe only what the visible statements establish; confirm against
 * the complete file before relying on them.
 */
/*
 * userret: machine-independent work performed before returning from
 * the kernel to user mode (signal delivery, reschedule, profiling).
 */
65 userret(td, frame, oticks)
/* Old-style (K&R) parameter declaration; the declarations for `td`
 * and `oticks` are among the elided lines. */
67 struct trapframe *frame;
/* Locals derived from the thread: its process, KSE, and KSE group
 * (KSE-era FreeBSD threading structures). */
70 struct proc *p = td->td_proc;
71 struct kse *ke = td->td_kse;
72 struct ksegrp *kg = td->td_ksegrp;
/* Deliver every signal currently pending for the process; the loop
 * body (postsig, presumably) is elided. */
77 while ((sig = CURSIG(p)) != 0)
/* Under the sched_lock spin mutex: drop the thread's priority back to
 * its user-mode priority before returning to userland. */
82 mtx_lock_spin(&sched_lock);
83 td->td_priority = kg->kg_user_pri;
/* A reschedule was requested while in the kernel: account an
 * involuntary context switch; the switch call itself and the matching
 * close brace are elided. */
84 if (ke->ke_flags & KEF_NEEDRESCHED) {
87 p->p_stats->p_ru.ru_nivcsw++;
89 mtx_unlock_spin(&sched_lock);
/* Re-check for signals that may have arrived while the thread was
 * switched out; handler body elided as above. */
93 while ((sig = CURSIG(p)) != 0)
97 mtx_lock_spin(&sched_lock);
101 * Charge system time if profiling.
/* PS_PROFIL: the process is being profiled.  Charge the KSE's system
 * ticks accumulated since `oticks` against the PC at trap time.
 * sched_lock is dropped before addupc_task() since that call may
 * touch pageable user memory — TODO confirm against full source. */
103 if (p->p_sflag & PS_PROFIL) {
106 ticks = ke->ke_sticks - oticks;
107 mtx_unlock_spin(&sched_lock);
/* psratio presumably scales stat-clock ticks to profile-clock
 * resolution — verify against kern_clock.c. */
108 addupc_task(ke, TRAPF_PC(frame), (u_int)ticks * psratio);
/* Not profiling (the `} else` line is elided): just release the lock. */
110 mtx_unlock_spin(&sched_lock);
114 * Process an asynchronous software trap.
115 * This is relatively easy.
116 * This function will return with preemption disabled.
120 struct trapframe *framep;
122 struct thread *td = curthread;
123 struct proc *p = td->td_proc;
124 struct kse *ke = td->td_kse;
125 u_int prticks, sticks;
129 #if defined(DEV_NPX) && !defined(SMP)
133 KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
135 if (witness_list(td))
136 panic("Returning to user mode with mutex(s) held");
138 mtx_assert(&Giant, MA_NOTOWNED);
139 prticks = 0; /* XXX: Quiet warning. */
140 s = cpu_critical_enter();
141 while ((ke->ke_flags & (KEF_ASTPENDING | KEF_NEEDRESCHED)) != 0) {
142 cpu_critical_exit(s);
143 td->td_frame = framep;
145 * This updates the p_sflag's for the checks below in one
146 * "atomic" operation with turning off the astpending flag.
147 * If another AST is triggered while we are handling the
148 * AST's saved in sflag, the astpending flag will be set and
149 * we will loop again.
151 mtx_lock_spin(&sched_lock);
152 sticks = ke->ke_sticks;
154 flags = ke->ke_flags;
155 p->p_sflag &= ~(PS_PROFPEND | PS_ALRMPEND);
156 ke->ke_flags &= ~(KEF_OWEUPC | KEF_ASTPENDING);
158 if (flags & KEF_OWEUPC && sflag & PS_PROFIL) {
159 prticks = p->p_stats->p_prof.pr_ticks;
160 p->p_stats->p_prof.pr_ticks = 0;
162 mtx_unlock_spin(&sched_lock);
166 * As a diagnostic tool we make sure that td->td_ucred
167 * is NULL while we are in user space. This is
168 * because theoreticaly this field is only defined
169 * while the thread is in the kernel. Making it NULL
170 * will immediatly trap invalid usage of this field.
171 * In practice however we keep the reference to the ucred
172 * because it's almost always going to be the same cred we will
173 * need at the next syscall, and it can be expensive
174 * to keep dropping and reacquiring the reference.
175 * We thus stash it away elsewhere until we return
176 * to the kernel, where we bring it back. If
177 * DIAGNOSTIC is not defined we don't bother with
178 * making it NULL, and just leave it in place.
179 * (don't remove this comment without removing the pointers
180 * to it in sys/proc.h, trap.c, kern/kern_fork.c and here.)
183 panic("ast:thread got a cred before reaching AST");
184 td->td_ucred = td->td_ucred_cache;
185 td->td_ucred_cache = NULL;
186 #endif /* DIAGNOSTIC */
187 if (td->td_ucred != p->p_ucred)
188 cred_update_thread(td);
189 if (flags & KEF_OWEUPC && sflag & PS_PROFIL)
190 addupc_task(ke, p->p_stats->p_prof.pr_addr, prticks);
191 if (sflag & PS_ALRMPEND) {
193 psignal(p, SIGVTALRM);
196 #if defined(DEV_NPX) && !defined(SMP)
197 if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
198 atomic_clear_int(&PCPU_GET(curpcb)->pcb_flags,
202 trapsignal(p, SIGFPE, ucode);
206 if (sflag & PS_PROFPEND) {
212 userret(td, framep, sticks);
213 #ifdef DIAGNOSTIC /* see comment above */
214 if (td->td_ucred_cache)
215 panic("ast:thread already has cached ucred");
216 td->td_ucred_cache = td->td_ucred;
218 #endif /* DIAGNOSTIC */
220 s = cpu_critical_enter();
222 mtx_assert(&Giant, MA_NOTOWNED);
224 * We need to keep interrupts disabled so that if any further AST's
225 * come in, the interrupt they come in on will be delayed until we
226 * finish returning to userland. We assume that the return to userland
227 * will perform the equivalent of cpu_critical_exit().