 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/cpuset.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_bsd.h>
#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/frame.h>
#include <machine/psl.h>
extern uintptr_t kernelbase;
extern uintptr_t dtrace_in_probe_addr;
extern int dtrace_in_probe;

extern void dtrace_getnanotime(struct timespec *tsp);

int dtrace_invop(uintptr_t, uintptr_t *, uintptr_t);
typedef struct dtrace_invop_hdlr {
    int (*dtih_func)(uintptr_t, uintptr_t *, uintptr_t);
    struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;
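/*
 * Handlers for the invalid-opcode trap are kept on a simple singly linked
 * list; dtrace_invop() below walks the list and lets the first handler that
 * returns a non-zero value claim the trap.
 */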
dtrace_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
    dtrace_invop_hdlr_t *hdlr;

    for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
        if ((rval = hdlr->dtih_func(addr, stack, eax)) != 0)

dtrace_invop_add(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
    dtrace_invop_hdlr_t *hdlr;

    hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
    hdlr->dtih_func = func;
    hdlr->dtih_next = dtrace_invop_hdlr;
    dtrace_invop_hdlr = hdlr;
dtrace_invop_remove(int (*func)(uintptr_t, uintptr_t *, uintptr_t))
    dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;

            panic("attempt to remove non-existent invop handler");
        if (hdlr->dtih_func == func)
        hdlr = hdlr->dtih_next;

    ASSERT(dtrace_invop_hdlr == hdlr);
    dtrace_invop_hdlr = hdlr->dtih_next;
    ASSERT(dtrace_invop_hdlr != hdlr);
    prev->dtih_next = hdlr->dtih_next;

dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
    (*func)(0, kernelbase);
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
    if (cpu == DTRACE_CPUALL)

        CPU_SETOF(cpu, &cpus);

    smp_rendezvous_cpus(cpus, smp_no_rendevous_barrier, func,
        smp_no_rendevous_barrier, arg);
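/*
 * dtrace_sync() cross-calls dtrace_sync_func() on every CPU; by the time the
 * rendezvous completes, each CPU has passed through the call, which serves as
 * a barrier for any in-flight probe activity on those CPUs.
 */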
dtrace_sync_func(void)

    dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);

int (*dtrace_fasttrap_probe_ptr)(struct regs *);
int (*dtrace_pid_probe_ptr)(struct regs *);
int (*dtrace_return_probe_ptr)(struct regs *);
dtrace_user_probe(struct regs *rp, caddr_t addr, processorid_t cpuid)
    extern void trap(struct regs *, caddr_t, processorid_t);

    if (USERMODE(rp->r_cs) || (rp->r_ps & PS_VM)) {
        if (curthread->t_cred != p->p_cred) {
            cred_t *oldcred = curthread->t_cred;
             * DTrace accesses t_cred in probe context. t_cred
             * must always be either NULL, or point to a valid,
             * allocated cred structure.
            curthread->t_cred = crgetcred();

    if (rp->r_trapno == T_DTRACE_RET) {
        uint8_t step = curthread->t_dtrace_step;
        uint8_t ret = curthread->t_dtrace_ret;
        uintptr_t npc = curthread->t_dtrace_npc;

        if (curthread->t_dtrace_ast) {
            curthread->t_sig_check = 1;

         * Clear all user tracing flags.
        curthread->t_dtrace_ft = 0;

         * If we weren't expecting to take a return probe trap, kill
         * the process as though it had just executed an unassigned
            tsignal(curthread, SIGILL);

         * If we hit this trap unrelated to a return probe, we're
         * just here to reset the AST flag since we deferred a signal
         * until after we logically single-stepped the instruction we

         * We need to wait until after we've called the
         * dtrace_return_probe_ptr function pointer to set %pc.
        rwp = &CPU->cpu_ft_lock;
        rw_enter(rwp, RW_READER);
        if (dtrace_return_probe_ptr != NULL)
            (void) (*dtrace_return_probe_ptr)(rp);
    } else if (rp->r_trapno == T_DTRACE_PROBE) {
        rwp = &CPU->cpu_ft_lock;
        rw_enter(rwp, RW_READER);
        if (dtrace_fasttrap_probe_ptr != NULL)
            (void) (*dtrace_fasttrap_probe_ptr)(rp);
    } else if (rp->r_trapno == T_BPTFLT) {
        rwp = &CPU->cpu_ft_lock;

         * The DTrace fasttrap provider uses the breakpoint trap
         * (int 3). We let DTrace take the first crack at handling
         * this trap; if it's not a probe that DTrace knows about,
         * we call into the trap() routine to handle it like a
         * breakpoint placed by a conventional debugger.
        rw_enter(rwp, RW_READER);
        if (dtrace_pid_probe_ptr != NULL &&
            (*dtrace_pid_probe_ptr)(rp) == 0) {

         * If the instruction that caused the breakpoint trap doesn't
         * look like an int 3 anymore, it may be that this tracepoint
         * was removed just after the user thread executed it. In
         * that case, return to user land to retry the instruction.
        if (fuword8((void *)(rp->r_pc - 1), &instr) == 0 &&
            instr != FASTTRAP_INSTR) {

        trap(rp, addr, cpuid);

        trap(rp, addr, cpuid);
dtrace_safe_synchronous_signal(void)
    kthread_t *t = curthread;
    struct regs *rp = lwptoregs(ttolwp(t));
    size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

    ASSERT(t->t_dtrace_on);

     * If we're not in the range of scratch addresses, we're not actually
     * tracing user instructions so turn off the flags. If the instruction
     * we copied out caused a synchronous trap, reset the pc back to its
     * original value and turn off the flags.
    if (rp->r_pc < t->t_dtrace_scrpc ||
        rp->r_pc > t->t_dtrace_astpc + isz) {
    } else if (rp->r_pc == t->t_dtrace_scrpc ||
        rp->r_pc == t->t_dtrace_astpc) {
        rp->r_pc = t->t_dtrace_pc;
dtrace_safe_defer_signal(void)
    kthread_t *t = curthread;
    struct regs *rp = lwptoregs(ttolwp(t));
    size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

    ASSERT(t->t_dtrace_on);

     * If we're not in the range of scratch addresses, we're not actually
     * tracing user instructions so turn off the flags.
    if (rp->r_pc < t->t_dtrace_scrpc ||
        rp->r_pc > t->t_dtrace_astpc + isz) {

     * If we have executed the original instruction, but we have performed
     * neither the jmp back to t->t_dtrace_npc nor the cleanup of any
     * registers used to emulate %rip-relative instructions in 64-bit mode,
     * we'll save ourselves some effort by doing that here and taking the
     * signal right away. We detect this condition by seeing if the program
     * counter is in the range [scrpc + isz, astpc).
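    /*
     * Illustrative sketch of the per-thread scratch-space layout these range
     * checks assume (see the fasttrap provider for the authoritative layout):
     *   [scrpc, scrpc + isz)    copied-out original instruction
     *   [scrpc + isz, astpc)    jmp back to npc / scratch-register cleanup
     *   [astpc, astpc + isz)    second copy used when an AST is pending
     */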
    if (rp->r_pc >= t->t_dtrace_scrpc + isz &&
        rp->r_pc < t->t_dtrace_astpc) {

         * If there is a scratch register and we're on the
         * instruction immediately after the modified instruction,
         * restore the value of that scratch register.
        if (t->t_dtrace_reg != 0 &&
            rp->r_pc == t->t_dtrace_scrpc + isz) {
            switch (t->t_dtrace_reg) {
                rp->r_rax = t->t_dtrace_regv;
                rp->r_rcx = t->t_dtrace_regv;
                rp->r_r8 = t->t_dtrace_regv;
                rp->r_r9 = t->t_dtrace_regv;

        rp->r_pc = t->t_dtrace_npc;

     * Otherwise, make sure we'll return to the kernel after executing
     * the copied out instruction and defer the signal.
    if (!t->t_dtrace_step) {
        ASSERT(rp->r_pc < t->t_dtrace_astpc);
        rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
        t->t_dtrace_step = 1;
static int64_t tgt_cpu_tsc;
static int64_t hst_cpu_tsc;
static int64_t tsc_skew[MAXCPU];
static uint64_t nsec_scale;

/* See below for the explanation of this macro. */
#define SCALE_SHIFT 28

dtrace_gethrtime_init_cpu(void *arg)
    uintptr_t cpu = (uintptr_t) arg;

        tgt_cpu_tsc = rdtsc();
        hst_cpu_tsc = rdtsc();
dtrace_gethrtime_init(void *arg)
     * Get TSC frequency known at this moment.
     * This should be constant if TSC is invariant.
     * Otherwise tick->time conversion will be inaccurate, but
     * will preserve monotonic property of TSC.
    tsc_f = atomic_load_acq_64(&tsc_freq);

     * The following line checks that nsec_scale calculated below
     * doesn't overflow 32-bit unsigned integer, so that it can multiply
     * another 32-bit integer without overflowing 64-bit.
     * Thus minimum supported TSC frequency is 62.5MHz.
    KASSERT(tsc_f > (NANOSEC >> (32 - SCALE_SHIFT)), ("TSC frequency is too low"));

     * We scale up NANOSEC/tsc_f ratio to preserve as much precision
     * 2^28 factor was chosen quite arbitrarily from practical
     * - it supports TSC frequencies as low as 62.5MHz (see above);
     * - it provides quite good precision (e < 0.01%) up to THz
     *   (terahertz) values;
    nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;
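    /*
     * Worked example (illustrative numbers, not taken from a real system):
     * with tsc_f = 2 GHz, nsec_scale = (10^9 << 28) / (2 * 10^9) = 2^27, so
     * a delta of 2 * 10^9 ticks (one second's worth) converts to
     * (2 * 10^9 * 2^27) >> 28 = 10^9 ns, as expected.
     */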
    /* The current CPU is the reference one. */
    tsc_skew[curcpu] = 0;

        CPU_SETOF(PCPU_GET(cpuid), &map);
        CPU_SET(pc->pc_cpuid, &map);

        smp_rendezvous_cpus(map, NULL,
            dtrace_gethrtime_init_cpu,
            smp_no_rendevous_barrier, (void *)(uintptr_t) i);

        tsc_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;

SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init, NULL);
 * DTrace needs a high resolution time function which can
 * be called from a probe context and is guaranteed not to be
 * instrumented with probes itself.
 * Returns nanoseconds since boot.
     * We split TSC value into lower and higher 32-bit halves and separately
     * scale them with nsec_scale, then we scale them down by 2^28
     * (see nsec_scale calculations) taking into account 32-bit shift of
     * the higher half and finally add.
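    /*
     * Explanatory note: writing tsc = hi * 2^32 + lo, the desired value
     * (tsc * nsec_scale) >> SCALE_SHIFT expands to
     * ((lo * nsec_scale) >> SCALE_SHIFT) +
     * ((hi * nsec_scale) << (32 - SCALE_SHIFT)),
     * which is the expression returned below; the split avoids forming the
     * full (up to 96-bit) product tsc * nsec_scale in 64-bit arithmetic.
     */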
    tsc = rdtsc() - tsc_skew[curcpu];

    return (((lo * nsec_scale) >> SCALE_SHIFT) +
        ((hi * nsec_scale) << (32 - SCALE_SHIFT)));
dtrace_gethrestime(void)
    struct timespec current_time;

    dtrace_getnanotime(&current_time);

    return (current_time.tv_sec * 1000000000ULL + current_time.tv_nsec);
/* Function to handle DTrace traps during probes. See i386/i386/trap.c */
dtrace_trap(struct trapframe *frame, u_int type)
     * A trap can occur while DTrace executes a probe. Before
     * executing the probe, DTrace blocks re-scheduling and sets
     * a flag in its per-cpu flags to indicate that it doesn't
     * want to fault. On returning from the probe, the no-fault
     * flag is cleared and finally re-scheduling is enabled.
     * Check if DTrace has enabled 'no-fault' mode:
    nofault = cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT;
        KASSERT((read_eflags() & PSL_I) == 0, ("interrupts enabled"));

         * There are only a couple of trap types that are expected.
         * All the rest will be handled in the usual way.

        /* General protection fault. */
            /* Flag an illegal operation. */
            cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;

             * Offset the instruction pointer to the instruction
             * following the one causing the fault.
            frame->tf_eip += dtrace_instr_size((u_char *) frame->tf_eip);

            /* Flag a bad address. */
            cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
            cpu_core[curcpu].cpuc_dtrace_illval = rcr2();

             * Offset the instruction pointer to the instruction
             * following the one causing the fault.
            frame->tf_eip += dtrace_instr_size((u_char *) frame->tf_eip);

            /* Handle all other traps in the usual way. */

    /* Handle the trap in the usual way. */