/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Portions Copyright 2006-2008 John Birrell jb@freebsd.org
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2011, Joyent, Inc. All rights reserved.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/smp.h>
#include <sys/dtrace_impl.h>
#include <sys/dtrace_bsd.h>
#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <vm/pmap.h>

extern void dtrace_getnanotime(struct timespec *tsp);
extern int (*dtrace_invop_jump_addr)(struct trapframe *);

int	dtrace_invop(uintptr_t, struct trapframe *, uintptr_t);
int	dtrace_invop_start(struct trapframe *frame);
void	dtrace_invop_init(void);
void	dtrace_invop_uninit(void);

typedef struct dtrace_invop_hdlr {
	int (*dtih_func)(uintptr_t, struct trapframe *, uintptr_t);
	struct dtrace_invop_hdlr *dtih_next;
} dtrace_invop_hdlr_t;

dtrace_invop_hdlr_t *dtrace_invop_hdlr;
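
/*
 * Chain of invalid-opcode handlers.  Providers that patch kernel text (fbt,
 * for instance) register a handler via dtrace_invop_add(); when a patched
 * instruction traps, dtrace_invop() offers the faulting address to each
 * handler in turn until one of them claims and emulates it.
 */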
int
dtrace_invop(uintptr_t addr, struct trapframe *frame, uintptr_t eax)
{
	dtrace_invop_hdlr_t *hdlr;
	int rval;

	for (hdlr = dtrace_invop_hdlr; hdlr != NULL; hdlr = hdlr->dtih_next)
		if ((rval = hdlr->dtih_func(addr, frame, eax)) != 0)
			return (rval);

	return (0);
}

void
dtrace_invop_add(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr;

	hdlr = kmem_alloc(sizeof (dtrace_invop_hdlr_t), KM_SLEEP);
	hdlr->dtih_func = func;
	hdlr->dtih_next = dtrace_invop_hdlr;
	dtrace_invop_hdlr = hdlr;
}

void
dtrace_invop_remove(int (*func)(uintptr_t, struct trapframe *, uintptr_t))
{
	dtrace_invop_hdlr_t *hdlr = dtrace_invop_hdlr, *prev = NULL;

	for (;;) {
		if (hdlr == NULL)
			panic("attempt to remove non-existent invop handler");

		if (hdlr->dtih_func == func)
			break;

		prev = hdlr;
		hdlr = hdlr->dtih_next;
	}

	if (prev == NULL) {
		ASSERT(dtrace_invop_hdlr == hdlr);
		dtrace_invop_hdlr = hdlr->dtih_next;
	} else {
		ASSERT(dtrace_invop_hdlr != hdlr);
		prev->dtih_next = hdlr->dtih_next;
	}

	kmem_free(hdlr, sizeof (dtrace_invop_hdlr_t));
}

void
dtrace_invop_init(void)
{

	dtrace_invop_jump_addr = dtrace_invop_start;
}

void
dtrace_invop_uninit(void)
{

	dtrace_invop_jump_addr = NULL;
}
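
/*
 * Report the address ranges that DTrace must never dereference: everything
 * from address 0 up to the base of the recursive page-table map, whose
 * location depends on whether 5-level paging (la57) is in use.
 */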
void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	(*func)(0, la57 ? (uintptr_t)addr_P5Tmap : (uintptr_t)addr_P4Tmap);
}

void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t func, void *arg)
{
	cpuset_t cpus;

	if (cpu == DTRACE_CPUALL)
		cpus = all_cpus;
	else
		CPU_SETOF(cpu, &cpus);

	smp_rendezvous_cpus(cpus, smp_no_rendezvous_barrier, func,
	    smp_no_rendezvous_barrier, arg);
}
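
/*
 * dtrace_sync_func() is intentionally empty: the cross-call itself is the
 * synchronization.  Once every CPU has executed it, no CPU can still be
 * running in probe context that was entered before dtrace_sync() was called.
 */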
static void
dtrace_sync_func(void)
{
}

void
dtrace_sync(void)
{
	dtrace_xcall(DTRACE_CPUALL, (dtrace_xcall_t)dtrace_sync_func, NULL);
}

#ifdef notyet
void
dtrace_safe_synchronous_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags. If the instruction
	 * we copied out caused a synchronous trap, reset the pc back to its
	 * original value and turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
	} else if (rp->r_pc == t->t_dtrace_scrpc ||
	    rp->r_pc == t->t_dtrace_astpc) {
		rp->r_pc = t->t_dtrace_pc;
		t->t_dtrace_ft = 0;
	}
}
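
/*
 * Decide whether delivery of a pending signal must be deferred.  If the
 * thread is currently executing an instruction that the pid provider copied
 * out to its per-thread scratch space, arrange for control to return to the
 * kernel once the copied instruction has completed and report, by returning
 * nonzero, that the signal should be delivered later.
 */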
int
dtrace_safe_defer_signal(void)
{
	kthread_t *t = curthread;
	struct regs *rp = lwptoregs(ttolwp(t));
	size_t isz = t->t_dtrace_npc - t->t_dtrace_pc;

	ASSERT(t->t_dtrace_on);

	/*
	 * If we're not in the range of scratch addresses, we're not actually
	 * tracing user instructions so turn off the flags.
	 */
	if (rp->r_pc < t->t_dtrace_scrpc ||
	    rp->r_pc > t->t_dtrace_astpc + isz) {
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * If we have executed the original instruction, but we have performed
	 * neither the jmp back to t->t_dtrace_npc nor the clean up of any
	 * registers used to emulate %rip-relative instructions in 64-bit mode,
	 * we'll save ourselves some effort by doing that here and taking the
	 * signal right away.  We detect this condition by seeing if the
	 * program counter is in the range [scrpc + isz, astpc).
	 */
	if (rp->r_pc >= t->t_dtrace_scrpc + isz &&
	    rp->r_pc < t->t_dtrace_astpc) {
		/*
		 * If there is a scratch register and we're on the
		 * instruction immediately after the modified instruction,
		 * restore the value of that scratch register.
		 */
		if (t->t_dtrace_reg != 0 &&
		    rp->r_pc == t->t_dtrace_scrpc + isz) {
			switch (t->t_dtrace_reg) {
			case REG_RAX:
				rp->r_rax = t->t_dtrace_regv;
				break;
			case REG_RCX:
				rp->r_rcx = t->t_dtrace_regv;
				break;
			case REG_R8:
				rp->r_r8 = t->t_dtrace_regv;
				break;
			case REG_R9:
				rp->r_r9 = t->t_dtrace_regv;
				break;
			}
		}

		rp->r_pc = t->t_dtrace_npc;
		t->t_dtrace_ft = 0;
		return (0);
	}

	/*
	 * Otherwise, make sure we'll return to the kernel after executing
	 * the copied out instruction and defer the signal.
	 */
	if (!t->t_dtrace_step) {
		ASSERT(rp->r_pc < t->t_dtrace_astpc);
		rp->r_pc += t->t_dtrace_astpc - t->t_dtrace_scrpc;
		t->t_dtrace_step = 1;
	}

	t->t_dtrace_ast = 1;

	return (1);
}
#endif

static int64_t	tgt_cpu_tsc;
static int64_t	hst_cpu_tsc;
static int64_t	tsc_skew[MAXCPU];
static uint64_t	nsec_scale;

/* See below for the explanation of this macro. */
#define SCALE_SHIFT	28

static void
dtrace_gethrtime_init_cpu(void *arg)
{
	uintptr_t cpu = (uintptr_t) arg;

	if (cpu == curcpu)
		tgt_cpu_tsc = rdtsc();
	else
		hst_cpu_tsc = rdtsc();
}
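
/*
 * dtrace_gethrtime_init_cpu() is run on a pair of CPUs at a time via
 * smp_rendezvous_cpus() below: the target CPU records its TSC in
 * tgt_cpu_tsc while the reference CPU records hst_cpu_tsc, and the
 * difference is taken as the target CPU's TSC skew.
 */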

#ifdef EARLY_AP_STARTUP
static void
dtrace_gethrtime_init(void *arg)
{
	struct pcpu *pc;
	uint64_t tsc_f;
	cpuset_t map;
	int i;
#else
/*
 * Get the frequency and scale factor as early as possible so that they can be
 * used for boot-time tracing.
 */
static void
dtrace_gethrtime_init_early(void *arg)
{
	uint64_t tsc_f;
#endif

	/*
	 * Get the TSC frequency as known at this moment.
	 * It should be constant if the TSC is invariant.
	 * Otherwise the tick->time conversion will be inaccurate, but
	 * it will still preserve the monotonic property of the TSC.
	 */
	tsc_f = atomic_load_acq_64(&tsc_freq);

	/*
	 * The following check ensures that the nsec_scale computed below
	 * fits in a 32-bit unsigned integer, so that it can be multiplied
	 * by another 32-bit integer without overflowing 64 bits.
	 * Thus the minimum supported TSC frequency is 62.5MHz.
	 */
	KASSERT(tsc_f > (NANOSEC >> (32 - SCALE_SHIFT)),
	    ("TSC frequency is too low"));

	/*
	 * We scale up the NANOSEC/tsc_f ratio to preserve as much precision
	 * as possible.
	 * The 2^28 factor was chosen quite arbitrarily from practical
	 * considerations:
	 * - it supports TSC frequencies as low as 62.5MHz (see above);
	 * - it provides quite good precision (e < 0.01%) up to THz
	 *   (terahertz) values;
	 */
	nsec_scale = ((uint64_t)NANOSEC << SCALE_SHIFT) / tsc_f;
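
	/*
	 * For example, with an invariant TSC running at 2.0GHz:
	 * nsec_scale = (10^9 << 28) / (2 * 10^9) = 2^27, so the conversion
	 * in dtrace_gethrtime() becomes (tsc * 2^27) >> 28 = tsc / 2,
	 * i.e. 0.5ns per tick.
	 */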
#ifndef EARLY_AP_STARTUP
}
SYSINIT(dtrace_gethrtime_init_early, SI_SUB_CPU, SI_ORDER_ANY,
    dtrace_gethrtime_init_early, NULL);

static void
dtrace_gethrtime_init(void *arg)
{
	struct pcpu *pc;
	cpuset_t map;
	int i;
#endif

	if (vm_guest != VM_GUEST_NO)
		return;

	/* The current CPU is the reference one. */
	sched_pin();
	tsc_skew[curcpu] = 0;
	CPU_FOREACH(i) {
		if (i == curcpu)
			continue;

		pc = pcpu_find(i);
		CPU_SETOF(PCPU_GET(cpuid), &map);
		CPU_SET(pc->pc_cpuid, &map);

		smp_rendezvous_cpus(map, NULL,
		    dtrace_gethrtime_init_cpu,
		    smp_no_rendezvous_barrier, (void *)(uintptr_t) i);

		tsc_skew[i] = tgt_cpu_tsc - hst_cpu_tsc;
	}
	sched_unpin();
}
#ifdef EARLY_AP_STARTUP
SYSINIT(dtrace_gethrtime_init, SI_SUB_DTRACE, SI_ORDER_ANY,
    dtrace_gethrtime_init, NULL);
#else
SYSINIT(dtrace_gethrtime_init, SI_SUB_SMP, SI_ORDER_ANY, dtrace_gethrtime_init,
    NULL);
#endif
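
/*
 * Note the split registration above: the scale factor depends only on
 * tsc_freq and is computed as early as possible so that boot-time tracing
 * can convert timestamps, while the per-CPU skew measurement must wait
 * until all CPUs have been started.
 */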

/*
 * DTrace needs a high-resolution time function that can be called from
 * probe context and that is guaranteed not to be instrumented with probes
 * itself.
 *
 * Returns nanoseconds since boot.
 */
uint64_t
dtrace_gethrtime(void)
{
	uint64_t tsc;
	uint32_t lo, hi;
	register_t rflags;

	/*
	 * We split the TSC value into its lower and higher 32-bit halves and
	 * scale each half by nsec_scale separately, then scale the results
	 * down by 2^28 (see the nsec_scale calculation), taking into account
	 * the 32-bit shift of the higher half, and finally add the two parts.
	 */
	rflags = intr_disable();
	tsc = rdtsc() - tsc_skew[curcpu];
	intr_restore(rflags);

	lo = tsc;
	hi = tsc >> 32;
	return (((lo * nsec_scale) >> SCALE_SHIFT) +
	    ((hi * nsec_scale) << (32 - SCALE_SHIFT)));
}
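
/*
 * Why the split multiplication above is safe: writing tsc = hi * 2^32 + lo,
 *
 *	(tsc * nsec_scale) >> SCALE_SHIFT
 *	    = ((lo * nsec_scale) >> SCALE_SHIFT) +
 *	      ((hi * nsec_scale) << (32 - SCALE_SHIFT)),
 *
 * which is exactly what the return statement computes.  Multiplying the
 * full 64-bit TSC value by nsec_scale directly could overflow 64 bits,
 * since nsec_scale may be close to 2^32.
 */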

uint64_t
dtrace_gethrestime(void)
{
	struct timespec current_time;

	dtrace_getnanotime(&current_time);

	return (current_time.tv_sec * 1000000000ULL + current_time.tv_nsec);
}
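
/*
 * A nonzero return value from dtrace_trap() tells the trap handler that the
 * fault was taken in probe context with fault protection enabled and has
 * been consumed here (an error flag is recorded and the faulting instruction
 * is skipped); a return value of zero means the trap should be handled
 * normally.
 */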
/* Function to handle DTrace traps during probes. See amd64/amd64/trap.c. */
int
dtrace_trap(struct trapframe *frame, u_int type)
{
	uint16_t nofault;

	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * Check whether DTrace has enabled 'no-fault' mode:
	 */
	sched_pin();
	nofault = cpu_core[curcpu].cpuc_dtrace_flags & CPU_DTRACE_NOFAULT;
	sched_unpin();
	if (nofault) {
		KASSERT((read_rflags() & PSL_I) == 0, ("interrupts enabled"));

		/*
		 * Only a couple of trap types are expected here.
		 * All the rest will be handled in the usual way.
		 */
		switch (type) {
		/* General protection fault. */
		case T_PROTFLT:
			/* Flag an illegal operation. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_rip += dtrace_instr_size((u_char *) frame->tf_rip);
			return (1);
		/* Page fault. */
		case T_PAGEFLT:
			/* Flag a bad address. */
			cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
			cpu_core[curcpu].cpuc_dtrace_illval = frame->tf_addr;

			/*
			 * Offset the instruction pointer to the instruction
			 * following the one causing the fault.
			 */
			frame->tf_rip += dtrace_instr_size((u_char *) frame->tf_rip);
			return (1);
		default:
			/* Handle all other traps in the usual way. */
			break;
		}
	}

	/* Handle the trap in the usual way. */
	return (0);
}