4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
21 * Portions Copyright 2008 John Birrell <jb@freebsd.org>
27 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
28 * Use is subject to license terms.
33 #include <machine/asmacros.h>
34 #include <sys/cpuvar_defs.h>
35 #include <sys/dtrace.h>
/*
 * Continuation of a trapframe-restore macro: reload the interrupted
 * context's general-purpose registers from the trapframe at %rsp.
 * (The macro's #define line sits above this fragment.)
 */
41 movq TF_RDI(%rsp),%rdi; /* restore caller-saved and */ \
42 movq TF_RSI(%rsp),%rsi; /* callee-saved GPRs from the */ \
43 movq TF_RDX(%rsp),%rdx; /* trapframe, one slot each */ \
44 movq TF_RCX(%rsp),%rcx; \
45 movq TF_R8(%rsp),%r8; \
46 movq TF_R9(%rsp),%r9; \
47 movq TF_RAX(%rsp),%rax; \
48 movq TF_RBX(%rsp),%rbx; \
49 movq TF_RBP(%rsp),%rbp; \
50 movq TF_R10(%rsp),%r10; \
51 movq TF_R11(%rsp),%r11; \
52 movq TF_R12(%rsp),%r12; \
53 movq TF_R13(%rsp),%r13; \
54 movq TF_R14(%rsp),%r14; \
55 movq TF_R15(%rsp),%r15; \
56 testb $SEL_RPL_MASK,TF_CS(%rsp); /* trapped while in user mode? (RPL bits of saved %cs) */ \
/*
 * dtrace_invop_start: #BP (int3) trap entry for DTrace
 * instruction-overwrite probes.  Determines which original instruction
 * the breakpoint byte replaced and emulates it in the trapframe, then
 * returns to the interrupted context with iretq.  Traps that are not
 * ours are forwarded through dtrace_invop_calltrap_addr.
 */
64 .type calltrap,@function
65 ENTRY(dtrace_invop_start)
68 * #BP traps with %rip set to the next address. We need to decrement
69 * the value to indicate the address of the int3 (0xcc) instruction
70 * that we substituted.
72 movq TF_RIP(%rsp), %rdi /* arg0 <- trapframe %rip (SysV: %rdi) */
74 movq TF_RSP(%rsp), %rsi /* arg1 <- trapframe %rsp */
75 movq TF_RAX(%rsp), %rdx /* arg2 <- trapframe %rax */
79 ALTENTRY(dtrace_invop_callsite)
/* Dispatch on the emulation type now held in %eax. */
81 cmpl $DTRACE_INVOP_PUSHL_EBP, %eax /* emulate "pushq %rbp"? */
83 cmpl $DTRACE_INVOP_LEAVE, %eax /* emulate "leave"? */
85 cmpl $DTRACE_INVOP_NOP, %eax /* emulate "nop"? */
87 cmpl $DTRACE_INVOP_RET, %eax /* emulate "ret"? */
90 /* When all else fails handle the trap in the usual way. */
91 jmpq *dtrace_invop_calltrap_addr
95 * We must emulate a "pushq %rbp". To do this, we pull the stack
96 * down 8 bytes, and then store the base pointer.
99 subq $16, %rsp /* make room for %rbp */
100 pushq %rax /* push temp */
101 movq 24(%rsp), %rax /* load calling RIP */
102 movq %rax, 8(%rsp) /* store calling RIP */
103 movq 32(%rsp), %rax /* load calling CS */
104 movq %rax, 16(%rsp) /* store calling CS */
105 movq 40(%rsp), %rax /* load calling RFLAGS */
106 movq %rax, 24(%rsp) /* store calling RFLAGS */
107 movq 48(%rsp), %rax /* load calling RSP */
108 subq $8, %rax /* make room for %rbp */
109 movq %rax, 32(%rsp) /* store calling RSP */
110 movq 56(%rsp), %rax /* load calling SS */
111 movq %rax, 40(%rsp) /* store calling SS */
112 movq 32(%rsp), %rax /* reload calling RSP */
113 movq %rbp, (%rax) /* store %rbp there */
114 popq %rax /* pop off temp */
115 iretq /* return from interrupt */
120 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
121 * followed by a "popq %rbp". This is quite a bit simpler on amd64
122 * than it is on i386 -- we can exploit the fact that the %rsp is
123 * explicitly saved to effect the pop without having to reshuffle
124 * the other data pushed for the trap.
127 pushq %rax /* push temp */
128 movq 8(%rsp), %rax /* load calling RIP */
129 movq %rax, 8(%rsp) /* store calling RIP */
130 movq (%rbp), %rax /* get new %rbp */
131 addq $8, %rbp /* adjust new %rsp */
132 movq %rbp, 32(%rsp) /* store new %rsp */
133 movq %rax, %rbp /* set new %rbp */
134 popq %rax /* pop off temp */
135 iretq /* return from interrupt */
139 /* We must emulate a "nop". */
/*
 * Emulate a "ret": take the return address from the top of the
 * interrupted stack and make it the resume RIP, popping that slot.
 */
146 pushq %rax /* push temp */
147 movq 32(%rsp), %rax /* load %rsp */
148 movq (%rax), %rax /* load calling RIP */
149 movq %rax, 8(%rsp) /* store calling RIP */
150 addq $8, 32(%rsp) /* adjust new %rsp */
151 popq %rax /* pop off temp */
152 iretq /* return from interrupt */
155 END(dtrace_invop_start)
158 void dtrace_invop_init(void)
/* Arm the invop mechanism: point the #BP hook at dtrace_invop_start. */
160 ENTRY(dtrace_invop_init)
161 movq $dtrace_invop_start, dtrace_invop_jump_addr(%rip)
163 END(dtrace_invop_init)
166 void dtrace_invop_uninit(void)
/* Disarm the invop mechanism: clear the #BP hook pointer. */
168 ENTRY(dtrace_invop_uninit)
169 movq $0, dtrace_invop_jump_addr(%rip)
171 END(dtrace_invop_uninit)
174 greg_t dtrace_getfp(void)
183 dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
/* cmpxchgl: if (*%rdi == %eax) *%rdi = %edx; old *%rdi returned in %eax. */
188 cmpxchgl %edx, (%rdi)
194 dtrace_casptr(void *target, void *cmp, void *new)
/* 64-bit variant: if (*%rdi == %rax) *%rdi = %rdx; old value in %rax. */
199 cmpxchgq %rdx, (%rdi)
205 dtrace_caller(int aframes)
214 dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)
/* Byte-wise copy of size bytes via rep movsb (%rsi -> %rdi). */
220 xchgq %rdi, %rsi /* make %rsi source, %rdi dest */
221 movq %rdx, %rcx /* load count */
222 repz /* repeat for count ... */
223 smovb /* move from %ds:rsi to %es:rdi */
230 dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
231 volatile uint16_t *flags)
/*
 * Copy bytes from uaddr to kaddr one at a time, re-checking the DTrace
 * CPU fault flags at every 4KB boundary of the remaining count so a
 * faulting probe can abort the copy.
 */
233 ENTRY(dtrace_copystr)
238 movb (%rdi), %al /* load from source */
239 movb %al, (%rsi) /* store to destination */
240 addq $1, %rdi /* increment source pointer */
241 addq $1, %rsi /* increment destination pointer */
242 subq $1, %rdx /* decrement remaining count */
245 testq $0xfff, %rdx /* test if count is 4k-aligned */
246 jnz 1f /* if not, continue with copying */
247 testq $CPU_DTRACE_BADADDR, (%rcx) /* load and test dtrace flags */
260 dtrace_fulword(void *addr)
/* Fetch a long word (64-bit on amd64) from addr. */
262 ENTRY(dtrace_fulword)
269 dtrace_fuword8_nocheck(void *addr)
/* Unchecked 8-bit fetch from addr ("nocheck": no address validation here). */
271 ENTRY(dtrace_fuword8_nocheck)
275 END(dtrace_fuword8_nocheck)
279 dtrace_fuword16_nocheck(void *addr)
/* Unchecked 16-bit fetch from addr. */
281 ENTRY(dtrace_fuword16_nocheck)
285 END(dtrace_fuword16_nocheck)
289 dtrace_fuword32_nocheck(void *addr)
/* Unchecked 32-bit fetch from addr. */
291 ENTRY(dtrace_fuword32_nocheck)
295 END(dtrace_fuword32_nocheck)
299 dtrace_fuword64_nocheck(void *addr)
/* Unchecked 64-bit fetch from addr. */
301 ENTRY(dtrace_fuword64_nocheck)
304 END(dtrace_fuword64_nocheck)
308 dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
309 int fault, int fltoffs, uintptr_t illval)
/* Fire the DTrace ERROR probe for a fault encountered in probe context. */
311 ENTRY(dtrace_probe_error)
321 movl dtrace_probeid_error(%rip), %edi /* arg0 <- ERROR probe id */
326 END(dtrace_probe_error)
330 dtrace_membar_producer(void)
/*
 * Producer (store/store) memory barrier.  amd64's strong store ordering
 * makes an explicit fence unnecessary, so this is just a return.
 */
332 ENTRY(dtrace_membar_producer)
333 rep; ret /* use 2 byte return instruction when branch target */
334 /* AMD Software Optimization Guide - Section 6.2 */
335 END(dtrace_membar_producer)
339 dtrace_membar_consumer(void)
/*
 * Consumer (load/load) memory barrier.  amd64's strong load ordering
 * makes an explicit fence unnecessary, so this is just a return.
 */
341 ENTRY(dtrace_membar_consumer)
342 rep; ret /* use 2 byte return instruction when branch target */
343 /* AMD Software Optimization Guide - Section 6.2 */
344 END(dtrace_membar_consumer)
348 dtrace_interrupt_disable(void)
/* Disable interrupts; returns a cookie for dtrace_interrupt_enable(). */
350 ENTRY(dtrace_interrupt_disable)
355 END(dtrace_interrupt_disable)
359 dtrace_interrupt_enable(dtrace_icookie_t cookie)
/* Restore the interrupt state captured by dtrace_interrupt_disable(). */
361 ENTRY(dtrace_interrupt_enable)
365 END(dtrace_interrupt_enable)
368 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
369 * into the panic code implemented in panicsys(). vpanic() is responsible
370 * for passing through the format string and arguments, and constructing a
371 * regs structure on the stack into which it saves the current register
372 * values. If we are not dying due to a fatal trap, these registers will
373 * then be preserved in panicbuf as the current processor state. Before
374 * invoking panicsys(), vpanic() activates the first panic trigger (see
375 * common/os/panic.c) and switches to the panic_stack if successful. Note that
376 * DTrace takes a slightly different panic path if it must panic from probe
377 * context. Instead of calling panic, it calls into dtrace_vpanic(), which
378 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
379 * branches back into vpanic().
384 vpanic(const char *format, va_list alist)
/*
 * Common panic entry point.  Snapshots the caller's register state
 * (pushed below, offsets noted per line) into a regs structure built on
 * the stack -- switching to panic_stack if we won the panic trigger --
 * then hands off to panicsys(format, alist, rp, on_panic_stack).
 */
386 ENTRY(vpanic) /* Initial stack layout: */
388 pushq %rbp /* | %rip | 0x60 */
389 movq %rsp, %rbp /* | %rbp | 0x58 */
390 pushfq /* | rfl | 0x50 */
391 pushq %r11 /* | %r11 | 0x48 */
392 pushq %r10 /* | %r10 | 0x40 */
393 pushq %rbx /* | %rbx | 0x38 */
394 pushq %rax /* | %rax | 0x30 */
395 pushq %r9 /* | %r9 | 0x28 */
396 pushq %r8 /* | %r8 | 0x20 */
397 pushq %rcx /* | %rcx | 0x18 */
398 pushq %rdx /* | %rdx | 0x10 */
399 pushq %rsi /* | %rsi | 0x8 alist */
400 pushq %rdi /* | %rdi | 0x0 format */
402 movq %rsp, %rbx /* %rbx = current %rsp (base of saved regs above) */
404 leaq panic_quiesce(%rip), %rdi /* %rdi = &panic_quiesce */
405 call panic_trigger /* %eax = panic_trigger() */
409 * The panic_trigger result is in %eax from the call above, and
410 * dtrace_panic places it in %eax before branching here.
411 * The rdmsr instructions that follow below will clobber %eax so
412 * we stash the panic_trigger result in %r11d.
419 * If panic_trigger() was successful, we are the first to initiate a
420 * panic: we now switch to the reserved panic_stack before continuing.
422 leaq panic_stack(%rip), %rsp
423 addq $PANICSTKSIZE, %rsp /* start at the top of the panic stack */
424 0: subq $REGSIZE, %rsp /* allocate the regs structure */
426 * Now that we've got everything set up, store the register values as
427 * they were when we entered vpanic() to the designated location in
428 * the regs structure we allocated on the stack.
432 movq %rcx, REGOFF_RDI(%rsp) /* saved %rdi (format) */
434 movq %rcx, REGOFF_RSI(%rsp) /* saved %rsi (alist) */
435 movq 0x10(%rbx), %rcx /* saved %rdx */
436 movq %rcx, REGOFF_RDX(%rsp)
437 movq 0x18(%rbx), %rcx /* saved %rcx */
438 movq %rcx, REGOFF_RCX(%rsp)
439 movq 0x20(%rbx), %rcx /* saved %r8 */
441 movq %rcx, REGOFF_R8(%rsp)
442 movq 0x28(%rbx), %rcx /* saved %r9 */
443 movq %rcx, REGOFF_R9(%rsp)
444 movq 0x30(%rbx), %rcx /* saved %rax */
445 movq %rcx, REGOFF_RAX(%rsp)
446 movq 0x38(%rbx), %rcx /* saved %rbx */
447 movq %rcx, REGOFF_RBX(%rsp)
448 movq 0x58(%rbx), %rcx /* saved %rbp */
450 movq %rcx, REGOFF_RBP(%rsp)
451 movq 0x40(%rbx), %rcx /* saved %r10 */
452 movq %rcx, REGOFF_R10(%rsp)
453 movq 0x48(%rbx), %rcx /* saved %r11 */
454 movq %rcx, REGOFF_R11(%rsp)
455 movq %r12, REGOFF_R12(%rsp) /* %r12-%r15 are callee-saved: */
457 movq %r13, REGOFF_R13(%rsp) /* still live, store directly */
458 movq %r14, REGOFF_R14(%rsp)
459 movq %r15, REGOFF_R15(%rsp)
463 movq %rcx, REGOFF_DS(%rsp) /* segment selectors */
465 movq %rcx, REGOFF_ES(%rsp)
467 movq %rcx, REGOFF_FS(%rsp)
469 movq %rcx, REGOFF_GS(%rsp)
471 movq $0, REGOFF_TRAPNO(%rsp) /* not a hardware trap */
473 movq $0, REGOFF_ERR(%rsp)
474 leaq vpanic(%rip), %rcx /* report vpanic itself as the RIP */
475 movq %rcx, REGOFF_RIP(%rsp)
478 movq %rcx, REGOFF_CS(%rsp)
479 movq 0x50(%rbx), %rcx /* saved rflags */
480 movq %rcx, REGOFF_RFL(%rsp)
483 movq %rcx, REGOFF_RSP(%rsp)
486 movq %rcx, REGOFF_SS(%rsp)
489 * panicsys(format, alist, rp, on_panic_stack)
491 movq REGOFF_RDI(%rsp), %rdi /* format */
492 movq REGOFF_RSI(%rsp), %rsi /* alist */
493 movq %rsp, %rdx /* struct regs */
494 movl %r11d, %ecx /* on_panic_stack */
515 dtrace_vpanic(const char *format, va_list alist)
/*
 * Panic entry for DTrace probe context: identical register snapshot to
 * vpanic() above, but activates dtrace_panic_trigger() instead of
 * panic_trigger() before rejoining the common vpanic path.
 */
517 ENTRY(dtrace_vpanic) /* Initial stack layout: */
519 pushq %rbp /* | %rip | 0x60 */
520 movq %rsp, %rbp /* | %rbp | 0x58 */
521 pushfq /* | rfl | 0x50 */
522 pushq %r11 /* | %r11 | 0x48 */
523 pushq %r10 /* | %r10 | 0x40 */
524 pushq %rbx /* | %rbx | 0x38 */
525 pushq %rax /* | %rax | 0x30 */
526 pushq %r9 /* | %r9 | 0x28 */
527 pushq %r8 /* | %r8 | 0x20 */
528 pushq %rcx /* | %rcx | 0x18 */
529 pushq %rdx /* | %rdx | 0x10 */
530 pushq %rsi /* | %rsi | 0x8 alist */
531 pushq %rdi /* | %rdi | 0x0 format */
533 movq %rsp, %rbx /* %rbx = current %rsp */
535 leaq panic_quiesce(%rip), %rdi /* %rdi = &panic_quiesce */
536 call dtrace_panic_trigger /* %eax = dtrace_panic_trigger() */
543 panic_trigger(int *tp)
/* Atomically claim the panic trigger *tp using a distinctive marker value. */
547 movl $0xdefacedd, %edx /* 0xdefacedd: "defaced" marker value */
560 dtrace_panic_trigger(int *tp)
/* Probe-context variant of panic_trigger(); same marker-value protocol. */
562 ENTRY(dtrace_panic_trigger)
564 movl $0xdefacedd, %edx /* 0xdefacedd: "defaced" marker value */
573 END(dtrace_panic_trigger)