 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Portions Copyright 2008 John Birrell <jb@freebsd.org>
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
#include <machine/asmacros.h>
#include <sys/cpuvar_defs.h>
#include <sys/dtrace.h>
41 movq TF_RDI(%rsp),%rdi; \
42 movq TF_RSI(%rsp),%rsi; \
43 movq TF_RDX(%rsp),%rdx; \
44 movq TF_RCX(%rsp),%rcx; \
45 movq TF_R8(%rsp),%r8; \
46 movq TF_R9(%rsp),%r9; \
47 movq TF_RAX(%rsp),%rax; \
48 movq TF_RBX(%rsp),%rbx; \
49 movq TF_RBP(%rsp),%rbp; \
50 movq TF_R10(%rsp),%r10; \
51 movq TF_R11(%rsp),%r11; \
52 movq TF_R12(%rsp),%r12; \
53 movq TF_R13(%rsp),%r13; \
54 movq TF_R14(%rsp),%r14; \
55 movq TF_R15(%rsp),%r15; \
56 testb $SEL_RPL_MASK,TF_CS(%rsp); \
ENTRY(dtrace_invop_start)
 * #BP traps with %rip set to the next address. We need to decrement
 * the value to indicate the address of the int3 (0xcc) instruction
 * that we substituted.
	movq	TF_RIP(%rsp), %rdi	/* arg0 = trapping instruction address */
	movq	TF_RAX(%rsp), %rdx	/* arg2 = saved %rax from the trap frame */
	ALTENTRY(dtrace_invop_callsite)
	cmpl	$DTRACE_INVOP_PUSHL_EBP, %eax	/* emulate "pushq %rbp"? */
	cmpl	$DTRACE_INVOP_LEAVE, %eax	/* emulate "leave"? */
	cmpl	$DTRACE_INVOP_NOP, %eax		/* emulate "nop"? */
	cmpl	$DTRACE_INVOP_RET, %eax		/* emulate "ret"? */

	/* When all else fails handle the trap in the usual way. */
	jmpq	*dtrace_invop_calltrap_addr

 * We must emulate a "pushq %rbp". To do this, we pull the stack
 * down 8 bytes, and then store the base pointer.
	subq	$16, %rsp		/* make room for %rbp */
	pushq	%rax			/* push temp */
	movq	24(%rsp), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	movq	32(%rsp), %rax		/* load calling CS */
	movq	%rax, 16(%rsp)		/* store calling CS */
	movq	40(%rsp), %rax		/* load calling RFLAGS */
	movq	%rax, 24(%rsp)		/* store calling RFLAGS */
	movq	48(%rsp), %rax		/* load calling RSP */
	subq	$8, %rax		/* make room for %rbp */
	movq	%rax, 32(%rsp)		/* store calling RSP */
	movq	56(%rsp), %rax		/* load calling SS */
	movq	%rax, 40(%rsp)		/* store calling SS */
	movq	32(%rsp), %rax		/* reload calling RSP */
	movq	%rbp, (%rax)		/* store %rbp there */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
 * followed by a "popq %rbp". This is quite a bit simpler on amd64
 * than it is on i386 -- we can exploit the fact that the %rsp is
 * explicitly saved to effect the pop without having to reshuffle
 * the other data pushed for the trap.
	pushq	%rax			/* push temp */
	movq	8(%rsp), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
					/* NOTE(review): load/store of the same
					   slot — RIP is left unchanged here */
	movq	(%rbp), %rax		/* get new %rbp */
	addq	$8, %rbp		/* adjust new %rsp */
	movq	%rbp, 32(%rsp)		/* store new %rsp */
	movq	%rax, %rbp		/* set new %rbp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */

	/* We must emulate a "nop". */

	pushq	%rax			/* push temp */
	movq	32(%rsp), %rax		/* load %rsp */
	movq	(%rax), %rax		/* load calling RIP */
	movq	%rax, 8(%rsp)		/* store calling RIP */
	addq	$8, 32(%rsp)		/* adjust new %rsp */
	popq	%rax			/* pop off temp */
	iretq				/* return from interrupt */
END(dtrace_invop_start)
void dtrace_invop_init(void)
ENTRY(dtrace_invop_init)
	movq	$dtrace_invop_start, dtrace_invop_jump_addr(%rip)	/* arm the #BP emulation hook */
END(dtrace_invop_init)
void dtrace_invop_uninit(void)
ENTRY(dtrace_invop_uninit)
	movq	$0, dtrace_invop_jump_addr(%rip)	/* disarm the #BP emulation hook */
END(dtrace_invop_uninit)
greg_t dtrace_getfp(void)
dtrace_cas32(uint32_t *target, uint32_t cmp, uint32_t new)
183 cmpxchgl %edx, (%rdi)
dtrace_casptr(void *target, void *cmp, void *new)
194 cmpxchgq %rdx, (%rdi)
dtrace_caller(int aframes)
dtrace_copy(uintptr_t src, uintptr_t dest, size_t size)
ENTRY(dtrace_copy_nosmap)
	xchgq	%rdi, %rsi		/* make %rsi source, %rdi dest */
	movq	%rdx, %rcx		/* load count */
	repz				/* repeat for count ... */
	smovb				/* move from %ds:rsi to %es:rdi */
END(dtrace_copy_nosmap)
ENTRY(dtrace_copy_smap)
	xchgq	%rdi, %rsi		/* make %rsi source, %rdi dest */
	movq	%rdx, %rcx		/* load count */
	repz				/* repeat for count ... */
	smovb				/* move from %ds:rsi to %es:rdi */
END(dtrace_copy_smap)
dtrace_copystr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
ENTRY(dtrace_copystr_nosmap)	/* byte copy; poll DTrace fault flags every 4 KB */
	movb	(%rdi), %al		/* load from source */
	movb	%al, (%rsi)		/* store to destination */
	addq	$1, %rdi		/* increment source pointer */
	addq	$1, %rsi		/* increment destination pointer */
	subq	$1, %rdx		/* decrement remaining count */
	testq	$0xfff, %rdx		/* test if count is 4k-aligned */
	jnz	1f			/* if not, continue with copying */
	testq	$CPU_DTRACE_BADADDR, (%rcx) /* load and test dtrace flags */
END(dtrace_copystr_nosmap)
ENTRY(dtrace_copystr_smap)	/* byte copy; poll DTrace fault flags every 4 KB */
	movb	(%rdi), %al		/* load from source */
	movb	%al, (%rsi)		/* store to destination */
	addq	$1, %rdi		/* increment source pointer */
	addq	$1, %rsi		/* increment destination pointer */
	subq	$1, %rdx		/* decrement remaining count */
	testq	$0xfff, %rdx		/* test if count is 4k-aligned */
	jnz	1f			/* if not, continue with copying */
	testq	$CPU_DTRACE_BADADDR, (%rcx) /* load and test dtrace flags */
END(dtrace_copystr_smap)
dtrace_fulword(void *addr)
ENTRY(dtrace_fulword_nosmap)	/* dtrace_fulword(void *addr), non-SMAP flavor */
END(dtrace_fulword_nosmap)
ENTRY(dtrace_fulword_smap)	/* dtrace_fulword(void *addr), SMAP flavor */
END(dtrace_fulword_smap)
dtrace_fuword8_nocheck(void *addr)
ENTRY(dtrace_fuword8_nocheck_nosmap)	/* dtrace_fuword8_nocheck(void *addr), non-SMAP flavor */
END(dtrace_fuword8_nocheck_nosmap)
ENTRY(dtrace_fuword8_nocheck_smap)	/* dtrace_fuword8_nocheck(void *addr), SMAP flavor */
END(dtrace_fuword8_nocheck_smap)
dtrace_fuword16_nocheck(void *addr)
ENTRY(dtrace_fuword16_nocheck_nosmap)	/* dtrace_fuword16_nocheck(void *addr), non-SMAP flavor */
END(dtrace_fuword16_nocheck_nosmap)
ENTRY(dtrace_fuword16_nocheck_smap)	/* dtrace_fuword16_nocheck(void *addr), SMAP flavor */
END(dtrace_fuword16_nocheck_smap)
dtrace_fuword32_nocheck(void *addr)
ENTRY(dtrace_fuword32_nocheck_nosmap)	/* dtrace_fuword32_nocheck(void *addr), non-SMAP flavor */
END(dtrace_fuword32_nocheck_nosmap)
ENTRY(dtrace_fuword32_nocheck_smap)	/* dtrace_fuword32_nocheck(void *addr), SMAP flavor */
END(dtrace_fuword32_nocheck_smap)
dtrace_fuword64_nocheck(void *addr)
ENTRY(dtrace_fuword64_nocheck_nosmap)	/* dtrace_fuword64_nocheck(void *addr), non-SMAP flavor */
END(dtrace_fuword64_nocheck_nosmap)
ENTRY(dtrace_fuword64_nocheck_smap)	/* dtrace_fuword64_nocheck(void *addr), SMAP flavor */
END(dtrace_fuword64_nocheck_smap)
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fault, int fltoffs, uintptr_t illval)
ENTRY(dtrace_probe_error)
	movl	dtrace_probeid_error(%rip), %edi	/* arg0 = ERROR probe id */
END(dtrace_probe_error)
dtrace_membar_producer(void)
ENTRY(dtrace_membar_producer)	/* no fence emitted: presumably amd64 store ordering suffices — TODO confirm */
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
END(dtrace_membar_producer)
dtrace_membar_consumer(void)
ENTRY(dtrace_membar_consumer)	/* no fence emitted: presumably amd64 load ordering suffices — TODO confirm */
	rep;	ret	/* use 2 byte return instruction when branch target */
			/* AMD Software Optimization Guide - Section 6.2 */
END(dtrace_membar_consumer)
dtrace_interrupt_disable(void)
ENTRY(dtrace_interrupt_disable)	/* returns an interrupt-state cookie; see C prototype above */
END(dtrace_interrupt_disable)
dtrace_interrupt_enable(dtrace_icookie_t cookie)
ENTRY(dtrace_interrupt_enable)	/* restores interrupt state from cookie in %rdi; see C prototype above */
END(dtrace_interrupt_enable)