1 /* $NetBSD: asm.h,v 1.29 2000/12/14 21:29:51 jeffs Exp $ */
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
7 * This code is derived from software contributed to Berkeley by
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * @(#)machAsmDefs.h 8.1 (Berkeley) 6/10/93
35 * JNPR: asm.h,v 1.10 2007/08/09 11:23:32 katta
42 * Macros used when writing assembler programs.
44 * Copyright (C) 1989 Digital Equipment Corporation.
45 * Permission to use, copy, modify, and distribute this software and
46 * its documentation for any purpose and without fee is hereby granted,
47 * provided that the above copyright notice appears in all copies.
48 * Digital Equipment Corporation makes no representations about the
49 * suitability of this software for any purpose. It is provided "as is"
50 * without express or implied warranty.
52 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsmDefs.h,
53 * v 1.2 89/08/15 18:28:24 rab Exp SPRITE (DECWRL)
56 #ifndef _MACHINE_ASM_H_
57 #define _MACHINE_ASM_H_
60 #include <machine/regdef.h>
62 #include <machine/endian.h>
63 #include <machine/cdefs.h>
66 #if !defined(lint) && !defined(STRIP_FBSDID)
67 #define __FBSDID(s) .ident s
69 #define __FBSDID(s) /* nothing */
73 * Define -pg profile entry code.
74 * Must always be noreorder, must never use a macro instruction
75 * Final addiu to t9 must always equal the size of this _KERN_MCOUNT
77 #define _KERN_MCOUNT \
84 lui t9,%hi(_mcount); \
85 addiu t9,t9,%lo(_mcount); \
94 #define MCOUNT _KERN_MCOUNT
109 * WARN_REFERENCES: create a warning if the specified symbol is referenced
/*
 * Emit _msg as a GNU linker warning that fires whenever _sym is
 * referenced: the message text is placed in a .gnu.warning.<sym>
 * section, then the assembler is switched back to .text.
 */
#define WARN_REFERENCES(_sym,_msg) \
.section .gnu.warning. ## _sym ; .ascii _msg ; .text
115 * These are temp registers whose names can be used in either the old
116 * or new ABI, although they map to different physical registers. In
117 * the old ABI, they map to t4-t7, and in the new ABI, they map to a4-a7.
119 * Because they overlap with the last 4 arg regs in the new ABI, ta0-ta3
120 * should be used only when we need more than t0-t3.
122 #if defined(__mips_n32) || defined(__mips_n64)
132 #endif /* __mips_n32 || __mips_n64 */
135 # define _C_LABEL(x) x
137 # define _C_LABEL(x) _ ## x
141 * WEAK_ALIAS: create a weak alias.
143 #define WEAK_ALIAS(alias,sym) \
148 * STRONG_ALIAS: create a strong alias.
150 #define STRONG_ALIAS(alias,sym) \
/*
 * GLOBAL(sym): emit a global label in .text, with a MIPS .ent
 * marker (procedure-begin record for the debugger/unwinder).
 */
#define GLOBAL(sym) \
.text; .globl sym; .ent sym; sym:

/*
 * ASM_ENTRY(sym): emit a global entry point tagged with an ELF
 * @function symbol type (no .ent record).
 */
#define ASM_ENTRY(sym) \
.text; .globl sym; .type sym,@function; sym:
165 * A leaf routine does
166 * - call no other function,
* - never use any register that is callee-saved (S0-S8), and
168 * - not use any local stack storage.
171 .globl _C_LABEL(x); \
172 .ent _C_LABEL(x), 0; \
179 * No profilable leaf routine.
181 #define LEAF_NOPROFILE(x) \
182 .globl _C_LABEL(x); \
183 .ent _C_LABEL(x), 0; \
189 * declare alternate entry to leaf routine
192 .globl _C_LABEL(x); \
193 AENT (_C_LABEL(x)); \
198 * A function calls other functions and needs
199 * therefore stack space to save/restore registers.
201 #define NESTED(x, fsize, retpc) \
202 .globl _C_LABEL(x); \
203 .ent _C_LABEL(x), 0; \
205 .frame sp, fsize, retpc; \
209 * NESTED_NOPROFILE(x)
210 * No profilable nested routine.
212 #define NESTED_NOPROFILE(x, fsize, retpc) \
213 .globl _C_LABEL(x); \
214 .ent _C_LABEL(x), 0; \
216 .frame sp, fsize, retpc
220 * declare alternate entry point to nested routine.
223 .globl _C_LABEL(x); \
224 AENT (_C_LABEL(x)); \
229 * Mark end of a procedure.
235 * IMPORT -- import external symbol
237 #define IMPORT(sym, size) \
238 .extern _C_LABEL(sym),size
241 * EXPORT -- export definition of symbol
244 .globl _C_LABEL(x); \
249 * exception vector entrypoint
250 * XXX: regmask should be used to generate .mask
252 #define VECTOR(x, regmask) \
253 .ent _C_LABEL(x),0; \
256 #define VECTOR_END(x) \
261 * Macros to panic and printf from assembly language.
265 jal _C_LABEL(panic); \
269 #define PANIC_KSEG0(msg, reg) PANIC(msg)
271 #define PRINTF(msg) \
273 jal _C_LABEL(printf); \
282 #define ASMSTR(str) \
287 * Call ast if required
289 * XXX Do we really need to disable interrupts?
293 mfc0 t0, MIPS_COP_0_STATUS ;\
294 and a0, t0, MIPS_SR_INT_IE ;\
296 mtc0 t0, MIPS_COP_0_STATUS ;\
299 PTR_L s3, PC_CURPCB(s1) ;\
300 PTR_L s1, PC_CURTHREAD(s1) ;\
301 lw s2, TD_FLAGS(s1) ;\
302 li s0, TDF_ASTPENDING | TDF_NEEDRESCHED;\
304 mfc0 t0, MIPS_COP_0_STATUS ;\
306 mtc0 t0, MIPS_COP_0_STATUS ;\
310 PTR_LA s0, _C_LABEL(ast) ;\
312 PTR_ADDU a0, s3, U_PCB_REGS ;\
319 * XXX retain dialects XXX
321 #define ALEAF(x) XLEAF(x)
322 #define NLEAF(x) LEAF_NOPROFILE(x)
323 #define NON_LEAF(x, fsize, retpc) NESTED(x, fsize, retpc)
324 #define NNON_LEAF(x, fsize, retpc) NESTED_NOPROFILE(x, fsize, retpc)
326 #if defined(__mips_o32)
332 #if defined(__mips_o32) || defined(__mips_o64)
333 #define ALSK 7 /* stack alignment */
334 #define ALMASK -7 /* stack alignment */
339 #define ALSK 15 /* stack alignment */
340 #define ALMASK -15 /* stack alignment */
347 * standard callframe {
348 * register_t cf_pad[N]; o32/64 (N=0), n32 (N=1) n64 (N=1)
349 * register_t cf_args[4]; arg0 - arg3 (only on o32 and o64)
350 * register_t cf_gp; global pointer (only on n32 and n64)
351 * register_t cf_sp; frame pointer
352 * register_t cf_ra; return address
355 #if defined(__mips_o32) || defined(__mips_o64)
356 #define CALLFRAME_SIZ (SZREG * (4 + 2))
357 #define CALLFRAME_S0 0
358 #elif defined(__mips_n32) || defined(__mips_n64)
359 #define CALLFRAME_SIZ (SZREG * 4)
360 #define CALLFRAME_S0 (CALLFRAME_SIZ - 4 * SZREG)
363 #define CALLFRAME_GP (CALLFRAME_SIZ - 3 * SZREG)
365 #define CALLFRAME_SP (CALLFRAME_SIZ - 2 * SZREG)
366 #define CALLFRAME_RA (CALLFRAME_SIZ - 1 * SZREG)
369 * Endian-independent assembly-code aliases for unaligned memory accesses.
371 #if _BYTE_ORDER == _LITTLE_ENDIAN
389 #if _BYTE_ORDER == _BIG_ENDIAN
408 * While it would be nice to be compatible with the SGI
409 * REG_L and REG_S macros, because they do not take parameters, it
410 * is impossible to use them with the _MIPS_SIM_ABIX32 model.
412 * These macros hide the use of mips3 instructions from the
413 * assembler to prevent the assembler from generating 64-bit style
/*
 * PTR_* mnemonics: pointer-sized arithmetic/shift/storage operations
 * selected by the ABI's pointer width, so the same assembly source
 * assembles correctly under both 32- and 64-bit pointer models.
 */
#if _MIPS_SZPTR == 32
/* 32-bit pointers: plain word-sized instructions. */
#define PTR_ADDI addi
#define PTR_ADDU addu
#define PTR_ADDIU addiu
/* NOTE(review): "subi" is not a base MIPS opcode; presumably relies on an assembler macro -- confirm. */
#define PTR_SUBI subi
#define PTR_SUBU subu
/* No subiu instruction exists; the assembler synthesizes it from subu. */
#define PTR_SUBIU subu
#define PTR_SLLV sllv
#define PTR_SRLV srlv
#define PTR_SRAV srav
#define PTR_WORD .word
/* log2 of the pointer size, for scaled index arithmetic. */
#define PTR_SCALESHIFT 2
#else /* _MIPS_SZPTR == 64 */
/* 64-bit pointers: doubleword (d-prefixed) instructions. */
#define PTR_ADDI daddi
#define PTR_ADDU daddu
#define PTR_ADDIU daddiu
#define PTR_SUBI dsubi
#define PTR_SUBU dsubu
#define PTR_SUBIU dsubu
#define PTR_SLLV dsllv
#define PTR_SRLV dsrlv
#define PTR_SRAV dsrav
#define PTR_WORD .dword
#define PTR_SCALESHIFT 3
#endif /* _MIPS_SZPTR == 64 */
464 #if _MIPS_SZINT == 32
466 #define INT_ADDI addi
467 #define INT_ADDU addu
468 #define INT_ADDIU addiu
470 #define INT_SUBI subi
471 #define INT_SUBU subu
472 #define INT_SUBIU subu
477 #define INT_SLLV sllv
479 #define INT_SRLV srlv
481 #define INT_SRAV srav
484 #define INT_WORD .word
485 #define INT_SCALESHIFT 2
488 #define INT_ADDI daddi
489 #define INT_ADDU daddu
490 #define INT_ADDIU daddiu
492 #define INT_SUBI dsubi
493 #define INT_SUBU dsubu
494 #define INT_SUBIU dsubu
499 #define INT_SLLV dsllv
501 #define INT_SRLV dsrlv
503 #define INT_SRAV dsrav
506 #define INT_WORD .dword
507 #define INT_SCALESHIFT 3
510 #if _MIPS_SZLONG == 32
512 #define LONG_ADDI addi
513 #define LONG_ADDU addu
514 #define LONG_ADDIU addiu
516 #define LONG_SUBI subi
517 #define LONG_SUBU subu
518 #define LONG_SUBIU subu
523 #define LONG_SLLV sllv
525 #define LONG_SRLV srlv
527 #define LONG_SRAV srav
530 #define LONG_WORD .word
531 #define LONG_SCALESHIFT 2
/*
 * 64-bit (_MIPS_SZLONG == 64) flavors of the LONG_* operations:
 * C "long" arithmetic must use the doubleword (d-prefixed)
 * MIPS instructions in this model.
 *
 * Fix: LONG_SUB was defined as "dadd" (an addition), so any assembly
 * using LONG_SUB would silently compute a sum; it must be "dsub".
 */
#define LONG_ADD dadd
#define LONG_ADDI daddi
#define LONG_ADDU daddu
#define LONG_ADDIU daddiu
#define LONG_SUB dsub
#define LONG_SUBI dsubi
#define LONG_SUBU dsubu
/* No dsubiu instruction exists; the assembler synthesizes it from dsubu. */
#define LONG_SUBIU dsubu
#define LONG_SLL dsll
#define LONG_SLLV dsllv
#define LONG_SRL dsrl
#define LONG_SRLV dsrlv
#define LONG_SRA dsra
#define LONG_SRAV dsrav
#define LONG_WORD .dword
/* log2(sizeof(long)) for scaled index arithmetic. */
#define LONG_SCALESHIFT 3
560 #define REG_ADDU addu
562 #define REG_SLLV sllv
564 #define REG_SRLV srlv
566 #define REG_SRAV srav
569 #define REG_SCALESHIFT 2
574 #define REG_ADDU daddu
576 #define REG_SLLV dsllv
578 #define REG_SRLV dsrlv
580 #define REG_SRAV dsrav
583 #define REG_SCALESHIFT 3
586 #if _MIPS_ISA == _MIPS_ISA_MIPS1 || _MIPS_ISA == _MIPS_ISA_MIPS2 || \
587 _MIPS_ISA == _MIPS_ISA_MIPS32
591 #if _MIPS_ISA == _MIPS_ISA_MIPS3 || _MIPS_ISA == _MIPS_ISA_MIPS4 || \
592 _MIPS_ISA == _MIPS_ISA_MIPS64
597 #if defined(__mips_o32) || defined(__mips_o64)
600 #define CPRESTORE(r) .cprestore r
601 #define CPLOAD(r) .cpload r
603 #define CPRESTORE(r) /* not needed */
604 #define CPLOAD(r) /* not needed */
612 #define SETUP_GPX(r) \
615 move r,ra; /* save old ra */ \
621 #define SETUP_GPX_L(r,lbl) \
624 move r,ra; /* save old ra */ \
630 #define SAVE_GP(x) .cprestore x
632 #define SETUP_GP64(a,b) /* n32/n64 specific */
633 #define SETUP_GP64_R(a,b) /* n32/n64 specific */
634 #define SETUP_GPX64(a,b) /* n32/n64 specific */
635 #define SETUP_GPX64_L(a,b,c) /* n32/n64 specific */
636 #define RESTORE_GP64 /* n32/n64 specific */
637 #define USE_ALT_CP(a) /* n32/n64 specific */
638 #endif /* __mips_o32 || __mips_o64 */
640 #if defined(__mips_o32) || defined(__mips_o64)
641 #define REG_PROLOGUE .set push
642 #define REG_EPILOGUE .set pop
644 #if defined(__mips_n32) || defined(__mips_n64)
645 #define REG_PROLOGUE .set push ; .set mips3
646 #define REG_EPILOGUE .set pop
649 #if defined(__mips_n32) || defined(__mips_n64)
650 #define SETUP_GP /* o32 specific */
651 #define SETUP_GPX(r) /* o32 specific */
652 #define SETUP_GPX_L(r,lbl) /* o32 specific */
653 #define SAVE_GP(x) /* o32 specific */
654 #define SETUP_GP64(a,b) .cpsetup $25, a, b
655 #define SETUP_GPX64(a,b) \
662 .cpsetup ra, a, 7b; \
664 #define SETUP_GPX64_L(a,b,c) \
673 #define RESTORE_GP64 .cpreturn
674 #define USE_ALT_CP(a) .cplocal a
675 #endif /* __mips_n32 || __mips_n64 */
677 #define mfc0_macro(data, spr) \
678 __asm __volatile ("mfc0 %0, $%1" \
679 : "=r" (data) /* outputs */ \
680 : "i" (spr)); /* inputs */
682 #define mtc0_macro(data, spr) \
683 __asm __volatile ("mtc0 %0, $%1" \
685 : "r" (data), "i" (spr)); /* inputs */
687 #define cfc0_macro(data, spr) \
688 __asm __volatile ("cfc0 %0, $%1" \
689 : "=r" (data) /* outputs */ \
690 : "i" (spr)); /* inputs */
692 #define ctc0_macro(data, spr) \
693 __asm __volatile ("ctc0 %0, $%1" \
695 : "r" (data), "i" (spr)); /* inputs */
698 #define lbu_macro(data, addr) \
699 __asm __volatile ("lbu %0, 0x0(%1)" \
700 : "=r" (data) /* outputs */ \
701 : "r" (addr)); /* inputs */
703 #define lb_macro(data, addr) \
704 __asm __volatile ("lb %0, 0x0(%1)" \
705 : "=r" (data) /* outputs */ \
706 : "r" (addr)); /* inputs */
708 #define lwl_macro(data, addr) \
709 __asm __volatile ("lwl %0, 0x0(%1)" \
710 : "=r" (data) /* outputs */ \
711 : "r" (addr)); /* inputs */
713 #define lwr_macro(data, addr) \
714 __asm __volatile ("lwr %0, 0x0(%1)" \
715 : "=r" (data) /* outputs */ \
716 : "r" (addr)); /* inputs */
718 #define ldl_macro(data, addr) \
719 __asm __volatile ("ldl %0, 0x0(%1)" \
720 : "=r" (data) /* outputs */ \
721 : "r" (addr)); /* inputs */
723 #define ldr_macro(data, addr) \
724 __asm __volatile ("ldr %0, 0x0(%1)" \
725 : "=r" (data) /* outputs */ \
726 : "r" (addr)); /* inputs */
728 #define sb_macro(data, addr) \
729 __asm __volatile ("sb %0, 0x0(%1)" \
731 : "r" (data), "r" (addr)); /* inputs */
733 #define swl_macro(data, addr) \
734 __asm __volatile ("swl %0, 0x0(%1)" \
736 : "r" (data), "r" (addr)); /* inputs */
738 #define swr_macro(data, addr) \
739 __asm __volatile ("swr %0, 0x0(%1)" \
741 : "r" (data), "r" (addr)); /* inputs */
743 #define sdl_macro(data, addr) \
744 __asm __volatile ("sdl %0, 0x0(%1)" \
746 : "r" (data), "r" (addr)); /* inputs */
748 #define sdr_macro(data, addr) \
749 __asm __volatile ("sdr %0, 0x0(%1)" \
751 : "r" (data), "r" (addr)); /* inputs */
753 #define mfgr_macro(data, gr) \
754 __asm __volatile ("move %0, $%1" \
755 : "=r" (data) /* outputs */ \
756 : "i" (gr)); /* inputs */
758 #define dmfc0_macro(data, spr) \
759 __asm __volatile ("dmfc0 %0, $%1" \
760 : "=r" (data) /* outputs */ \
761 : "i" (spr)); /* inputs */
763 #define dmtc0_macro(data, spr, sel) \
764 __asm __volatile ("dmtc0 %0, $%1, %2" \
766 : "r" (data), "i" (spr), "i" (sel)); /* inputs */
769 * The DYNAMIC_STATUS_MASK option adds an additional masking operation
770 * when updating the hardware interrupt mask in the status register.
772 * This is useful for platforms that need to at run-time mask
773 * interrupts based on motherboard configuration or to handle
774 * slowly clearing interrupts.
776 * XXX this is only currently implemented for mips3.
778 #ifdef MIPS_DYNAMIC_STATUS_MASK
779 #define DYNAMIC_STATUS_MASK(sr,scratch) \
780 lw scratch, mips_dynamic_status_mask; \
783 #define DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1) \
784 ori sr, (MIPS_INT_MASK | MIPS_SR_INT_IE); \
785 DYNAMIC_STATUS_MASK(sr,scratch1)
787 #define DYNAMIC_STATUS_MASK(sr,scratch)
788 #define DYNAMIC_STATUS_MASK_TOUSER(sr,scratch1)
791 #define GET_CPU_PCPU(reg) \
792 PTR_L reg, _C_LABEL(pcpup);
795 * Description of the setjmp buffer
* word 0	magic number (dependent on creator)
809 * 12 GP (dependent on ABI)
* 13	signal mask (dependent on magic)
* The magic number identifies the jmp_buf and
816 * how the buffer was created as well as providing
821 #define _JB_MAGIC__SETJMP 0xBADFACED
822 #define _JB_MAGIC_SETJMP 0xFACEDBAD
824 /* Valid for all jmp_buf's */
836 #define _JB_REG_SP 10
837 #define _JB_REG_S8 11
838 #if defined(__mips_n32) || defined(__mips_n64)
839 #define _JB_REG_GP 12
842 /* Only valid with the _JB_MAGIC_SETJMP magic */
844 #define _JB_SIGMASK 13
847 * Various macros for dealing with TLB hazards
850 * (c) why not used everywhere?
* Assume that we always need nops to escape the CP0 hazard.
* TODO: Make hazard delays configurable. Stuck with 5 cycles at the moment.
855 * For more info on CP0 hazards see Chapter 7 (p.99) of "MIPS32 Architecture
856 * For Programmers Volume III: The MIPS32 Privileged Resource Architecture"
858 #define ITLBNOPFIX nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;
859 #define HAZARD_DELAY nop;nop;nop;nop;nop;
860 #endif /* !_MACHINE_ASM_H_ */