1 /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
3 * Copyright (c) 1992, 1993
4 * The Regents of the University of California. All rights reserved.
6 * This code is derived from software contributed to Berkeley by
7 * Digital Equipment Corporation and Ralph Campbell.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * Copyright (C) 1989 Digital Equipment Corporation.
34 * Permission to use, copy, modify, and distribute this software and
35 * its documentation for any purpose and without fee is hereby granted,
36 * provided that the above copyright notice appears in all copies.
37 * Digital Equipment Corporation makes no representations about the
38 * suitability of this software for any purpose. It is provided "as is"
39 * without express or implied warranty.
41 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
42 * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
43 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
44 * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
45 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
46 * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
48 * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
49 * JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish
54 * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
55 * All rights reserved.
57 * Redistribution and use in source and binary forms, with or without
58 * modification, are permitted provided that the following conditions
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
65 * 3. All advertising materials mentioning features or use of this software
66 * must display the following acknowledgement:
67 * This product includes software developed by Jonathan R. Stone for
69 * 4. The name of the author may not be used to endorse or promote products
70 * derived from this software without specific prior written permission.
72 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
73 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
74 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
75 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
76 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
77 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
78 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
79 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
81 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
86 * Contains assembly language support routines.
90 #include <sys/errno.h>
91 #include <machine/asm.h>
92 #include <machine/cpu.h>
93 #include <machine/regnum.h>
94 #include <machine/cpuregs.h>
95 #include <machine/pcb.h>
99 .set noreorder # Noreorder is default style!
/*
 * Fault-probe fragment: arms pcb_onfault before touching the target
 * address so that a machine check / address error is redirected, then
 * clears pcb_onfault and reports the outcome in v0 (0 = access ok,
 * 1 = the trap handler was entered).
 * NOTE(review): intervening lines (LEAF marker, the actual probe
 * load, the fault label) are not visible in this fragment — confirm
 * against the full support.S.
 */
108 * See if access to addr with a len type instruction causes a machine check.
109 * len is length of access (1=byte, 2=short, 4=int)
# v1 = pcpu->pc_curpcb; v0 presumably holds the fault-handler address
# loaded by an elided PTR_LA — TODO confirm.
118 PTR_L v1, PC_CURPCB(v1)
120 PTR_S v0, U_PCB_ONFAULT(v1)
# Probe succeeded: disarm the fault handler and return 0.
131 PTR_S zero, U_PCB_ONFAULT(v1)
133 move v0, zero # made it w/o errors
# Fault path: the trap handler resumes here; return 1.
136 li v0, 1 # trap sends us here
140 * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
141 * Copy a NIL-terminated string, at most maxlen characters long. Return the
142 * number of characters copied (including the NIL) in *lencopied. If the
143 * string is too long, return ENAMETOOLONG; else return 0.
/*
 * Register roles (visible portion): a1 = dest cursor, a2 = bytes
 * remaining, a3 = lencopied pointer (may be NULL), t0 presumably the
 * original maxlen saved by an elided instruction — TODO confirm.
 */
# Main copy loop body: store the byte just loaded, loop while a2 > 0.
152 sb v0, 0(a1) # each byte until NIL
154 bne a2, zero, 1b # less than maxlen
# Ran out of space before the terminating NIL.
157 li v0, ENAMETOOLONG # run out of space
# Optionally report the copied length: *a3 = maxlen - remaining.
159 beq a3, zero, 3f # return num. of copied bytes
160 PTR_SUBU a2, t0, a2 # if the 4th arg was non-NULL
163 j ra # v0 is 0 or ENAMETOOLONG
169 * Copy a null terminated string from the user address space into
170 * the kernel address space.
172 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
/*
 * Wraps copystr with a pcb_onfault guard: any fault taken while
 * copystr touches user memory vectors to copyerr, which returns
 * EFAULT.  Returns 0 or ENAMETOOLONG from copystr otherwise.
 * NOTE(review): the GET_CPU_PCPU/PTR_LA lines that set up v1 and v0
 * are elided in this fragment — confirm before modifying.
 */
178 NESTED(copyinstr, CALLFRAME_SIZ, ra)
179 PTR_SUBU sp, sp, CALLFRAME_SIZ
180 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
# Kernel addresses have the sign bit set; a user source must be >= 0.
182 blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
183 REG_S ra, CALLFRAME_RA(sp)
185 PTR_L v1, PC_CURPCB(v1)
# Arm pcb_onfault (delay slot stores v0, presumably &copyerr) and
# let copystr do the byte-by-byte work.
186 jal _C_LABEL(copystr)
187 PTR_S v0, U_PCB_ONFAULT(v1)
188 REG_L ra, CALLFRAME_RA(sp)
# Reload curpcb (copystr may clobber v1) and disarm the fault handler.
190 PTR_L v1, PC_CURPCB(v1)
191 PTR_S zero, U_PCB_ONFAULT(v1)
193 PTR_ADDU sp, sp, CALLFRAME_SIZ
197 * Copy a null terminated string from the kernel address space into
198 * the user address space.
200 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
/*
 * Mirror of copyinstr: same copystr call under a pcb_onfault guard,
 * but here the *destination* (a1) must be a user address, so the
 * sign-bit check is on a1 rather than a0.
 */
206 NESTED(copyoutstr, CALLFRAME_SIZ, ra)
207 PTR_SUBU sp, sp, CALLFRAME_SIZ
208 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
210 blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
211 REG_S ra, CALLFRAME_RA(sp)
213 PTR_L v1, PC_CURPCB(v1)
# Arm pcb_onfault in the delay slot, then run the common copier.
214 jal _C_LABEL(copystr)
215 PTR_S v0, U_PCB_ONFAULT(v1)
216 REG_L ra, CALLFRAME_RA(sp)
# Reload curpcb and disarm the fault handler before returning.
218 PTR_L v1, PC_CURPCB(v1)
219 PTR_S zero, U_PCB_ONFAULT(v1)
221 PTR_ADDU sp, sp, CALLFRAME_SIZ
225 * Copy specified amount of data from user space into the kernel
226 * copyin(from, to, len)
227 * caddr_t *from; (user source address)
228 * caddr_t *to; (kernel destination address)
/*
 * Bulk user->kernel copy: validates the user source address, arms
 * pcb_onfault, and delegates to bcopy (the jal is elided in this
 * fragment — see the "bcopy modified v1" comment below).  Faults
 * vector to copyerr, which returns EFAULT.
 */
231 NESTED(copyin, CALLFRAME_SIZ, ra)
232 PTR_SUBU sp, sp, CALLFRAME_SIZ
233 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
# User source must not have the kernel sign bit set.
235 blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
236 REG_S ra, CALLFRAME_RA(sp)
238 PTR_L v1, PC_CURPCB(v1)
# Arm pcb_onfault (v0 presumably &copyerr from an elided PTR_LA).
240 PTR_S v0, U_PCB_ONFAULT(v1)
241 REG_L ra, CALLFRAME_RA(sp)
243 PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
244 PTR_S zero, U_PCB_ONFAULT(v1)
245 PTR_ADDU sp, sp, CALLFRAME_SIZ
251 * Copy specified amount of data from kernel to the user space
252 * copyout(from, to, len)
253 * caddr_t *from; (kernel source address)
254 * caddr_t *to; (user destination address)
/*
 * Bulk kernel->user copy: mirror of copyin, but the sign-bit check
 * is on the user destination a1.  Faults during the (elided) bcopy
 * call vector to copyerr via pcb_onfault.
 */
257 NESTED(copyout, CALLFRAME_SIZ, ra)
258 PTR_SUBU sp, sp, CALLFRAME_SIZ
259 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
# User destination must not have the kernel sign bit set.
261 blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
262 REG_S ra, CALLFRAME_RA(sp)
264 PTR_L v1, PC_CURPCB(v1)
# Arm pcb_onfault (v0 presumably &copyerr from an elided PTR_LA).
266 PTR_S v0, U_PCB_ONFAULT(v1)
267 REG_L ra, CALLFRAME_RA(sp)
269 PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
270 PTR_S zero, U_PCB_ONFAULT(v1)
271 PTR_ADDU sp, sp, CALLFRAME_SIZ
/*
 * copyerr (fragment): common fault target for the copy routines —
 * unwind the call frame and return EFAULT to the caller.
 */
277 REG_L ra, CALLFRAME_RA(sp)
278 PTR_ADDU sp, sp, CALLFRAME_SIZ
280 li v0, EFAULT # return error
284 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
286 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
/*
 * fetch/store family (fragments): each variant follows the same
 * pattern — reject kernel addresses, arm pcb_onfault (v0 presumably
 * &fswberr from elided PTR_LA lines), perform one load or store on
 * the user address, then disarm.  LEAF labels are elided here; the
 * variants below are identified by their access instruction.
 */
# 64-bit fetch (fuword64): result in v0.
293 blt a0, zero, fswberr # make sure address is in user space
296 PTR_L v1, PC_CURPCB(v1)
297 PTR_S v0, U_PCB_ONFAULT(v1)
298 ld v0, 0(a0) # fetch word
300 PTR_S zero, U_PCB_ONFAULT(v1)
# 32-bit fetch (fuword32).
309 blt a0, zero, fswberr # make sure address is in user space
312 PTR_L v1, PC_CURPCB(v1)
313 PTR_S v0, U_PCB_ONFAULT(v1)
314 lw v0, 0(a0) # fetch word
316 PTR_S zero, U_PCB_ONFAULT(v1)
# 16-bit unsigned fetch (fusword).
321 blt a0, zero, fswberr # make sure address is in user space
324 PTR_L v1, PC_CURPCB(v1)
325 PTR_S v0, U_PCB_ONFAULT(v1)
326 lhu v0, 0(a0) # fetch short
328 PTR_S zero, U_PCB_ONFAULT(v1)
# 8-bit unsigned fetch (fubyte).
333 blt a0, zero, fswberr # make sure address is in user space
336 PTR_L v1, PC_CURPCB(v1)
337 PTR_S v0, U_PCB_ONFAULT(v1)
338 lbu v0, 0(a0) # fetch byte
340 PTR_S zero, U_PCB_ONFAULT(v1)
# 32-bit store (suword32): value in a1.
348 blt a0, zero, fswberr # make sure address is in user space
351 PTR_L v1, PC_CURPCB(v1)
352 PTR_S v0, U_PCB_ONFAULT(v1)
353 sw a1, 0(a0) # store word
354 PTR_S zero, U_PCB_ONFAULT(v1)
# 64-bit store (suword64): value in a1.
363 blt a0, zero, fswberr # make sure address is in user space
366 PTR_L v1, PC_CURPCB(v1)
367 PTR_S v0, U_PCB_ONFAULT(v1)
368 sd a1, 0(a0) # store word
369 PTR_S zero, U_PCB_ONFAULT(v1)
377 * <v0>u_long casuword(<a0>u_long *p, <a1>u_long oldval, <a2>u_long newval)
381 * <v0>uint32_t casuword(<a0>uint32_t *p, <a1>uint32_t oldval,
382 * <a2>uint32_t newval)
/*
 * Compare-and-swap on a user word (fragments): guarded by
 * pcb_onfault like the fu/su family, using ll/sc (32-bit) and
 * lld/scd (64-bit) sequences.  The ll/lld, compare, and retry
 * branches are elided here — only the sc/scd stores are visible.
 */
# 32-bit variant.
389 blt a0, zero, fswberr # make sure address is in user space
392 PTR_L v1, PC_CURPCB(v1)
393 PTR_S v0, U_PCB_ONFAULT(v1)
399 sc t0, 0(a0) # store word
407 PTR_S zero, U_PCB_ONFAULT(v1)
# 64-bit variant.
416 blt a0, zero, fswberr # make sure address is in user space
419 PTR_L v1, PC_CURPCB(v1)
420 PTR_S v0, U_PCB_ONFAULT(v1)
426 scd t0, 0(a0) # store double word
434 PTR_S zero, U_PCB_ONFAULT(v1)
441 * Will have to flush the instruction cache if byte merging is done in hardware.
/*
 * Sub-word user stores (fragments): same pcb_onfault pattern as the
 * word variants above, storing 16 bits (sh) or 8 bits (sb) from a1.
 */
# 16-bit store (susword).
445 blt a0, zero, fswberr # make sure address is in user space
448 PTR_L v1, PC_CURPCB(v1)
449 PTR_S v0, U_PCB_ONFAULT(v1)
450 sh a1, 0(a0) # store short
451 PTR_S zero, U_PCB_ONFAULT(v1)
# 8-bit store (subyte).
458 blt a0, zero, fswberr # make sure address is in user space
461 PTR_L v1, PC_CURPCB(v1)
462 PTR_S v0, U_PCB_ONFAULT(v1)
463 sb a1, 0(a0) # store byte
464 PTR_S zero, U_PCB_ONFAULT(v1)
475 * fuswintr and suswintr are just like fusword and susword except that if
476 * the page is not in memory or would cause a trap, then we return an error.
477 * The important thing is to prevent sleep() and switch().
/*
 * Interrupt-safe 16-bit user accessors: identical shape to
 * fusword/susword but the fault handler is fswintrberr, which must
 * fail immediately rather than page in (no sleeping from interrupt
 * context).
 */
# fuswintr: fetch unsigned short, result in v0.
480 PTR_LA v0, fswintrberr
481 blt a0, zero, fswintrberr # make sure address is in user space
484 PTR_L v1, PC_CURPCB(v1)
485 PTR_S v0, U_PCB_ONFAULT(v1)
486 lhu v0, 0(a0) # fetch short
488 PTR_S zero, U_PCB_ONFAULT(v1)
# suswintr: store unsigned short from a1.
492 PTR_LA v0, fswintrberr
493 blt a0, zero, fswintrberr # make sure address is in user space
496 PTR_L v1, PC_CURPCB(v1)
497 PTR_S v0, U_PCB_ONFAULT(v1)
498 sh a1, 0(a0) # store short
499 PTR_S zero, U_PCB_ONFAULT(v1)
510 * memset(void *s1, int c, int len)
511 * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp
/*
 * memset (fragment): replicate the fill byte into a full word,
 * align the destination with SWHI, blast whole words, then finish
 * the tail byte-by-byte.  Returns the original s1 in v0.
 */
515 blt a2, 12, memsetsmallclr # small amount to clear?
516 move v0, a0 # save s1 for result
# Build a word of four copies of the fill byte c.
518 sll t1, a1, 8 # compute c << 8 in t1
519 or t1, t1, a1 # compute c << 8 | c in 11
520 sll t2, t1, 16 # shift that left 16
521 or t1, t2, t1 # or together
# Word-align the destination; SWHI writes the leading partial word.
523 PTR_SUBU t0, zero, a0 # compute # bytes to word align address
525 beq t0, zero, 1f # skip if word aligned
526 PTR_SUBU a2, a2, t0 # subtract from remaining count
527 SWHI t1, 0(a0) # store 1, 2, or 3 bytes to align
# Word-fill loop: v1 = leftover byte count (a2 & 3).
530 and v1, a2, 3 # compute number of whole words left
533 PTR_ADDU t0, t0, a0 # compute ending address
535 PTR_ADDU a0, a0, 4 # clear words
536 bne a0, t0, 2b # unrolling loop does not help
537 sw t1, -4(a0) # since we are limited by memory speed
# Byte tail (and the small-count path).
541 PTR_ADDU t0, a2, a0 # compute ending address
543 PTR_ADDU a0, a0, 1 # clear bytes
/*
 * bzero (fragment): same structure as memset but the fill word is
 * the zero register, so no byte-replication step is needed.
 * a0 = dst, a1 = length.
 */
558 blt a1, 12, smallclr # small amount to clear?
# Word-align the destination with an unaligned SWHI store.
559 PTR_SUBU a3, zero, a0 # compute # bytes to word align address
561 beq a3, zero, 1f # skip if word aligned
562 PTR_SUBU a1, a1, a3 # subtract from remaining count
563 SWHI zero, 0(a0) # clear 1, 2, or 3 bytes to align
# Word-clear loop: v0 = leftover byte count (a1 & 3).
566 and v0, a1, 3 # compute number of words left
569 PTR_ADDU a3, a3, a0 # compute ending address
571 PTR_ADDU a0, a0, 4 # clear words
572 bne a0, a3, 2b # unrolling loop does not help
573 sw zero, -4(a0) # since we are limited by memory speed
# Byte tail (and the small-count path).
576 PTR_ADDU a3, a1, a0 # compute ending address
578 PTR_ADDU a0, a0, 1 # clear bytes
/*
 * bcmp (fragment): compare a0/a1 over a2 bytes.  Three regimes are
 * visible: both pointers co-alignable (word compares), a0 unaligned
 * vs a1 aligned (LWHI word compares), and the byte-at-a-time
 * fallback for small or trailing counts.
 * NOTE(review): the comparison branches and return sequences are
 * elided — confirm against the full file.
 */
592 blt a2, 16, smallcmp # is it worth any trouble?
# If the low two bits differ, the pointers can never be co-aligned.
593 xor v0, a0, a1 # compare low two bits of addresses
595 PTR_SUBU a3, zero, a1 # compute # bytes to word align address
596 bne v0, zero, unalignedcmp # not possible to align addresses
# Leading partial-word compare to reach word alignment.
600 PTR_SUBU a2, a2, a3 # subtract from remaining count
601 move v0, v1 # init v0,v1 so unmodified bytes match
602 LWHI v0, 0(a0) # read 1, 2, or 3 bytes
# Aligned word-compare loop.
608 and a3, a2, ~3 # compute number of whole words left
609 PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
610 PTR_ADDU a3, a3, a0 # compute ending address
612 lw v0, 0(a0) # compare words
619 b smallcmp # finish remainder
# Unaligned case: first get a1 word-aligned byte-by-byte.
623 PTR_SUBU a2, a2, a3 # subtract from remaining count
624 PTR_ADDU a3, a3, a0 # compute ending address
626 lbu v0, 0(a0) # compare bytes until a1 word aligned
# Then compare words with LWHI handling a0's misalignment.
634 and a3, a2, ~3 # compute number of whole words left
635 PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
636 PTR_ADDU a3, a3, a0 # compute ending address
638 LWHI v0, 0(a0) # compare words a0 unaligned, a1 aligned
# smallcmp: byte-at-a-time tail.
648 PTR_ADDU a3, a2, a0 # compute ending address
/*
 * Bit-scan loop fragment (looks like ffs: shift a0 right until the
 * low bit is set — TODO confirm; the loop header and the counter
 * increment are elided).
 */
674 and v1, a0, 1 # bit set?
676 beq v1, zero, 1b # no, continue
685 * atomic_set_16(u_int16_t *a, u_int16_t b)
/*
 * Sub-word atomics (fragments): MIPS ll/sc works only on aligned
 * 32-bit words, so each routine rounds the address down to its
 * containing word, edits the low 16 (or 8) bits in a register, and
 * preserves the remaining bits.  The ll/sc retry loops themselves
 * are elided in this fragment.
 */
692 srl a0, a0, 2 # round down address to be 32-bit aligned
707 * atomic_clear_16(u_int16_t *a, u_int16_t b)
712 LEAF(atomic_clear_16)
714 srl a0, a0, 2 # round down address to be 32-bit aligned
# Isolate the low halfword, apply the mask, keep the top halfword.
720 andi t1, t1, 0xffff # t1 has the original lower 16 bits
721 and t1, t1, a1 # t1 has the new lower 16 bits
722 srl t0, t0, 16 # preserve original top 16 bits
735 * atomic_subtract_16(uint16_t *a, uint16_t b)
740 LEAF(atomic_subtract_16)
742 srl a0, a0, 2 # round down address to be 32-bit aligned
# Subtract in the low halfword, re-truncate, keep the top halfword.
747 andi t1, t1, 0xffff # t1 has the original lower 16 bits
749 andi t1, t1, 0xffff # t1 has the new lower 16 bits
750 srl t0, t0, 16 # preserve original top 16 bits
758 END(atomic_subtract_16)
762 * atomic_add_16(uint16_t *a, uint16_t b)
769 srl a0, a0, 2 # round down address to be 32-bit aligned
# Add in the low halfword, re-truncate, keep the top halfword.
774 andi t1, t1, 0xffff # t1 has the original lower 16 bits
776 andi t1, t1, 0xffff # t1 has the new lower 16 bits
777 srl t0, t0, 16 # preserve original top 16 bits
789 * atomic_add_8(uint8_t *a, uint8_t b)
796 srl a0, a0, 2 # round down address to be 32-bit aligned
# Byte variant: edit only the low 8 bits of the containing word.
801 andi t1, t1, 0xff # t1 has the original lower 8 bits
803 andi t1, t1, 0xff # t1 has the new lower 8 bits
804 srl t0, t0, 8 # preserve original top 24 bits
817 * atomic_subtract_8(uint8_t *a, uint8_t b)
822 LEAF(atomic_subtract_8)
824 srl a0, a0, 2 # round down address to be 32-bit aligned
829 andi t1, t1, 0xff # t1 has the original lower 8 bits
831 andi t1, t1, 0xff # t1 has the new lower 8 bits
832 srl t0, t0, 8 # preserve original top 24 bits
840 END(atomic_subtract_8)
843 * atomic 64-bit register read/write assembly language support routines.
846 .set noreorder # Noreorder is default style!
848 #if !defined(__mips_n64) && !defined(__mips_n32)
850 * I don't know if these routines have the right number of
851 * NOPs in it for all processors. XXX
853 * Maybe it would be better to just leave this undefined in that case.
855 * XXX These routines are not safe in the case of a TLB miss on a1 or
856 * a0 unless the trapframe is 64-bit, which it just isn't with O32.
857 * If we take any exception, not just an interrupt, the upper
858 * 32-bits will be clobbered. Use only N32 and N64 kernels if you
859 * want to use 64-bit registers while interrupts are enabled or
860 * with memory operations. Since this isn't even using load-linked
861 * and store-conditional, perhaps it should just use two registers
862 * instead, as is right and good with the O32 ABI.
/*
 * O32-only 64-bit store: achieves "atomicity" by masking interrupts
 * around the (elided) 64-bit memory access, then restoring the
 * original status register.
 */
864 LEAF(atomic_store_64)
865 mfc0 t1, MIPS_COP_0_STATUS
866 and t2, t1, ~MIPS_SR_INT_IE
867 mtc0 t2, MIPS_COP_0_STATUS
# Restore the saved status (re-enables interrupts if they were on).
878 mtc0 t1,MIPS_COP_0_STATUS
/*
 * O32-only 64-bit load: same interrupt-masking bracket as above.
 */
888 mfc0 t1, MIPS_COP_0_STATUS
889 and t2, t1, ~MIPS_SR_INT_IE
890 mtc0 t2, MIPS_COP_0_STATUS
901 mtc0 t1,MIPS_COP_0_STATUS
911 #if defined(DDB) || defined(DEBUG)
/*
 * Debugger memory accessors (fragments, DDB/DEBUG only): each checks
 * 32-bit alignment, arms pcb_onfault (v1 presumably holds the error
 * label from an elided PTR_LA), does the access, and clears
 * pcb_onfault on both the success and fault paths.  Routine names
 * (e.g. kdbpeek/kdbpoke) are elided — TODO confirm.
 */
915 and v0, a0, 3 # unaligned ?
917 PTR_L t1, PC_CURPCB(t1)
919 PTR_S v1, U_PCB_ONFAULT(t1)
923 PTR_S zero, U_PCB_ONFAULT(t1)
# Fault path also disarms the handler.
929 PTR_S zero, U_PCB_ONFAULT(t1)
# Second accessor: same guarded-access pattern.
934 and v0, a0, 3 # unaligned ?
936 PTR_L t1, PC_CURPCB(t1)
938 PTR_S v1, U_PCB_ONFAULT(t1)
942 PTR_S zero, U_PCB_ONFAULT(t1)
948 PTR_S zero, U_PCB_ONFAULT(t1)
# Third accessor: same guarded-access pattern.
958 and v0, a0, 3 # unaligned ?
960 PTR_L t1, PC_CURPCB(t1)
962 PTR_S v1, U_PCB_ONFAULT(t1)
966 PTR_S zero, U_PCB_ONFAULT(t1)
972 PTR_S zero, U_PCB_ONFAULT(t1)
980 #endif /* DDB || DEBUG */
# Software breakpoint with the "stack overflow" break code (context
# elided — presumably the tail of a debugger stub; TODO confirm).
984 break MIPS_BREAK_SOVER_VAL
/*
 * setjmp: capture the callee-saved register state (s0-s8, sp, ra)
 * plus the COP0 status register into the jmp_buf at a0, then return
 * 0.  longjmp below restores this exact layout.
 */
990 mfc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value!
991 REG_S s0, (SZREG * PCB_REG_S0)(a0)
992 REG_S s1, (SZREG * PCB_REG_S1)(a0)
993 REG_S s2, (SZREG * PCB_REG_S2)(a0)
994 REG_S s3, (SZREG * PCB_REG_S3)(a0)
995 REG_S s4, (SZREG * PCB_REG_S4)(a0)
996 REG_S s5, (SZREG * PCB_REG_S5)(a0)
997 REG_S s6, (SZREG * PCB_REG_S6)(a0)
998 REG_S s7, (SZREG * PCB_REG_S7)(a0)
999 REG_S s8, (SZREG * PCB_REG_S8)(a0)
1000 REG_S sp, (SZREG * PCB_REG_SP)(a0)
1001 REG_S ra, (SZREG * PCB_REG_RA)(a0)
1002 REG_S v0, (SZREG * PCB_REG_SR)(a0)
1004 li v0, 0 # setjmp return
/*
 * longjmp: restore the register state saved by setjmp from the
 * jmp_buf at a0 (status register first into v0, then s0-s8, sp, ra),
 * write the status register back, and return 1 so the resumed
 * setjmp call site sees a nonzero result.
 */
1008 REG_L v0, (SZREG * PCB_REG_SR)(a0)
1009 REG_L ra, (SZREG * PCB_REG_RA)(a0)
1010 REG_L s0, (SZREG * PCB_REG_S0)(a0)
1011 REG_L s1, (SZREG * PCB_REG_S1)(a0)
1012 REG_L s2, (SZREG * PCB_REG_S2)(a0)
1013 REG_L s3, (SZREG * PCB_REG_S3)(a0)
1014 REG_L s4, (SZREG * PCB_REG_S4)(a0)
1015 REG_L s5, (SZREG * PCB_REG_S5)(a0)
1016 REG_L s6, (SZREG * PCB_REG_S6)(a0)
1017 REG_L s7, (SZREG * PCB_REG_S7)(a0)
1018 REG_L s8, (SZREG * PCB_REG_S8)(a0)
1019 REG_L sp, (SZREG * PCB_REG_SP)(a0)
1020 mtc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value!
1023 li v0, 1 # longjmp return
/*
 * O32 64-bit read helper (fragment): with interrupts masked, an
 * elided ld pulls 64 bits into one register, then dsra splits it
 * into the v0/v1 32-bit pair expected by the O32 ABI, honoring the
 * platform byte order.  Routine name is elided — TODO confirm.
 */
1030 #if defined(__mips_o32)
1031 mfc0 t0, MIPS_COP_0_STATUS # turn off interrupts
1032 and t1, t0, ~(MIPS_SR_INT_IE)
1033 mtc0 t1, MIPS_COP_0_STATUS
1040 #if _BYTE_ORDER == _BIG_ENDIAN
1042 dsra v1, v1, 32 # low word in v1
1043 dsra v0, v0, 32 # high word in v0
1045 dsra v1, v0, 32 # high word in v1
1047 dsra v0, v0, 32 # low word in v0
# Re-enable interrupts exactly as they were.
1050 mtc0 t0, MIPS_COP_0_STATUS # restore intr status.
1053 #else /* !__mips_o32 */
1055 #endif /* !__mips_o32 */
/*
 * O32 64-bit write helper (fragment): inverse of the read helper —
 * with interrupts masked, dsll assembles a 64-bit value from the
 * a2/a3 32-bit halves (a1 is ABI padding), and an elided sd stores
 * it.  Routine name is elided — TODO confirm.
 */
1066 #if defined(__mips_o32)
1067 mfc0 t0, MIPS_COP_0_STATUS # turn off interrupts
1068 and t1, t0, ~(MIPS_SR_INT_IE)
1069 mtc0 t1, MIPS_COP_0_STATUS
1075 # NOTE: a1 is padding!
1077 #if _BYTE_ORDER == _BIG_ENDIAN
1078 dsll a2, a2, 32 # high word in a2
1079 dsll a3, a3, 32 # low word in a3
1082 dsll a2, a2, 32 # low word in a2
1084 dsll a3, a3, 32 # high word in a3
# Re-enable interrupts exactly as they were.
1089 mtc0 t0, MIPS_COP_0_STATUS # restore intr status.
1092 #else /* !__mips_o32 */
1094 #endif /* !__mips_o32 */