1 /* $OpenBSD: locore.S,v 1.18 1998/09/15 10:58:53 pefo Exp $ */
3 * Copyright (c) 1992, 1993
4 * The Regents of the University of California. All rights reserved.
6 * This code is derived from software contributed to Berkeley by
7 * Digital Equipment Corporation and Ralph Campbell.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * Copyright (C) 1989 Digital Equipment Corporation.
34 * Permission to use, copy, modify, and distribute this software and
35 * its documentation for any purpose and without fee is hereby granted,
36 * provided that the above copyright notice appears in all copies.
37 * Digital Equipment Corporation makes no representations about the
38 * suitability of this software for any purpose. It is provided "as is"
39 * without express or implied warranty.
41 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/loMem.s,
42 * v 1.1 89/07/11 17:55:04 nelson Exp SPRITE (DECWRL)
43 * from: Header: /sprite/src/kernel/mach/ds3100.md/RCS/machAsm.s,
44 * v 9.2 90/01/29 18:00:39 shirriff Exp SPRITE (DECWRL)
45 * from: Header: /sprite/src/kernel/vm/ds3100.md/vmPmaxAsm.s,
46 * v 1.1 89/07/10 14:27:41 nelson Exp SPRITE (DECWRL)
48 * from: @(#)locore.s 8.5 (Berkeley) 1/4/94
49 * JNPR: support.S,v 1.5.2.2 2007/08/29 10:03:49 girish
54 * Copyright (c) 1997 Jonathan Stone (hereinafter referred to as the author)
55 * All rights reserved.
57 * Redistribution and use in source and binary forms, with or without
58 * modification, are permitted provided that the following conditions
60 * 1. Redistributions of source code must retain the above copyright
61 * notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 * notice, this list of conditions and the following disclaimer in the
64 * documentation and/or other materials provided with the distribution.
65 * 3. All advertising materials mentioning features or use of this software
66 * must display the following acknowledgement:
67 * This product includes software developed by Jonathan R. Stone for
69 * 4. The name of the author may not be used to endorse or promote products
70 * derived from this software without specific prior written permission.
72 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
73 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
74 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
75 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
76 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
77 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
78 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
79 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
81 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
86 * Contains code that is the first executed at boot time plus
87 * assembly language support routines.
90 #include "opt_cputype.h"
92 #include <sys/errno.h>
93 #include <machine/asm.h>
94 #include <machine/cpu.h>
95 #include <machine/regnum.h>
96 #include <machine/cpuregs.h>
100 .set noreorder # Noreorder is default style!
#
# badaddr(addr, len): probe addr with an access of the given size and report
# whether it faults.  Returns 0 in v0 on success, 1 if the trap handler fired.
# NOTE(review): interior lines are elided in this copy; v1 presumably holds
# the pcpu pointer before the PC_CURPCB load -- confirm against full source.
109 * See if access to addr with a len type instruction causes a machine check.
110 * len is length of access (1=byte, 2=short, 4=int)
119 PTR_L v1, PC_CURPCB(v1) # v1 = current pcb
121 PTR_S v0, U_PCB_ONFAULT(v1) # arm onfault: trap resumes at address in v0
132 PTR_S zero, U_PCB_ONFAULT(v1) # disarm onfault
134 move v0, zero # made it w/o errors
137 li v0, 1 # trap sends us here
# copystr: bounded kernel-to-kernel NUL-terminated string copy.
# a0 = src, a1 = dst, a2 = maxlen, a3 = &lencopied (may be NULL).
# NOTE(review): loop setup is elided here; t0 appears to hold the original
# maxlen so that (t0 - a2) below yields the byte count copied -- verify.
141 * int copystr(void *kfaddr, void *kdaddr, size_t maxlen, size_t *lencopied)
142 * Copy a NIL-terminated string, at most maxlen characters long. Return the
143 * number of characters copied (including the NIL) in *lencopied. If the
144 * string is too long, return ENAMETOOLONG; else return 0.
153 sb v0, 0(a1) # each byte until NIL
155 bne a2, zero, 1b # less than maxlen
158 li v0, ENAMETOOLONG # run out of space
160 beq a3, zero, 3f # return num. of copied bytes
161 PTR_SUBU a2, t0, a2 # if the 4th arg was non-NULL
164 j ra # v0 is 0 or ENAMETOOLONG
# Fragments of several routines (almost all lines elided in this copy):
# fillw(pat, addr, count), mem_zero_page(addr), and the block I/O helpers.
# Too little survives here to document behavior; the two stray "unaligned"
# branches below belong to separate block-I/O copy loops.  See full source.
170 * fillw(pat, addr, count)
184 * Optimized memory zero code.
185 * mem_zero_page(addr);
199 * Block I/O routines mainly used by I/O drivers.
202 * a1 = memory address
264 bne v0, zero, 3f # arghh, unaligned.
290 bne v0, zero, 3f # arghh, unaligned.
312 * Copy a null terminated string from the user address space into
313 * the kernel address space.
315 * copyinstr(fromaddr, toaddr, maxlength, &lencopied)
# Wraps copystr with a user-address sanity check and a pcb onfault handler,
# so a faulting user pointer returns EFAULT (via copyerr) instead of
# panicking.  NOTE(review): the PTR_LA that loads copyerr into v0 is elided
# in this copy; the onfault store below presumes it -- confirm.
321 NON_LEAF(copyinstr, CALLFRAME_SIZ, ra)
322 PTR_SUBU sp, sp, CALLFRAME_SIZ # allocate call frame
323 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
325 blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
326 REG_S ra, CALLFRAME_RA(sp) # save return address
328 PTR_L v1, PC_CURPCB(v1) # v1 = current pcb
329 jal _C_LABEL(copystr) # do the bounded copy
330 PTR_S v0, U_PCB_ONFAULT(v1) # (delay slot, .set noreorder) arm onfault
331 REG_L ra, CALLFRAME_RA(sp)
333 PTR_L v1, PC_CURPCB(v1)
334 PTR_S zero, U_PCB_ONFAULT(v1) # disarm onfault
336 PTR_ADDU sp, sp, CALLFRAME_SIZ # pop frame
340 * Copy a null terminated string from the kernel address space into
341 * the user address space.
343 * copyoutstr(fromaddr, toaddr, maxlength, &lencopied)
# Mirror of copyinstr: here the DESTINATION (a1) must be a user address,
# hence the check on a1 rather than a0.  Onfault setup as in copyinstr;
# the PTR_LA of copyerr into v0 is elided in this copy.
349 NON_LEAF(copyoutstr, CALLFRAME_SIZ, ra)
350 PTR_SUBU sp, sp, CALLFRAME_SIZ # allocate call frame
351 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
353 blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
354 REG_S ra, CALLFRAME_RA(sp) # save return address
356 PTR_L v1, PC_CURPCB(v1) # v1 = current pcb
357 jal _C_LABEL(copystr) # do the bounded copy
358 PTR_S v0, U_PCB_ONFAULT(v1) # (delay slot) arm onfault
359 REG_L ra, CALLFRAME_RA(sp)
361 PTR_L v1, PC_CURPCB(v1)
362 PTR_S zero, U_PCB_ONFAULT(v1) # disarm onfault
364 PTR_ADDU sp, sp, CALLFRAME_SIZ # pop frame
368 * Copy specified amount of data from user space into the kernel
369 * copyin(from, to, len)
370 * caddr_t *from; (user source address)
371 * caddr_t *to; (kernel destination address)
# Validates the user source (a0), arms pcb onfault, then performs the copy
# (the jal to bcopy is elided in this copy -- the reload comment below
# implies it).  Returns 0 on success, EFAULT via copyerr on a bad pointer.
374 NON_LEAF(copyin, CALLFRAME_SIZ, ra)
375 PTR_SUBU sp, sp, CALLFRAME_SIZ # allocate call frame
376 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
378 blt a0, zero, _C_LABEL(copyerr) # make sure address is in user space
379 REG_S ra, CALLFRAME_RA(sp) # save return address
381 PTR_L v1, PC_CURPCB(v1) # v1 = current pcb
383 PTR_S v0, U_PCB_ONFAULT(v1) # arm onfault (v0 = copyerr, load elided)
384 REG_L ra, CALLFRAME_RA(sp)
386 PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
387 PTR_S zero, U_PCB_ONFAULT(v1) # disarm onfault
388 PTR_ADDU sp, sp, CALLFRAME_SIZ # pop frame
394 * Copy specified amount of data from kernel to the user space
395 * copyout(from, to, len)
396 * caddr_t *from; (kernel source address)
397 * caddr_t *to; (user destination address)
# Mirror of copyin: validates the user DESTINATION (a1).  Onfault handling
# identical; the jal to bcopy and the copyerr load into v0 are elided here.
400 NON_LEAF(copyout, CALLFRAME_SIZ, ra)
401 PTR_SUBU sp, sp, CALLFRAME_SIZ # allocate call frame
402 .mask 0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
404 blt a1, zero, _C_LABEL(copyerr) # make sure address is in user space
405 REG_S ra, CALLFRAME_RA(sp) # save return address
407 PTR_L v1, PC_CURPCB(v1) # v1 = current pcb
409 PTR_S v0, U_PCB_ONFAULT(v1) # arm onfault (v0 = copyerr, load elided)
410 REG_L ra, CALLFRAME_RA(sp)
412 PTR_L v1, PC_CURPCB(v1) # bcopy modified v1, so reload
413 PTR_S zero, U_PCB_ONFAULT(v1) # disarm onfault
414 PTR_ADDU sp, sp, CALLFRAME_SIZ # pop frame
# copyerr: common fault target for the copy routines above; unwinds the
# CALLFRAME and returns EFAULT to the caller.
420 REG_L ra, CALLFRAME_RA(sp) # restore saved return address
421 PTR_ADDU sp, sp, CALLFRAME_SIZ # pop the call frame
423 li v0, EFAULT # return error
427 * {fu,su},{ibyte,isword,iword}, fetch or store a byte, short or word to
429 * {fu,su},{byte,sword,word}, fetch or store a byte, short or word to
# Single-item user-space fetch/store primitives.  Common pattern: check the
# address is in user space, arm pcb onfault (fswberr, load of v0 elided in
# this copy), do one access, disarm.  LEAF/END markers are elided, so the
# routine each fragment belongs to is inferred from the access width:
# ld -> fuword64, lw -> fuword32, lhu -> fusword, lbu -> fubyte,
# sw -> suword32, sd -> suword64.  TODO confirm against full source.
437 blt a0, zero, fswberr # make sure address is in user space
440 PTR_L v1, PC_CURPCB(v1)
441 PTR_S v0, U_PCB_ONFAULT(v1)
442 ld v0, 0(a0) # fetch word
444 PTR_S zero, U_PCB_ONFAULT(v1)
454 blt a0, zero, fswberr # make sure address is in user space
457 PTR_L v1, PC_CURPCB(v1)
458 PTR_S v0, U_PCB_ONFAULT(v1)
459 lw v0, 0(a0) # fetch word
461 PTR_S zero, U_PCB_ONFAULT(v1)
467 blt a0, zero, fswberr # make sure address is in user space
470 PTR_L v1, PC_CURPCB(v1)
471 PTR_S v0, U_PCB_ONFAULT(v1)
472 lhu v0, 0(a0) # fetch short
474 PTR_S zero, U_PCB_ONFAULT(v1)
480 blt a0, zero, fswberr # make sure address is in user space
483 PTR_L v1, PC_CURPCB(v1)
484 PTR_S v0, U_PCB_ONFAULT(v1)
485 lbu v0, 0(a0) # fetch byte
487 PTR_S zero, U_PCB_ONFAULT(v1)
495 blt a0, zero, fswberr # make sure address is in user space
498 PTR_L v1, PC_CURPCB(v1)
499 PTR_S v0, U_PCB_ONFAULT(v1)
500 sw a1, 0(a0) # store word
501 PTR_S zero, U_PCB_ONFAULT(v1)
510 blt a0, zero, fswberr # make sure address is in user space
513 PTR_L v1, PC_CURPCB(v1)
514 PTR_S v0, U_PCB_ONFAULT(v1)
515 sd a1, 0(a0) # store doubleword
516 PTR_S zero, U_PCB_ONFAULT(v1)
# Atomic compare-and-set on a user-space word, via LL/SC (32-bit) and
# LL D/SC D (64-bit).  The ll/lld, labels, and branch of each retry loop are
# elided in this copy; only the sc/scd stores survive.  Faults are caught
# through the fswberr onfault handler (load into v0 elided).
524 * <v0>u_long casuword(<a0>u_long *p, <a1>u_long oldval, <a2>u_long newval)
528 * <v0>uint32_t casuword(<a0>uint32_t *p, <a1>uint32_t oldval,
529 * <a2>uint32_t newval)
536 blt a0, zero, fswberr # make sure address is in user space
539 PTR_L v1, PC_CURPCB(v1)
540 PTR_S v0, U_PCB_ONFAULT(v1)
546 sc t0, 0(a0) # store word (conditional; fails if lock lost)
554 PTR_S zero, U_PCB_ONFAULT(v1)
563 blt a0, zero, fswberr # make sure address is in user space
566 PTR_L v1, PC_CURPCB(v1)
567 PTR_S v0, U_PCB_ONFAULT(v1)
573 scd t0, 0(a0) # store double word
581 PTR_S zero, U_PCB_ONFAULT(v1)
588 /* unused in FreeBSD */
590 * Have to flush instruction cache afterwards.
# suiword (presumably): store a word into user INSTRUCTION space, then tail
# call the I-cache flush so the new instruction becomes visible.
594 blt a0, zero, fswberr # make sure address is in user space
597 PTR_L v1, PC_CURPCB(v1)
598 PTR_S v0, U_PCB_ONFAULT(v1)
599 sw a1, 0(a0) # store word
600 PTR_S zero, U_PCB_ONFAULT(v1)
601 j _C_LABEL(Mips_SyncICache) # FlushICache sets v0 = 0. (Ugly)
602 li a1, 4 # size of word (delay slot)
607 * Will have to flush the instruction cache if byte merging is done in hardware.
# susword (presumably): store a halfword to user space.
612 blt a0, zero, fswberr # make sure address is in user space
615 PTR_L v1, PC_CURPCB(v1)
616 PTR_S v0, U_PCB_ONFAULT(v1)
617 sh a1, 0(a0) # store short
618 PTR_S zero, U_PCB_ONFAULT(v1)
# subyte (presumably): store a byte to user space.
626 blt a0, zero, fswberr # make sure address is in user space
629 PTR_L v1, PC_CURPCB(v1)
630 PTR_S v0, U_PCB_ONFAULT(v1)
631 sb a1, 0(a0) # store byte
632 PTR_S zero, U_PCB_ONFAULT(v1)
643 * fuswintr and suswintr are just like fusword and susword except that if
644 * the page is not in memory or would cause a trap, then we return an error.
645 * The important thing is to prevent sleep() and switch().
# Interrupt-safe variants: onfault points at fswintrberr so a fault returns
# an error immediately instead of taking the normal (possibly sleeping)
# fault path.  Safe to call from interrupt context.
648 PTR_LA v0, fswintrberr
649 blt a0, zero, fswintrberr # make sure address is in user space
652 PTR_L v1, PC_CURPCB(v1)
653 PTR_S v0, U_PCB_ONFAULT(v1) # arm the non-sleeping fault handler
654 lhu v0, 0(a0) # fetch short
656 PTR_S zero, U_PCB_ONFAULT(v1)
660 PTR_LA v0, fswintrberr
661 blt a0, zero, fswintrberr # make sure address is in user space
664 PTR_L v1, PC_CURPCB(v1)
665 PTR_S v0, U_PCB_ONFAULT(v1)
666 sh a1, 0(a0) # store short
667 PTR_S zero, U_PCB_ONFAULT(v1)
678 * memcpy(to, from, len)
679 * {ov}bcopy(from, to, len)
# Overlap-aware copy.  memcpy entry (elided) swaps a0/a1 into bcopy order.
# If the regions overlap with from < to < from+len, copies backwards byte
# by byte; otherwise does a forward copy, word-at-a-time when alignment
# permits, using LWHI/SWHI (load/store word left-right pairs -- the matching
# LWLO/SWLO halves are elided in this copy) for unaligned heads.
683 move v0, a0 # swap from and to
689 PTR_ADDU t0, a0, a2 # t0 = end of s1 region
692 and t1, t1, t2 # t1 = true if from < to < (from+len)
693 beq t1, zero, forward # non overlapping, do forward copy
694 slt t2, a2, 12 # check for small copy
697 PTR_ADDU t1, a1, a2 # t1 = end of to region
699 lb v1, -1(t0) # copy bytes backwards,
700 PTR_SUBU t0, t0, 1 # doesn't happen often so do slow way
708 bne t2, zero, smallcpy # do a small bcopy
709 xor v1, a0, a1 # compare low two bits of addresses
711 PTR_SUBU a3, zero, a1 # compute # bytes to word align address
712 beq v1, zero, aligned # addresses can be word aligned
716 PTR_SUBU a2, a2, a3 # subtract from remaining count
717 LWHI v1, 0(a0) # get next 4 bytes (unaligned)
720 SWHI v1, 0(a1) # store 1, 2, or 3 bytes to align a1
723 and v1, a2, 3 # compute number of words left
726 PTR_ADDU a3, a3, a0 # compute ending address
728 LWHI v1, 0(a0) # copy words a0 unaligned, a1 aligned
734 nop # We have to do this mmu-bug.
739 PTR_SUBU a2, a2, a3 # subtract from remaining count
740 LWHI v1, 0(a0) # copy 1, 2, or 3 bytes to align
745 and v1, a2, 3 # compute number of whole words left
748 PTR_ADDU a3, a3, a0 # compute ending address
750 lw v1, 0(a0) # copy words
757 PTR_ADDU a3, a2, a0 # compute ending address
759 lbu v1, 0(a0) # copy bytes
763 PTR_ADDU a1, a1, 1 # MMU BUG ? can not do -1(a1) at 0x80000000!!
770 * memset(void *s1, int c, int len)
771 * NetBSD: memset.S,v 1.3 2001/10/16 15:40:53 uch Exp
# Fill memory with byte c.  Replicates c into all four bytes of t1, aligns
# the head with SWHI, stores whole words, then finishes the tail bytewise
# (tail loop lines elided in this copy).  Returns the original s1 in v0.
775 blt a2, 12, memsetsmallclr # small amount to clear?
776 move v0, a0 # save s1 for result
778 sll t1, a1, 8 # compute c << 8 in t1
779 or t1, t1, a1 # compute c << 8 | c in t1
780 sll t2, t1, 16 # shift that left 16
781 or t1, t2, t1 # or together: c replicated in all 4 bytes
783 PTR_SUBU t0, zero, a0 # compute # bytes to word align address
785 beq t0, zero, 1f # skip if word aligned
786 PTR_SUBU a2, a2, t0 # subtract from remaining count
787 SWHI t1, 0(a0) # store 1, 2, or 3 bytes to align
790 and v1, a2, 3 # compute number of whole words left
793 PTR_ADDU t0, t0, a0 # compute ending address
795 PTR_ADDU a0, a0, 4 # clear words
802 bne a0, t0, 2b # unrolling loop does not help
803 sw t1, -4(a0) # since we are limited by memory speed
807 PTR_ADDU t0, a2, a0 # compute ending address
809 PTR_ADDU a0, a0, 1 # clear bytes
# bzero(addr, len) (presumably -- LEAF marker elided): zero a region.
# Same structure as memset with pattern fixed to zero: align head with
# SWHI, word loop, bytewise tail.
830 blt a1, 12, smallclr # small amount to clear?
831 PTR_SUBU a3, zero, a0 # compute # bytes to word align address
833 beq a3, zero, 1f # skip if word aligned
834 PTR_SUBU a1, a1, a3 # subtract from remaining count
835 SWHI zero, 0(a0) # clear 1, 2, or 3 bytes to align
838 and v0, a1, 3 # compute number of words left
841 PTR_ADDU a3, a3, a0 # compute ending address
843 PTR_ADDU a0, a0, 4 # clear words
844 bne a0, a3, 2b # unrolling loop does not help
845 sw zero, -4(a0) # since we are limited by memory speed
848 PTR_ADDU a3, a1, a0 # compute ending address
850 PTR_ADDU a0, a0, 1 # clear bytes
# bcmp(s1, s2, len) (presumably -- header elided): compare two regions.
# Word-compares when both pointers can be brought to the same alignment,
# LWHI-based compare when only one side is aligned, bytewise otherwise.
864 blt a2, 16, smallcmp # is it worth any trouble?
865 xor v0, a0, a1 # compare low two bits of addresses
867 PTR_SUBU a3, zero, a1 # compute # bytes to word align address
868 bne v0, zero, unalignedcmp # not possible to align addresses
872 PTR_SUBU a2, a2, a3 # subtract from remaining count
873 move v0, v1 # init v0,v1 so unmodified bytes match
874 LWHI v0, 0(a0) # read 1, 2, or 3 bytes
880 and a3, a2, ~3 # compute number of whole words left
881 PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
882 PTR_ADDU a3, a3, a0 # compute ending address
884 lw v0, 0(a0) # compare words
891 b smallcmp # finish remainder
895 PTR_SUBU a2, a2, a3 # subtract from remaining count
896 PTR_ADDU a3, a3, a0 # compute ending address
898 lbu v0, 0(a0) # compare bytes until a1 word aligned
906 and a3, a2, ~3 # compute number of whole words left
907 PTR_SUBU a2, a2, a3 # which has to be >= (16-3) & ~3
908 PTR_ADDU a3, a3, a0 # compute ending address
910 LWHI v0, 0(a0) # compare words a0 unaligned, a1 aligned
920 PTR_ADDU a3, a2, a0 # compute ending address
# ffs fragment (presumably): scan a0 for the lowest set bit; the shift and
# counter increments of the loop are elided in this copy.
946 and v1, a0, 1 # bit set?
948 beq v1, zero, 1b # no, continue
974 * u_int32_t atomic_cmpset_32(u_int32_t *p, u_int32_t cmpval, u_int32_t newval)
975 * Atomically compare the value stored at p with cmpval
976 * and if the two values are equal, update value *p with
977 * newval. Return zero if compare failed, non-zero otherwise
# LL/SC retry-loop bodies of both routines are elided in this copy; only
# the LEAF/END markers survive.
981 LEAF(atomic_cmpset_32)
994 END(atomic_cmpset_32)
998 * atomic_readandclear_32(u_int32_t *a)
1005 LEAF(atomic_readandclear_32)
1016 END(atomic_readandclear_32)
# 32-bit atomic read-modify-write ops (LL/SC loops mostly elided in this
# copy).  Each rounds the address down to 32-bit alignment first.
1020 * atomic_set_32(u_int32_t *a, u_int32_t b)
1039 * atomic_add_32(uint32_t *a, uint32_t b)
1046 srl a0, a0, 2 # round down address to be 32-bit aligned
1060 * atomic_clear_32(u_int32_t *a, u_int32_t b)
1065 LEAF(atomic_clear_32)
1067 srl a0, a0, 2 # round down address to be 32-bit aligned
# NOTE(review): the "lower 16 bits" comment below looks copy-pasted from a
# 16-bit variant; for a clear op a1 is presumably complemented (nor, elided
# in this copy) before this and -- confirm against full source.
1072 and t0, t0, a1 # t1 has the new lower 16 bits
1078 END(atomic_clear_32)
1082 * atomic_subtract_32(uint16_t *a, uint16_t b)
1087 LEAF(atomic_subtract_32)
1089 srl a0, a0, 2 # round down address to be 32-bit aligned
1099 END(atomic_subtract_32)
# 16-bit atomic ops emulated with a 32-bit LL/SC on the containing word:
# round the address down, operate on the low 16 bits, recombine with the
# preserved top 16 bits (the ll/sc and shift-recombine lines are partly
# elided in this copy).
1105 * atomic_set_16(u_int16_t *a, u_int16_t b)
1112 srl a0, a0, 2 # round down address to be 32-bit aligned
1127 * atomic_clear_16(u_int16_t *a, u_int16_t b)
1132 LEAF(atomic_clear_16)
1134 srl a0, a0, 2 # round down address to be 32-bit aligned
1140 andi t1, t1, 0xffff # t1 has the original lower 16 bits
1141 and t1, t1, a1 # t1 has the new lower 16 bits (a1 presumably complemented earlier)
1142 srl t0, t0, 16 # preserve original top 16 bits
1150 END(atomic_clear_16)
1155 * atomic_subtract_16(uint16_t *a, uint16_t b)
1160 LEAF(atomic_subtract_16)
1162 srl a0, a0, 2 # round down address to be 32-bit aligned
1167 andi t1, t1, 0xffff # t1 has the original lower 16 bits
1169 andi t1, t1, 0xffff # t1 has the new lower 16 bits (subu elided)
1170 srl t0, t0, 16 # preserve original top 16 bits
1178 END(atomic_subtract_16)
1209 * atomic_add_8(uint8_t *a, uint8_t b)
1216 srl a0, a0, 2 # round down address to be 32-bit aligned
1221 andi t1, t1, 0xff # t1 has the original lower 8 bits
1223 andi t1, t1, 0xff # t1 has the new lower 8 bits
1224 srl t0, t0, 8 # preserve original top 24 bits
1237 * atomic_subtract_8(uint8_t *a, uint8_t b)
1242 LEAF(atomic_subtract_8)
1244 srl a0, a0, 2 # round down address to be 32-bit aligned
1249 andi t1, t1, 0xff # t1 has the original lower 8 bits
1251 andi t1, t1, 0xff # t1 has the new lower 8 bits
1252 srl t0, t0, 8 # preserve original top 24 bits
1260 END(atomic_subtract_8)
1263 * atomic 64-bit register read/write assembly language support routines.
1266 .set noreorder # Noreorder is default style!
1268 #if !defined(__mips_n64) && !defined(__mips_n32)
1270 * I don't know if these routines have the right number of
1271 * NOPs in it for all processors. XXX
1273 * Maybe it would be better to just leave this undefined in that case.
# On 32-bit ABIs a 64-bit access is not a single instruction, so these
# routines fake atomicity by disabling interrupts (clearing SR.IE) around
# the access (the ld/sd pair itself is elided in this copy), then restoring
# the saved STATUS.  Not multiprocessor-safe by itself -- uniprocessor only.
1275 LEAF(atomic_store_64)
1276 mfc0 t1, MIPS_COP_0_STATUS # save STATUS
1277 and t2, t1, ~MIPS_SR_INT_IE # mask interrupt enable
1278 mtc0 t2, MIPS_COP_0_STATUS # interrupts off
1289 mtc0 t1,MIPS_COP_0_STATUS # restore STATUS (interrupts back on)
1296 END(atomic_store_64)
1298 LEAF(atomic_load_64)
1299 mfc0 t1, MIPS_COP_0_STATUS # save STATUS
1300 and t2, t1, ~MIPS_SR_INT_IE # mask interrupt enable
1301 mtc0 t2, MIPS_COP_0_STATUS # interrupts off
1312 mtc0 t1,MIPS_COP_0_STATUS # restore STATUS
1322 #if defined(DDB) || defined(DEBUG)
# Debugger memory peek/poke helpers (names elided in this copy; presumably
# kdbpeek/kdbpoke or similar): reject unaligned addresses, arm a pcb
# onfault handler around the access so a bad debugger address does not
# panic, then disarm on both the success and fault paths.
1326 and v0, a0, 3 # unaligned ?
1328 PTR_L t1, PC_CURPCB(t1)
1330 PTR_S v1, U_PCB_ONFAULT(t1) # arm onfault
1334 PTR_S zero, U_PCB_ONFAULT(t1) # disarm (success path)
1340 PTR_S zero, U_PCB_ONFAULT(t1) # disarm (fault path)
1350 and v0, a0, 3 # unaligned ?
1352 PTR_L t1, PC_CURPCB(t1)
1354 PTR_S v1, U_PCB_ONFAULT(t1) # arm onfault
1358 PTR_S zero, U_PCB_ONFAULT(t1) # disarm (success path)
1364 PTR_S zero, U_PCB_ONFAULT(t1) # disarm (fault path)
1372 #endif /* DDB || DEBUG */
1376 break MIPS_BREAK_SOVER_VAL
# setjmp: save the callee-saved register state (s0-s8, sp, ra) and the
# STATUS register into the jmp_buf at a0, then return 0.
1382 mfc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value!
1383 REG_S s0, (SZREG * PREG_S0)(a0)
1384 REG_S s1, (SZREG * PREG_S1)(a0)
1385 REG_S s2, (SZREG * PREG_S2)(a0)
1386 REG_S s3, (SZREG * PREG_S3)(a0)
1387 REG_S s4, (SZREG * PREG_S4)(a0)
1388 REG_S s5, (SZREG * PREG_S5)(a0)
1389 REG_S s6, (SZREG * PREG_S6)(a0)
1390 REG_S s7, (SZREG * PREG_S7)(a0)
1391 REG_S s8, (SZREG * PREG_S8)(a0)
1392 REG_S sp, (SZREG * PREG_SP)(a0)
1393 REG_S ra, (SZREG * PREG_RA)(a0)
1394 REG_S v0, (SZREG * PREG_SR)(a0)
1396 li v0, 0 # setjmp return
# longjmp: restore the state saved by setjmp (including STATUS), so the
# j ra (elided in this copy) resumes at the setjmp call site returning 1.
1400 REG_L v0, (SZREG * PREG_SR)(a0)
1401 REG_L ra, (SZREG * PREG_RA)(a0)
1402 REG_L s0, (SZREG * PREG_S0)(a0)
1403 REG_L s1, (SZREG * PREG_S1)(a0)
1404 REG_L s2, (SZREG * PREG_S2)(a0)
1405 REG_L s3, (SZREG * PREG_S3)(a0)
1406 REG_L s4, (SZREG * PREG_S4)(a0)
1407 REG_L s5, (SZREG * PREG_S5)(a0)
1408 REG_L s6, (SZREG * PREG_S6)(a0)
1409 REG_L s7, (SZREG * PREG_S7)(a0)
1410 REG_L s8, (SZREG * PREG_S8)(a0)
1411 REG_L sp, (SZREG * PREG_SP)(a0)
1412 mtc0 v0, MIPS_COP_0_STATUS # Later the "real" spl value!
1415 li v0, 1 # longjmp return
# Stray fragment of a preceding routine (curthread load); name elided.
1420 lw t0, PC_CURTHREAD(t0)
1426 /* Define a new md function 'casuptr'. This atomically compares and sets
1427 a pointer that is in user space. It will be used as the basic primitive
1428 for a kernel supported user space lock implementation. */
# casuptr(p, old, new): LL/SC compare-and-set on a user pointer, with a
# fusufault onfault handler catching bad user addresses.
1430 PTR_LI t0, VM_MAXUSER_ADDRESS /* verify address validity */
1431 blt a0, t0, fusufault /* trap faults */
1435 lw t1, PC_CURTHREAD(t1)
1438 PTR_LA t2, fusufault
1439 PTR_S t2, U_PCB_ONFAULT(t1) /* arm onfault */
1441 ll v0, 0(a0) /* try to load the old value */
# NOTE(review): a compare-and-set should STORE when the loaded value
# matches oldval; branching to 2f on equality here skips the sc and stores
# on mismatch instead.  Expected `bne v0, a1, 2f` -- verify against full
# source; as written this looks inverted.
1442 beq v0, a1, 2f /* compare */
1443 move t0, a2 /* setup value to write */
1444 sc t0, 0(a0) /* write if address still locked */
1445 beq t0, zero, 1b /* if it failed, spin */
1447 PTR_S zero, U_PCB_ONFAULT(t1) /* clean up */
# Cavium Octeon-specific CP0 access helpers (bodies mostly elided in this
# copy): shadow-register enable/read and control register set/get.
1454 * void octeon_enable_shadow(void)
1455 * turns on access to CC and CCRes
1457 LEAF(octeon_enable_shadow)
1459 mtc0 t1, MIPS_COP_0_INFO # write the enable value (load of t1 elided)
1462 END(octeon_enable_shadow)
1465 LEAF(octeon_get_shadow)
1466 mfc0 v0, MIPS_COP_0_INFO # return current CP0 Info register
1469 END(octeon_get_shadow)
1472 * octeon_set_control(addr, uint32_t val)
1474 LEAF(octeon_set_control)
1486 END(octeon_set_control)
1489 * octeon_get_control(addr)
1491 LEAF(octeon_get_control)
1494 /* dmfc0 a1, 9, 7 */
1500 END(octeon_get_control)
# o32-only 64-bit load/store helpers (names elided; presumably the
# atomic 64-bit load/store used by device code).  Interrupts are disabled
# around the access; the 64-bit value is split across two 32-bit registers
# in the o32 return/argument convention, with halves swapped by byte order.
# The ld/sd of the actual value is elided in this copy.
1507 #if defined(__mips_o32)
1508 mfc0 t0, MIPS_COP_0_STATUS # turn off interrupts
1509 and t1, t0, ~(MIPS_SR_INT_IE)
1510 mtc0 t1, MIPS_COP_0_STATUS
1517 #if _BYTE_ORDER == _BIG_ENDIAN
1519 dsra v1, v1, 32 # low word in v1
1520 dsra v0, v0, 32 # high word in v0
1522 dsra v1, v0, 32 # high word in v1
1524 dsra v0, v0, 32 # low word in v0
1527 mtc0 t0, MIPS_COP_0_STATUS # restore intr status.
1530 #else /* !__mips_o32 */
1532 #endif /* !__mips_o32 */
# Store path: assemble the 64-bit value from the two 32-bit argument
# registers (a2/a3; a1 is padding per the o32 64-bit argument alignment
# rule), again under disabled interrupts.
1543 #if defined(__mips_o32)
1544 mfc0 t0, MIPS_COP_0_STATUS # turn off interrupts
1545 and t1, t0, ~(MIPS_SR_INT_IE)
1546 mtc0 t1, MIPS_COP_0_STATUS
1552 # NOTE: a1 is padding!
1554 #if _BYTE_ORDER == _BIG_ENDIAN
1555 dsll a2, a2, 32 # high word in a2
1556 dsll a3, a3, 32 # low word in a3
1559 dsll a2, a2, 32 # low word in a2
1561 dsll a3, a3, 32 # high word in a3
1566 mtc0 t0, MIPS_COP_0_STATUS # restore intr status.
1569 #else /* !__mips_o32 */
1571 #endif /* !__mips_o32 */