2 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
31 * Copyright (c) 2001 Jake Burkholder.
32 * All rights reserved.
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 #include <machine/asm.h>
57 __FBSDID("$FreeBSD$");
59 #include "opt_compat.h"
62 #include <machine/asi.h>
63 #include <machine/asmacros.h>
64 #include <machine/frame.h>
65 #include <machine/fsr.h>
66 #include <machine/intr_machdep.h>
67 #include <machine/ktr.h>
68 #include <machine/pcb.h>
69 #include <machine/pstate.h>
70 #include <machine/trap.h>
71 #include <machine/tsb.h>
72 #include <machine/tstate.h>
73 #include <machine/utrap.h>
74 #include <machine/wstate.h>
78 #define TSB_KERNEL_MASK 0x0
79 #define TSB_KERNEL 0x0
87 * Atomically set the reference bit in a TTE.
89 #define TTE_SET_BIT(r1, r2, r3, bit) \
90 add r1, TTE_DATA, r1 ; \
93 casxa [r1] ASI_N, r2, r3 ; \
98 #define TTE_SET_REF(r1, r2, r3) TTE_SET_BIT(r1, r2, r3, TD_REF)
99 #define TTE_SET_W(r1, r2, r3) TTE_SET_BIT(r1, r2, r3, TD_W)
102 * Macros for spilling and filling live windows.
104 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
105 * handler will not use more than 24 instructions total, to leave room for
106 * resume vectors which occupy the last 8 instructions.
/*
 * SPILL: store the 8 local (%l0-%l7) and 8 in (%i0-%i7) registers of the
 * trapped window to 16 consecutive slots at "base", using the given store
 * instruction, element size (8 for 64-bit frames, 4 for 32-bit frames) and
 * optional ASI qualifier (EMPTY for normal stores).  Exactly 16 instructions,
 * as required by the spill vector layout described above.
 */
109 #define SPILL(storer, base, size, asi) \
110 storer %l0, [base + (0 * size)] asi ; \
111 storer %l1, [base + (1 * size)] asi ; \
112 storer %l2, [base + (2 * size)] asi ; \
113 storer %l3, [base + (3 * size)] asi ; \
114 storer %l4, [base + (4 * size)] asi ; \
115 storer %l5, [base + (5 * size)] asi ; \
116 storer %l6, [base + (6 * size)] asi ; \
117 storer %l7, [base + (7 * size)] asi ; \
118 storer %i0, [base + (8 * size)] asi ; \
119 storer %i1, [base + (9 * size)] asi ; \
120 storer %i2, [base + (10 * size)] asi ; \
121 storer %i3, [base + (11 * size)] asi ; \
122 storer %i4, [base + (12 * size)] asi ; \
123 storer %i5, [base + (13 * size)] asi ; \
124 storer %i6, [base + (14 * size)] asi ; \
125 storer %i7, [base + (15 * size)] asi
/*
 * FILL: the inverse of SPILL — reload the 8 local (%l0-%l7) and 8 in
 * (%i0-%i7) registers of the window being restored from 16 consecutive
 * slots at "base", using the given load instruction, element size and
 * optional ASI qualifier (EMPTY for normal loads).  Exactly 16
 * instructions, as required by the fill vector layout described above.
 */
127 #define FILL(loader, base, size, asi) \
128 loader [base + (0 * size)] asi, %l0 ; \
129 loader [base + (1 * size)] asi, %l1 ; \
130 loader [base + (2 * size)] asi, %l2 ; \
131 loader [base + (3 * size)] asi, %l3 ; \
132 loader [base + (4 * size)] asi, %l4 ; \
133 loader [base + (5 * size)] asi, %l5 ; \
134 loader [base + (6 * size)] asi, %l6 ; \
135 loader [base + (7 * size)] asi, %l7 ; \
136 loader [base + (8 * size)] asi, %i0 ; \
137 loader [base + (9 * size)] asi, %i1 ; \
138 loader [base + (10 * size)] asi, %i2 ; \
139 loader [base + (11 * size)] asi, %i3 ; \
140 loader [base + (12 * size)] asi, %i4 ; \
141 loader [base + (13 * size)] asi, %i5 ; \
142 loader [base + (14 * size)] asi, %i6 ; \
143 loader [base + (15 * size)] asi, %i7
145 #define ERRATUM50(reg) mov reg, reg
147 #define KSTACK_SLOP 1024
150 * Sanity check the kernel stack and bail out if it's wrong.
151 * XXX: doesn't handle being on the panic stack.
153 #define KSTACK_CHECK \
155 stx %g1, [ASP_REG + 0] ; \
156 stx %g2, [ASP_REG + 8] ; \
157 add %sp, SPOFF, %g1 ; \
158 andcc %g1, (1 << PTR_SHIFT) - 1, %g0 ; \
159 bnz,a %xcc, tl1_kstack_fault ; \
161 ldx [PCPU(CURTHREAD)], %g2 ; \
162 ldx [%g2 + TD_KSTACK], %g2 ; \
163 add %g2, KSTACK_SLOP, %g2 ; \
164 subcc %g1, %g2, %g1 ; \
165 ble,a %xcc, tl1_kstack_fault ; \
167 set KSTACK_PAGES * PAGE_SIZE, %g2 ; \
169 bgt,a %xcc, tl1_kstack_fault ; \
171 ldx [ASP_REG + 8], %g2 ; \
172 ldx [ASP_REG + 0], %g1 ; \
179 ENTRY(tl1_kstack_fault)
185 #if KTR_COMPILE & KTR_TRAP
186 CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
187 , %g2, %g3, %g4, 7, 8, 9)
189 stx %g3, [%g2 + KTR_PARM1]
191 stx %g3, [%g2 + KTR_PARM1]
193 stx %g3, [%g2 + KTR_PARM1]
203 #if KTR_COMPILE & KTR_TRAP
205 "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
206 , %g1, %g2, %g3, 7, 8, 9)
208 stx %g2, [%g1 + KTR_PARM1]
209 ldx [PCPU(CURTHREAD)], %g2
210 ldx [%g2 + TD_KSTACK], %g2
211 stx %g2, [%g1 + KTR_PARM2]
212 rdpr %canrestore, %g2
213 stx %g2, [%g1 + KTR_PARM3]
215 stx %g2, [%g1 + KTR_PARM4]
217 stx %g2, [%g1 + KTR_PARM5]
219 stx %g2, [%g1 + KTR_PARM6]
223 wrpr %g0, 0, %canrestore
224 wrpr %g0, 6, %cansave
225 wrpr %g0, 0, %otherwin
226 wrpr %g0, WSTATE_KERNEL, %wstate
228 sub ASP_REG, SPOFF + CCFSZ, %sp
233 mov T_KSTACK_FAULT | T_KERNEL, %o0
234 END(tl1_kstack_fault)
237 * Magic to resume from a spill or fill trap. If we get an alignment or an
238 * MMU fault during a spill or a fill, this macro will detect the fault and
239 * resume at a set instruction offset in the trap handler.
241 * To check if the previous trap was a spill/fill we convert the trapped pc
242 * to a trap type and verify that it is in the range of spill/fill vectors.
243 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
244 * tl bit allows us to detect both ranges with one test.
247 * 0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
249 * To calculate the new pc we take advantage of the xor feature of wrpr.
250 * Forcing all the low bits of the trapped pc on we can produce any offset
251 * into the spill/fill vector. The size of a spill/fill trap vector is 0x80.
253 * 0x7f ^ 0x1f == 0x60
254 * 0x1f == (0x80 - 0x60) - 1
256 * Which are the offset and xor value used to resume from alignment faults.
260 * Determine if we have trapped inside of a spill/fill vector, and if so resume
261 * at a fixed instruction offset in the trap vector. Must be called on
264 #define RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
266 stx %g1, [ASP_REG + 0] ; \
267 stx %g2, [ASP_REG + 8] ; \
271 sub %g1, %g2, %g2 ; \
273 andn %g2, 0x200, %g2 ; \
278 or %g1, 0x7f, %g1 ; \
279 wrpr %g1, xor, %tnpc ; \
281 ldx [ASP_REG + 8], %g2 ; \
282 ldx [ASP_REG + 0], %g1 ; \
285 9: ldx [ASP_REG + 8], %g2 ; \
286 ldx [ASP_REG + 0], %g1 ; \
290 * For certain faults we need to clear the SFSR MMU register before returning.
292 #define RSF_CLR_SFSR \
293 wr %g0, ASI_DMMU, %asi ; \
294 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
296 #define RSF_XOR(off) ((0x80 - off) - 1)
299 * Instruction offsets in spill and fill trap handlers for handling certain
300 * nested traps, and corresponding xor constants for wrpr.
302 #define RSF_OFF_ALIGN 0x60
303 #define RSF_OFF_MMU 0x70
305 #define RESUME_SPILLFILL_ALIGN \
306 RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
307 #define RESUME_SPILLFILL_MMU \
308 RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
309 #define RESUME_SPILLFILL_MMU_CLR_SFSR \
310 RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
313 * Constant to add to %tnpc when taking a fill trap just before returning to
316 #define RSF_FILL_INC tl0_ret_fill_end - tl0_ret_fill
319 * Generate a T_SPILL or T_FILL trap if the window operation fails.
321 #define RSF_TRAP(type) \
322 ba %xcc, tl0_sftrap ; \
327 * Game over if the window operation fails.
329 #define RSF_FATAL(type) \
330 ba %xcc, rsf_fatal ; \
335 * Magic to resume from a failed fill a few instructions after the corresponding
336 * restore. This is used on return from the kernel to usermode.
338 #define RSF_FILL_MAGIC \
340 add %g1, RSF_FILL_INC, %g1 ; \
341 wrpr %g1, 0, %tnpc ; \
346 * Spill to the pcb if a spill to the user stack in kernel mode fails.
348 #define RSF_SPILL_TOPCB \
349 ba,a %xcc, tl1_spill_topcb ; \
354 #if KTR_COMPILE & KTR_TRAP
355 CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
356 , %g1, %g3, %g4, 7, 8, 9)
358 stx %g3, [%g1 + KTR_PARM1]
359 stx %g2, [%g1 + KTR_PARM2]
368 .comm intrnames, IV_NAMLEN
371 .comm intrcnt, IV_MAX * 8
375 * Trap table and associated macros
377 * Due to its size a trap table is an inherently hard thing to represent in
378 * code in a clean way. There are approximately 1024 vectors, of 8 or 32
379 * instructions each, many of which are identical. The way that this is
380 * laid out is the instructions (8 or 32) for the actual trap vector appear
381 * as an AS macro. In general this code branches to tl0_trap or tl1_trap,
382 * but if not supporting code can be placed just after the definition of the
383 * macro. The macros are then instantiated in a different section (.trap),
384 * which is setup to be placed by the linker at the beginning of .text, and the
385 * code around the macros is moved to the end of trap table. In this way the
386 * code that must be sequential in memory can be split up, and located near
387 * its supporting code so that it is easier to follow.
391 * Clean window traps occur when %cleanwin is zero to ensure that data
392 * is not leaked between address spaces in registers.
412 wrpr %l7, 0, %cleanwin
419 * Stack fixups for entry from user mode. We are still running on the
420 * user stack, and with its live registers, so we must save soon. We
421 * are on alternate globals so we do have some registers. Set the
422 * transitional window state, and do the save. If this traps we
423 * attempt to spill a window to the user stack. If this fails, we
424 * spill the window to the pcb and continue. Spilling to the pcb
427 * NOTE: Must be called with alternate globals and clobbers %g1.
432 wrpr %g1, WSTATE_TRANSITION, %wstate
436 .macro tl0_setup type
445 * Generic trap type. Call trap() with the specified type.
453 * This is used to suck up the massive swaths of reserved trap types.
454 * Generates count "reserved" trap vectors.
456 .macro tl0_reserved count
464 wrpr %g1, WSTATE_NESTED, %wstate
465 save %sp, -(CCFSZ + TF_SIZEOF), %sp
468 .macro tl1_setup type
473 mov \type | T_KERNEL, %o0
481 .macro tl1_reserved count
487 .macro tl0_insn_excptn
488 wrpr %g0, PSTATE_ALT, %pstate
489 wr %g0, ASI_IMMU, %asi
491 ldxa [%g0 + AA_IMMU_SFSR] %asi, %g4
493 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
494 * followed by a DONE, FLUSH or RETRY for USIII. In practice,
495 * this triggers a RED state exception though.
497 stxa %g0, [%g0 + AA_IMMU_SFSR] %asi
499 ba %xcc, tl0_sfsr_trap
500 mov T_INSTRUCTION_EXCEPTION, %g2
504 .macro tl0_data_excptn
505 wrpr %g0, PSTATE_ALT, %pstate
506 wr %g0, ASI_DMMU, %asi
507 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
508 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
509 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
511 ba %xcc, tl0_sfsr_trap
512 mov T_DATA_EXCEPTION, %g2
517 wr %g0, ASI_DMMU, %asi
518 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
519 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
520 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
522 ba %xcc, tl0_sfsr_trap
523 mov T_MEM_ADDRESS_NOT_ALIGNED, %g2
537 .macro tl0_intr level, mask
545 #define INTR(level, traplvl) \
546 tl ## traplvl ## _intr level, 1 << level
548 #define TICK(traplvl) \
549 tl ## traplvl ## _intr PIL_TICK, 0x10001
551 #define INTR_LEVEL(tl) \
568 .macro tl0_intr_level
573 ldxa [%g0] ASI_INTR_RECEIVE, %g1
574 andcc %g1, IRSR_BUSY, %g0
575 bnz,a,pt %xcc, intr_vector
583 * Load the context and the virtual page number from the tag access
584 * register. We ignore the context.
586 wr %g0, ASI_IMMU, %asi
587 ldxa [%g0 + AA_IMMU_TAR] %asi, %g1
590 * Initialize the page size walker.
595 * Loop over all supported page sizes.
599 * Compute the page shift for the page size we are currently looking
604 add %g3, PAGE_SHIFT, %g3
607 * Extract the virtual page number from the contents of the tag
613 * Compute the TTE bucket address.
615 ldxa [%g0 + AA_IMMU_TSB] %asi, %g5
616 and %g3, TSB_BUCKET_MASK, %g4
617 sllx %g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
621 * Compute the TTE tag target.
623 sllx %g3, TV_SIZE_BITS, %g3
627 * Loop over the TTEs in this bucket.
631 * Load the TTE. Note that this instruction may fault, clobbering
632 * the contents of the tag access register, %g5, %g6, and %g7. We
633 * do not use %g5, and %g6 and %g7 are not used until this instruction
634 * completes successfully.
636 2: ldda [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
639 * Check that it's valid and executable and that the TTE tags match.
642 andcc %g7, TD_EXEC, %g0
649 * We matched a TTE, load the TLB.
653 * Set the reference bit, if it's currently clear.
655 andcc %g7, TD_REF, %g0
656 bz,a,pn %xcc, tl0_immu_miss_set_ref
660 * Load the TTE tag and data into the TLB and retry the instruction.
662 stxa %g1, [%g0 + AA_IMMU_TAR] %asi
663 stxa %g7, [%g0] ASI_ITLB_DATA_IN_REG
667 * Advance to the next TTE in this bucket, and check the low bits
668 * of the bucket pointer to see if we've finished the bucket.
670 3: add %g4, 1 << TTE_SHIFT, %g4
671 andcc %g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
676 * See if we just checked the largest page size, and advance to the
684 * Not in user TSB, call C code.
686 ba,a %xcc, tl0_immu_miss_trap
690 ENTRY(tl0_immu_miss_set_ref)
692 * Set the reference bit.
694 TTE_SET_REF(%g4, %g2, %g3)
697 * May have become invalid during casxa, in which case start over.
703 * Load the TTE tag and data into the TLB and retry the instruction.
705 stxa %g1, [%g0 + AA_IMMU_TAR] %asi
706 stxa %g2, [%g0] ASI_ITLB_DATA_IN_REG
708 END(tl0_immu_miss_set_ref)
710 ENTRY(tl0_immu_miss_trap)
712 * Put back the contents of the tag access register, in case we
715 sethi %hi(KERNBASE), %g2
716 stxa %g1, [%g0 + AA_IMMU_TAR] %asi
720 * Switch to alternate globals.
722 wrpr %g0, PSTATE_ALT, %pstate
725 * Reload the tag access register.
727 ldxa [%g0 + AA_IMMU_TAR] %asi, %g2
730 * Save the tag access register, and call common trap code.
737 mov T_INSTRUCTION_MISS, %o0
738 END(tl0_immu_miss_trap)
742 * Load the context and the virtual page number from the tag access
743 * register. We ignore the context.
745 wr %g0, ASI_DMMU, %asi
746 ldxa [%g0 + AA_DMMU_TAR] %asi, %g1
749 * Initialize the page size walker.
755 * Loop over all supported page sizes.
759 * Compute the page shift for the page size we are currently looking
764 add %g3, PAGE_SHIFT, %g3
767 * Extract the virtual page number from the contents of the tag
773 * Compute the TTE bucket address.
775 ldxa [%g0 + AA_DMMU_TSB] %asi, %g5
776 and %g3, TSB_BUCKET_MASK, %g4
777 sllx %g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
781 * Compute the TTE tag target.
783 sllx %g3, TV_SIZE_BITS, %g3
787 * Loop over the TTEs in this bucket.
791 * Load the TTE. Note that this instruction may fault, clobbering
792 * the contents of the tag access register, %g5, %g6, and %g7. We
793 * do not use %g5, and %g6 and %g7 are not used until this instruction
794 * completes successfully.
796 2: ldda [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
799 * Check that it's valid and that the virtual page numbers match.
807 * We matched a TTE, load the TLB.
811 * Set the reference bit, if it's currently clear.
813 andcc %g7, TD_REF, %g0
814 bz,a,pn %xcc, tl0_dmmu_miss_set_ref
818 * Load the TTE tag and data into the TLB and retry the instruction.
820 stxa %g1, [%g0 + AA_DMMU_TAR] %asi
821 stxa %g7, [%g0] ASI_DTLB_DATA_IN_REG
825 * Advance to the next TTE in this bucket, and check the low bits
826 * of the bucket pointer to see if we've finished the bucket.
828 3: add %g4, 1 << TTE_SHIFT, %g4
829 andcc %g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
834 * See if we just checked the largest page size, and advance to the
842 * Not in user TSB, call C code.
844 ba,a %xcc, tl0_dmmu_miss_trap
848 ENTRY(tl0_dmmu_miss_set_ref)
850 * Set the reference bit.
852 TTE_SET_REF(%g4, %g2, %g3)
855 * May have become invalid during casxa, in which case start over.
861 * Load the TTE tag and data into the TLB and retry the instruction.
863 stxa %g1, [%g0 + AA_DMMU_TAR] %asi
864 stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG
866 END(tl0_dmmu_miss_set_ref)
868 ENTRY(tl0_dmmu_miss_trap)
870 * Put back the contents of the tag access register, in case we
873 stxa %g1, [%g0 + AA_DMMU_TAR] %asi
877 * Switch to alternate globals.
879 wrpr %g0, PSTATE_ALT, %pstate
882 * Check if we actually came from the kernel.
890 * Reload the tag access register.
892 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
895 * Save the tag access register and call common trap code.
905 * Handle faults during window spill/fill.
907 1: RESUME_SPILLFILL_MMU
910 * Reload the tag access register.
912 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
919 mov T_DATA_MISS | T_KERNEL, %o0
920 END(tl0_dmmu_miss_trap)
923 ba,a %xcc, tl0_dmmu_prot_1
928 ENTRY(tl0_dmmu_prot_1)
930 * Load the context and the virtual page number from the tag access
931 * register. We ignore the context.
933 wr %g0, ASI_DMMU, %asi
934 ldxa [%g0 + AA_DMMU_TAR] %asi, %g1
937 * Initialize the page size walker.
943 * Loop over all supported page sizes.
947 * Compute the page shift for the page size we are currently looking
952 add %g3, PAGE_SHIFT, %g3
955 * Extract the virtual page number from the contents of the tag
961 * Compute the TTE bucket address.
963 ldxa [%g0 + AA_DMMU_TSB] %asi, %g5
964 and %g3, TSB_BUCKET_MASK, %g4
965 sllx %g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
969 * Compute the TTE tag target.
971 sllx %g3, TV_SIZE_BITS, %g3
975 * Loop over the TTEs in this bucket.
979 * Load the TTE. Note that this instruction may fault, clobbering
980 * the contents of the tag access register, %g5, %g6, and %g7. We
981 * do not use %g5, and %g6 and %g7 are not used until this instruction
982 * completes successfully.
984 2: ldda [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
987 * Check that it's valid and writable and that the virtual page
991 andcc %g7, TD_SW, %g0
998 * Set the hardware write bit.
1000 TTE_SET_W(%g4, %g2, %g3)
1003 * Delete the old TLB entry and clear the SFSR.
1005 srlx %g1, PAGE_SHIFT, %g3
1006 sllx %g3, PAGE_SHIFT, %g3
1007 stxa %g0, [%g3] ASI_DMMU_DEMAP
1008 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1012 * May have become invalid during casxa, in which case start over.
1018 * Load the TTE data into the TLB and retry the instruction.
1020 stxa %g1, [%g0 + AA_DMMU_TAR] %asi
1021 stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG
1025 * Check the low bits to see if we've finished the bucket.
1027 4: add %g4, 1 << TTE_SHIFT, %g4
1028 andcc %g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
1033 * See if we just checked the largest page size, and advance to the
1041 * Not in user TSB, call C code.
1043 ba,a %xcc, tl0_dmmu_prot_trap
1045 END(tl0_dmmu_prot_1)
1047 ENTRY(tl0_dmmu_prot_trap)
1049 * Put back the contents of the tag access register, in case we
1052 stxa %g1, [%g0 + AA_DMMU_TAR] %asi
1056 * Switch to alternate globals.
1058 wrpr %g0, PSTATE_ALT, %pstate
1061 * Check if we actually came from the kernel.
1069 * Load the SFAR, SFSR and TAR.
1071 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
1072 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
1073 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
1074 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1078 * Save the MMU registers and call common trap code.
1087 mov T_DATA_PROTECTION, %o0
1090 * Handle faults during window spill/fill.
1092 1: RESUME_SPILLFILL_MMU_CLR_SFSR
1095 * Load the SFAR, SFSR and TAR. Clear the SFSR.
1097 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
1098 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
1099 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
1100 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1110 mov T_DATA_PROTECTION | T_KERNEL, %o0
1111 END(tl0_dmmu_prot_trap)
1113 .macro tl0_spill_0_n
1114 wr %g0, ASI_AIUP, %asi
1115 SPILL(stxa, %sp + SPOFF, 8, %asi)
1123 .macro tl0_spill_1_n
1124 wr %g0, ASI_AIUP, %asi
1125 SPILL(stwa, %sp, 4, %asi)
1134 wr %g0, ASI_AIUP, %asi
1135 FILL(ldxa, %sp + SPOFF, 8, %asi)
1144 wr %g0, ASI_AIUP, %asi
1145 FILL(lduwa, %sp, 4, %asi)
1155 and %g1, TSTATE_CWP_MASK, %g1
1164 .macro tl0_spill_bad count
1171 .macro tl0_fill_bad count
1187 .macro tl0_fp_restore
1188 ba,a %xcc, tl0_fp_restore
1193 ENTRY(tl0_fp_restore)
1194 ldx [PCB_REG + PCB_FLAGS], %g1
1195 andn %g1, PCB_FEF, %g1
1196 stx %g1, [PCB_REG + PCB_FLAGS]
1198 wr %g0, FPRS_FEF, %fprs
1199 wr %g0, ASI_BLK_S, %asi
1200 ldda [PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
1201 ldda [PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
1202 ldda [PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
1203 ldda [PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
1208 .macro tl1_insn_excptn
1209 wrpr %g0, PSTATE_ALT, %pstate
1210 wr %g0, ASI_IMMU, %asi
1212 ldxa [%g0 + AA_IMMU_SFSR] %asi, %g4
1214 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
1215 * followed by a DONE, FLUSH or RETRY for USIII. In practice,
1216 * this triggers a RED state exception though.
1218 stxa %g0, [%g0 + AA_IMMU_SFSR] %asi
1220 ba %xcc, tl1_insn_exceptn_trap
1221 mov T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
1225 ENTRY(tl1_insn_exceptn_trap)
1233 END(tl1_insn_exceptn_trap)
1235 .macro tl1_fp_disabled
1236 ba,a %xcc, tl1_fp_disabled_1
1241 ENTRY(tl1_fp_disabled_1)
1243 set fpu_fault_begin, %g2
1245 cmp %g1, fpu_fault_size
1249 wr %g0, FPRS_FEF, %fprs
1250 wr %g0, ASI_BLK_S, %asi
1251 ldda [PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
1252 ldda [PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
1253 ldda [PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
1254 ldda [PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
1262 mov T_FP_DISABLED | T_KERNEL, %o0
1263 END(tl1_fp_disabled_1)
1265 .macro tl1_data_excptn
1266 wrpr %g0, PSTATE_ALT, %pstate
1267 ba,a %xcc, tl1_data_excptn_trap
/*
 * Out-of-line tail of the tl1_data_excptn vector: if the fault happened
 * inside a spill/fill vector, RESUME_SPILLFILL_MMU_CLR_SFSR resumes there
 * (clearing the SFSR) and does not return here; otherwise fall through to
 * tl1_sfsr_trap with the trap type passed in %g2 (set in the branch's
 * delay slot).
 */
1272 ENTRY(tl1_data_excptn_trap)
1273 RESUME_SPILLFILL_MMU_CLR_SFSR
1274 ba %xcc, tl1_sfsr_trap
1275 mov T_DATA_EXCEPTION | T_KERNEL, %g2
1276 END(tl1_data_excptn_trap)
1279 ba,a %xcc, tl1_align_trap
/*
 * Out-of-line tail of the tl1 alignment vector: if the alignment fault
 * happened inside a spill/fill vector, RESUME_SPILLFILL_ALIGN resumes at
 * the fixed alignment-recovery offset (clearing the SFSR) and does not
 * return here; otherwise fall through to tl1_sfsr_trap with the trap type
 * passed in %g2 (set in the branch's delay slot).
 */
1284 ENTRY(tl1_align_trap)
1285 RESUME_SPILLFILL_ALIGN
1286 ba %xcc, tl1_sfsr_trap
1287 mov T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
/*
 * Fix: this was END(tl1_data_excptn_trap), a copy/paste slip.  END expands
 * to a .size directive, so the mismatched name silently set the size of the
 * tl1_data_excptn_trap symbol (already closed above) instead of this one.
 */
1288 END(tl1_align_trap)
1290 ENTRY(tl1_sfsr_trap)
1291 wr %g0, ASI_DMMU, %asi
1292 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
1293 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
1294 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1306 .macro tl1_intr level, mask
1314 .macro tl1_intr_level
1318 .macro tl1_immu_miss
1320 * Load the context and the virtual page number from the tag access
1321 * register. We ignore the context.
1323 wr %g0, ASI_IMMU, %asi
1324 ldxa [%g0 + AA_IMMU_TAR] %asi, %g5
1327 * Compute the address of the TTE. The TSB mask and address of the
1328 * TSB are patched at startup.
1330 .globl tl1_immu_miss_patch_1
1331 tl1_immu_miss_patch_1:
1332 sethi %hi(TSB_KERNEL_MASK), %g6
1333 or %g6, %lo(TSB_KERNEL_MASK), %g6
1334 sethi %hi(TSB_KERNEL), %g7
1336 srlx %g5, TAR_VPN_SHIFT, %g5
1338 sllx %g6, TTE_SHIFT, %g6
1344 ldda [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
1347 * Check that it's valid and executable and that the virtual page
1350 brgez,pn %g7, tl1_immu_miss_trap
1351 andcc %g7, TD_EXEC, %g0
1352 bz,pn %xcc, tl1_immu_miss_trap
1353 srlx %g6, TV_SIZE_BITS, %g6
1355 bne,pn %xcc, tl1_immu_miss_trap
1359 * Set the reference bit if it's currently clear.
1361 andcc %g7, TD_REF, %g0
1362 bz,a,pn %xcc, tl1_immu_miss_set_ref
1366 * Load the TTE data into the TLB and retry the instruction.
1368 stxa %g7, [%g0] ASI_ITLB_DATA_IN_REG
1373 ENTRY(tl1_immu_miss_set_ref)
1375 * Recompute the TTE address, which we clobbered loading the TTE.
1376 * The TSB mask and address of the TSB are patched at startup.
1378 .globl tl1_immu_miss_patch_2
1379 tl1_immu_miss_patch_2:
1380 sethi %hi(TSB_KERNEL_MASK), %g6
1381 or %g6, %lo(TSB_KERNEL_MASK), %g6
1382 sethi %hi(TSB_KERNEL), %g7
1385 sllx %g5, TTE_SHIFT, %g5
1389 * Set the reference bit.
1391 TTE_SET_REF(%g5, %g6, %g7)
1394 * May have become invalid during casxa, in which case start over.
1400 * Load the TTE data into the TLB and retry the instruction.
1402 stxa %g6, [%g0] ASI_ITLB_DATA_IN_REG
1404 END(tl1_immu_miss_set_ref)
1406 ENTRY(tl1_immu_miss_trap)
1408 * Switch to alternate globals.
1410 wrpr %g0, PSTATE_ALT, %pstate
1412 ldxa [%g0 + AA_IMMU_TAR] %asi, %g2
1419 mov T_INSTRUCTION_MISS | T_KERNEL, %o0
1420 END(tl1_immu_miss_trap)
1422 .macro tl1_dmmu_miss
1424 * Load the context and the virtual page number from the tag access
1427 wr %g0, ASI_DMMU, %asi
1428 ldxa [%g0 + AA_DMMU_TAR] %asi, %g5
1431 * Extract the context from the contents of the tag access register.
1432 * If it's non-zero this is a fault on a user address. Note that the
1433 * faulting address is passed in %g1.
1435 sllx %g5, 64 - TAR_VPN_SHIFT, %g6
1436 brnz,a,pn %g6, tl1_dmmu_miss_user
1440 * Check for the direct mapped physical region. These addresses have
1441 * the high bit set so they are negative.
1443 brlz,pn %g5, tl1_dmmu_miss_direct
1447 * Compute the address of the TTE. The TSB mask and address of the
1448 * TSB are patched at startup.
1450 .globl tl1_dmmu_miss_patch_1
1451 tl1_dmmu_miss_patch_1:
1452 sethi %hi(TSB_KERNEL_MASK), %g6
1453 or %g6, %lo(TSB_KERNEL_MASK), %g6
1454 sethi %hi(TSB_KERNEL), %g7
1456 srlx %g5, TAR_VPN_SHIFT, %g5
1458 sllx %g6, TTE_SHIFT, %g6
1464 ldda [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
1467 * Check that it's valid and that the virtual page numbers match.
1469 brgez,pn %g7, tl1_dmmu_miss_trap
1470 srlx %g6, TV_SIZE_BITS, %g6
1472 bne,pn %xcc, tl1_dmmu_miss_trap
1476 * Set the reference bit if it's currently clear.
1478 andcc %g7, TD_REF, %g0
1479 bz,a,pt %xcc, tl1_dmmu_miss_set_ref
1483 * Load the TTE data into the TLB and retry the instruction.
1485 stxa %g7, [%g0] ASI_DTLB_DATA_IN_REG
1490 ENTRY(tl1_dmmu_miss_set_ref)
1492 * Recompute the TTE address, which we clobbered loading the TTE.
1493 * The TSB mask and address of the TSB are patched at startup.
1495 .globl tl1_dmmu_miss_patch_2
1496 tl1_dmmu_miss_patch_2:
1497 sethi %hi(TSB_KERNEL_MASK), %g6
1498 or %g6, %lo(TSB_KERNEL_MASK), %g6
1499 sethi %hi(TSB_KERNEL), %g7
1502 sllx %g5, TTE_SHIFT, %g5
1506 * Set the reference bit.
1508 TTE_SET_REF(%g5, %g6, %g7)
1511 * May have become invalid during casxa, in which case start over.
1517 * Load the TTE data into the TLB and retry the instruction.
1519 stxa %g6, [%g0] ASI_DTLB_DATA_IN_REG
1521 END(tl1_dmmu_miss_set_ref)
1523 ENTRY(tl1_dmmu_miss_trap)
1525 * Switch to alternate globals.
1527 wrpr %g0, PSTATE_ALT, %pstate
1529 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
1538 mov T_DATA_MISS | T_KERNEL, %o0
1539 END(tl1_dmmu_miss_trap)
1541 ENTRY(tl1_dmmu_miss_direct)
1543 * Mask off the high bits of the virtual address to get the physical
1544 * address, and or in the TTE bits. The virtual address bits that
1545 * correspond to the TTE valid and page size bits are left set, so
1546 * they don't have to be included in the TTE bits below. We know they
1547 * are set because the virtual address is in the upper va hole.
1549 setx TLB_DIRECT_TO_TTE_MASK, %g7, %g6
1551 or %g5, TD_CP | TD_CV | TD_W, %g5
1554 * Load the TTE data into the TLB and retry the instruction.
1556 stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
1558 END(tl1_dmmu_miss_direct)
1560 .macro tl1_dmmu_prot
1561 ba,a %xcc, tl1_dmmu_prot_1
1566 ENTRY(tl1_dmmu_prot_1)
1568 * Load the context and the virtual page number from the tag access
1571 wr %g0, ASI_DMMU, %asi
1572 ldxa [%g0 + AA_DMMU_TAR] %asi, %g5
1575 * Extract the context from the contents of the tag access register.
1576 * If it's non-zero this is a fault on a user address. Note that the
1577 * faulting address is passed in %g1.
1579 sllx %g5, 64 - TAR_VPN_SHIFT, %g6
1580 brnz,a,pn %g6, tl1_dmmu_prot_user
1584 * Compute the address of the TTE. The TSB mask and address of the
1585 * TSB are patched at startup.
1587 .globl tl1_dmmu_prot_patch_1
1588 tl1_dmmu_prot_patch_1:
1589 sethi %hi(TSB_KERNEL_MASK), %g6
1590 or %g6, %lo(TSB_KERNEL_MASK), %g6
1591 sethi %hi(TSB_KERNEL), %g7
1593 srlx %g5, TAR_VPN_SHIFT, %g5
1595 sllx %g6, TTE_SHIFT, %g6
1601 ldda [%g6] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
1604 * Check that it's valid and writable and that the virtual page
1607 brgez,pn %g7, tl1_dmmu_prot_trap
1608 andcc %g7, TD_SW, %g0
1609 bz,pn %xcc, tl1_dmmu_prot_trap
1610 srlx %g6, TV_SIZE_BITS, %g6
1612 bne,pn %xcc, tl1_dmmu_prot_trap
1616 * Delete the old TLB entry and clear the SFSR.
1618 sllx %g5, TAR_VPN_SHIFT, %g6
1619 or %g6, TLB_DEMAP_NUCLEUS, %g6
1620 stxa %g0, [%g6] ASI_DMMU_DEMAP
1621 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1625 * Recompute the TTE address, which we clobbered loading the TTE.
1626 * The TSB mask and address of the TSB are patched at startup.
1628 .globl tl1_dmmu_prot_patch_2
1629 tl1_dmmu_prot_patch_2:
1630 sethi %hi(TSB_KERNEL_MASK), %g6
1631 or %g6, %lo(TSB_KERNEL_MASK), %g6
1632 sethi %hi(TSB_KERNEL), %g7
1635 sllx %g5, TTE_SHIFT, %g5
1639 * Set the hardware write bit.
1641 TTE_SET_W(%g5, %g6, %g7)
1644 * May have become invalid during casxa, in which case start over.
1650 * Load the TTE data into the TLB and retry the instruction.
1652 stxa %g6, [%g0] ASI_DTLB_DATA_IN_REG
1654 END(tl1_dmmu_prot_1)
1656 ENTRY(tl1_dmmu_prot_trap)
1658 * Switch to alternate globals.
1660 wrpr %g0, PSTATE_ALT, %pstate
1663 * Load the SFAR, SFSR and TAR. Clear the SFSR.
1665 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
1666 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
1667 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
1668 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1678 mov T_DATA_PROTECTION | T_KERNEL, %o0
1679 END(tl1_dmmu_prot_trap)
1681 .macro tl1_spill_0_n
1682 SPILL(stx, %sp + SPOFF, 8, EMPTY)
1690 .macro tl1_spill_2_n
1691 wr %g0, ASI_AIUP, %asi
1692 SPILL(stxa, %sp + SPOFF, 8, %asi)
1700 .macro tl1_spill_3_n
1701 wr %g0, ASI_AIUP, %asi
1702 SPILL(stwa, %sp, 4, %asi)
1710 .macro tl1_spill_7_n
1712 bnz,a,pn %xcc, tl1_spill_0_n
1715 SPILL(stw, %sp, 4, EMPTY)
1723 .macro tl1_spill_0_o
1724 wr %g0, ASI_AIUP, %asi
1725 SPILL(stxa, %sp + SPOFF, 8, %asi)
1733 .macro tl1_spill_1_o
1734 wr %g0, ASI_AIUP, %asi
1735 SPILL(stwa, %sp, 4, %asi)
1743 .macro tl1_spill_2_o
1749 FILL(ldx, %sp + SPOFF, 8, EMPTY)
1758 wr %g0, ASI_AIUP, %asi
1759 FILL(ldxa, %sp + SPOFF, 8, %asi)
1768 wr %g0, ASI_AIUP, %asi
1769 FILL(lduwa, %sp, 4, %asi)
1779 bnz,a,pt %xcc, tl1_fill_0_n
1782 FILL(lduw, %sp, 4, EMPTY)
! tl1_spill_topcb: spill a user register window into the pcb instead of
! the user stack.  Saves %g1-%g3 to the per-cpu scratch area (ASP_REG),
! indexes the pcb save arrays by the current pcb_nsaved count, records
! the window's %sp, stores the window itself via SPILL, and updates
! pcb_nsaved.  These saved windows must later be copied out before
! returning to usermode (see the pcb_nsaved checks in the return path).
1791 * This is used to spill windows that are still occupied with user
1792 * data on kernel entry to the pcb.
1794 ENTRY(tl1_spill_topcb)
1795 wrpr %g0, PSTATE_ALT, %pstate
1797 /* Free some globals for our use. */
1799 stx %g1, [ASP_REG + 0]
1800 stx %g2, [ASP_REG + 8]
1801 stx %g3, [ASP_REG + 16]
! %g1 = number of windows already saved in the pcb.
1803 ldx [PCB_REG + PCB_NSAVED], %g1
! Record this window's stack pointer: pcb_rwsp[nsaved] = %sp.
1805 sllx %g1, PTR_SHIFT, %g2
1806 add %g2, PCB_REG, %g2
1807 stx %sp, [%g2 + PCB_RWSP]
! Store the window registers: pcb_rw[nsaved] (RW_SHIFT-sized slots).
1809 sllx %g1, RW_SHIFT, %g2
1810 add %g2, PCB_REG, %g2
1811 SPILL(stx, %g2 + PCB_RW, 8, EMPTY)
1814 stx %g1, [PCB_REG + PCB_NSAVED]
1816 #if KTR_COMPILE & KTR_TRAP
1817 CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
1818 , %g1, %g2, %g3, 7, 8, 9)
1820 stx %g2, [%g1 + KTR_PARM1]
1822 stx %g2, [%g1 + KTR_PARM2]
1823 stx %sp, [%g1 + KTR_PARM3]
1824 ldx [PCB_REG + PCB_NSAVED], %g2
1825 stx %g2, [%g1 + KTR_PARM4]
! Restore the scratched globals in reverse order.
1831 ldx [ASP_REG + 16], %g3
1832 ldx [ASP_REG + 8], %g2
1833 ldx [ASP_REG + 0], %g1
1836 END(tl1_spill_topcb)
! Filler macros for trap-table slots: tl1_spill_bad/tl1_fill_bad stamp
! out \count entries for spill/fill vectors that should never be taken;
! tl1_soft stamps out \count software-trap entries routed through the
! generic kernel trap path with type T_SOFT | T_KERNEL.
! NOTE(review): macro bodies and .endm lines are elided in this excerpt.
1838 .macro tl1_spill_bad count
1845 .macro tl1_fill_bad count
1852 .macro tl1_soft count
1854 tl1_gen T_SOFT | T_KERNEL
! The trap vector table.  Each macro invocation expands to one
! (fixed-size) table entry; vectors 0x000-0x1ff are taken from
! trap level 0 (usermode), vectors 0x200-0x3ff from trap level 1
! (kernel).  The trailing "!" comments give the hardware vector
! number(s) each entry covers.  tl0_reserved/tl1_reserved N stamp out
! N unused entries.
1859 .globl tl_trap_begin
1867 tl0_reserved 8 ! 0x0-0x7
1869 tl0_insn_excptn ! 0x8
1870 tl0_reserved 1 ! 0x9
1872 tl0_gen T_INSTRUCTION_ERROR ! 0xa
1873 tl0_reserved 5 ! 0xb-0xf
1875 tl0_gen T_ILLEGAL_INSTRUCTION ! 0x10
1877 tl0_gen T_PRIVILEGED_OPCODE ! 0x11
1878 tl0_reserved 14 ! 0x12-0x1f
1880 tl0_gen T_FP_DISABLED ! 0x20
1882 tl0_gen T_FP_EXCEPTION_IEEE_754 ! 0x21
1884 tl0_gen T_FP_EXCEPTION_OTHER ! 0x22
1886 tl0_gen T_TAG_OVERFLOW ! 0x23
1890 tl0_gen T_DIVISION_BY_ZERO ! 0x28
1891 tl0_reserved 7 ! 0x29-0x2f
1893 tl0_data_excptn ! 0x30
1894 tl0_reserved 1 ! 0x31
1896 tl0_gen T_DATA_ERROR ! 0x32
1897 tl0_reserved 1 ! 0x33
1901 tl0_gen T_RESERVED ! 0x35
1903 tl0_gen T_RESERVED ! 0x36
1905 tl0_gen T_PRIVILEGED_ACTION ! 0x37
1906 tl0_reserved 9 ! 0x38-0x40
1908 tl0_intr_level ! 0x41-0x4f
1909 tl0_reserved 16 ! 0x50-0x5f
1913 tl0_gen T_PA_WATCHPOINT ! 0x61
1915 tl0_gen T_VA_WATCHPOINT ! 0x62
1917 tl0_gen T_CORRECTED_ECC_ERROR ! 0x63
! MMU miss/protection vectors span 4 entries each (0x64-0x6f).
1919 tl0_immu_miss ! 0x64
1921 tl0_dmmu_miss ! 0x68
1923 tl0_dmmu_prot ! 0x6c
1924 tl0_reserved 16 ! 0x70-0x7f
! Window spill/fill vectors (0x80-0xff).
1926 tl0_spill_0_n ! 0x80
1928 tl0_spill_1_n ! 0x84
1929 tl0_spill_bad 14 ! 0x88-0xbf
1934 tl0_fill_bad 14 ! 0xc8-0xff
! Software trap vectors (0x100-0x1ff).
1936 tl0_gen T_SYSCALL ! 0x100
1937 tl0_gen T_BREAKPOINT ! 0x101
1938 tl0_gen T_DIVISION_BY_ZERO ! 0x102
1939 tl0_reserved 1 ! 0x103
1940 tl0_gen T_CLEAN_WINDOW ! 0x104
1941 tl0_gen T_RANGE_CHECK ! 0x105
1942 tl0_gen T_FIX_ALIGNMENT ! 0x106
1943 tl0_gen T_INTEGER_OVERFLOW ! 0x107
1944 tl0_gen T_SYSCALL ! 0x108
1945 tl0_gen T_SYSCALL ! 0x109
1946 tl0_fp_restore ! 0x10a
1947 tl0_reserved 5 ! 0x10b-0x10f
1948 tl0_gen T_TRAP_INSTRUCTION_16 ! 0x110
1949 tl0_gen T_TRAP_INSTRUCTION_17 ! 0x111
1950 tl0_gen T_TRAP_INSTRUCTION_18 ! 0x112
1951 tl0_gen T_TRAP_INSTRUCTION_19 ! 0x113
1952 tl0_gen T_TRAP_INSTRUCTION_20 ! 0x114
1953 tl0_gen T_TRAP_INSTRUCTION_21 ! 0x115
1954 tl0_gen T_TRAP_INSTRUCTION_22 ! 0x116
1955 tl0_gen T_TRAP_INSTRUCTION_23 ! 0x117
1956 tl0_gen T_TRAP_INSTRUCTION_24 ! 0x118
1957 tl0_gen T_TRAP_INSTRUCTION_25 ! 0x119
1958 tl0_gen T_TRAP_INSTRUCTION_26 ! 0x11a
1959 tl0_gen T_TRAP_INSTRUCTION_27 ! 0x11b
1960 tl0_gen T_TRAP_INSTRUCTION_28 ! 0x11c
1961 tl0_gen T_TRAP_INSTRUCTION_29 ! 0x11d
1962 tl0_gen T_TRAP_INSTRUCTION_30 ! 0x11e
1963 tl0_gen T_TRAP_INSTRUCTION_31 ! 0x11f
1964 tl0_reserved 32 ! 0x120-0x13f
1965 tl0_gen T_SYSCALL ! 0x140
1967 tl0_gen T_SYSCALL ! 0x142
1968 tl0_gen T_SYSCALL ! 0x143
1969 tl0_reserved 188 ! 0x144-0x1ff
! Trap level 1 (kernel) half of the table (0x200-0x3ff).
1972 tl1_reserved 8 ! 0x200-0x207
1974 tl1_insn_excptn ! 0x208
1975 tl1_reserved 1 ! 0x209
1977 tl1_gen T_INSTRUCTION_ERROR ! 0x20a
1978 tl1_reserved 5 ! 0x20b-0x20f
1980 tl1_gen T_ILLEGAL_INSTRUCTION ! 0x210
1982 tl1_gen T_PRIVILEGED_OPCODE ! 0x211
1983 tl1_reserved 14 ! 0x212-0x21f
1985 tl1_fp_disabled ! 0x220
1987 tl1_gen T_FP_EXCEPTION_IEEE_754 ! 0x221
1989 tl1_gen T_FP_EXCEPTION_OTHER ! 0x222
1991 tl1_gen T_TAG_OVERFLOW ! 0x223
1993 clean_window ! 0x224
1995 tl1_gen T_DIVISION_BY_ZERO ! 0x228
1996 tl1_reserved 7 ! 0x229-0x22f
1998 tl1_data_excptn ! 0x230
1999 tl1_reserved 1 ! 0x231
2001 tl1_gen T_DATA_ERROR ! 0x232
2002 tl1_reserved 1 ! 0x233
2006 tl1_gen T_RESERVED ! 0x235
2008 tl1_gen T_RESERVED ! 0x236
2010 tl1_gen T_PRIVILEGED_ACTION ! 0x237
2011 tl1_reserved 9 ! 0x238-0x240
2013 tl1_intr_level ! 0x241-0x24f
2014 tl1_reserved 16 ! 0x250-0x25f
2018 tl1_gen T_PA_WATCHPOINT ! 0x261
2020 tl1_gen T_VA_WATCHPOINT ! 0x262
2022 tl1_gen T_CORRECTED_ECC_ERROR ! 0x263
2024 tl1_immu_miss ! 0x264
2026 tl1_dmmu_miss ! 0x268
2028 tl1_dmmu_prot ! 0x26c
2029 tl1_reserved 16 ! 0x270-0x27f
2031 tl1_spill_0_n ! 0x280
2032 tl1_spill_bad 1 ! 0x284
2034 tl1_spill_2_n ! 0x288
2036 tl1_spill_3_n ! 0x28c
2037 tl1_spill_bad 3 ! 0x290-0x29b
2039 tl1_spill_7_n ! 0x29c
2041 tl1_spill_0_o ! 0x2a0
2043 tl1_spill_1_o ! 0x2a4
2045 tl1_spill_2_o ! 0x2a8
2046 tl1_spill_bad 5 ! 0x2ac-0x2bf
2048 tl1_fill_0_n ! 0x2c0
2049 tl1_fill_bad 1 ! 0x2c4
2051 tl1_fill_2_n ! 0x2c8
2053 tl1_fill_3_n ! 0x2cc
2054 tl1_fill_bad 3 ! 0x2d0-0x2db
2056 tl1_fill_7_n ! 0x2dc
2057 tl1_fill_bad 8 ! 0x2e0-0x2ff
2058 tl1_reserved 1 ! 0x300
2060 tl1_gen T_BREAKPOINT ! 0x301
2061 tl1_gen T_RSTRWP_PHYS ! 0x302
2062 tl1_gen T_RSTRWP_VIRT ! 0x303
2063 tl1_reserved 252 ! 0x304-0x3ff
! tl0_utrap: redirect an eligible user trap straight back to a
! user-registered handler (sysarch(SPARC_UTRAP_INSTALL)) without going
! through the kernel's trap().  Falls back to tl0_trap when the type is
! out of range, no handler table / handler entry exists, or windows
! were spilled to the pcb on entry.  NOTE(review): the ENTRY directive
! and several instructions are elided from this excerpt.
2070 * User trap entry point
2072 * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2075 * This handles redirecting a trap back to usermode as a user trap. The user
2076 * program must have first registered a trap handler with the kernel using
2077 * sysarch(SPARC_UTRAP_INSTALL). The trap handler is passed enough state
2078 * for it to return to the trapping code directly, it will not return through
2079 * the kernel. The trap type is passed in %o0, all out registers must be
2080 * passed through to tl0_trap or to usermode untouched. Note that the
2081 * parameters passed in out registers may be used by the user trap handler.
2082 * Do not change the registers they are passed in or you will break the ABI.
2084 * If the trap type allows user traps, setup state to execute the user trap
2085 * handler and bounce back to usermode, otherwise branch to tl0_trap.
2089 * Check if the trap type allows user traps.
2092 bge,a,pt %xcc, tl0_trap
2096 * Load the user trap handler from the utrap table.
! curthread->td_proc->p_md.md_utrap is the per-process utrap table.
2098 ldx [PCPU(CURTHREAD)], %l0
2099 ldx [%l0 + TD_PROC], %l0
2100 ldx [%l0 + P_MD + MD_UTRAP], %l0
2101 brz,pt %l0, tl0_trap
! Index the table by trap type (%o0) scaled to pointer size.
2102 sllx %o0, PTR_SHIFT, %l1
2103 ldx [%l0 + %l1], %l0
2104 brz,a,pt %l0, tl0_trap
2108 * If the save we did on entry to the kernel had to spill a window
2109 * to the pcb, pretend we took a spill trap instead. Any windows
2110 * that are in the pcb must be copied out or the fill handler will
2111 * not be able to find them, since the user trap handler returns
2112 * directly to the trapping code. Note that we only support precise
2113 * user traps, which implies that the condition that caused the trap
2114 * in the first place is still valid, so it will occur again when we
2115 * re-execute the trapping instruction.
2117 ldx [PCB_REG + PCB_NSAVED], %l1
2118 brnz,a,pn %l1, tl0_trap
2122 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
2123 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
2124 * it may be clobbered by an interrupt before the user trap code
2125 * can read it, and we must pass %tstate in order to restore %ccr
2126 * and %asi. The %fsr must be stored to memory, so we use the
2127 * temporary stack for that.
2130 or %l1, FPRS_FEF, %l2
2143 * Setup %tnpc to return to.
2148 * Setup %wstate for return, clear WSTATE_TRANSITION.
2151 and %l1, WSTATE_NORMAL_MASK, %l1
2152 wrpr %l1, 0, %wstate
2155 * Setup %tstate for return, change the saved cwp to point to the
2156 * current window instead of the window at the time of the trap.
2158 andn %l5, TSTATE_CWP_MASK, %l1
2160 wrpr %l1, %l2, %tstate
2163 * Setup %sp. Userland processes will crash if this is not setup.
2168 * Execute the user trap handler.
! tl0_trap: common user (TL0) trap entry.  Switches to the kernel
! stack, builds a struct trapframe below the pcb, switches to normal
! globals, and dispatches to trap() with the frame address in %o0 and
! the return address doctored so trap() returns into tl0_ret.
! NOTE(review): the ENTRY directive and several instructions (including
! the jump to trap()) are elided from this excerpt.
2174 * (Real) User trap entry point
2176 * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfsr,
2179 * The following setup has been performed:
2180 * - the windows have been split and the active user window has been saved
2181 * (maybe just to the pcb)
2182 * - we are on alternate globals and interrupts are disabled
2184 * We switch to the kernel stack, build a trapframe, switch to normal
2185 * globals, enable interrupts and call trap.
2187 * NOTE: We must be very careful setting up the per-cpu pointer. We know that
2188 * it has been pre-set in alternate globals, so we read it from there and setup
2189 * the normal %g7 *before* enabling interrupts. This avoids any possibility
2190 * of cpu migration and using the wrong pcpup.
2194 * Force kernel store order.
2196 wrpr %g0, PSTATE_ALT, %pstate
2205 #if KTR_COMPILE & KTR_TRAP
2207 "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2208 , %g1, %g2, %g3, 7, 8, 9)
2209 ldx [PCPU(CURTHREAD)], %g2
2210 stx %g2, [%g1 + KTR_PARM1]
2211 stx %o0, [%g1 + KTR_PARM2]
2213 stx %g2, [%g1 + KTR_PARM3]
2214 stx %l1, [%g1 + KTR_PARM4]
2215 stx %l2, [%g1 + KTR_PARM5]
2216 stx %i6, [%g1 + KTR_PARM6]
! Save the user window state in wstate.other and take all restorable
! windows away from the user (%canrestore -> %otherwin).
2220 1: and %l5, WSTATE_NORMAL_MASK, %l5
2221 sllx %l5, WSTATE_OTHER_SHIFT, %l5
2222 wrpr %l5, WSTATE_KERNEL, %wstate
2223 rdpr %canrestore, %l6
2224 wrpr %l6, 0, %otherwin
2225 wrpr %g0, 0, %canrestore
! Carve the trapframe out of the kernel stack just below the pcb.
2227 sub PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
2229 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2230 stx %o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2231 stx %o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2232 stx %o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2233 stx %o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2235 stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2236 stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2237 stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2238 stx %l3, [%sp + SPOFF + CCFSZ + TF_Y]
2239 stx %l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2240 stx %l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
! %fsr can only be read with FP enabled, so enable it transiently.
2242 wr %g0, FPRS_FEF, %fprs
2243 stx %fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2245 stx %l6, [%sp + SPOFF + CCFSZ + TF_GSR]
! Save the special globals from the normal set before going to
! full kernel pstate.
2250 wrpr %g0, PSTATE_NORMAL, %pstate
2252 stx %g6, [%sp + SPOFF + CCFSZ + TF_G6]
2253 stx %g7, [%sp + SPOFF + CCFSZ + TF_G7]
2257 wrpr %g0, PSTATE_KERNEL, %pstate
2259 stx %i0, [%sp + SPOFF + CCFSZ + TF_O0]
2260 stx %i1, [%sp + SPOFF + CCFSZ + TF_O1]
2261 stx %i2, [%sp + SPOFF + CCFSZ + TF_O2]
2262 stx %i3, [%sp + SPOFF + CCFSZ + TF_O3]
2263 stx %i4, [%sp + SPOFF + CCFSZ + TF_O4]
2264 stx %i5, [%sp + SPOFF + CCFSZ + TF_O5]
2265 stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
2266 stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
2268 stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
2269 stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
2270 stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
2271 stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
2272 stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
! Forge the return address: the callee's "ret" (which adds 8) will
! land at tl0_ret instead of returning here.
2274 set tl0_ret - 8, %o7
2276 add %sp, CCFSZ + SPOFF, %o0
! tl0_intr: user-mode (TL0) interrupt entry.  Mirrors tl0_trap's frame
! construction, additionally clearing the softint bit (%o1 mask),
! storing T_INTERRUPT as the type, and dispatching through the
! intr_handlers table indexed by the interrupt level.  NOTE(review):
! the ENTRY directive, label 1:, the handler call and the final branch
! are elided from this excerpt.
2280 * void tl0_intr(u_int level, u_int mask)
2284 * Force kernel store order.
2286 wrpr %g0, PSTATE_ALT, %pstate
2295 #if KTR_COMPILE & KTR_INTR
2297 "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2298 , %g1, %g2, %g3, 7, 8, 9)
2299 ldx [PCPU(CURTHREAD)], %g2
2300 stx %g2, [%g1 + KTR_PARM1]
2301 stx %o0, [%g1 + KTR_PARM2]
2303 stx %g2, [%g1 + KTR_PARM3]
2304 stx %l1, [%g1 + KTR_PARM4]
2305 stx %l2, [%g1 + KTR_PARM5]
2306 stx %i6, [%g1 + KTR_PARM6]
! Acknowledge the software interrupt bit for this level.
2311 wr %o1, 0, %clear_softint
! Window state handoff, identical in form to tl0_trap.
2313 and %l5, WSTATE_NORMAL_MASK, %l5
2314 sllx %l5, WSTATE_OTHER_SHIFT, %l5
2315 wrpr %l5, WSTATE_KERNEL, %wstate
2316 rdpr %canrestore, %l6
2317 wrpr %l6, 0, %otherwin
2318 wrpr %g0, 0, %canrestore
2320 sub PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
2322 stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2323 stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2324 stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2325 stx %l3, [%sp + SPOFF + CCFSZ + TF_Y]
2326 stx %l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2327 stx %l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
2329 wr %g0, FPRS_FEF, %fprs
2330 stx %fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2332 stx %l6, [%sp + SPOFF + CCFSZ + TF_GSR]
2336 mov T_INTERRUPT, %o1
2338 stx %o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2339 stx %o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
2343 wrpr %g0, PSTATE_NORMAL, %pstate
2345 stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
2346 stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
2347 stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
2348 stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
2349 stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
2350 stx %g6, [%sp + SPOFF + CCFSZ + TF_G6]
2351 stx %g7, [%sp + SPOFF + CCFSZ + TF_G7]
2355 wrpr %g0, PSTATE_KERNEL, %pstate
2357 stx %i0, [%sp + SPOFF + CCFSZ + TF_O0]
2358 stx %i1, [%sp + SPOFF + CCFSZ + TF_O1]
2359 stx %i2, [%sp + SPOFF + CCFSZ + TF_O2]
2360 stx %i3, [%sp + SPOFF + CCFSZ + TF_O3]
2361 stx %i4, [%sp + SPOFF + CCFSZ + TF_O4]
2362 stx %i5, [%sp + SPOFF + CCFSZ + TF_O5]
2363 stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
2364 stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
! Look up the interrupt handler: intr_handlers[level].
2366 SET(intr_handlers, %l1, %l0)
2367 sllx %l3, IH_SHIFT, %l1
2368 ldx [%l0 + %l1], %l1
2369 KASSERT(%l1, "tl0_intr: ih null")
2371 add %sp, CCFSZ + SPOFF, %o0
2373 /* %l3 contains PIL */
! Interrupt statistics: prefetch intrcnt, bump the per-PIL counter
! and the global vm counter (cnt.v_intr).
2374 SET(intrcnt, %l1, %l2)
2375 prefetcha [%l2] ASI_N, 1
2376 SET(pil_countp, %l1, %l0)
2378 lduh [%l0 + %l1], %l0
2385 lduw [PCPU(CNT) + V_INTR], %l0
2387 stw %l0, [PCPU(CNT) + V_INTR]
! tl0_ret: return-to-usermode path.  Handles pending ASTs, copies
! pcb-saved windows out, restores the trapframe, and performs the
! final "restore" back into the user's window; a failed fill during
! that restore is caught at tl0_ret_fill_end and retried via trap().
! NOTE(review): several labels/instructions (ENTRY, branches, retry)
! are elided from this excerpt.
2394 * Initiate return to usermode.
2396 * Called with a trapframe on the stack. The window that was setup in
2397 * tl0_trap may have been used by "fast" trap handlers that pretend to be
2398 * leaf functions, so all ins and locals may have been clobbered since
2401 * This code is rather long and complicated.
2405 * Check for pending asts atomically with returning. We must raise
2406 * the PIL before checking, and if no asts are found the PIL must
2407 * remain raised until the retry is executed, or we risk missing asts
2408 * caused by interrupts occurring after the test. If the PIL is
2409 * lowered, as it is when we call ast, the check must be re-executed.
2411 wrpr %g0, PIL_TICK, %pil
2412 ldx [PCPU(CURTHREAD)], %l0
2413 lduw [%l0 + TD_FLAGS], %l1
2414 set TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
2420 * We have an AST. Re-enable interrupts and handle it, then restart
2421 * the return sequence.
2425 add %sp, CCFSZ + SPOFF, %o0
2430 * Check for windows that were spilled to the pcb and need to be
2431 * copied out. This must be the last thing that is done before the
2432 * return to usermode. If there are still user windows in the cpu
2433 * and we call a nested function after this, which causes them to be
2434 * spilled to the pcb, they will not be copied out and the stack will
2437 1: ldx [PCB_REG + PCB_NSAVED], %l1
2442 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2444 add %sp, SPOFF + CCFSZ, %o0
2449 * Restore the out and most global registers from the trapframe.
2450 * The ins will become the outs when we restore below.
2452 2: ldx [%sp + SPOFF + CCFSZ + TF_O0], %i0
2453 ldx [%sp + SPOFF + CCFSZ + TF_O1], %i1
2454 ldx [%sp + SPOFF + CCFSZ + TF_O2], %i2
2455 ldx [%sp + SPOFF + CCFSZ + TF_O3], %i3
2456 ldx [%sp + SPOFF + CCFSZ + TF_O4], %i4
2457 ldx [%sp + SPOFF + CCFSZ + TF_O5], %i5
2458 ldx [%sp + SPOFF + CCFSZ + TF_O6], %i6
2459 ldx [%sp + SPOFF + CCFSZ + TF_O7], %i7
2461 ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
2462 ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
2463 ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
2464 ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
2465 ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
2468 * Load everything we need to restore below before disabling
2471 ldx [%sp + SPOFF + CCFSZ + TF_FPRS], %l0
2472 ldx [%sp + SPOFF + CCFSZ + TF_GSR], %l1
2473 ldx [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2474 ldx [%sp + SPOFF + CCFSZ + TF_TPC], %l3
2475 ldx [%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
2476 ldx [%sp + SPOFF + CCFSZ + TF_Y], %l5
2477 ldx [%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
2480 * Disable interrupts to restore the special globals. They are not
2481 * saved and restored for all kernel traps, so an interrupt at the
2482 * wrong time would clobber them.
2484 wrpr %g0, PSTATE_NORMAL, %pstate
2486 ldx [%sp + SPOFF + CCFSZ + TF_G6], %g6
2487 ldx [%sp + SPOFF + CCFSZ + TF_G7], %g7
2490 * Switch to alternate globals. This frees up some registers we
2491 * can use after the restore changes our window.
2493 wrpr %g0, PSTATE_ALT, %pstate
2496 * Drop %pil to zero. It must have been zero at the time of the
2497 * trap, since we were in usermode, but it was raised above in
2498 * order to check for asts atomically. We have interrupts disabled
2499 * so any interrupts will not be serviced until we complete the
2500 * return to usermode.
2505 * Save %fprs in an alternate global so it can be restored after the
2506 * restore instruction below. If we restore it before the restore,
2507 * and the restore traps we may run for a while with floating point
2508 * enabled in the kernel, which we want to avoid.
2513 * Restore %fsr and %gsr. These need floating point enabled in %fprs,
2514 * so we set it temporarily and then clear it.
2516 wr %g0, FPRS_FEF, %fprs
2517 ldx [%sp + SPOFF + CCFSZ + TF_FSR], %fsr
2522 * Restore program counters. This could be done after the restore
2523 * but we're out of alternate globals to store them in...
2529 * Save %tstate in an alternate global and clear the %cwp field. %cwp
2530 * will be affected by the restore below and we need to make sure it
2531 * points to the current window at that time, not the window that was
2532 * active at the time of the trap.
2534 andn %l4, TSTATE_CWP_MASK, %g2
2537 * Restore %y. Could also be below if we had more alternate globals.
2542 * Setup %wstate for return. We need to restore the user window state
2543 * which we saved in wstate.other when we trapped. We also need to
2544 * set the transition bit so the restore will be handled specially
2545 * if it traps, use the xor feature of wrpr to do that.
2547 srlx %l6, WSTATE_OTHER_SHIFT, %g3
2548 wrpr %g3, WSTATE_TRANSITION, %wstate
2551 * Setup window management registers for return. If not all user
2552 * windows were spilled in the kernel %otherwin will be non-zero,
2553 * so we need to transfer it to %canrestore to correctly restore
2554 * those windows. Otherwise everything gets set to zero and the
2555 * restore below will fill a window directly from the user stack.
2558 wrpr %o0, 0, %canrestore
2559 wrpr %g0, 0, %otherwin
2560 wrpr %o0, 0, %cleanwin
2563 * Now do the restore. If this instruction causes a fill trap which
2564 * fails to fill a window from the user stack, we will resume at
2565 * tl0_ret_fill_end and call back into the kernel.
2571 * We made it. We're back in the window that was active at the time
2572 * of the trap, and ready to return to usermode.
2576 * Restore %fprs. This was saved in an alternate global above.
2581 * Fixup %tstate so the saved %cwp points to the current window and
2585 wrpr %g2, %g4, %tstate
2588 * Restore the user window state. The transition bit was set above
2589 * for special handling of the restore, this clears it.
2591 wrpr %g3, 0, %wstate
2593 #if KTR_COMPILE & KTR_TRAP
2594 CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2595 , %g2, %g3, %g4, 7, 8, 9)
2596 ldx [PCPU(CURTHREAD)], %g3
2597 stx %g3, [%g2 + KTR_PARM1]
2599 stx %g3, [%g2 + KTR_PARM2]
2601 stx %g3, [%g2 + KTR_PARM3]
2603 stx %g3, [%g2 + KTR_PARM4]
2604 stx %sp, [%g2 + KTR_PARM5]
2609 * Return to usermode.
2614 #if KTR_COMPILE & KTR_TRAP
2615 CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
2616 , %l0, %l1, %l2, 7, 8, 9)
2618 stx %l1, [%l0 + KTR_PARM1]
2619 stx %l5, [%l0 + KTR_PARM2]
2620 stx %sp, [%l0 + KTR_PARM3]
2625 * The restore above caused a fill trap and the fill handler was
2626 * unable to fill a window from the user stack. The special fill
2627 * handler recognized this and punted, sending us here. We need
2628 * to carefully undo any state that was restored before the restore
2629 * was executed and call trap again. Trap will copyin a window
2630 * from the user stack which will fault in the page we need so the
2631 * restore above will succeed when we try again. If this fails
2632 * the process has trashed its stack, so we kill it.
2636 * Restore the kernel window state. This was saved in %l6 above, and
2637 * since the restore failed we're back in the same window.
2639 wrpr %l6, 0, %wstate
2642 * Restore the normal globals which have predefined values in the
2643 * kernel. We clobbered them above restoring the user's globals
2644 * so this is very important.
2645 * XXX PSTATE_ALT must already be set.
2647 wrpr %g0, PSTATE_ALT, %pstate
2650 wrpr %g0, PSTATE_NORMAL, %pstate
2653 wrpr %g0, PSTATE_KERNEL, %pstate
2656 * Simulate a fill trap and then start the whole return sequence over
2657 * again. This is special because it only copies in 1 window, not 2
2658 * as we would for a normal failed fill. This may be the first time
2659 * the process has been run, so there may not be 2 windows worth of
2663 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2665 add %sp, SPOFF + CCFSZ, %o0
! tl1_trap: kernel (TL1) trap entry.  The kernel stack is already
! valid, so no window split is needed: build a trapframe in place and
! dispatch to trap() with the return address forged to land in tl1_ret.
! NOTE(review): the ENTRY directive and the actual call to trap() are
! elided from this excerpt.
2671 * Kernel trap entry point
2673 * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2676 * This is easy because the stack is already setup and the windows don't need
2677 * to be split. We build a trapframe and call trap(), the same as above, but
2678 * the outs don't need to be saved.
2688 #if KTR_COMPILE & KTR_TRAP
2689 CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
2690 , %g1, %g2, %g3, 7, 8, 9)
2691 ldx [PCPU(CURTHREAD)], %g2
2692 stx %g2, [%g1 + KTR_PARM1]
2693 stx %o0, [%g1 + KTR_PARM2]
2694 stx %l3, [%g1 + KTR_PARM3]
2695 stx %l1, [%g1 + KTR_PARM4]
2696 stx %i6, [%g1 + KTR_PARM5]
2702 and %l5, WSTATE_OTHER_MASK, %l5
2703 wrpr %l5, WSTATE_KERNEL, %wstate
2705 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2706 stx %o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2707 stx %o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2708 stx %o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2709 stx %o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2711 stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2712 stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2713 stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2714 stx %l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2715 stx %l4, [%sp + SPOFF + CCFSZ + TF_Y]
! Special globals are only reachable from the normal set.
2719 wrpr %g0, PSTATE_NORMAL, %pstate
2721 stx %g6, [%sp + SPOFF + CCFSZ + TF_G6]
2722 stx %g7, [%sp + SPOFF + CCFSZ + TF_G7]
2726 wrpr %g0, PSTATE_KERNEL, %pstate
2728 stx %i0, [%sp + SPOFF + CCFSZ + TF_O0]
2729 stx %i1, [%sp + SPOFF + CCFSZ + TF_O1]
2730 stx %i2, [%sp + SPOFF + CCFSZ + TF_O2]
2731 stx %i3, [%sp + SPOFF + CCFSZ + TF_O3]
2732 stx %i4, [%sp + SPOFF + CCFSZ + TF_O4]
2733 stx %i5, [%sp + SPOFF + CCFSZ + TF_O5]
2734 stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
2735 stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
2737 stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
2738 stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
2739 stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
2740 stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
2741 stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
! Forge the return address: callee's "ret" (+8) lands at tl1_ret.
2743 set tl1_ret - 8, %o7
2745 add %sp, CCFSZ + SPOFF, %o0
! tl1_ret: return from a kernel (TL1) trap.  Restores the trapframe,
! re-restores the special globals %g6/%g7 only when the trapped pc was
! outside the PROM address range (the VM_{MIN,MAX}_PROM_ADDRESS
! comparison), rebuilds %tstate with the current %cwp, and retries.
! NOTE(review): the label, branches, and final retry are elided from
! this excerpt.
2749 ldx [%sp + SPOFF + CCFSZ + TF_O0], %i0
2750 ldx [%sp + SPOFF + CCFSZ + TF_O1], %i1
2751 ldx [%sp + SPOFF + CCFSZ + TF_O2], %i2
2752 ldx [%sp + SPOFF + CCFSZ + TF_O3], %i3
2753 ldx [%sp + SPOFF + CCFSZ + TF_O4], %i4
2754 ldx [%sp + SPOFF + CCFSZ + TF_O5], %i5
2755 ldx [%sp + SPOFF + CCFSZ + TF_O6], %i6
2756 ldx [%sp + SPOFF + CCFSZ + TF_O7], %i7
2758 ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
2759 ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
2760 ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
2761 ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
2762 ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
2764 ldx [%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
2765 ldx [%sp + SPOFF + CCFSZ + TF_TPC], %l1
2766 ldx [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2767 ldx [%sp + SPOFF + CCFSZ + TF_PIL], %l3
2768 ldx [%sp + SPOFF + CCFSZ + TF_Y], %l4
! Bounds check: was the trap taken inside the PROM address range?
2770 set VM_MIN_PROM_ADDRESS, %l5
2774 set VM_MAX_PROM_ADDRESS, %l5
2779 wrpr %g0, PSTATE_NORMAL, %pstate
2781 ldx [%sp + SPOFF + CCFSZ + TF_G6], %g6
2782 ldx [%sp + SPOFF + CCFSZ + TF_G7], %g7
2784 1: wrpr %g0, PSTATE_ALT, %pstate
! Clear the saved %cwp so %tstate points at the current window.
2786 andn %l0, TSTATE_CWP_MASK, %g1
2798 wrpr %g1, %g4, %tstate
2802 #if KTR_COMPILE & KTR_TRAP
2803 CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
2804 , %g2, %g3, %g4, 7, 8, 9)
2805 ldx [PCPU(CURTHREAD)], %g3
2806 stx %g3, [%g2 + KTR_PARM1]
2808 stx %g3, [%g2 + KTR_PARM2]
2810 stx %g3, [%g2 + KTR_PARM3]
2812 stx %g3, [%g2 + KTR_PARM4]
2813 stx %sp, [%g2 + KTR_PARM5]
! tl1_intr: kernel (TL1) interrupt entry and exit.  Builds a minimal
! trapframe (type T_INTERRUPT | T_KERNEL), clears the softint bit,
! dispatches through intr_handlers[PIL], bumps interrupt statistics,
! then restores the frame and retries.  NOTE(review): the ENTRY
! directive, handler call, and retry are elided from this excerpt.
2821 * void tl1_intr(u_int level, u_int mask)
2831 #if KTR_COMPILE & KTR_INTR
2833 "tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
2834 , %g1, %g2, %g3, 7, 8, 9)
2835 ldx [PCPU(CURTHREAD)], %g2
2836 stx %g2, [%g1 + KTR_PARM1]
2837 stx %o0, [%g1 + KTR_PARM2]
2838 stx %l3, [%g1 + KTR_PARM3]
2839 stx %l1, [%g1 + KTR_PARM4]
2840 stx %i6, [%g1 + KTR_PARM5]
! Acknowledge the software interrupt bit for this level.
2845 wr %o1, 0, %clear_softint
2849 and %l5, WSTATE_OTHER_MASK, %l5
2850 wrpr %l5, WSTATE_KERNEL, %wstate
2852 stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2853 stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2854 stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2855 stx %l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2856 stx %l4, [%sp + SPOFF + CCFSZ + TF_Y]
2859 mov T_INTERRUPT | T_KERNEL, %o1
2861 stx %o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2862 stx %o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
2864 stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
2865 stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
2869 wrpr %g0, PSTATE_NORMAL, %pstate
2871 stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
2872 stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
2873 stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
2874 stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
2875 stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
2879 wrpr %g0, PSTATE_KERNEL, %pstate
! Look up the interrupt handler: intr_handlers[PIL].
2881 SET(intr_handlers, %l5, %l4)
2882 sllx %l7, IH_SHIFT, %l5
2883 ldx [%l4 + %l5], %l5
2884 KASSERT(%l5, "tl1_intr: ih null")
2886 add %sp, CCFSZ + SPOFF, %o0
2888 /* %l7 contains PIL */
! Interrupt statistics, mirroring tl0_intr.
2889 SET(intrcnt, %l5, %l4)
2890 prefetcha [%l4] ASI_N, 1
2891 SET(pil_countp, %l5, %l6)
2893 lduh [%l5 + %l6], %l5
2900 lduw [PCPU(CNT) + V_INTR], %l4
2902 stw %l4, [PCPU(CNT) + V_INTR]
! Restore the frame for the return path.
2904 ldx [%sp + SPOFF + CCFSZ + TF_Y], %l4
2906 ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
2907 ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
2908 ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
2909 ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
2910 ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
2912 wrpr %g0, PSTATE_ALT, %pstate
2914 andn %l0, TSTATE_CWP_MASK, %g1
2925 wrpr %g1, %g4, %tstate
2929 #if KTR_COMPILE & KTR_INTR
2930 CATR(KTR_INTR, "tl1_intr: td=%#x pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
2931 , %g2, %g3, %g4, 7, 8, 9)
2932 ldx [PCPU(CURTHREAD)], %g3
2933 stx %g3, [%g2 + KTR_PARM1]
2935 stx %g3, [%g2 + KTR_PARM2]
2937 stx %g3, [%g2 + KTR_PARM3]
2939 stx %g3, [%g2 + KTR_PARM4]
2940 stx %sp, [%g2 + KTR_PARM5]
! fork_trampoline: first code run by a newly forked thread after a
! context switch.  The fork_exit() arguments were staged in local
! registers by the switch code and are moved into place here.
! NOTE(review): the actual moves and the call to fork_exit() are
! elided from this excerpt; only the KTR tracing block is visible.
2952 * Freshly forked processes come here when switched to for the first time.
2953 * The arguments to fork_exit() have been setup in the locals, we must move
2956 ENTRY(fork_trampoline)
2957 #if KTR_COMPILE & KTR_PROC
2958 CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
2959 , %g1, %g2, %g3, 7, 8, 9)
2960 ldx [PCPU(CURTHREAD)], %g2
2961 stx %g2, [%g1 + KTR_PARM1]
! Trace the process name: curthread->td_proc->p_comm.
2962 ldx [%g2 + TD_PROC], %g2
2963 add %g2, P_COMM, %g2
2964 stx %g2, [%g1 + KTR_PARM2]
2966 stx %g2, [%g1 + KTR_PARM3]
2975 END(fork_trampoline)