2 * Copyright (c) 1997 Berkeley Software Design, Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 * 3. Berkeley Software Design Inc's name may not be used to endorse or
13 * promote products derived from this software without specific prior
16 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * BSDI $Id: locore.s,v 1.36.2.15 1999/08/23 22:34:41 cp Exp $
31 * Copyright (c) 2001 Jake Burkholder.
32 * All rights reserved.
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
56 #include <machine/asm.h>
57 __FBSDID("$FreeBSD$");
59 #include "opt_compat.h"
62 #include <machine/asi.h>
63 #include <machine/asmacros.h>
64 #include <machine/frame.h>
65 #include <machine/fsr.h>
66 #include <machine/intr_machdep.h>
67 #include <machine/ktr.h>
68 #include <machine/pcb.h>
69 #include <machine/pstate.h>
70 #include <machine/trap.h>
71 #include <machine/tsb.h>
72 #include <machine/tstate.h>
73 #include <machine/utrap.h>
74 #include <machine/wstate.h>
79 #define TSB_KERNEL 0x0
80 #define TSB_KERNEL_MASK 0x0
81 #define TSB_KERNEL_PHYS 0x0
82 #define TSB_KERNEL_PHYS_END 0x0
83 #define TSB_QUAD_LDD 0x0
91 * Atomically set a bit in a TTE.
93 #define TTE_SET_BIT(r1, r2, r3, bit, a, asi) \
94 add r1, TTE_DATA, r1 ; \
95 LD(x, a) [r1] asi, r2 ; \
97 CAS(x, a) [r1] asi, r2, r3 ; \
102 #define TTE_SET_REF(r1, r2, r3, a, asi) TTE_SET_BIT(r1, r2, r3, TD_REF, a, asi)
103 #define TTE_SET_W(r1, r2, r3, a, asi) TTE_SET_BIT(r1, r2, r3, TD_W, a, asi)
106 * Macros for spilling and filling live windows.
108 * NOTE: These macros use exactly 16 instructions, and it is assumed that the
109 * handler will not use more than 24 instructions total, to leave room for
110 * resume vectors which occupy the last 8 instructions.
/*
 * SPILL(storer, base, size, asi): store all 16 in/local registers of the
 * current window to the stack at "base", using "storer" (stx/stxa/stwa)
 * with element size "size" and address space "asi".  Exactly 16
 * instructions, per the spill/fill handler size contract noted above.
 */
113 #define SPILL(storer, base, size, asi) \
114 storer %l0, [base + (0 * size)] asi ; \
115 storer %l1, [base + (1 * size)] asi ; \
116 storer %l2, [base + (2 * size)] asi ; \
117 storer %l3, [base + (3 * size)] asi ; \
118 storer %l4, [base + (4 * size)] asi ; \
119 storer %l5, [base + (5 * size)] asi ; \
120 storer %l6, [base + (6 * size)] asi ; \
121 storer %l7, [base + (7 * size)] asi ; \
122 storer %i0, [base + (8 * size)] asi ; \
123 storer %i1, [base + (9 * size)] asi ; \
124 storer %i2, [base + (10 * size)] asi ; \
125 storer %i3, [base + (11 * size)] asi ; \
126 storer %i4, [base + (12 * size)] asi ; \
127 storer %i5, [base + (13 * size)] asi ; \
128 storer %i6, [base + (14 * size)] asi ; \
129 storer %i7, [base + (15 * size)] asi
/*
 * FILL(loader, base, size, asi): inverse of SPILL — reload all 16
 * in/local registers of the current window from the stack at "base",
 * using "loader" (ldx/ldxa/lduwa) with element size "size" and address
 * space "asi".  Exactly 16 instructions.
 */
131 #define FILL(loader, base, size, asi) \
132 loader [base + (0 * size)] asi, %l0 ; \
133 loader [base + (1 * size)] asi, %l1 ; \
134 loader [base + (2 * size)] asi, %l2 ; \
135 loader [base + (3 * size)] asi, %l3 ; \
136 loader [base + (4 * size)] asi, %l4 ; \
137 loader [base + (5 * size)] asi, %l5 ; \
138 loader [base + (6 * size)] asi, %l6 ; \
139 loader [base + (7 * size)] asi, %l7 ; \
140 loader [base + (8 * size)] asi, %i0 ; \
141 loader [base + (9 * size)] asi, %i1 ; \
142 loader [base + (10 * size)] asi, %i2 ; \
143 loader [base + (11 * size)] asi, %i3 ; \
144 loader [base + (12 * size)] asi, %i4 ; \
145 loader [base + (13 * size)] asi, %i5 ; \
146 loader [base + (14 * size)] asi, %i6 ; \
147 loader [base + (15 * size)] asi, %i7
149 #define ERRATUM50(reg) mov reg, reg
151 #define KSTACK_SLOP 1024
154 * Sanity check the kernel stack and bail out if it's wrong.
155 * XXX: doesn't handle being on the panic stack.
/*
 * KSTACK_CHECK: sanity-check the kernel stack pointer at TL1, branching
 * to tl1_kstack_fault on failure.  Verifies %sp+SPOFF alignment, then
 * bounds it against curthread's kstack (plus KSTACK_SLOP below, and
 * KSTACK_PAGES*PAGE_SIZE above).  Uses %g1/%g2, saved to and restored
 * from the alternate stack (ASP_REG) around the check.
 * NOTE(review): some continuation lines are elided in this extract
 * (e.g. the annulled delay slots and the second compare) — the bounds
 * description above is inferred and should be confirmed upstream.
 */
157 #define KSTACK_CHECK \
159 stx %g1, [ASP_REG + 0] ; \
160 stx %g2, [ASP_REG + 8] ; \
161 add %sp, SPOFF, %g1 ; \
162 andcc %g1, (1 << PTR_SHIFT) - 1, %g0 ; \
163 bnz,a %xcc, tl1_kstack_fault ; \
165 ldx [PCPU(CURTHREAD)], %g2 ; \
166 ldx [%g2 + TD_KSTACK], %g2 ; \
167 add %g2, KSTACK_SLOP, %g2 ; \
168 subcc %g1, %g2, %g1 ; \
169 ble,a %xcc, tl1_kstack_fault ; \
171 set KSTACK_PAGES * PAGE_SIZE, %g2 ; \
173 bgt,a %xcc, tl1_kstack_fault ; \
175 ldx [ASP_REG + 8], %g2 ; \
176 ldx [ASP_REG + 0], %g1 ; \
/*
 * tl1_kstack_fault: fatal-path handler entered when KSTACK_CHECK finds a
 * bad kernel stack.  Optionally logs trap state and window-register state
 * via KTR, resets the register-window bookkeeping to a known-good
 * configuration, switches to the per-CPU alternate stack (ASP_REG), and
 * enters common trap handling with T_KSTACK_FAULT | T_KERNEL.
 * NOTE(review): interior lines (rdpr reads feeding the KTR stores, and
 * the final call/trap sequence) are elided in this extract.
 */
183 ENTRY(tl1_kstack_fault)
189 #if KTR_COMPILE & KTR_TRAP
190 CATR(KTR_TRAP, "tl1_kstack_fault: tl=%#lx tpc=%#lx tnpc=%#lx"
191 , %g2, %g3, %g4, 7, 8, 9)
193 stx %g3, [%g2 + KTR_PARM1]
195 stx %g3, [%g2 + KTR_PARM1]
197 stx %g3, [%g2 + KTR_PARM1]
207 #if KTR_COMPILE & KTR_TRAP
209 "tl1_kstack_fault: sp=%#lx ks=%#lx cr=%#lx cs=%#lx ow=%#lx ws=%#lx"
210 , %g1, %g2, %g3, 7, 8, 9)
212 stx %g2, [%g1 + KTR_PARM1]
213 ldx [PCPU(CURTHREAD)], %g2
214 ldx [%g2 + TD_KSTACK], %g2
215 stx %g2, [%g1 + KTR_PARM2]
216 rdpr %canrestore, %g2
217 stx %g2, [%g1 + KTR_PARM3]
219 stx %g2, [%g1 + KTR_PARM4]
221 stx %g2, [%g1 + KTR_PARM5]
223 stx %g2, [%g1 + KTR_PARM6]
/* Reset window state: nothing restorable, all windows savable. */
227 wrpr %g0, 0, %canrestore
228 wrpr %g0, 6, %cansave
229 wrpr %g0, 0, %otherwin
230 wrpr %g0, WSTATE_KERNEL, %wstate
/* The kernel stack cannot be trusted; run on the alternate stack. */
232 sub ASP_REG, SPOFF + CCFSZ, %sp
237 mov T_KSTACK_FAULT | T_KERNEL, %o0
238 END(tl1_kstack_fault)
241 * Magic to resume from a spill or fill trap. If we get an alignment or an
242 * MMU fault during a spill or a fill, this macro will detect the fault and
243 * resume at a set instruction offset in the trap handler.
245 * To check if the previous trap was a spill/fill we convert the trapped pc
246 * to a trap type and verify that it is in the range of spill/fill vectors.
247 * The spill/fill vectors are types 0x80-0xff and 0x280-0x2ff, masking off the
248 * tl bit allows us to detect both ranges with one test.
251 * 0x80 <= (((%tpc - %tba) >> 5) & ~0x200) < 0x100
253 * To calculate the new pc we take advantage of the xor feature of wrpr.
254 * Forcing all the low bits of the trapped pc on we can produce any offset
255 * into the spill/fill vector. The size of a spill/fill trap vector is 0x80.
257 * 0x7f ^ 0x1f == 0x60
258 * 0x1f == (0x80 - 0x60) - 1
260 * Which are the offset and xor value used to resume from alignment faults.
264 * Determine if we have trapped inside of a spill/fill vector, and if so resume
265 * at a fixed instruction offset in the trap vector. Must be called on
268 #define RESUME_SPILLFILL_MAGIC(stxa_g0_sfsr, xor) \
270 stx %g1, [ASP_REG + 0] ; \
271 stx %g2, [ASP_REG + 8] ; \
275 sub %g1, %g2, %g2 ; \
277 andn %g2, 0x200, %g2 ; \
282 or %g1, 0x7f, %g1 ; \
283 wrpr %g1, xor, %tnpc ; \
285 ldx [ASP_REG + 8], %g2 ; \
286 ldx [ASP_REG + 0], %g1 ; \
289 9: ldx [ASP_REG + 8], %g2 ; \
290 ldx [ASP_REG + 0], %g1 ; \
294 * For certain faults we need to clear the SFSR MMU register before returning.
296 #define RSF_CLR_SFSR \
297 wr %g0, ASI_DMMU, %asi ; \
298 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
300 #define RSF_XOR(off) ((0x80 - off) - 1)
303 * Instruction offsets in spill and fill trap handlers for handling certain
304 * nested traps, and corresponding xor constants for wrpr.
306 #define RSF_OFF_ALIGN 0x60
307 #define RSF_OFF_MMU 0x70
309 #define RESUME_SPILLFILL_ALIGN \
310 RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_ALIGN))
311 #define RESUME_SPILLFILL_MMU \
312 RESUME_SPILLFILL_MAGIC(EMPTY, RSF_XOR(RSF_OFF_MMU))
313 #define RESUME_SPILLFILL_MMU_CLR_SFSR \
314 RESUME_SPILLFILL_MAGIC(RSF_CLR_SFSR, RSF_XOR(RSF_OFF_MMU))
317 * Constant to add to %tnpc when taking a fill trap just before returning to
320 #define RSF_FILL_INC tl0_ret_fill_end - tl0_ret_fill
323 * Generate a T_SPILL or T_FILL trap if the window operation fails.
325 #define RSF_TRAP(type) \
326 ba %xcc, tl0_sftrap ; \
331 * Game over if the window operation fails.
333 #define RSF_FATAL(type) \
334 ba %xcc, rsf_fatal ; \
339 * Magic to resume from a failed fill a few instructions after the corresponding
340 * restore. This is used on return from the kernel to usermode.
342 #define RSF_FILL_MAGIC \
344 add %g1, RSF_FILL_INC, %g1 ; \
345 wrpr %g1, 0, %tnpc ; \
350 * Spill to the pcb if a spill to the user stack in kernel mode fails.
352 #define RSF_SPILL_TOPCB \
353 ba,a %xcc, tl1_spill_topcb ; \
358 #if KTR_COMPILE & KTR_TRAP
359 CATR(KTR_TRAP, "rsf_fatal: bad window trap tt=%#lx type=%#lx"
360 , %g1, %g3, %g4, 7, 8, 9)
362 stx %g3, [%g1 + KTR_PARM1]
363 stx %g2, [%g1 + KTR_PARM2]
374 .globl intrnames, sintrnames
376 .space (IV_MAX + PIL_MAX) * (MAXCOMLEN + 1)
378 .quad (IV_MAX + PIL_MAX) * (MAXCOMLEN + 1)
380 .globl intrcnt, sintrcnt
382 .space (IV_MAX + PIL_MAX) * 8
384 .quad (IV_MAX + PIL_MAX) * 8
389 * Trap table and associated macros
391 * Due to its size a trap table is an inherently hard thing to represent in
392 * code in a clean way. There are approximately 1024 vectors, of 8 or 32
393 * instructions each, many of which are identical. The way that this is
394 * laid out is the instructions (8 or 32) for the actual trap vector appear
395 * as an AS macro. In general this code branches to tl0_trap or tl1_trap,
396 * but if not supporting code can be placed just after the definition of the
397 * macro. The macros are then instantiated in a different section (.trap),
398 * which is setup to be placed by the linker at the beginning of .text, and the
399 * code around the macros is moved to the end of trap table. In this way the
400 * code that must be sequential in memory can be split up, and located near
401 * its supporting code so that it is easier to follow.
405 * Clean window traps occur when %cleanwin is zero to ensure that data
406 * is not leaked between address spaces in registers.
426 wrpr %l7, 0, %cleanwin
433 * Stack fixups for entry from user mode. We are still running on the
434 * user stack, and with its live registers, so we must save soon. We
435 * are on alternate globals so we do have some registers. Set the
436 * transitional window state, and do the save. If this traps we
437 * attempt to spill a window to the user stack. If this fails, we
438 * spill the window to the pcb and continue. Spilling to the pcb
441 * NOTE: Must be called with alternate globals and clobbers %g1.
446 wrpr %g1, WSTATE_TRANSITION, %wstate
450 .macro tl0_setup type
459 * Generic trap type. Call trap() with the specified type.
467 * This is used to suck up the massive swaths of reserved trap types.
468 * Generates count "reserved" trap vectors.
470 .macro tl0_reserved count
478 wrpr %g1, WSTATE_NESTED, %wstate
479 save %sp, -(CCFSZ + TF_SIZEOF), %sp
482 .macro tl1_setup type
487 mov \type | T_KERNEL, %o0
495 .macro tl1_reserved count
501 .macro tl0_insn_excptn
502 wrpr %g0, PSTATE_ALT, %pstate
503 wr %g0, ASI_IMMU, %asi
505 ldxa [%g0 + AA_IMMU_SFSR] %asi, %g4
507 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
508 * followed by a DONE, FLUSH or RETRY for USIII. In practice,
509 * this triggers a RED state exception though.
511 stxa %g0, [%g0 + AA_IMMU_SFSR] %asi
513 ba %xcc, tl0_sfsr_trap
514 mov T_INSTRUCTION_EXCEPTION, %g2
518 .macro tl0_data_excptn
519 wrpr %g0, PSTATE_ALT, %pstate
520 wr %g0, ASI_DMMU, %asi
521 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
522 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
523 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
525 ba %xcc, tl0_sfsr_trap
526 mov T_DATA_EXCEPTION, %g2
531 wr %g0, ASI_DMMU, %asi
532 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
533 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
534 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
536 ba %xcc, tl0_sfsr_trap
537 mov T_MEM_ADDRESS_NOT_ALIGNED, %g2
551 .macro tl0_intr level, mask
559 #define INTR(level, traplvl) \
560 tl ## traplvl ## _intr level, 1 << level
562 #define TICK(traplvl) \
563 tl ## traplvl ## _intr PIL_TICK, 0x10001
565 #define INTR_LEVEL(tl) \
582 .macro tl0_intr_level
587 ldxa [%g0] ASI_INTR_RECEIVE, %g1
588 andcc %g1, IRSR_BUSY, %g0
589 bnz,a,pt %xcc, intr_vector
591 ba,a,pt %xcc, intr_vector_stray
598 * Load the context and the virtual page number from the tag access
599 * register. We ignore the context.
601 wr %g0, ASI_IMMU, %asi
602 ldxa [%g0 + AA_IMMU_TAR] %asi, %g1
605 * Initialize the page size walker.
610 * Loop over all supported page sizes.
614 * Compute the page shift for the page size we are currently looking
619 add %g3, PAGE_SHIFT, %g3
622 * Extract the virtual page number from the contents of the tag
628 * Compute the TTE bucket address.
630 ldxa [%g0 + AA_IMMU_TSB] %asi, %g5
631 and %g3, TSB_BUCKET_MASK, %g4
632 sllx %g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
636 * Compute the TTE tag target.
638 sllx %g3, TV_SIZE_BITS, %g3
642 * Loop over the TTEs in this bucket.
646 * Load the TTE. Note that this instruction may fault, clobbering
647 * the contents of the tag access register, %g5, %g6, and %g7. We
648 * do not use %g5, and %g6 and %g7 are not used until this instruction
649 * completes successfully.
651 2: ldda [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
654 * Check that it's valid and executable and that the TTE tags match.
657 andcc %g7, TD_EXEC, %g0
664 * We matched a TTE, load the TLB.
668 * Set the reference bit, if it's currently clear.
670 andcc %g7, TD_REF, %g0
671 bz,a,pn %xcc, tl0_immu_miss_set_ref
675 * Load the TTE tag and data into the TLB and retry the instruction.
677 stxa %g1, [%g0 + AA_IMMU_TAR] %asi
678 stxa %g7, [%g0] ASI_ITLB_DATA_IN_REG
682 * Advance to the next TTE in this bucket, and check the low bits
683 * of the bucket pointer to see if we've finished the bucket.
685 3: add %g4, 1 << TTE_SHIFT, %g4
686 andcc %g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
691 * See if we just checked the largest page size, and advance to the
699 * Not in user TSB, call C code.
701 ba,a %xcc, tl0_immu_miss_trap
/*
 * tl0_immu_miss_set_ref: slow path of the TL0 instruction-MMU miss
 * handler, taken when the matched TTE's reference bit is clear.  Sets
 * TD_REF atomically (casxa inside TTE_SET_REF), then loads the TTE into
 * the ITLB and retries, unless the TTE went invalid during the cas.
 */
705 ENTRY(tl0_immu_miss_set_ref)
707 * Set the reference bit.
709 TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)
712 * May have become invalid during casxa, in which case start over.
718 * Load the TTE tag and data into the TLB and retry the instruction.
720 stxa %g1, [%g0 + AA_IMMU_TAR] %asi
721 stxa %g2, [%g0] ASI_ITLB_DATA_IN_REG
723 END(tl0_immu_miss_set_ref)
/*
 * tl0_immu_miss_trap: the TSB lookup failed; restore the tag access
 * register (clobbered by the quad load), switch to alternate globals,
 * and hand off to C with T_INSTRUCTION_MISS.
 */
725 ENTRY(tl0_immu_miss_trap)
727 * Put back the contents of the tag access register, in case we
730 sethi %hi(KERNBASE), %g2
731 stxa %g1, [%g0 + AA_IMMU_TAR] %asi
735 * Switch to alternate globals.
737 wrpr %g0, PSTATE_ALT, %pstate
740 * Reload the tag access register.
742 ldxa [%g0 + AA_IMMU_TAR] %asi, %g2
745 * Save the tag access register, and call common trap code.
752 mov T_INSTRUCTION_MISS, %o0
753 END(tl0_immu_miss_trap)
757 * Load the context and the virtual page number from the tag access
758 * register. We ignore the context.
760 wr %g0, ASI_DMMU, %asi
761 ldxa [%g0 + AA_DMMU_TAR] %asi, %g1
764 * Initialize the page size walker.
770 * Loop over all supported page sizes.
774 * Compute the page shift for the page size we are currently looking
779 add %g3, PAGE_SHIFT, %g3
782 * Extract the virtual page number from the contents of the tag
788 * Compute the TTE bucket address.
790 ldxa [%g0 + AA_DMMU_TSB] %asi, %g5
791 and %g3, TSB_BUCKET_MASK, %g4
792 sllx %g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
796 * Compute the TTE tag target.
798 sllx %g3, TV_SIZE_BITS, %g3
802 * Loop over the TTEs in this bucket.
806 * Load the TTE. Note that this instruction may fault, clobbering
807 * the contents of the tag access register, %g5, %g6, and %g7. We
808 * do not use %g5, and %g6 and %g7 are not used until this instruction
809 * completes successfully.
811 2: ldda [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
814 * Check that it's valid and that the virtual page numbers match.
822 * We matched a TTE, load the TLB.
826 * Set the reference bit, if it's currently clear.
828 andcc %g7, TD_REF, %g0
829 bz,a,pn %xcc, tl0_dmmu_miss_set_ref
833 * Load the TTE tag and data into the TLB and retry the instruction.
835 stxa %g1, [%g0 + AA_DMMU_TAR] %asi
836 stxa %g7, [%g0] ASI_DTLB_DATA_IN_REG
840 * Advance to the next TTE in this bucket, and check the low bits
841 * of the bucket pointer to see if we've finished the bucket.
843 3: add %g4, 1 << TTE_SHIFT, %g4
844 andcc %g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
849 * See if we just checked the largest page size, and advance to the
857 * Not in user TSB, call C code.
859 ba,a %xcc, tl0_dmmu_miss_trap
/*
 * tl0_dmmu_miss_set_ref: data-MMU analog of tl0_immu_miss_set_ref.
 * Atomically sets TD_REF in the matched TTE, then loads it into the
 * DTLB and retries, unless the TTE went invalid during the cas.
 */
863 ENTRY(tl0_dmmu_miss_set_ref)
865 * Set the reference bit.
867 TTE_SET_REF(%g4, %g2, %g3, a, ASI_N)
870 * May have become invalid during casxa, in which case start over.
876 * Load the TTE tag and data into the TLB and retry the instruction.
878 stxa %g1, [%g0 + AA_DMMU_TAR] %asi
879 stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG
881 END(tl0_dmmu_miss_set_ref)
/*
 * tl0_dmmu_miss_trap: user TSB lookup failed for a data miss.  Restores
 * the tag access register, switches to alternate globals, and calls
 * common trap code with T_DATA_MISS.  A separate path (label 1) resumes
 * faults that occurred inside window spill/fill handlers, and the
 * kernel-origin path passes T_DATA_MISS | T_KERNEL.
 */
883 ENTRY(tl0_dmmu_miss_trap)
885 * Put back the contents of the tag access register, in case we
888 stxa %g1, [%g0 + AA_DMMU_TAR] %asi
892 * Switch to alternate globals.
894 wrpr %g0, PSTATE_ALT, %pstate
897 * Check if we actually came from the kernel.
905 * Reload the tag access register.
907 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
910 * Save the tag access register and call common trap code.
920 * Handle faults during window spill/fill.
922 1: RESUME_SPILLFILL_MMU
925 * Reload the tag access register.
927 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
934 mov T_DATA_MISS | T_KERNEL, %o0
935 END(tl0_dmmu_miss_trap)
938 ba,a %xcc, tl0_dmmu_prot_1
/*
 * tl0_dmmu_prot_1: TL0 data protection fault fast path.  Walks the user
 * TSB over all supported page sizes looking for a valid, software-
 * writable (TD_SW) TTE matching the faulting virtual page.  On a match
 * it sets the hardware write bit (TD_W) atomically, demaps the stale
 * TLB entry, clears the SFSR, and reloads the DTLB.  Falls through to
 * tl0_dmmu_prot_trap (C code) if no TTE matches.
 * NOTE(review): loop-control branches between the numbered labels are
 * elided in this extract.
 */
943 ENTRY(tl0_dmmu_prot_1)
945 * Load the context and the virtual page number from the tag access
946 * register. We ignore the context.
948 wr %g0, ASI_DMMU, %asi
949 ldxa [%g0 + AA_DMMU_TAR] %asi, %g1
952 * Initialize the page size walker.
958 * Loop over all supported page sizes.
962 * Compute the page shift for the page size we are currently looking
967 add %g3, PAGE_SHIFT, %g3
970 * Extract the virtual page number from the contents of the tag
976 * Compute the TTE bucket address.
978 ldxa [%g0 + AA_DMMU_TSB] %asi, %g5
979 and %g3, TSB_BUCKET_MASK, %g4
980 sllx %g4, TSB_BUCKET_SHIFT + TTE_SHIFT, %g4
984 * Compute the TTE tag target.
986 sllx %g3, TV_SIZE_BITS, %g3
990 * Loop over the TTEs in this bucket.
994 * Load the TTE. Note that this instruction may fault, clobbering
995 * the contents of the tag access register, %g5, %g6, and %g7. We
996 * do not use %g5, and %g6 and %g7 are not used until this instruction
997 * completes successfully.
999 2: ldda [%g4] ASI_NUCLEUS_QUAD_LDD, %g6 /*, %g7 */
1002 * Check that it's valid and writable and that the virtual page
1006 andcc %g7, TD_SW, %g0
1013 * Set the hardware write bit.
1015 TTE_SET_W(%g4, %g2, %g3, a, ASI_N)
1018 * Delete the old TLB entry and clear the SFSR.
1020 srlx %g1, PAGE_SHIFT, %g3
1021 sllx %g3, PAGE_SHIFT, %g3
1022 stxa %g0, [%g3] ASI_DMMU_DEMAP
1023 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1027 * May have become invalid during casxa, in which case start over.
1033 * Load the TTE data into the TLB and retry the instruction.
1035 stxa %g1, [%g0 + AA_DMMU_TAR] %asi
1036 stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG
1040 * Check the low bits to see if we've finished the bucket.
1042 4: add %g4, 1 << TTE_SHIFT, %g4
1043 andcc %g4, (1 << (TSB_BUCKET_SHIFT + TTE_SHIFT)) - 1, %g0
1048 * See if we just checked the largest page size, and advance to the
1056 * Not in user TSB, call C code.
1058 ba,a %xcc, tl0_dmmu_prot_trap
1060 END(tl0_dmmu_prot_1)
/*
 * tl0_dmmu_prot_trap: TSB lookup failed for a protection fault.
 * Restores the tag access register, switches to alternate globals,
 * gathers SFAR/SFSR/TAR (clearing the SFSR), and calls common trap
 * code with T_DATA_PROTECTION.  Label 1 is the resume path for faults
 * taken inside window spill/fill handlers; the kernel-origin path
 * passes T_DATA_PROTECTION | T_KERNEL.
 */
1062 ENTRY(tl0_dmmu_prot_trap)
1064 * Put back the contents of the tag access register, in case we
1067 stxa %g1, [%g0 + AA_DMMU_TAR] %asi
1071 * Switch to alternate globals.
1073 wrpr %g0, PSTATE_ALT, %pstate
1076 * Check if we actually came from the kernel.
1084 * Load the SFAR, SFSR and TAR.
1086 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
1087 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
1088 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
1089 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1093 * Save the MMU registers and call common trap code.
1102 mov T_DATA_PROTECTION, %o0
1105 * Handle faults during window spill/fill.
1107 1: RESUME_SPILLFILL_MMU_CLR_SFSR
1110 * Load the SFAR, SFSR and TAR. Clear the SFSR.
1112 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
1113 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
1114 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
1115 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1125 mov T_DATA_PROTECTION | T_KERNEL, %o0
1126 END(tl0_dmmu_prot_trap)
1128 .macro tl0_spill_0_n
1129 wr %g0, ASI_AIUP, %asi
1130 SPILL(stxa, %sp + SPOFF, 8, %asi)
1138 .macro tl0_spill_1_n
1139 wr %g0, ASI_AIUP, %asi
1140 SPILL(stwa, %sp, 4, %asi)
1149 wr %g0, ASI_AIUP, %asi
1150 FILL(ldxa, %sp + SPOFF, 8, %asi)
1159 wr %g0, ASI_AIUP, %asi
1160 FILL(lduwa, %sp, 4, %asi)
1170 and %g1, TSTATE_CWP_MASK, %g1
1179 .macro tl0_spill_bad count
1186 .macro tl0_fill_bad count
1202 .macro tl0_fp_restore
1203 ba,a %xcc, tl0_fp_restore
/*
 * tl0_fp_restore: lazy-FPU restore on return to user mode.  Clears
 * PCB_FEF in the pcb flags, enables the FPU (FPRS_FEF), and block-loads
 * the user's saved floating-point state (PCB_UFP, four 64-byte blocks)
 * into %f0-%f63 via ASI_BLK_S.
 * NOTE(review): the trailing retry/END lines are elided in this extract.
 */
1208 ENTRY(tl0_fp_restore)
1209 ldx [PCB_REG + PCB_FLAGS], %g1
1210 andn %g1, PCB_FEF, %g1
1211 stx %g1, [PCB_REG + PCB_FLAGS]
1213 wr %g0, FPRS_FEF, %fprs
1214 wr %g0, ASI_BLK_S, %asi
1215 ldda [PCB_REG + PCB_UFP + (0 * 64)] %asi, %f0
1216 ldda [PCB_REG + PCB_UFP + (1 * 64)] %asi, %f16
1217 ldda [PCB_REG + PCB_UFP + (2 * 64)] %asi, %f32
1218 ldda [PCB_REG + PCB_UFP + (3 * 64)] %asi, %f48
1223 .macro tl1_insn_excptn
1224 wrpr %g0, PSTATE_ALT, %pstate
1225 wr %g0, ASI_IMMU, %asi
1227 ldxa [%g0 + AA_IMMU_SFSR] %asi, %g4
1229 * XXX in theory, a store to AA_IMMU_SFSR must be immediately
1230 * followed by a DONE, FLUSH or RETRY for USIII. In practice,
1231 * this triggers a RED state exception though.
1233 stxa %g0, [%g0 + AA_IMMU_SFSR] %asi
1235 ba %xcc, tl1_insn_exceptn_trap
1236 mov T_INSTRUCTION_EXCEPTION | T_KERNEL, %g2
1240 ENTRY(tl1_insn_exceptn_trap)
1248 END(tl1_insn_exceptn_trap)
1250 .macro tl1_fp_disabled
1251 ba,a %xcc, tl1_fp_disabled_1
/*
 * tl1_fp_disabled_1: FPU-disabled trap at TL1.  If the trap pc lies
 * within the designated in-kernel FPU fault region (fpu_fault_begin ..
 * fpu_fault_begin + fpu_fault_size), enable the FPU and block-load the
 * kernel FP state (PCB_KFP); otherwise fall through to common trap code
 * with T_FP_DISABLED | T_KERNEL.
 * NOTE(review): the bounds-check branches are elided in this extract.
 */
1256 ENTRY(tl1_fp_disabled_1)
1258 set fpu_fault_begin, %g2
1260 cmp %g1, fpu_fault_size
1264 wr %g0, FPRS_FEF, %fprs
1265 wr %g0, ASI_BLK_S, %asi
1266 ldda [PCB_REG + PCB_KFP + (0 * 64)] %asi, %f0
1267 ldda [PCB_REG + PCB_KFP + (1 * 64)] %asi, %f16
1268 ldda [PCB_REG + PCB_KFP + (2 * 64)] %asi, %f32
1269 ldda [PCB_REG + PCB_KFP + (3 * 64)] %asi, %f48
1277 mov T_FP_DISABLED | T_KERNEL, %o0
1278 END(tl1_fp_disabled_1)
1280 .macro tl1_data_excptn
1281 wrpr %g0, PSTATE_ALT, %pstate
1282 ba,a %xcc, tl1_data_excptn_trap
/*
 * tl1_data_excptn_trap: TL1 data access exception.  First attempts the
 * spill/fill resume magic (clearing the SFSR); if the fault was not in
 * a spill/fill vector, falls through to tl1_sfsr_trap with
 * T_DATA_EXCEPTION | T_KERNEL in %g2.
 */
1287 ENTRY(tl1_data_excptn_trap)
1288 RESUME_SPILLFILL_MMU_CLR_SFSR
1289 ba %xcc, tl1_sfsr_trap
1290 mov T_DATA_EXCEPTION | T_KERNEL, %g2
1291 END(tl1_data_excptn_trap)
1294 wrpr %g0, PSTATE_ALT, %pstate
1295 ba,a %xcc, tl1_align_trap
/*
 * tl1_align_trap: TL1 memory-alignment fault.  Attempts the spill/fill
 * alignment resume magic; otherwise branches to tl1_sfsr_trap with
 * T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL in %g2 (delay slot).
 */
1300 ENTRY(tl1_align_trap)
1301 RESUME_SPILLFILL_ALIGN
1302 ba %xcc, tl1_sfsr_trap
1303 mov T_MEM_ADDRESS_NOT_ALIGNED | T_KERNEL, %g2
1306 ENTRY(tl1_sfsr_trap)
1307 wr %g0, ASI_DMMU, %asi
1308 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
1309 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
1310 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1322 .macro tl1_intr level, mask
1330 .macro tl1_intr_level
1334 .macro tl1_immu_miss
1336 * Load the context and the virtual page number from the tag access
1337 * register. We ignore the context.
1339 wr %g0, ASI_IMMU, %asi
1340 ldxa [%g0 + AA_IMMU_TAR] %asi, %g5
1343 * Compute the address of the TTE. The TSB mask and address of the
1344 * TSB are patched at startup.
1346 .globl tl1_immu_miss_patch_tsb_1
1347 tl1_immu_miss_patch_tsb_1:
1348 sethi %uhi(TSB_KERNEL), %g6
1349 or %g6, %ulo(TSB_KERNEL), %g6
1351 sethi %hi(TSB_KERNEL), %g7
1353 .globl tl1_immu_miss_patch_tsb_mask_1
1354 tl1_immu_miss_patch_tsb_mask_1:
1355 sethi %hi(TSB_KERNEL_MASK), %g6
1356 or %g6, %lo(TSB_KERNEL_MASK), %g6
1358 srlx %g5, TAR_VPN_SHIFT, %g5
1360 sllx %g6, TTE_SHIFT, %g6
1366 .globl tl1_immu_miss_patch_quad_ldd_1
1367 tl1_immu_miss_patch_quad_ldd_1:
1368 ldda [%g6] TSB_QUAD_LDD, %g6 /*, %g7 */
1371 * Check that it's valid and executable and that the virtual page
1374 brgez,pn %g7, tl1_immu_miss_trap
1375 andcc %g7, TD_EXEC, %g0
1376 bz,pn %xcc, tl1_immu_miss_trap
1377 srlx %g6, TV_SIZE_BITS, %g6
1379 bne,pn %xcc, tl1_immu_miss_trap
1383 * Set the reference bit if it's currently clear.
1385 andcc %g7, TD_REF, %g0
1386 bz,a,pn %xcc, tl1_immu_miss_set_ref
1390 * Load the TTE data into the TLB and retry the instruction.
1392 stxa %g7, [%g0] ASI_ITLB_DATA_IN_REG
/*
 * tl1_immu_miss_set_ref: TL1 instruction-MMU miss, reference bit clear.
 * Recomputes the kernel TSB entry address (the instruction sequences at
 * the tl1_immu_miss_patch_* labels are rewritten at startup with the
 * real TSB address/mask/ASI), atomically sets TD_REF, and reloads the
 * ITLB unless the TTE went invalid during the cas.
 */
1397 ENTRY(tl1_immu_miss_set_ref)
1399 * Recompute the TTE address, which we clobbered loading the TTE.
1400 * The TSB mask and address of the TSB are patched at startup.
1402 .globl tl1_immu_miss_patch_tsb_2
1403 tl1_immu_miss_patch_tsb_2:
1404 sethi %uhi(TSB_KERNEL), %g6
1405 or %g6, %ulo(TSB_KERNEL), %g6
1407 sethi %hi(TSB_KERNEL), %g7
1409 .globl tl1_immu_miss_patch_tsb_mask_2
1410 tl1_immu_miss_patch_tsb_mask_2:
1411 sethi %hi(TSB_KERNEL_MASK), %g6
1412 or %g6, %lo(TSB_KERNEL_MASK), %g6
1415 sllx %g5, TTE_SHIFT, %g5
1419 * Set the reference bit.
1421 .globl tl1_immu_miss_patch_asi_1
1422 tl1_immu_miss_patch_asi_1:
1423 wr %g0, TSB_ASI, %asi
1424 TTE_SET_REF(%g5, %g6, %g7, a, %asi)
1427 * May have become invalid during casxa, in which case start over.
1433 * Load the TTE data into the TLB and retry the instruction.
1435 stxa %g6, [%g0] ASI_ITLB_DATA_IN_REG
1437 END(tl1_immu_miss_set_ref)
/*
 * tl1_immu_miss_trap: kernel TSB lookup failed for an instruction miss
 * at TL1.  Switch to alternate globals, reload the tag access register,
 * and enter common trap code with T_INSTRUCTION_MISS | T_KERNEL.
 */
1439 ENTRY(tl1_immu_miss_trap)
1441 * Switch to alternate globals.
1443 wrpr %g0, PSTATE_ALT, %pstate
1445 ldxa [%g0 + AA_IMMU_TAR] %asi, %g2
1452 mov T_INSTRUCTION_MISS | T_KERNEL, %o0
1453 END(tl1_immu_miss_trap)
1455 .macro tl1_dmmu_miss
1457 * Load the context and the virtual page number from the tag access
1460 wr %g0, ASI_DMMU, %asi
1461 ldxa [%g0 + AA_DMMU_TAR] %asi, %g5
1464 * Extract the context from the contents of the tag access register.
1465 * If it's non-zero this is a fault on a user address. Note that the
1466 * faulting address is passed in %g1.
1468 sllx %g5, 64 - TAR_VPN_SHIFT, %g6
1469 brnz,a,pn %g6, tl1_dmmu_miss_user
1473 * Check for the direct mapped physical region. These addresses have
1474 * the high bit set so they are negative.
1476 brlz,pn %g5, tl1_dmmu_miss_direct
1480 * Compute the address of the TTE. The TSB mask and address of the
1481 * TSB are patched at startup.
1483 .globl tl1_dmmu_miss_patch_tsb_1
1484 tl1_dmmu_miss_patch_tsb_1:
1485 sethi %uhi(TSB_KERNEL), %g6
1486 or %g6, %ulo(TSB_KERNEL), %g6
1488 sethi %hi(TSB_KERNEL), %g7
1490 .globl tl1_dmmu_miss_patch_tsb_mask_1
1491 tl1_dmmu_miss_patch_tsb_mask_1:
1492 sethi %hi(TSB_KERNEL_MASK), %g6
1493 or %g6, %lo(TSB_KERNEL_MASK), %g6
1495 srlx %g5, TAR_VPN_SHIFT, %g5
1497 sllx %g6, TTE_SHIFT, %g6
1503 .globl tl1_dmmu_miss_patch_quad_ldd_1
1504 tl1_dmmu_miss_patch_quad_ldd_1:
1505 ldda [%g6] TSB_QUAD_LDD, %g6 /*, %g7 */
1508 * Check that it's valid and that the virtual page numbers match.
1510 brgez,pn %g7, tl1_dmmu_miss_trap
1511 srlx %g6, TV_SIZE_BITS, %g6
1513 bne,pn %xcc, tl1_dmmu_miss_trap
1517 * Set the reference bit if it's currently clear.
1519 andcc %g7, TD_REF, %g0
1520 bz,a,pt %xcc, tl1_dmmu_miss_set_ref
1524 * Load the TTE data into the TLB and retry the instruction.
1526 stxa %g7, [%g0] ASI_DTLB_DATA_IN_REG
/*
 * tl1_dmmu_miss_set_ref: TL1 data-MMU miss, reference bit clear.
 * Recomputes the kernel TSB entry address (the sequences at the
 * tl1_dmmu_miss_patch_* labels are rewritten at startup with the real
 * TSB address/mask/ASI), atomically sets TD_REF, and reloads the DTLB
 * unless the TTE went invalid during the cas.
 *
 * Fix: the two .globl directives below previously named each other's
 * labels (".globl tl1_dmmu_miss_patch_tsb_mask_2" preceded label
 * tl1_dmmu_miss_patch_tsb_2 and vice versa), unlike the immu and prot
 * analogs.  Each .globl now names the label that immediately follows,
 * so the startup patch symbols are exported consistently.
 */
1531 ENTRY(tl1_dmmu_miss_set_ref)
1533 * Recompute the TTE address, which we clobbered loading the TTE.
1534 * The TSB mask and address of the TSB are patched at startup.
1536 .globl tl1_dmmu_miss_patch_tsb_2
1537 tl1_dmmu_miss_patch_tsb_2:
1538 sethi %uhi(TSB_KERNEL), %g6
1539 or %g6, %ulo(TSB_KERNEL), %g6
1541 sethi %hi(TSB_KERNEL), %g7
1543 .globl tl1_dmmu_miss_patch_tsb_mask_2
1544 tl1_dmmu_miss_patch_tsb_mask_2:
1545 sethi %hi(TSB_KERNEL_MASK), %g6
1546 or %g6, %lo(TSB_KERNEL_MASK), %g6
1549 sllx %g5, TTE_SHIFT, %g5
1553 * Set the reference bit.
1555 .globl tl1_dmmu_miss_patch_asi_1
1556 tl1_dmmu_miss_patch_asi_1:
1557 wr %g0, TSB_ASI, %asi
1558 TTE_SET_REF(%g5, %g6, %g7, a, %asi)
1561 * May have become invalid during casxa, in which case start over.
1567 * Load the TTE data into the TLB and retry the instruction.
1569 stxa %g6, [%g0] ASI_DTLB_DATA_IN_REG
1571 END(tl1_dmmu_miss_set_ref)
/*
 * tl1_dmmu_miss_trap: kernel TSB lookup failed for a data miss at TL1.
 * Switch to alternate globals, reload the tag access register, and
 * enter common trap code with T_DATA_MISS | T_KERNEL.
 */
1573 ENTRY(tl1_dmmu_miss_trap)
1575 * Switch to alternate globals.
1577 wrpr %g0, PSTATE_ALT, %pstate
1579 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
1588 mov T_DATA_MISS | T_KERNEL, %o0
1589 END(tl1_dmmu_miss_trap)
/*
 * tl1_dmmu_miss_direct: handle a TL1 data miss on the direct-mapped
 * physical region (negative virtual addresses).  Builds a TTE directly
 * from the faulting VA (masking to a physical address and or-ing in
 * TD_CP | TD_W, plus TD_CV unless the address falls inside the
 * physically-accessed kernel TSB range, whose bounds are patched in at
 * startup), then loads it into the DTLB and retries.
 */
1591 ENTRY(tl1_dmmu_miss_direct)
1593 * Mask off the high bits of the virtual address to get the physical
1594 * address, and or in the TTE bits. The virtual address bits that
1595 * correspond to the TTE valid and page size bits are left set, so
1596 * they don't have to be included in the TTE bits below. We know they
1597 * are set because the virtual address is in the upper va hole.
1598 * NB: if we are taking advantage of the ASI_ATOMIC_QUAD_LDD_PHYS
1599 * and we get a miss on the directly accessed kernel TSB we must not
1600 * set TD_CV in order to access it uniformly bypassing the D$.
1602 setx TLB_DIRECT_ADDRESS_MASK, %g7, %g4
1604 setx TLB_DIRECT_TO_TTE_MASK, %g7, %g6
1606 .globl tl1_dmmu_miss_direct_patch_tsb_phys_1
1607 tl1_dmmu_miss_direct_patch_tsb_phys_1:
1608 sethi %uhi(TSB_KERNEL_PHYS), %g3
1609 or %g3, %ulo(TSB_KERNEL_PHYS), %g3
1611 sethi %hi(TSB_KERNEL_PHYS), %g3
1615 or %g5, TD_CP | TD_W, %g5
1616 .globl tl1_dmmu_miss_direct_patch_tsb_phys_end_1
1617 tl1_dmmu_miss_direct_patch_tsb_phys_end_1:
1618 sethi %uhi(TSB_KERNEL_PHYS_END), %g3
1619 or %g3, %ulo(TSB_KERNEL_PHYS_END), %g3
1621 sethi %hi(TSB_KERNEL_PHYS_END), %g7
1628 1: or %g5, TD_CV, %g5
1631 * Load the TTE data into the TLB and retry the instruction.
1633 2: stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG
1635 END(tl1_dmmu_miss_direct)
1637 .macro tl1_dmmu_prot
1638 ba,a %xcc, tl1_dmmu_prot_1
/*
 * tl1_dmmu_prot_1: TL1 data protection fault.  User-context faults are
 * redirected to tl1_dmmu_prot_user; kernel faults look up the kernel
 * TSB (address/mask/quad-load ASI patched at startup), require a valid,
 * software-writable (TD_SW) TTE with matching virtual page number,
 * demap the stale TLB entry, clear the SFSR, atomically set TD_W, and
 * reload the DTLB.  Mismatches go to tl1_dmmu_prot_trap.
 */
1643 ENTRY(tl1_dmmu_prot_1)
1645 * Load the context and the virtual page number from the tag access
1648 wr %g0, ASI_DMMU, %asi
1649 ldxa [%g0 + AA_DMMU_TAR] %asi, %g5
1652 * Extract the context from the contents of the tag access register.
1653 * If it's non-zero this is a fault on a user address. Note that the
1654 * faulting address is passed in %g1.
1656 sllx %g5, 64 - TAR_VPN_SHIFT, %g6
1657 brnz,a,pn %g6, tl1_dmmu_prot_user
1661 * Compute the address of the TTE. The TSB mask and address of the
1662 * TSB are patched at startup.
1664 .globl tl1_dmmu_prot_patch_tsb_1
1665 tl1_dmmu_prot_patch_tsb_1:
1666 sethi %uhi(TSB_KERNEL), %g6
1667 or %g6, %ulo(TSB_KERNEL), %g6
1669 sethi %hi(TSB_KERNEL), %g7
1671 .globl tl1_dmmu_prot_patch_tsb_mask_1
1672 tl1_dmmu_prot_patch_tsb_mask_1:
1673 sethi %hi(TSB_KERNEL_MASK), %g6
1674 or %g6, %lo(TSB_KERNEL_MASK), %g6
1676 srlx %g5, TAR_VPN_SHIFT, %g5
1678 sllx %g6, TTE_SHIFT, %g6
1684 .globl tl1_dmmu_prot_patch_quad_ldd_1
1685 tl1_dmmu_prot_patch_quad_ldd_1:
1686 ldda [%g6] TSB_QUAD_LDD, %g6 /*, %g7 */
1689 * Check that it's valid and writable and that the virtual page
1692 brgez,pn %g7, tl1_dmmu_prot_trap
1693 andcc %g7, TD_SW, %g0
1694 bz,pn %xcc, tl1_dmmu_prot_trap
1695 srlx %g6, TV_SIZE_BITS, %g6
1697 bne,pn %xcc, tl1_dmmu_prot_trap
1701 * Delete the old TLB entry and clear the SFSR.
1703 sllx %g5, TAR_VPN_SHIFT, %g6
1704 or %g6, TLB_DEMAP_NUCLEUS, %g6
1705 stxa %g0, [%g6] ASI_DMMU_DEMAP
1706 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1710 * Recompute the TTE address, which we clobbered loading the TTE.
1711 * The TSB mask and address of the TSB are patched at startup.
1713 .globl tl1_dmmu_prot_patch_tsb_2
1714 tl1_dmmu_prot_patch_tsb_2:
1715 sethi %uhi(TSB_KERNEL), %g6
1716 or %g6, %ulo(TSB_KERNEL), %g6
1718 sethi %hi(TSB_KERNEL), %g7
1720 .globl tl1_dmmu_prot_patch_tsb_mask_2
1721 tl1_dmmu_prot_patch_tsb_mask_2:
1722 sethi %hi(TSB_KERNEL_MASK), %g6
1723 or %g6, %lo(TSB_KERNEL_MASK), %g6
1725 sllx %g5, TTE_SHIFT, %g5
1729 * Set the hardware write bit.
1731 .globl tl1_dmmu_prot_patch_asi_1
1732 tl1_dmmu_prot_patch_asi_1:
1733 wr %g0, TSB_ASI, %asi
1734 TTE_SET_W(%g5, %g6, %g7, a, %asi)
1737 * May have become invalid during casxa, in which case start over.
1743 * Load the TTE data into the TLB and retry the instruction.
1745 stxa %g6, [%g0] ASI_DTLB_DATA_IN_REG
1747 END(tl1_dmmu_prot_1)
! tl1_dmmu_prot_trap: slow path taken when the fast TL1 protection-fault
! handler above cannot resolve the fault.  Switches to alternate globals,
! captures the MMU fault state (TAR in %g2, SFAR in %g3, SFSR in %g4),
! clears the SFSR so the next fault is recorded, and passes the trap type
! T_DATA_PROTECTION | T_KERNEL to the generic trap code in %o0.
! NOTE(review): interior lines (including the final branch) are elided in
! this view.
1749 ENTRY(tl1_dmmu_prot_trap)
1751 * Switch to alternate globals.
1753 wrpr %g0, PSTATE_ALT, %pstate
1756 * Load the SFAR, SFSR and TAR. Clear the SFSR.
1758 ldxa [%g0 + AA_DMMU_TAR] %asi, %g2
1759 ldxa [%g0 + AA_DMMU_SFAR] %asi, %g3
1760 ldxa [%g0 + AA_DMMU_SFSR] %asi, %g4
! Writing zero re-arms the fault status register for the next fault.
1761 stxa %g0, [%g0 + AA_DMMU_SFSR] %asi
1771 mov T_DATA_PROTECTION | T_KERNEL, %o0
1772 END(tl1_dmmu_prot_trap)
! Trap-table macros for TL1 window spill/fill handlers.  Naming follows
! the sparc64 convention visible here: _n = "normal" wstate, _o = "other"
! (user) wstate; the digit selects the wstate variant.  SPILL()/FILL()
! expand to the per-register stores/loads; EMPTY means no explicit ASI,
! while ASI_AIUP variants access user memory "as if user primary".
! 64-bit frames use stx/ldx at %sp + SPOFF; 32-bit frames use
! stw/stwa/lduw at %sp with 4-byte slots.
! NOTE(review): each macro's remaining lines (saved-window bookkeeping,
! .align, .endm) are elided in this view.
1774 .macro tl1_spill_0_n
1775 SPILL(stx, %sp + SPOFF, 8, EMPTY)
1783 .macro tl1_spill_2_n
1784 wr %g0, ASI_AIUP, %asi
1785 SPILL(stxa, %sp + SPOFF, 8, %asi)
1793 .macro tl1_spill_3_n
1794 wr %g0, ASI_AIUP, %asi
1795 SPILL(stwa, %sp, 4, %asi)
1803 .macro tl1_spill_7_n
! Conditionally (annulled) reuse the plain kernel spill path.
1805 bnz,a,pn %xcc, tl1_spill_0_n
1808 SPILL(stw, %sp, 4, EMPTY)
1816 .macro tl1_spill_0_o
1817 wr %g0, ASI_AIUP, %asi
1818 SPILL(stxa, %sp + SPOFF, 8, %asi)
1826 .macro tl1_spill_1_o
1827 wr %g0, ASI_AIUP, %asi
1828 SPILL(stwa, %sp, 4, %asi)
1836 .macro tl1_spill_2_o
! Fill handlers: reload a register window from the stack.
1842 FILL(ldx, %sp + SPOFF, 8, EMPTY)
1851 wr %g0, ASI_AIUP, %asi
1852 FILL(ldxa, %sp + SPOFF, 8, %asi)
1861 wr %g0, ASI_AIUP, %asi
1862 FILL(lduwa, %sp, 4, %asi)
1872 bnz,a,pt %xcc, tl1_fill_0_n
1875 FILL(lduw, %sp, 4, EMPTY)
1884 * This is used to spill windows that are still occupied with user
1885 * data on kernel entry to the pcb.
! tl1_spill_topcb: spill a user register window into the pcb's save area
! instead of the user stack (which may not be resident).  Uses %g1-%g3 as
! scratch (saved to/restored from the per-cpu ASP_REG area), indexes the
! pcb save arrays by PCB_NSAVED, stores the window and its %sp, and
! updates PCB_NSAVED.  tl0_ret/tl0_utrap later copy these windows out.
! NOTE(review): interior lines (including the increment of %g1 and the
! retry) are elided in this view.
1887 ENTRY(tl1_spill_topcb)
1888 wrpr %g0, PSTATE_ALT, %pstate
1890 /* Free some globals for our use. */
1892 stx %g1, [ASP_REG + 0]
1893 stx %g2, [ASP_REG + 8]
1894 stx %g3, [ASP_REG + 16]
1896 ldx [PCB_REG + PCB_NSAVED], %g1
! Record the user %sp for this saved window: pcb_rwsp[nsaved] = %sp.
1898 sllx %g1, PTR_SHIFT, %g2
1899 add %g2, PCB_REG, %g2
1900 stx %sp, [%g2 + PCB_RWSP]
! Store the window registers themselves: pcb_rw[nsaved].
1902 sllx %g1, RW_SHIFT, %g2
1903 add %g2, PCB_REG, %g2
1904 SPILL(stx, %g2 + PCB_RW, 8, EMPTY)
1907 stx %g1, [PCB_REG + PCB_NSAVED]
1909 #if KTR_COMPILE & KTR_TRAP
1910 CATR(KTR_TRAP, "tl1_spill_topcb: pc=%#lx npc=%#lx sp=%#lx nsaved=%d"
1911 , %g1, %g2, %g3, 7, 8, 9)
1913 stx %g2, [%g1 + KTR_PARM1]
1915 stx %g2, [%g1 + KTR_PARM2]
1916 stx %sp, [%g1 + KTR_PARM3]
1917 ldx [PCB_REG + PCB_NSAVED], %g2
1918 stx %g2, [%g1 + KTR_PARM4]
! Restore the scratch globals saved on entry.
1924 ldx [ASP_REG + 16], %g3
1925 ldx [ASP_REG + 8], %g2
1926 ldx [ASP_REG + 0], %g1
1929 END(tl1_spill_topcb)
! Filler macros for unused trap-table slots: tl1_spill_bad/tl1_fill_bad
! emit \count entries for spill/fill vectors that must never be taken,
! and tl1_soft emits \count software-trap entries that funnel into the
! generic handler as T_SOFT | T_KERNEL.
! NOTE(review): each macro's body is largely elided in this view.
1931 .macro tl1_spill_bad count
1938 .macro tl1_fill_bad count
1945 .macro tl1_soft count
1947 tl1_gen T_SOFT | T_KERNEL
! The trap table proper.  Each macro invocation expands to one (or, for
! the *_reserved/*_bad forms, \count) trap-table slot(s); the trailing
! "! 0x.." comments give the hardware trap vector numbers.  The first
! half (vectors 0x0-0x1ff) handles traps taken at TL0 (from usermode),
! the second half (0x200-0x3ff) traps taken at TL1 (from the kernel).
! NOTE(review): some slots between the visible lines are elided.
1952 .globl tl_trap_begin
1960 tl0_reserved 8 ! 0x0-0x7
1962 tl0_insn_excptn ! 0x8
1963 tl0_reserved 1 ! 0x9
1965 tl0_gen T_INSTRUCTION_ERROR ! 0xa
1966 tl0_reserved 5 ! 0xb-0xf
1968 tl0_gen T_ILLEGAL_INSTRUCTION ! 0x10
1970 tl0_gen T_PRIVILEGED_OPCODE ! 0x11
1971 tl0_reserved 14 ! 0x12-0x1f
1973 tl0_gen T_FP_DISABLED ! 0x20
1975 tl0_gen T_FP_EXCEPTION_IEEE_754 ! 0x21
1977 tl0_gen T_FP_EXCEPTION_OTHER ! 0x22
1979 tl0_gen T_TAG_OVERFLOW ! 0x23
1983 tl0_gen T_DIVISION_BY_ZERO ! 0x28
1984 tl0_reserved 7 ! 0x29-0x2f
1986 tl0_data_excptn ! 0x30
1987 tl0_reserved 1 ! 0x31
1989 tl0_gen T_DATA_ERROR ! 0x32
1990 tl0_reserved 1 ! 0x33
1994 tl0_gen T_RESERVED ! 0x35
1996 tl0_gen T_RESERVED ! 0x36
1998 tl0_gen T_PRIVILEGED_ACTION ! 0x37
1999 tl0_reserved 9 ! 0x38-0x40
2001 tl0_intr_level ! 0x41-0x4f
2002 tl0_reserved 16 ! 0x50-0x5f
2006 tl0_gen T_PA_WATCHPOINT ! 0x61
2008 tl0_gen T_VA_WATCHPOINT ! 0x62
2010 tl0_gen T_CORRECTED_ECC_ERROR ! 0x63
! MMU miss/protection vectors: the fast TLB handlers defined earlier.
2012 tl0_immu_miss ! 0x64
2014 tl0_dmmu_miss ! 0x68
2016 tl0_dmmu_prot ! 0x6c
2017 tl0_reserved 16 ! 0x70-0x7f
! Window spill/fill vectors for TL0.
2019 tl0_spill_0_n ! 0x80
2021 tl0_spill_1_n ! 0x84
2022 tl0_spill_bad 14 ! 0x88-0xbf
2027 tl0_fill_bad 14 ! 0xc8-0xff
! Software traps (tcc instructions) from usermode.
2029 tl0_gen T_SYSCALL ! 0x100
2030 tl0_gen T_BREAKPOINT ! 0x101
2031 tl0_gen T_DIVISION_BY_ZERO ! 0x102
2032 tl0_reserved 1 ! 0x103
2033 tl0_gen T_CLEAN_WINDOW ! 0x104
2034 tl0_gen T_RANGE_CHECK ! 0x105
2035 tl0_gen T_FIX_ALIGNMENT ! 0x106
2036 tl0_gen T_INTEGER_OVERFLOW ! 0x107
2037 tl0_gen T_SYSCALL ! 0x108
2038 tl0_gen T_SYSCALL ! 0x109
2039 tl0_fp_restore ! 0x10a
2040 tl0_reserved 5 ! 0x10b-0x10f
2041 tl0_gen T_TRAP_INSTRUCTION_16 ! 0x110
2042 tl0_gen T_TRAP_INSTRUCTION_17 ! 0x111
2043 tl0_gen T_TRAP_INSTRUCTION_18 ! 0x112
2044 tl0_gen T_TRAP_INSTRUCTION_19 ! 0x113
2045 tl0_gen T_TRAP_INSTRUCTION_20 ! 0x114
2046 tl0_gen T_TRAP_INSTRUCTION_21 ! 0x115
2047 tl0_gen T_TRAP_INSTRUCTION_22 ! 0x116
2048 tl0_gen T_TRAP_INSTRUCTION_23 ! 0x117
2049 tl0_gen T_TRAP_INSTRUCTION_24 ! 0x118
2050 tl0_gen T_TRAP_INSTRUCTION_25 ! 0x119
2051 tl0_gen T_TRAP_INSTRUCTION_26 ! 0x11a
2052 tl0_gen T_TRAP_INSTRUCTION_27 ! 0x11b
2053 tl0_gen T_TRAP_INSTRUCTION_28 ! 0x11c
2054 tl0_gen T_TRAP_INSTRUCTION_29 ! 0x11d
2055 tl0_gen T_TRAP_INSTRUCTION_30 ! 0x11e
2056 tl0_gen T_TRAP_INSTRUCTION_31 ! 0x11f
2057 tl0_reserved 32 ! 0x120-0x13f
2058 tl0_gen T_SYSCALL ! 0x140
2060 tl0_gen T_SYSCALL ! 0x142
2061 tl0_gen T_SYSCALL ! 0x143
2062 tl0_reserved 188 ! 0x144-0x1ff
! Second half of the table: traps taken while already in the kernel (TL1).
2065 tl1_reserved 8 ! 0x200-0x207
2067 tl1_insn_excptn ! 0x208
2068 tl1_reserved 1 ! 0x209
2070 tl1_gen T_INSTRUCTION_ERROR ! 0x20a
2071 tl1_reserved 5 ! 0x20b-0x20f
2073 tl1_gen T_ILLEGAL_INSTRUCTION ! 0x210
2075 tl1_gen T_PRIVILEGED_OPCODE ! 0x211
2076 tl1_reserved 14 ! 0x212-0x21f
2078 tl1_fp_disabled ! 0x220
2080 tl1_gen T_FP_EXCEPTION_IEEE_754 ! 0x221
2082 tl1_gen T_FP_EXCEPTION_OTHER ! 0x222
2084 tl1_gen T_TAG_OVERFLOW ! 0x223
2086 clean_window ! 0x224
2088 tl1_gen T_DIVISION_BY_ZERO ! 0x228
2089 tl1_reserved 7 ! 0x229-0x22f
2091 tl1_data_excptn ! 0x230
2092 tl1_reserved 1 ! 0x231
2094 tl1_gen T_DATA_ERROR ! 0x232
2095 tl1_reserved 1 ! 0x233
2099 tl1_gen T_RESERVED ! 0x235
2101 tl1_gen T_RESERVED ! 0x236
2103 tl1_gen T_PRIVILEGED_ACTION ! 0x237
2104 tl1_reserved 9 ! 0x238-0x240
2106 tl1_intr_level ! 0x241-0x24f
2107 tl1_reserved 16 ! 0x250-0x25f
2111 tl1_gen T_PA_WATCHPOINT ! 0x261
2113 tl1_gen T_VA_WATCHPOINT ! 0x262
2115 tl1_gen T_CORRECTED_ECC_ERROR ! 0x263
2117 tl1_immu_miss ! 0x264
2119 tl1_dmmu_miss ! 0x268
2121 tl1_dmmu_prot ! 0x26c
2122 tl1_reserved 16 ! 0x270-0x27f
! TL1 spill/fill vectors (the macros defined above).
2124 tl1_spill_0_n ! 0x280
2125 tl1_spill_bad 1 ! 0x284
2127 tl1_spill_2_n ! 0x288
2129 tl1_spill_3_n ! 0x28c
2130 tl1_spill_bad 3 ! 0x290-0x29b
2132 tl1_spill_7_n ! 0x29c
2134 tl1_spill_0_o ! 0x2a0
2136 tl1_spill_1_o ! 0x2a4
2138 tl1_spill_2_o ! 0x2a8
2139 tl1_spill_bad 5 ! 0x2ac-0x2bf
2141 tl1_fill_0_n ! 0x2c0
2142 tl1_fill_bad 1 ! 0x2c4
2144 tl1_fill_2_n ! 0x2c8
2146 tl1_fill_3_n ! 0x2cc
2147 tl1_fill_bad 3 ! 0x2d0-0x2db
2149 tl1_fill_7_n ! 0x2dc
2150 tl1_fill_bad 8 ! 0x2e0-0x2ff
2151 tl1_reserved 1 ! 0x300
2153 tl1_gen T_BREAKPOINT ! 0x301
2154 tl1_gen T_RSTRWP_PHYS ! 0x302
2155 tl1_gen T_RSTRWP_VIRT ! 0x303
2156 tl1_reserved 252 ! 0x304-0x3ff
2163 * User trap entry point
2165 * void tl0_utrap(u_long type, u_long o1, u_long o2, u_long tar, u_long sfar,
2168 * This handles redirecting a trap back to usermode as a user trap. The user
2169 * program must have first registered a trap handler with the kernel using
2170 * sysarch(SPARC_UTRAP_INSTALL). The trap handler is passed enough state
2171 * for it to return to the trapping code directly, it will not return through
2172 * the kernel. The trap type is passed in %o0, all out registers must be
2173 * passed through to tl0_trap or to usermode untouched. Note that the
2174 * parameters passed in out registers may be used by the user trap handler.
2175 * Do not change the registers they are passed in or you will break the ABI.
2177 * If the trap type allows user traps, setup state to execute the user trap
2178 * handler and bounce back to usermode, otherwise branch to tl0_trap.
! NOTE(review): the ENTRY/END lines and many interior instructions of
! tl0_utrap are elided in this view; comments below cover only what is
! visible.
2182 * Check if the trap type allows user traps.
2185 bge,a,pt %xcc, tl0_trap
2189 * Load the user trap handler from the utrap table.
! curthread->td_proc->p_md.md_utrap is the per-process utrap table; a
! NULL table or a NULL entry for this trap type falls back to tl0_trap.
2191 ldx [PCPU(CURTHREAD)], %l0
2192 ldx [%l0 + TD_PROC], %l0
2193 ldx [%l0 + P_MD + MD_UTRAP], %l0
2194 brz,pt %l0, tl0_trap
2195 sllx %o0, PTR_SHIFT, %l1
2196 ldx [%l0 + %l1], %l0
2197 brz,a,pt %l0, tl0_trap
2201 * If the save we did on entry to the kernel had to spill a window
2202 * to the pcb, pretend we took a spill trap instead. Any windows
2203 * that are in the pcb must be copied out or the fill handler will
2204 * not be able to find them, since the user trap handler returns
2205 * directly to the trapping code. Note that we only support precise
2206 * user traps, which implies that the condition that caused the trap
2207 * in the first place is still valid, so it will occur again when we
2208 * re-execute the trapping instruction.
2210 ldx [PCB_REG + PCB_NSAVED], %l1
2211 brnz,a,pn %l1, tl0_trap
2215 * Pass %fsr in %l4, %tstate in %l5, %tpc in %l6 and %tnpc in %l7.
2216 * The ABI specifies only %l6 and %l7, but we need to pass %fsr or
2217 * it may be clobbered by an interrupt before the user trap code
2218 * can read it, and we must pass %tstate in order to restore %ccr
2219 * and %asi. The %fsr must be stored to memory, so we use the
2220 * temporary stack for that.
2223 or %l1, FPRS_FEF, %l2
2236 * Setup %tnpc to return to.
2241 * Setup %wstate for return, clear WSTATE_TRANSITION.
2244 and %l1, WSTATE_NORMAL_MASK, %l1
2245 wrpr %l1, 0, %wstate
2248 * Setup %tstate for return, change the saved cwp to point to the
2249 * current window instead of the window at the time of the trap.
2251 andn %l5, TSTATE_CWP_MASK, %l1
2253 wrpr %l1, %l2, %tstate
2256 * Setup %sp. Userland processes will crash if this is not setup.
2261 * Execute the user trap handler.
2267 * (Real) User trap entry point
2269 * void tl0_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfsr,
2272 * The following setup has been performed:
2273 * - the windows have been split and the active user window has been saved
2274 * (maybe just to the pcb)
2275 * - we are on alternate globals and interrupts are disabled
2277 * We switch to the kernel stack, build a trapframe, switch to normal
2278 * globals, enable interrupts and call trap.
2280 * NOTE: We must be very careful setting up the per-cpu pointer. We know that
2281 * it has been pre-set in alternate globals, so we read it from there and setup
2282 * the normal %g7 *before* enabling interrupts. This avoids any possibility
2283 * of cpu migration and using the wrong pcpup.
! NOTE(review): the ENTRY line and several interior instructions of
! tl0_trap are elided in this view.
2287 * Force kernel store order.
2289 wrpr %g0, PSTATE_ALT, %pstate
! Optional kernel trace record (compiled in only with KTR_TRAP).
2298 #if KTR_COMPILE & KTR_TRAP
2300 "tl0_trap: td=%p type=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2301 , %g1, %g2, %g3, 7, 8, 9)
2302 ldx [PCPU(CURTHREAD)], %g2
2303 stx %g2, [%g1 + KTR_PARM1]
2304 stx %o0, [%g1 + KTR_PARM2]
2306 stx %g2, [%g1 + KTR_PARM3]
2307 stx %l1, [%g1 + KTR_PARM4]
2308 stx %l2, [%g1 + KTR_PARM5]
2309 stx %i6, [%g1 + KTR_PARM6]
! Move the user window state to "other" and take over the windows: all
! restorable windows become otherwin, canrestore goes to zero.
2313 1: and %l5, WSTATE_NORMAL_MASK, %l5
2314 sllx %l5, WSTATE_OTHER_SHIFT, %l5
2315 wrpr %l5, WSTATE_KERNEL, %wstate
2316 rdpr %canrestore, %l6
2317 wrpr %l6, 0, %otherwin
2318 wrpr %g0, 0, %canrestore
! Carve the trapframe out of the kernel stack below the pcb.
2320 sub PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
2322 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2323 stx %o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2324 stx %o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2325 stx %o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2326 stx %o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2328 stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2329 stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2330 stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2331 stx %l3, [%sp + SPOFF + CCFSZ + TF_Y]
2332 stx %l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2333 stx %l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
! %fsr can only be stored with FP enabled, so enable it briefly.
2335 wr %g0, FPRS_FEF, %fprs
2336 stx %fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2338 stx %l6, [%sp + SPOFF + CCFSZ + TF_GSR]
! Save the normal globals; they are only reachable from PSTATE_NORMAL.
2343 wrpr %g0, PSTATE_NORMAL, %pstate
2345 stx %g6, [%sp + SPOFF + CCFSZ + TF_G6]
2346 stx %g7, [%sp + SPOFF + CCFSZ + TF_G7]
2350 wrpr %g0, PSTATE_KERNEL, %pstate
! The user's outs arrived as our ins across the save; stash them.
2352 stx %i0, [%sp + SPOFF + CCFSZ + TF_O0]
2353 stx %i1, [%sp + SPOFF + CCFSZ + TF_O1]
2354 stx %i2, [%sp + SPOFF + CCFSZ + TF_O2]
2355 stx %i3, [%sp + SPOFF + CCFSZ + TF_O3]
2356 stx %i4, [%sp + SPOFF + CCFSZ + TF_O4]
2357 stx %i5, [%sp + SPOFF + CCFSZ + TF_O5]
2358 stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
2359 stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
2361 stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
2362 stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
2363 stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
2364 stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
2365 stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
! Fake the return address so trap() "returns" into tl0_ret.
2367 set tl0_ret - 8, %o7
2369 add %sp, CCFSZ + SPOFF, %o0
2373 * void tl0_intr(u_int level, u_int mask)
! Interrupt entry from usermode: like tl0_trap but specialized for
! vectored interrupts — builds a trapframe, then dispatches directly to
! the handler from intr_handlers[] and bumps the interrupt counters.
! NOTE(review): the ENTRY line and several interior instructions are
! elided in this view.
2377 * Force kernel store order.
2379 wrpr %g0, PSTATE_ALT, %pstate
2388 #if KTR_COMPILE & KTR_INTR
2390 "tl0_intr: td=%p level=%#x pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2391 , %g1, %g2, %g3, 7, 8, 9)
2392 ldx [PCPU(CURTHREAD)], %g2
2393 stx %g2, [%g1 + KTR_PARM1]
2394 stx %o0, [%g1 + KTR_PARM2]
2396 stx %g2, [%g1 + KTR_PARM3]
2397 stx %l1, [%g1 + KTR_PARM4]
2398 stx %l2, [%g1 + KTR_PARM5]
2399 stx %i6, [%g1 + KTR_PARM6]
! Acknowledge the interrupt by clearing its softint bit (mask in %o1).
2404 wr %o1, 0, %clear_softint
! Take over the register windows, as in tl0_trap.
2406 and %l5, WSTATE_NORMAL_MASK, %l5
2407 sllx %l5, WSTATE_OTHER_SHIFT, %l5
2408 wrpr %l5, WSTATE_KERNEL, %wstate
2409 rdpr %canrestore, %l6
2410 wrpr %l6, 0, %otherwin
2411 wrpr %g0, 0, %canrestore
2413 sub PCB_REG, SPOFF + CCFSZ + TF_SIZEOF, %sp
2415 stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2416 stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2417 stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2418 stx %l3, [%sp + SPOFF + CCFSZ + TF_Y]
2419 stx %l4, [%sp + SPOFF + CCFSZ + TF_FPRS]
2420 stx %l5, [%sp + SPOFF + CCFSZ + TF_WSTATE]
2422 wr %g0, FPRS_FEF, %fprs
2423 stx %fsr, [%sp + SPOFF + CCFSZ + TF_FSR]
2425 stx %l6, [%sp + SPOFF + CCFSZ + TF_GSR]
2429 mov T_INTERRUPT, %o1
2431 stx %o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2432 stx %o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
2436 wrpr %g0, PSTATE_NORMAL, %pstate
2438 stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
2439 stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
2440 stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
2441 stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
2442 stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
2443 stx %g6, [%sp + SPOFF + CCFSZ + TF_G6]
2444 stx %g7, [%sp + SPOFF + CCFSZ + TF_G7]
2448 wrpr %g0, PSTATE_KERNEL, %pstate
2450 stx %i0, [%sp + SPOFF + CCFSZ + TF_O0]
2451 stx %i1, [%sp + SPOFF + CCFSZ + TF_O1]
2452 stx %i2, [%sp + SPOFF + CCFSZ + TF_O2]
2453 stx %i3, [%sp + SPOFF + CCFSZ + TF_O3]
2454 stx %i4, [%sp + SPOFF + CCFSZ + TF_O4]
2455 stx %i5, [%sp + SPOFF + CCFSZ + TF_O5]
2456 stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
2457 stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
! Look up the per-PIL interrupt handler: intr_handlers[pil].
2459 SET(intr_handlers, %l1, %l0)
2460 sllx %l3, IH_SHIFT, %l1
2461 ldx [%l0 + %l1], %l1
2462 KASSERT(%l1, "tl0_intr: ih null")
2464 add %sp, CCFSZ + SPOFF, %o0
! Interrupt statistics: intrcnt[] and the per-PIL counter index.
2466 /* %l3 contains PIL */
2467 SET(intrcnt, %l1, %l2)
2468 prefetcha [%l2] ASI_N, 1
2469 SET(pil_countp, %l1, %l0)
2471 lduh [%l0 + %l1], %l0
! Bump the global vm interrupt counter (32-bit load/store).
2478 lduw [PCPU(CNT) + V_INTR], %l0
2480 stw %l0, [PCPU(CNT) + V_INTR]
2487 * Initiate return to usermode.
2489 * Called with a trapframe on the stack. The window that was setup in
2490 * tl0_trap may have been used by "fast" trap handlers that pretend to be
2491 * leaf functions, so all ins and locals may have been clobbered since
2494 * This code is rather long and complicated.
! NOTE(review): the ENTRY line and a number of interior instructions of
! tl0_ret (and its fill-failure path) are elided in this view.
2498 * Check for pending asts atomically with returning. We must raise
2499 * the PIL before checking, and if no asts are found the PIL must
2500 * remain raised until the retry is executed, or we risk missing asts
2501 * caused by interrupts occurring after the test. If the PIL is
2502 * lowered, as it is when we call ast, the check must be re-executed.
2504 wrpr %g0, PIL_TICK, %pil
2505 ldx [PCPU(CURTHREAD)], %l0
2506 lduw [%l0 + TD_FLAGS], %l1
2507 set TDF_ASTPENDING | TDF_NEEDRESCHED, %l2
2513 * We have an AST. Re-enable interrupts and handle it, then restart
2514 * the return sequence.
2518 add %sp, CCFSZ + SPOFF, %o0
2523 * Check for windows that were spilled to the pcb and need to be
2524 * copied out. This must be the last thing that is done before the
2525 * return to usermode. If there are still user windows in the cpu
2526 * and we call a nested function after this, which causes them to be
2527 * spilled to the pcb, they will not be copied out and the stack will
2530 1: ldx [PCB_REG + PCB_NSAVED], %l1
2535 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2537 add %sp, SPOFF + CCFSZ, %o0
2542 * Restore the out and most global registers from the trapframe.
2543 * The ins will become the outs when we restore below.
2545 2: ldx [%sp + SPOFF + CCFSZ + TF_O0], %i0
2546 ldx [%sp + SPOFF + CCFSZ + TF_O1], %i1
2547 ldx [%sp + SPOFF + CCFSZ + TF_O2], %i2
2548 ldx [%sp + SPOFF + CCFSZ + TF_O3], %i3
2549 ldx [%sp + SPOFF + CCFSZ + TF_O4], %i4
2550 ldx [%sp + SPOFF + CCFSZ + TF_O5], %i5
2551 ldx [%sp + SPOFF + CCFSZ + TF_O6], %i6
2552 ldx [%sp + SPOFF + CCFSZ + TF_O7], %i7
2554 ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
2555 ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
2556 ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
2557 ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
2558 ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
2561 * Load everything we need to restore below before disabling
! Stage trapframe fields in locals: they survive until the final restore.
2564 ldx [%sp + SPOFF + CCFSZ + TF_FPRS], %l0
2565 ldx [%sp + SPOFF + CCFSZ + TF_GSR], %l1
2566 ldx [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2567 ldx [%sp + SPOFF + CCFSZ + TF_TPC], %l3
2568 ldx [%sp + SPOFF + CCFSZ + TF_TSTATE], %l4
2569 ldx [%sp + SPOFF + CCFSZ + TF_Y], %l5
2570 ldx [%sp + SPOFF + CCFSZ + TF_WSTATE], %l6
2573 * Disable interrupts to restore the special globals. They are not
2574 * saved and restored for all kernel traps, so an interrupt at the
2575 * wrong time would clobber them.
2577 wrpr %g0, PSTATE_NORMAL, %pstate
2579 ldx [%sp + SPOFF + CCFSZ + TF_G6], %g6
2580 ldx [%sp + SPOFF + CCFSZ + TF_G7], %g7
2583 * Switch to alternate globals. This frees up some registers we
2584 * can use after the restore changes our window.
2586 wrpr %g0, PSTATE_ALT, %pstate
2589 * Drop %pil to zero. It must have been zero at the time of the
2590 * trap, since we were in usermode, but it was raised above in
2591 * order to check for asts atomically. We have interrupts disabled
2592 * so any interrupts will not be serviced until we complete the
2593 * return to usermode.
2598 * Save %fprs in an alternate global so it can be restored after the
2599 * restore instruction below. If we restore it before the restore,
2600 * and the restore traps we may run for a while with floating point
2601 * enabled in the kernel, which we want to avoid.
2606 * Restore %fsr and %gsr. These need floating point enabled in %fprs,
2607 * so we set it temporarily and then clear it.
2609 wr %g0, FPRS_FEF, %fprs
2610 ldx [%sp + SPOFF + CCFSZ + TF_FSR], %fsr
2615 * Restore program counters. This could be done after the restore
2616 * but we're out of alternate globals to store them in...
2622 * Save %tstate in an alternate global and clear the %cwp field. %cwp
2623 * will be affected by the restore below and we need to make sure it
2624 * points to the current window at that time, not the window that was
2625 * active at the time of the trap.
2627 andn %l4, TSTATE_CWP_MASK, %g2
2630 * Save %y in an alternate global.
2635 * Setup %wstate for return. We need to restore the user window state
2636 * which we saved in wstate.other when we trapped. We also need to
2637 * set the transition bit so the restore will be handled specially
2638 * if it traps, use the xor feature of wrpr to do that.
2640 srlx %l6, WSTATE_OTHER_SHIFT, %g3
2641 wrpr %g3, WSTATE_TRANSITION, %wstate
2644 * Setup window management registers for return. If not all user
2645 * windows were spilled in the kernel %otherwin will be non-zero,
2646 * so we need to transfer it to %canrestore to correctly restore
2647 * those windows. Otherwise everything gets set to zero and the
2648 * restore below will fill a window directly from the user stack.
2651 wrpr %o0, 0, %canrestore
2652 wrpr %g0, 0, %otherwin
2653 wrpr %o0, 0, %cleanwin
2656 * Now do the restore. If this instruction causes a fill trap which
2657 * fails to fill a window from the user stack, we will resume at
2658 * tl0_ret_fill_end and call back into the kernel.
2664 * We made it. We're back in the window that was active at the time
2665 * of the trap, and ready to return to usermode.
2669 * Restore %frps. This was saved in an alternate global above.
2674 * Fixup %tstate so the saved %cwp points to the current window and
2678 wrpr %g2, %g1, %tstate
2681 * Restore the user window state. The transition bit was set above
2682 * for special handling of the restore, this clears it.
2684 wrpr %g3, 0, %wstate
2686 #if KTR_COMPILE & KTR_TRAP
2687 CATR(KTR_TRAP, "tl0_ret: td=%#lx pil=%#lx pc=%#lx npc=%#lx sp=%#lx"
2688 , %g1, %g2, %g3, 7, 8, 9)
2689 ldx [PCPU(CURTHREAD)], %g2
2690 stx %g2, [%g1 + KTR_PARM1]
2692 stx %g2, [%g1 + KTR_PARM2]
2694 stx %g2, [%g1 + KTR_PARM3]
2696 stx %g2, [%g1 + KTR_PARM4]
2697 stx %sp, [%g1 + KTR_PARM5]
2702 * Restore %y. Note that the CATR above clobbered it.
2707 * Return to usermode.
! ---- fill-failure path (tl0_ret_fill_end) ----
2712 #if KTR_COMPILE & KTR_TRAP
2713 CATR(KTR_TRAP, "tl0_ret: fill magic ps=%#lx ws=%#lx sp=%#lx"
2714 , %l0, %l1, %l2, 7, 8, 9)
2716 stx %l1, [%l0 + KTR_PARM1]
2717 stx %l6, [%l0 + KTR_PARM2]
2718 stx %sp, [%l0 + KTR_PARM3]
2722 * Restore %y clobbered by the CATR. This was saved in %l5 above.
2728 * The restore above caused a fill trap and the fill handler was
2729 * unable to fill a window from the user stack. The special fill
2730 * handler recognized this and punted, sending us here. We need
2731 * to carefully undo any state that was restored before the restore
2732 * was executed and call trap again. Trap will copyin a window
2733 * from the user stack which will fault in the page we need so the
2734 * restore above will succeed when we try again. If this fails
2735 * the process has trashed its stack, so we kill it.
2739 * Restore the kernel window state. This was saved in %l6 above, and
2740 * since the restore failed we're back in the same window.
2742 wrpr %l6, 0, %wstate
2745 * Restore the normal globals which have predefined values in the
2746 * kernel. We clobbered them above restoring the user's globals
2747 * so this is very important.
2748 * XXX PSTATE_ALT must already be set.
2750 wrpr %g0, PSTATE_ALT, %pstate
2753 wrpr %g0, PSTATE_NORMAL, %pstate
2756 wrpr %g0, PSTATE_KERNEL, %pstate
2759 * Simulate a fill trap and then start the whole return sequence over
2760 * again. This is special because it only copies in 1 window, not 2
2761 * as we would for a normal failed fill. This may be the first time
2762 * the process has been run, so there may not be 2 windows worth of
2766 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2768 add %sp, SPOFF + CCFSZ, %o0
2774 * Kernel trap entry point
2776 * void tl1_trap(u_int type, u_long o1, u_long o2, u_long tar, u_long sfar,
2779 * This is easy because the stack is already setup and the windows don't need
2780 * to be split. We build a trapframe and call trap(), the same as above, but
2781 * the outs don't need to be saved.
! NOTE(review): the ENTRY/END lines and several interior instructions of
! tl1_trap and tl1_ret are elided in this view.
2791 #if KTR_COMPILE & KTR_TRAP
2792 CATR(KTR_TRAP, "tl1_trap: td=%p type=%#lx pil=%#lx pc=%#lx sp=%#lx"
2793 , %g1, %g2, %g3, 7, 8, 9)
2794 ldx [PCPU(CURTHREAD)], %g2
2795 stx %g2, [%g1 + KTR_PARM1]
2796 stx %o0, [%g1 + KTR_PARM2]
2797 stx %l3, [%g1 + KTR_PARM3]
2798 stx %l1, [%g1 + KTR_PARM4]
2799 stx %i6, [%g1 + KTR_PARM5]
2805 and %l5, WSTATE_OTHER_MASK, %l5
2806 wrpr %l5, WSTATE_KERNEL, %wstate
! Build the trapframe on the already-valid kernel stack.
2808 stx %o0, [%sp + SPOFF + CCFSZ + TF_TYPE]
2809 stx %o1, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2810 stx %o3, [%sp + SPOFF + CCFSZ + TF_TAR]
2811 stx %o4, [%sp + SPOFF + CCFSZ + TF_SFAR]
2812 stx %o5, [%sp + SPOFF + CCFSZ + TF_SFSR]
2814 stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2815 stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2816 stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2817 stx %l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2818 stx %l4, [%sp + SPOFF + CCFSZ + TF_Y]
! Save the normal globals; only reachable from PSTATE_NORMAL.
2822 wrpr %g0, PSTATE_NORMAL, %pstate
2824 stx %g6, [%sp + SPOFF + CCFSZ + TF_G6]
2825 stx %g7, [%sp + SPOFF + CCFSZ + TF_G7]
2829 wrpr %g0, PSTATE_KERNEL, %pstate
2831 stx %i0, [%sp + SPOFF + CCFSZ + TF_O0]
2832 stx %i1, [%sp + SPOFF + CCFSZ + TF_O1]
2833 stx %i2, [%sp + SPOFF + CCFSZ + TF_O2]
2834 stx %i3, [%sp + SPOFF + CCFSZ + TF_O3]
2835 stx %i4, [%sp + SPOFF + CCFSZ + TF_O4]
2836 stx %i5, [%sp + SPOFF + CCFSZ + TF_O5]
2837 stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
2838 stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
2840 stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
2841 stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
2842 stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
2843 stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
2844 stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
! Fake the return address so trap() "returns" into tl1_ret.
2846 set tl1_ret - 8, %o7
2848 add %sp, CCFSZ + SPOFF, %o0
! ---- tl1_ret: restore from the trapframe and retry ----
2852 ldx [%sp + SPOFF + CCFSZ + TF_O0], %i0
2853 ldx [%sp + SPOFF + CCFSZ + TF_O1], %i1
2854 ldx [%sp + SPOFF + CCFSZ + TF_O2], %i2
2855 ldx [%sp + SPOFF + CCFSZ + TF_O3], %i3
2856 ldx [%sp + SPOFF + CCFSZ + TF_O4], %i4
2857 ldx [%sp + SPOFF + CCFSZ + TF_O5], %i5
2858 ldx [%sp + SPOFF + CCFSZ + TF_O6], %i6
2859 ldx [%sp + SPOFF + CCFSZ + TF_O7], %i7
2861 ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
2862 ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
2863 ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
2864 ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
2865 ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
2867 ldx [%sp + SPOFF + CCFSZ + TF_TSTATE], %l0
2868 ldx [%sp + SPOFF + CCFSZ + TF_TPC], %l1
2869 ldx [%sp + SPOFF + CCFSZ + TF_TNPC], %l2
2870 ldx [%sp + SPOFF + CCFSZ + TF_PIL], %l3
2871 ldx [%sp + SPOFF + CCFSZ + TF_Y], %l4
! Range check against the PROM address window (visible comparison bounds).
2873 set VM_MIN_PROM_ADDRESS, %l5
2877 set VM_MAX_PROM_ADDRESS, %l5
2882 wrpr %g0, PSTATE_NORMAL, %pstate
2884 ldx [%sp + SPOFF + CCFSZ + TF_G6], %g6
2885 ldx [%sp + SPOFF + CCFSZ + TF_G7], %g7
2887 1: wrpr %g0, PSTATE_ALT, %pstate
! Clear the saved %cwp so %tstate tracks the current window on retry.
2889 andn %l0, TSTATE_CWP_MASK, %g1
2903 wrpr %g1, %g2, %tstate
2905 #if KTR_COMPILE & KTR_TRAP
2906 CATR(KTR_TRAP, "tl1_ret: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
2907 , %g1, %g2, %g3, 7, 8, 9)
2908 ldx [PCPU(CURTHREAD)], %g2
2909 stx %g2, [%g1 + KTR_PARM1]
2911 stx %g2, [%g1 + KTR_PARM2]
2913 stx %g2, [%g1 + KTR_PARM3]
2915 stx %g2, [%g1 + KTR_PARM4]
2916 stx %sp, [%g1 + KTR_PARM5]
2926 * void tl1_intr(u_int level, u_int mask)
! Interrupt entry while already in the kernel (TL1): build a trapframe
! on the current kernel stack, dispatch to intr_handlers[pil], bump the
! interrupt counters, then restore and return inline (no trap() call).
! NOTE(review): the ENTRY/END lines and several interior instructions
! are elided in this view.
2936 #if KTR_COMPILE & KTR_INTR
2938 "tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
2939 , %g1, %g2, %g3, 7, 8, 9)
2940 ldx [PCPU(CURTHREAD)], %g2
2941 stx %g2, [%g1 + KTR_PARM1]
2942 stx %o0, [%g1 + KTR_PARM2]
2943 stx %l3, [%g1 + KTR_PARM3]
2944 stx %l1, [%g1 + KTR_PARM4]
2945 stx %i6, [%g1 + KTR_PARM5]
! Acknowledge the interrupt by clearing its softint bit (mask in %o1).
2950 wr %o1, 0, %clear_softint
2954 and %l5, WSTATE_OTHER_MASK, %l5
2955 wrpr %l5, WSTATE_KERNEL, %wstate
2957 stx %l0, [%sp + SPOFF + CCFSZ + TF_TSTATE]
2958 stx %l1, [%sp + SPOFF + CCFSZ + TF_TPC]
2959 stx %l2, [%sp + SPOFF + CCFSZ + TF_TNPC]
2960 stx %l3, [%sp + SPOFF + CCFSZ + TF_PIL]
2961 stx %l4, [%sp + SPOFF + CCFSZ + TF_Y]
2964 mov T_INTERRUPT | T_KERNEL, %o1
2966 stx %o0, [%sp + SPOFF + CCFSZ + TF_LEVEL]
2967 stx %o1, [%sp + SPOFF + CCFSZ + TF_TYPE]
2969 stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
2970 stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
2974 wrpr %g0, PSTATE_NORMAL, %pstate
2976 stx %g1, [%sp + SPOFF + CCFSZ + TF_G1]
2977 stx %g2, [%sp + SPOFF + CCFSZ + TF_G2]
2978 stx %g3, [%sp + SPOFF + CCFSZ + TF_G3]
2979 stx %g4, [%sp + SPOFF + CCFSZ + TF_G4]
2980 stx %g5, [%sp + SPOFF + CCFSZ + TF_G5]
2984 wrpr %g0, PSTATE_KERNEL, %pstate
! Look up the per-PIL interrupt handler: intr_handlers[pil].
2986 SET(intr_handlers, %l5, %l4)
2987 sllx %l7, IH_SHIFT, %l5
2988 ldx [%l4 + %l5], %l5
2989 KASSERT(%l5, "tl1_intr: ih null")
2991 add %sp, CCFSZ + SPOFF, %o0
! Interrupt statistics, as in tl0_intr.
2993 /* %l7 contains PIL */
2994 SET(intrcnt, %l5, %l4)
2995 prefetcha [%l4] ASI_N, 1
2996 SET(pil_countp, %l5, %l6)
2998 lduh [%l5 + %l6], %l5
3005 lduw [PCPU(CNT) + V_INTR], %l4
3007 stw %l4, [PCPU(CNT) + V_INTR]
! Restore state from the trapframe and return.
3009 ldx [%sp + SPOFF + CCFSZ + TF_Y], %l4
3011 ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1
3012 ldx [%sp + SPOFF + CCFSZ + TF_G2], %g2
3013 ldx [%sp + SPOFF + CCFSZ + TF_G3], %g3
3014 ldx [%sp + SPOFF + CCFSZ + TF_G4], %g4
3015 ldx [%sp + SPOFF + CCFSZ + TF_G5], %g5
3017 wrpr %g0, PSTATE_ALT, %pstate
3019 andn %l0, TSTATE_CWP_MASK, %g1
3032 wrpr %g1, %g2, %tstate
3034 #if KTR_COMPILE & KTR_INTR
3035 CATR(KTR_INTR, "tl1_intr: td=%#x pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
3036 , %g1, %g2, %g3, 7, 8, 9)
3037 ldx [PCPU(CURTHREAD)], %g2
3038 stx %g2, [%g1 + KTR_PARM1]
3040 stx %g2, [%g1 + KTR_PARM2]
3042 stx %g2, [%g1 + KTR_PARM3]
3044 stx %g2, [%g1 + KTR_PARM4]
3045 stx %sp, [%g1 + KTR_PARM5]
3059 * Freshly forked processes come here when switched to for the first time.
3060 * The arguments to fork_exit() have been setup in the locals, we must move
! fork_trampoline: first-run entry for a newly forked thread.  The
! visible portion is the optional KTR trace record (thread pointer and
! process name); the argument shuffling and the call to fork_exit() are
! elided in this view.
3063 ENTRY(fork_trampoline)
3064 #if KTR_COMPILE & KTR_PROC
3065 CATR(KTR_PROC, "fork_trampoline: td=%p (%s) cwp=%#lx"
3066 , %g1, %g2, %g3, 7, 8, 9)
3067 ldx [PCPU(CURTHREAD)], %g2
3068 stx %g2, [%g1 + KTR_PARM1]
! Log the process name: &td->td_proc->p_comm.
3069 ldx [%g2 + TD_PROC], %g2
3070 add %g2, P_COMM, %g2
3071 stx %g2, [%g1 + KTR_PARM2]
3073 stx %g2, [%g1 + KTR_PARM3]
3082 END(fork_trampoline)