2 * Copyright (c) 2003,2004 Marcel Moolenaar
3 * Copyright (c) 2000 Doug Rabson
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <machine/asm.h>
29 __FBSDID("$FreeBSD$");
31 #include "opt_xtrace.h"
33 #include <machine/pte.h>
37 * Nested TLB restart tokens. These are used by the
38 * nested TLB handler for jumping back to the code
39 * where the nested TLB was caused.
41 #define NTLBRT_SAVE 0x12c12c
42 #define NTLBRT_RESTORE 0x12c12d
45 * ar.k7 = kernel memory stack
46 * ar.k6 = kernel register stack
47 * ar.k5 = EPC gateway page
51 .section .ivt.data, "aw"
// Exception-trace (XTRACE) control variables. Written by C code,
// read by ia64_xtrace_write below.
61 .global ia64_xtrace_mask
62 .size ia64_xtrace_mask, 8
63 ia64_xtrace_mask: data8 0	// 8-byte mask, loaded at the end of ia64_xtrace_write -- presumably wraps the record pointer; elided lines, confirm in full source
66 .global ia64_xtrace_enabled
67 .size ia64_xtrace_enabled, 4
68 ia64_xtrace_enabled: data4 0	// non-zero enables tracing (tested against 0 in ia64_xtrace_write)
/*
 * XTRACE_HOOK(offset): expanded at the top of selected IVT entries when
 * option XTRACE is enabled. It branches to ia64_xtrace_write to log the
 * interruption and afterwards restores the predicate registers from r28.
 * NOTE(review): several continuation lines of this macro are not visible
 * in this excerpt; comments cannot be placed inside the \-continuations.
 */
70 #define XTRACE_HOOK(offset) \
79 br.sptk ia64_xtrace_write ;; \
84 mov pr = r28, 0x1ffff ;; \
87 .section .ivt.text, "ax"
89 // We can only use r25, r26 & r27
/*
 * ia64_xtrace_write: append one 128-byte trace record describing the
 * current interruption. The record layout is given by the offsets in the
 * store comments below (IVT offset, ITC, iip, ifa, isr, ipsr, itir, iipa,
 * ifs, iim, iha, unat, rsc, bsp, pcpu/tls, sp). p15 is computed for the
 * disabled/no-buffer early-outs (branches elided from this excerpt).
 */
90 ENTRY_NOPROFILE(ia64_xtrace_write, 0)
93 movl r26 = ia64_xtrace_enabled
104 cmp.eq p15,p0 = r0, r26	// p15 = tracing disabled (enabled == 0)
110 cmp.eq p15,p0 = r0, r27	// p15 = no trace record pointer
115 st8 [r27] = r25, 8 // 0x00 IVT
121 st8 [r27] = r26, 8 // 0x08 ITC
127 st8 [r27] = r25, 8 // 0x10 IIP
133 st8 [r27] = r26, 8 // 0x18 IFA
139 st8 [r27] = r25, 8 // 0x20 ISR
145 st8 [r27] = r26, 8 // 0x28 IPSR
151 st8 [r27] = r25, 8 // 0x30 ITIR
157 st8 [r27] = r26, 8 // 0x38 IIPA
163 st8 [r27] = r25, 8 // 0x40 IFS
169 st8 [r27] = r26, 8 // 0x48 IIM
175 st8 [r27] = r25, 8 // 0x50 IHA
181 st8 [r27] = r26, 8 // 0x58 UNAT
187 st8 [r27] = r25, 8 // 0x60 RSC
193 st8 [r27] = r26, 8 // 0x68 BSP
199 st8 [r27] = r25, 8 // 0x70 PCPU/TLS
205 st8 [r27] = r26, 8 // 0x78 SP
206 movl r25 = ia64_xtrace_mask	// NOTE(review): mask application elided -- confirm in full source
222 END(ia64_xtrace_write)
226 #define XTRACE_HOOK(offset)
228 .section .ivt.text, "ax"
233 * exception_save: save interrupted state
236 * r16 address of bundle that contains the branch. The
237 * return address will be the next bundle.
238 * r17 the value to save as ifa in the trapframe. This
239 * normally is cr.ifa, but some interruptions set
240 * cr.iim and not cr.ifa.
243 * p15 interrupted from user stack
244 * p14 interrupted from kernel stack
245 * p13 interrupted from user backing store
246 * p12 interrupted from kernel backing store
247 * p11 interrupts were enabled
248 * p10 interrupts were disabled
250 ENTRY_NOPROFILE(exception_save, 0)
258 cmp.le p14,p15=IA64_VM_MINKERN_REGION,r31	// kernel (p14) vs user (p15) stack, by region of the interrupted sp
260 (p15) mov r23=ar.k7 // kernel memory stack
266 add r30=-SIZEOF_TRAPFRAME,r23	// carve a trapframe below the top of stack
274 addl r29=NTLBRT_SAVE,r0 // 22-bit restart token.
279 * We have a 1KB aligned trapframe, pointed to by r30. We can't
280 * reliably write to the trapframe using virtual addressing, due
281 * to the fact that TC entries we depend on can be removed by:
282 * 1. ptc.g instructions issued by other threads/cores/CPUs, or
283 * 2. TC modifications in another thread on the same core.
284 * When our TC entry gets removed, we get nested TLB faults and
285 * since no state is saved, we can only deal with those when
286 * explicitly coded and expected.
287 * As such, we switch to physical addressing and account for the
288 * fact that the tpa instruction can cause a nested TLB fault.
289 * Since the data nested TLB fault does not preserve any state,
290 * we have to be careful what we clobber. Consequently, we have
291 * to be careful what we use here. Below a list of registers that
292 * are considered alive:
294 * r18=pr, r19=length, r20=unat, r21=rsc, r22=iip, r23=TOS
296 * r30=trapframe pointers
297 * p14,p15=memory stack switch
299 exception_save_restart:
300 tpa r24=r30 // Nested TLB fault possible
306 add r29=16,r19 // Clobber restart token
313 // r18=pr, r19=length, r20=unat, r21=rsc, r22=iip, r23=TOS
316 st8 [r30]=r19,16 // length
317 st8 [r31]=r0,16 // flags
321 st8.spill [r30]=sp,16 // sp
322 st8 [r31]=r20,16 // unat
332 // r18=pr, r19=rnat, r20=bspstore, r21=rsc, r22=iip, r23=rp
335 st8 [r30]=r23,16 // rp
336 st8 [r31]=r18,16 // pr
341 st8 [r30]=r24,16 // pfs
342 st8 [r31]=r20,16 // bspstore
352 // r18=fpsr, r19=rnat, r20=bspstore, r21=rsc, r22=iip, r23=ipsr
354 st8 [r30]=r19,16 // rnat
355 st8 [r31]=r0,16 // __spare
356 cmp.le p12,p13=IA64_VM_MINKERN_REGION,r24	// kernel (p12) vs user (p13) backing store
360 st8.spill [r30]=r13,16 // tp
361 st8 [r31]=r21,16 // rsc
362 tbit.nz p11,p10=r23,14 // p11=interrupts enabled
366 (p13) mov r21=ar.k6 // kernel register stack
368 st8 [r30]=r18,16 // fpsr
369 (p13) dep r20=r20,r21,0,9 // align dirty registers
372 // r19=rnat, r20=bspstore, r22=iip, r23=ipsr
374 st8 [r31]=r23,16 // psr
375 (p13) mov ar.bspstore=r20
380 (p13) mov ar.rnat=r19
387 st8.spill [r30]=gp,16 // gp
391 // r18=ndirty, r19=ifs, r22=iip
393 st8 [r31]=r18,16 // ndirty
394 st8 [r30]=r19,16 // cfm
400 st8 [r31]=r22,16 // iip
405 st8 [r30]=r17,24 // ifa
406 st8 [r31]=r18,24 // isr
412 st8.spill [r30]=r2,16 // r2
414 st8.spill [r31]=r3,16 // r3
420 st8.spill [r30]=r8,16 // r8
422 st8.spill [r31]=r9,16 // r9
428 st8.spill [r30]=r10,16 // r10
430 st8.spill [r31]=r11,16 // r11
436 st8.spill [r30]=r14 // r14
438 st8.spill [r31]=r15 // r15
449 st8.spill [r2]=r16,16 // r16
451 st8.spill [r3]=r17,16 // r17
457 st8.spill [r2]=r18,16 // r18
459 st8.spill [r3]=r19,16 // r19
465 st8.spill [r2]=r20,16 // r20
467 st8.spill [r3]=r21,16 // r21
473 st8.spill [r2]=r22,16 // r22
475 st8.spill [r3]=r23,16 // r23
480 st8.spill [r2]=r24,16 // r24
482 st8.spill [r3]=r25,16 // r25
485 st8.spill [r2]=r26,16 // r26
487 st8.spill [r3]=r27,16 // r27
490 st8.spill [r2]=r28,16 // r28
492 st8.spill [r3]=r29,16 // r29
495 st8.spill [r2]=r30,16 // r30
497 st8.spill [r3]=r31,16 // r31
501 st8 [r2]=r14,16 // b6
507 st8 [r3]=r15,16 // b7
513 st8 [r2]=r16,16 // ccv
514 st8 [r3]=r10,16 // csd
519 st8 [r2]=r11,24 // ssd
525 stf.spill [r3]=f6,32 // f6
526 stf.spill [r2]=f7,32 // f7
528 stf.spill [r3]=f8,32 // f8
529 stf.spill [r2]=f9,32 // f9
531 stf.spill [r3]=f10,32 // f10
532 stf.spill [r2]=f11,32 // f11
534 stf.spill [r3]=f12,32 // f12
535 stf.spill [r2]=f13,32 // f13
537 stf.spill [r3]=f14 // f14
538 stf.spill [r2]=f15 // f15
547 ssm psr.dt|psr.ic|psr.dfh	// trapframe complete: re-enable data translation and interruption collection
560 * exception_restore: restore interrupted state
563 * sp+16 trapframe pointer
565 ENTRY_NOPROFILE(exception_restore, 0)
573 // The next instruction can fault. Let it be...
577 add r8=SIZEOF_SPECIAL+16,r9	// address of the scratch-unat slot saved after the specials
580 add r2=SIZEOF_TRAPFRAME-16,r9	// r2/r3 walk the trapframe back-to-front
581 add r3=SIZEOF_TRAPFRAME-32,r9
585 ldf.fill f15=[r2],-32 // f15
586 ldf.fill f14=[r3],-32 // f14
591 ldf.fill f13=[r2],-32 // f13
592 ldf.fill f12=[r3],-32 // f12
597 ldf.fill f11=[r2],-32 // f11
598 ldf.fill f10=[r3],-32 // f10
603 ldf.fill f9=[r2],-32 // f9
604 ldf.fill f8=[r3],-32 // f8
609 ldf.fill f7=[r2],-24 // f7
610 ldf.fill f6=[r3],-16 // f6
615 ld8 r8=[r8] // unat (after)
622 ld8 r10=[r2],-16 // ssd
623 ld8 r11=[r3],-16 // csd
628 ld8 r14=[r2],-16 // ccv
629 ld8 r15=[r3],-16 // b7
634 ld8 r8=[r2],-16 // b6
639 ld8.fill r31=[r3],-16 // r31
640 ld8.fill r30=[r2],-16 // r30
645 ld8.fill r29=[r3],-16 // r29
646 ld8.fill r28=[r2],-16 // r28
648 ld8.fill r27=[r3],-16 // r27
649 ld8.fill r26=[r2],-16 // r26
651 ld8.fill r25=[r3],-16 // r25
652 ld8.fill r24=[r2],-16 // r24
654 ld8.fill r23=[r3],-16 // r23
655 ld8.fill r22=[r2],-16 // r22
657 ld8.fill r21=[r3],-16 // r21
658 ld8.fill r20=[r2],-16 // r20
660 ld8.fill r19=[r3],-16 // r19
661 ld8.fill r18=[r2],-16 // r18
665 ld8.fill r17=[r3],-16 // r17
666 ld8.fill r16=[r2],-16 // r16
671 ld8 r16=[r9] // tf_length
676 ld8.fill r15=[r3],-16 // r15
677 ld8.fill r14=[r2],-16 // r14
682 ld8.fill r11=[r3],-16 // r11
683 ld8.fill r10=[r2],-16 // r10
684 add r16=r16,sp // ar.k7
688 ld8.fill r9=[r3],-16 // r9
689 ld8.fill r8=[r2],-16 // r8
694 ld8.fill r3=[r3] // r3
695 ld8.fill r2=[r2] // r2
700 ld8.fill sp=[r31],16 // sp
701 ld8 r17=[r30],16 // unat
703 ld8 r29=[r31],16 // rp
704 ld8 r18=[r30],16 // pr
706 ld8 r28=[r31],16 // pfs
707 ld8 r20=[r30],24 // bspstore
710 ld8 r21=[r31],24 // rnat
713 ld8.fill r26=[r30],16 // tp
714 ld8 r22=[r31],16 // rsc
718 ld8 r23=[r30],16 // fpsr
719 ld8 r24=[r31],16 // psr
724 ld8.fill r1=[r30],16 // gp
725 ld8 r27=[r31],16 // ndirty
726 cmp.le p14,p15=IA64_VM_MINKERN_REGION,r28
736 // Switch register stack
737 alloc r30=ar.pfs,0,0,0,0 // discard current frame
738 shl r31=r27,16 // value for ar.rsc
742 // The loadrs can fault if the backing store is not currently
743 // mapped. We assured forward progress by getting everything we
744 // need from the trapframe so that we don't care if the CPU
745 // purges that translation when it needs to insert a new one for
746 // the backing store.
748 mov ar.rsc=r31 // setup for loadrs
750 addl r29=NTLBRT_RESTORE,r0 // 22-bit restart token
759 exception_restore_restart:
763 loadrs // load user regs
764 mov r29=0 // Clobber restart token
771 dep r31=0,r31,0,13 // 8KB aligned
798 END(exception_restore)
801 * Call exception_save_regs to preserve the interrupted state in a
802 * trapframe. Note that we don't use a call instruction because we
803 * must be careful not to lose track of the RSE state. We then call
804 * trap() with the value of _n_ as an argument to handle the
805 * exception. We arrange for trap() to return to exception_restore
806 * which will restore the interrupted state before executing an rfi to
 * the interrupted code. _ifa_ selects which control register to save
 * as the trapframe's "ifa" (cr.ifa normally, cr.iim for e.g. the
 * Speculation and IA-32 intercept vectors).
 * NOTE(review): several continuation lines of this macro are elided;
 * comments cannot be placed inside the \-continuations.
809 #define CALL(_func_, _n_, _ifa_) \
813 br.sptk exception_save ;; \
816 alloc r15=ar.pfs,0,0,2,0 ;; \
823 br.call.sptk rp=_func_ ;; \
828 br.sptk exception_restore ;; \
/*
 * IVT_ENTRY/IVT_END: open and close one slot of the interrupt vector
 * table. IVT_ENTRY places the handler at the fixed architectural offset
 * inside ia64_vector_table (via .org) and exports the ivt_<name> symbol
 * with SVR4 'I' unwind ABI annotation.
 * NOTE(review): continuation lines of both macros are elided here.
 */
831 #define IVT_ENTRY(name, offset) \
832 .org ia64_vector_table + offset; \
833 .global ivt_##name; \
836 .unwabi @svr4, 'I'; \
842 #define IVT_END(name) \
845 #ifdef COMPAT_FREEBSD32
846 #define IA32_TRAP ia32_trap	// IA-32 vectors go to the ia32 handler when compat is built in
848 #define IA32_TRAP trap	// otherwise treat IA-32 interruptions as ordinary traps
852 * The IA64 Interrupt Vector Table (IVT) contains 20 slots with 64
853 * bundles per vector and 48 slots with 16 bundles per vector.
859 .global ia64_vector_table
860 .size ia64_vector_table, 32768
// 0x0000: VHPT Translation vector -- no fast path; hand off to trap().
863 IVT_ENTRY(VHPT_Translation, 0x0000)
864 CALL(trap, 0, cr.ifa)
865 IVT_END(VHPT_Translation)
// 0x0400: Instruction TLB vector. Fast path: probe the VHPT entry; on a
// tag match insert the PTE with itc.i. On a miss, walk the per-bucket
// collision chain with data translation off, promote the matching entry
// into the VHPT (marking the old tag invalid first), and insert it.
// If the chain is exhausted (9f), fall through to the Page Not Present
// trap.
867 IVT_ENTRY(Instruction_TLB, 0x0400)
874 add r21=16,r18 // tag
875 add r20=24,r18 // collision chain
877 ld8 r21=[r21] // check VHPT tag
878 ld8 r20=[r20] // bucket head
880 cmp.ne p15,p0=r21,r19	// p15 = VHPT tag mismatch -> search the chain
883 ld8 r21=[r18] // read pte
885 itc.i r21 // insert pte
890 1: rsm psr.dt // turn off data translations
891 dep r20=0,r20,61,3 // convert vhpt ptr to physical
894 ld8 r20=[r20] // first entry
896 2: cmp.eq p15,p0=r0,r20 // done?
897 (p15) br.cond.spnt.few 9f // bail if done
899 add r21=16,r20 // tag location
901 ld8 r21=[r21] // read tag
903 cmp.ne p15,p0=r21,r19 // compare tags
904 (p15) br.cond.sptk.few 3f // if not, read next in chain
906 ld8 r21=[r20] // read pte
913 ld8 r22=[r20] // read rest of pte
915 dep r18=0,r18,61,3 // convert vhpt ptr to physical
917 add r20=16,r18 // address of tag
919 ld8.acq r23=[r20] // read old tag
921 dep r23=-1,r23,63,1 // set ti bit
923 st8.rel [r20]=r23 // store old tag + ti
925 mf // make sure everyone sees
927 st8 [r18]=r21,8 // store pte
931 st8.rel [r18]=r19 // store new tag
933 itc.i r21 // and place in TLB
937 mov pr=r17,0x1ffff // restore predicates
940 3: add r20=24,r20 // next in chain
942 ld8 r20=[r20] // read chain
946 mov pr=r17,0x1ffff // restore predicates
950 CALL(trap, 20, cr.ifa) // Page Not Present trap
951 IVT_END(Instruction_TLB)
// 0x0800: Data TLB vector. Same VHPT fast path / collision-chain walk
// as Instruction_TLB above, but inserts with itc.d. Falls through to
// the Page Not Present trap when the chain is exhausted.
953 IVT_ENTRY(Data_TLB, 0x0800)
960 add r21=16,r18 // tag
961 add r20=24,r18 // collision chain
963 ld8 r21=[r21] // check VHPT tag
964 ld8 r20=[r20] // bucket head
966 cmp.ne p15,p0=r21,r19	// p15 = VHPT tag mismatch -> search the chain
969 ld8 r21=[r18] // read pte
971 itc.d r21 // insert pte
976 1: rsm psr.dt // turn off data translations
977 dep r20=0,r20,61,3 // convert vhpt ptr to physical
980 ld8 r20=[r20] // first entry
982 2: cmp.eq p15,p0=r0,r20 // done?
983 (p15) br.cond.spnt.few 9f // bail if done
985 add r21=16,r20 // tag location
987 ld8 r21=[r21] // read tag
989 cmp.ne p15,p0=r21,r19 // compare tags
990 (p15) br.cond.sptk.few 3f // if not, read next in chain
992 ld8 r21=[r20] // read pte
999 ld8 r22=[r20] // read rest of pte
1001 dep r18=0,r18,61,3 // convert vhpt ptr to physical
1003 add r20=16,r18 // address of tag
1005 ld8.acq r23=[r20] // read old tag
1007 dep r23=-1,r23,63,1 // set ti bit
1009 st8.rel [r20]=r23 // store old tag + ti
1011 mf // make sure everyone sees
1013 st8 [r18]=r21,8 // store pte
1017 st8.rel [r18]=r19 // store new tag
1019 itc.d r21 // and place in TLB
1023 mov pr=r17,0x1ffff // restore predicates
1026 3: add r20=24,r20 // next in chain
1028 ld8 r20=[r20] // read chain
1032 mov pr=r17,0x1ffff // restore predicates
1036 CALL(trap, 20, cr.ifa) // Page Not Present trap
// 0x0c00: Alternate Instruction TLB (miss with VHPT walker disabled or
// not applicable). Synthesizes a PTE directly from the faulting address:
// region 4 (PBVM) is looked up in the PBVM page table (4f path), regions
// 6/7 are identity-mapped with UC/WB memory attributes, and everything
// else (RR0-RR5) is punted to trap() via 9f.
1039 IVT_ENTRY(Alternate_Instruction_TLB, 0x0c00)
1040 mov r16=cr.ifa // where did it happen
1041 mov r18=pr // save predicates
1043 extr.u r17=r16,61,3 // get region number
1044 mov r19=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX
1046 cmp.eq p13,p0=IA64_PBVM_RR,r17 // RR4?
1047 (p13) br.cond.sptk.few 4f
1049 cmp.ge p13,p0=5,r17 // RR0-RR5?
1050 cmp.eq p14,p15=7,r17 // RR7?
1051 (p13) br.cond.spnt.few 9f
1053 (p14) add r19=PTE_MA_WB,r19	// region 7: cacheable (write-back)
1054 (p15) add r19=PTE_MA_UC,r19	// region 6: uncacheable
1055 dep r17=0,r16,50,14 // clear bits above PPN
1057 1: dep r16=r19,r17,0,12 // put pte bits in 0..11
1060 mov pr=r18,0x1ffff // restore predicates
1065 add r19=PTE_MA_WB,r19	// PBVM mappings are write-back
1066 movl r17=IA64_PBVM_BASE
1069 movl r16=IA64_PBVM_PGTBL
1071 extr.u r17=r17,IA64_PBVM_PAGE_SHIFT,61-IA64_PBVM_PAGE_SHIFT	// PBVM page index
1073 shladd r16=r17,3,r16	// address of the 8-byte PBVM page-table entry
1078 9: mov pr=r18,0x1ffff // restore predicates
1079 CALL(trap, 3, cr.ifa)
1080 IVT_END(Alternate_Instruction_TLB)
// 0x1000: Alternate Data TLB. Identical logic to the alternate
// instruction-TLB handler above (PBVM / region 6 / region 7 fast
// paths), but raises trap #4 on failure.
1082 IVT_ENTRY(Alternate_Data_TLB, 0x1000)
1083 mov r16=cr.ifa // where did it happen
1084 mov r18=pr // save predicates
1086 extr.u r17=r16,61,3 // get region number
1087 mov r19=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX
1089 cmp.eq p13,p0=IA64_PBVM_RR,r17 // RR4?
1090 (p13) br.cond.sptk.few 4f
1092 cmp.ge p13,p0=5,r17 // RR0-RR5?
1093 cmp.eq p14,p15=7,r17 // RR7?
1094 (p13) br.cond.spnt.few 9f
1096 (p14) add r19=PTE_MA_WB,r19	// region 7: cacheable (write-back)
1097 (p15) add r19=PTE_MA_UC,r19	// region 6: uncacheable
1098 dep r17=0,r16,50,14 // clear bits above PPN
1100 1: dep r16=r19,r17,0,12 // put pte bits in 0..11
1103 mov pr=r18,0x1ffff // restore predicates
1108 add r19=PTE_MA_WB,r19	// PBVM mappings are write-back
1109 movl r17=IA64_PBVM_BASE
1112 movl r16=IA64_PBVM_PGTBL
1114 extr.u r17=r17,IA64_PBVM_PAGE_SHIFT,61-IA64_PBVM_PAGE_SHIFT	// PBVM page index
1116 shladd r16=r17,3,r16	// address of the 8-byte PBVM page-table entry
1121 9: mov pr=r18,0x1ffff // restore predicates
1122 CALL(trap, 4, cr.ifa)
1123 IVT_END(Alternate_Data_TLB)
1125 IVT_ENTRY(Data_Nested_TLB, 0x1400)
1126 // See exception_save_restart and exception_restore_restart for the
1127 // contexts that may cause a data nested TLB. We can only use the
1128 // banked general registers and predicates, but don't use:
1129 // p14 & p15 - Set in exception save
1130 // r16 & r17 - Arguments to exception save
1131 // r30 - Faulting address (modulo page size)
1132 // We assume r30 has the virtual addresses that relate to the data
1133 // nested TLB fault. The address does not have to be exact, as long
1134 // as it's in the same page. We use physical addressing to avoid
1135 // double nested faults. Since all virtual addresses we encounter
1136 // here are direct mapped region 7 addresses, we have no problem
1137 // constructing physical addresses.
1148 cmp.eq p12,p13=7,r27	// p12 = region 7 (direct-mapped), p13 = other kernel VA
1153 (p12) dep r28=0,r30,0,12	// region 7: physical address is the VA with page offset cleared
1154 (p13) extr.u r28=r30,3*PAGE_SHIFT-8, PAGE_SHIFT-3 // dir L0 index
1158 (p12) add r28=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX+PTE_MA_WB,r28
1159 (p13) movl r27=ia64_kptdir
1164 (p13) extr.u r26=r30,2*PAGE_SHIFT-5, PAGE_SHIFT-3 // dir L1 index
1165 (p12) br.cond.spnt.few 1f
1176 shladd r27=r28,3,r27	// index the L0 directory
1178 ld8 r27=[r27] // dir L1 page
1179 extr.u r28=r30,PAGE_SHIFT,PAGE_SHIFT-5 // pte index
1183 shladd r27=r26,3,r27	// index the L1 directory
1189 ld8 r27=[r27] // pte page
1195 ld8 r28=[r27] // pte
1197 or r28=PTE_DIRTY+PTE_ACCESSED,r28	// pre-set dirty/accessed to avoid further faults
1207 addl r26=NTLBRT_SAVE,r0	// restart tokens (see NTLBRT_* at top of file)
1208 addl r27=NTLBRT_RESTORE,r0
1213 cmp.eq p12,p0=r29,r26	// did we fault inside exception_save?
1214 cmp.eq p13,p0=r29,r27	// ... or inside exception_restore?
1219 (p12) br.cond.sptk.few exception_save_restart
1220 (p13) br.cond.sptk.few exception_restore_restart
1247 IVT_END(Data_Nested_TLB)
// 0x1800/0x1c00: key-miss vectors -- no fast path, straight to trap().
1249 IVT_ENTRY(Instruction_Key_Miss, 0x1800)
1250 CALL(trap, 6, cr.ifa)
1251 IVT_END(Instruction_Key_Miss)
1253 IVT_ENTRY(Data_Key_Miss, 0x1c00)
1254 CALL(trap, 7, cr.ifa)
1255 IVT_END(Data_Key_Miss)
// 0x2000: Dirty Bit vector. Walk the VHPT collision chain (data
// translation off); when the matching PTE is found, set its dirty and
// accessed bits in memory, promote it into the VHPT (invalidating the
// old tag first) and re-insert it with itc.d. Unmatched faults go to
// trap #8.
1257 IVT_ENTRY(Dirty_Bit, 0x2000)
1264 add r20=24,r18 // collision chain
1266 ld8 r20=[r20] // bucket head
1268 rsm psr.dt // turn off data translations
1269 dep r20=0,r20,61,3 // convert vhpt ptr to physical
1272 ld8 r20=[r20] // first entry
1274 1: cmp.eq p15,p0=r0,r20 // done?
1275 (p15) br.cond.spnt.few 9f // bail if done
1277 add r21=16,r20 // tag location
1279 ld8 r21=[r21] // read tag
1281 cmp.ne p15,p0=r21,r19 // compare tags
1282 (p15) br.cond.sptk.few 2f // if not, read next in chain
1284 ld8 r21=[r20] // read pte
1285 mov r22=PTE_DIRTY+PTE_ACCESSED
1287 or r21=r22,r21 // set dirty & access bit
1289 st8 [r20]=r21,8 // store back
1291 ld8 r22=[r20] // read rest of pte
1293 dep r18=0,r18,61,3 // convert vhpt ptr to physical
1295 add r20=16,r18 // address of tag
1297 ld8.acq r23=[r20] // read old tag
1299 dep r23=-1,r23,63,1 // set ti bit
1301 st8.rel [r20]=r23 // store old tag + ti
1303 mf // make sure everyone sees
1305 st8 [r18]=r21,8 // store pte
1309 st8.rel [r18]=r19 // store new tag
1311 itc.d r21 // and place in TLB
1315 mov pr=r17,0x1ffff // restore predicates
1318 2: add r20=24,r20 // next in chain
1320 ld8 r20=[r20] // read chain
1324 mov pr=r17,0x1ffff // restore predicates
1328 CALL(trap, 8, cr.ifa) // die horribly
// 0x2400: Instruction Access Bit vector. Same chain walk as Dirty_Bit,
// but only sets PTE_ACCESSED and re-inserts with itc.i. Unmatched
// faults go to trap #9.
1331 IVT_ENTRY(Instruction_Access_Bit, 0x2400)
1338 add r20=24,r18 // collision chain
1340 ld8 r20=[r20] // bucket head
1342 rsm psr.dt // turn off data translations
1343 dep r20=0,r20,61,3 // convert vhpt ptr to physical
1346 ld8 r20=[r20] // first entry
1348 1: cmp.eq p15,p0=r0,r20 // done?
1349 (p15) br.cond.spnt.few 9f // bail if done
1351 add r21=16,r20 // tag location
1353 ld8 r21=[r21] // read tag
1355 cmp.ne p15,p0=r21,r19 // compare tags
1356 (p15) br.cond.sptk.few 2f // if not, read next in chain
1358 ld8 r21=[r20] // read pte
1359 mov r22=PTE_ACCESSED
1361 or r21=r22,r21 // set accessed bit
1363 st8 [r20]=r21,8 // store back
1365 ld8 r22=[r20] // read rest of pte
1367 dep r18=0,r18,61,3 // convert vhpt ptr to physical
1369 add r20=16,r18 // address of tag
1371 ld8.acq r23=[r20] // read old tag
1373 dep r23=-1,r23,63,1 // set ti bit
1375 st8.rel [r20]=r23 // store old tag + ti
1377 mf // make sure everyone sees
1379 st8 [r18]=r21,8 // store pte
1383 st8.rel [r18]=r19 // store new tag
1385 itc.i r21 // and place in TLB
1389 mov pr=r17,0x1ffff // restore predicates
1390 rfi // walker will retry the access
1392 2: add r20=24,r20 // next in chain
1394 ld8 r20=[r20] // read chain
1398 mov pr=r17,0x1ffff // restore predicates
1402 CALL(trap, 9, cr.ifa)
1403 IVT_END(Instruction_Access_Bit)
// 0x2800: Data Access Bit vector. Same chain walk as above, sets
// PTE_ACCESSED and re-inserts with itc.d. Unmatched faults go to
// trap #10.
1405 IVT_ENTRY(Data_Access_Bit, 0x2800)
1412 add r20=24,r18 // collision chain
1414 ld8 r20=[r20] // bucket head
1416 rsm psr.dt // turn off data translations
1417 dep r20=0,r20,61,3 // convert vhpt ptr to physical
1420 ld8 r20=[r20] // first entry
1422 1: cmp.eq p15,p0=r0,r20 // done?
1423 (p15) br.cond.spnt.few 9f // bail if done
1425 add r21=16,r20 // tag location
1427 ld8 r21=[r21] // read tag
1429 cmp.ne p15,p0=r21,r19 // compare tags
1430 (p15) br.cond.sptk.few 2f // if not, read next in chain
1432 ld8 r21=[r20] // read pte
1433 mov r22=PTE_ACCESSED
1435 or r21=r22,r21 // set accessed bit
1437 st8 [r20]=r21,8 // store back
1439 ld8 r22=[r20] // read rest of pte
1441 dep r18=0,r18,61,3 // convert vhpt ptr to physical
1443 add r20=16,r18 // address of tag
1445 ld8.acq r23=[r20] // read old tag
1447 dep r23=-1,r23,63,1 // set ti bit
1449 st8.rel [r20]=r23 // store old tag + ti
1451 mf // make sure everyone sees
1453 st8 [r18]=r21,8 // store pte
1457 st8.rel [r18]=r19 // store new tag
1459 itc.d r21 // and place in TLB
1463 mov pr=r17,0x1ffff // restore predicates
1464 rfi // walker will retry the access
1466 2: add r20=24,r20 // next in chain
1468 ld8 r20=[r20] // read chain
1472 mov pr=r17,0x1ffff // restore predicates
1476 CALL(trap, 10, cr.ifa)
1477 IVT_END(Data_Access_Bit)
// 0x2c00: Break Instruction (syscalls etc.). Open-coded variant of the
// CALL() macro: save state, allocate a 2-output frame and call trap(),
// then return through exception_restore.
1479 IVT_ENTRY(Break_Instruction, 0x2c00)
1483 br.sptk exception_save
1487 alloc r15=ar.pfs,0,0,2,0
1502 br.call.sptk rp=trap
1508 br.sptk exception_restore
1511 IVT_END(Break_Instruction)
// 0x3000: External Interrupt. Like Break_Instruction, but dispatches to
// ia64_handle_intr with a 1-output frame instead of trap().
1513 IVT_ENTRY(External_Interrupt, 0x3000)
1517 br.sptk exception_save
1521 alloc r15=ar.pfs,0,0,1,0
1529 br.call.sptk rp=ia64_handle_intr
1535 br.sptk exception_restore
1538 IVT_END(External_Interrupt)
// Remaining IVT slots: reserved vectors and faults/traps without a fast
// path. Each one simply saves state and calls trap() (or the IA-32
// handler selected by IA32_TRAP) with the vector-specific trap number;
// cr.iim is passed instead of cr.ifa where the architecture delivers
// the relevant information in iim (Speculation, IA-32 Intercept).
1540 IVT_ENTRY(Reserved_3400, 0x3400)
1541 CALL(trap, 13, cr.ifa)
1542 IVT_END(Reserved_3400)
1544 IVT_ENTRY(Reserved_3800, 0x3800)
1545 CALL(trap, 14, cr.ifa)
1546 IVT_END(Reserved_3800)
1548 IVT_ENTRY(Reserved_3c00, 0x3c00)
1549 CALL(trap, 15, cr.ifa)
1550 IVT_END(Reserved_3c00)
1552 IVT_ENTRY(Reserved_4000, 0x4000)
1553 CALL(trap, 16, cr.ifa)
1554 IVT_END(Reserved_4000)
1556 IVT_ENTRY(Reserved_4400, 0x4400)
1557 CALL(trap, 17, cr.ifa)
1558 IVT_END(Reserved_4400)
1560 IVT_ENTRY(Reserved_4800, 0x4800)
1561 CALL(trap, 18, cr.ifa)
1562 IVT_END(Reserved_4800)
1564 IVT_ENTRY(Reserved_4c00, 0x4c00)
1565 CALL(trap, 19, cr.ifa)
1566 IVT_END(Reserved_4c00)
1568 IVT_ENTRY(Page_Not_Present, 0x5000)
1569 CALL(trap, 20, cr.ifa)
1570 IVT_END(Page_Not_Present)
1572 IVT_ENTRY(Key_Permission, 0x5100)
1573 CALL(trap, 21, cr.ifa)
1574 IVT_END(Key_Permission)
1576 IVT_ENTRY(Instruction_Access_Rights, 0x5200)
1577 CALL(trap, 22, cr.ifa)
1578 IVT_END(Instruction_Access_Rights)
1580 IVT_ENTRY(Data_Access_Rights, 0x5300)
1581 CALL(trap, 23, cr.ifa)
1582 IVT_END(Data_Access_Rights)
1584 IVT_ENTRY(General_Exception, 0x5400)
1585 CALL(trap, 24, cr.ifa)
1586 IVT_END(General_Exception)
1588 IVT_ENTRY(Disabled_FP_Register, 0x5500)
1589 CALL(trap, 25, cr.ifa)
1590 IVT_END(Disabled_FP_Register)
1592 IVT_ENTRY(NaT_Consumption, 0x5600)
1593 CALL(trap, 26, cr.ifa)
1594 IVT_END(NaT_Consumption)
1596 IVT_ENTRY(Speculation, 0x5700)
1597 CALL(trap, 27, cr.iim)
1598 IVT_END(Speculation)
1600 IVT_ENTRY(Reserved_5800, 0x5800)
1601 CALL(trap, 28, cr.ifa)
1602 IVT_END(Reserved_5800)
1604 IVT_ENTRY(Debug, 0x5900)
1605 CALL(trap, 29, cr.ifa)
1608 IVT_ENTRY(Unaligned_Reference, 0x5a00)
1609 CALL(trap, 30, cr.ifa)
1610 IVT_END(Unaligned_Reference)
1612 IVT_ENTRY(Unsupported_Data_Reference, 0x5b00)
1613 CALL(trap, 31, cr.ifa)
1614 IVT_END(Unsupported_Data_Reference)
1616 IVT_ENTRY(Floating_Point_Fault, 0x5c00)
1617 CALL(trap, 32, cr.ifa)
1618 IVT_END(Floating_Point_Fault)
1620 IVT_ENTRY(Floating_Point_Trap, 0x5d00)
1621 CALL(trap, 33, cr.ifa)
1622 IVT_END(Floating_Point_Trap)
1624 IVT_ENTRY(Lower_Privilege_Transfer_Trap, 0x5e00)
1625 CALL(trap, 34, cr.ifa)
1626 IVT_END(Lower_Privilege_Transfer_Trap)
1628 IVT_ENTRY(Taken_Branch_Trap, 0x5f00)
1629 CALL(trap, 35, cr.ifa)
1630 IVT_END(Taken_Branch_Trap)
1632 IVT_ENTRY(Single_Step_Trap, 0x6000)
1633 CALL(trap, 36, cr.ifa)
1634 IVT_END(Single_Step_Trap)
1636 IVT_ENTRY(Reserved_6100, 0x6100)
1637 CALL(trap, 37, cr.ifa)
1638 IVT_END(Reserved_6100)
1640 IVT_ENTRY(Reserved_6200, 0x6200)
1641 CALL(trap, 38, cr.ifa)
1642 IVT_END(Reserved_6200)
1644 IVT_ENTRY(Reserved_6300, 0x6300)
1645 CALL(trap, 39, cr.ifa)
1646 IVT_END(Reserved_6300)
1648 IVT_ENTRY(Reserved_6400, 0x6400)
1649 CALL(trap, 40, cr.ifa)
1650 IVT_END(Reserved_6400)
1652 IVT_ENTRY(Reserved_6500, 0x6500)
1653 CALL(trap, 41, cr.ifa)
1654 IVT_END(Reserved_6500)
1656 IVT_ENTRY(Reserved_6600, 0x6600)
1657 CALL(trap, 42, cr.ifa)
1658 IVT_END(Reserved_6600)
1660 IVT_ENTRY(Reserved_6700, 0x6700)
1661 CALL(trap, 43, cr.ifa)
1662 IVT_END(Reserved_6700)
1664 IVT_ENTRY(Reserved_6800, 0x6800)
1665 CALL(trap, 44, cr.ifa)
1666 IVT_END(Reserved_6800)
1668 IVT_ENTRY(IA_32_Exception, 0x6900)
1669 CALL(IA32_TRAP, 45, cr.ifa)
1670 IVT_END(IA_32_Exception)
1672 IVT_ENTRY(IA_32_Intercept, 0x6a00)
1673 CALL(IA32_TRAP, 46, cr.iim)
1674 IVT_END(IA_32_Intercept)
1676 IVT_ENTRY(IA_32_Interrupt, 0x6b00)
1677 CALL(IA32_TRAP, 47, cr.ifa)
1678 IVT_END(IA_32_Interrupt)
1680 IVT_ENTRY(Reserved_6c00, 0x6c00)
1681 CALL(trap, 48, cr.ifa)
1682 IVT_END(Reserved_6c00)
1684 IVT_ENTRY(Reserved_6d00, 0x6d00)
1685 CALL(trap, 49, cr.ifa)
1686 IVT_END(Reserved_6d00)
1688 IVT_ENTRY(Reserved_6e00, 0x6e00)
1689 CALL(trap, 50, cr.ifa)
1690 IVT_END(Reserved_6e00)
1692 IVT_ENTRY(Reserved_6f00, 0x6f00)
1693 CALL(trap, 51, cr.ifa)
1694 IVT_END(Reserved_6f00)
1696 IVT_ENTRY(Reserved_7000, 0x7000)
1697 CALL(trap, 52, cr.ifa)
1698 IVT_END(Reserved_7000)
1700 IVT_ENTRY(Reserved_7100, 0x7100)
1701 CALL(trap, 53, cr.ifa)
1702 IVT_END(Reserved_7100)
1704 IVT_ENTRY(Reserved_7200, 0x7200)
1705 CALL(trap, 54, cr.ifa)
1706 IVT_END(Reserved_7200)
1708 IVT_ENTRY(Reserved_7300, 0x7300)
1709 CALL(trap, 55, cr.ifa)
1710 IVT_END(Reserved_7300)
1712 IVT_ENTRY(Reserved_7400, 0x7400)
1713 CALL(trap, 56, cr.ifa)
1714 IVT_END(Reserved_7400)
1716 IVT_ENTRY(Reserved_7500, 0x7500)
1717 CALL(trap, 57, cr.ifa)
1718 IVT_END(Reserved_7500)
1720 IVT_ENTRY(Reserved_7600, 0x7600)
1721 CALL(trap, 58, cr.ifa)
1722 IVT_END(Reserved_7600)
1724 IVT_ENTRY(Reserved_7700, 0x7700)
1725 CALL(trap, 59, cr.ifa)
1726 IVT_END(Reserved_7700)
1728 IVT_ENTRY(Reserved_7800, 0x7800)
1729 CALL(trap, 60, cr.ifa)
1730 IVT_END(Reserved_7800)
1732 IVT_ENTRY(Reserved_7900, 0x7900)
1733 CALL(trap, 61, cr.ifa)
1734 IVT_END(Reserved_7900)
1736 IVT_ENTRY(Reserved_7a00, 0x7a00)
1737 CALL(trap, 62, cr.ifa)
1738 IVT_END(Reserved_7a00)
1740 IVT_ENTRY(Reserved_7b00, 0x7b00)
1741 CALL(trap, 63, cr.ifa)
1742 IVT_END(Reserved_7b00)
1744 IVT_ENTRY(Reserved_7c00, 0x7c00)
1745 CALL(trap, 64, cr.ifa)
1746 IVT_END(Reserved_7c00)
1748 IVT_ENTRY(Reserved_7d00, 0x7d00)
1749 CALL(trap, 65, cr.ifa)
1750 IVT_END(Reserved_7d00)
1752 IVT_ENTRY(Reserved_7e00, 0x7e00)
1753 CALL(trap, 66, cr.ifa)
1754 IVT_END(Reserved_7e00)
1756 IVT_ENTRY(Reserved_7f00, 0x7f00)
1757 CALL(trap, 67, cr.ifa)
1758 IVT_END(Reserved_7f00)