2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <machine/asmacros.h>
36 #include <machine/specialreg.h>
37 #include <machine/pmap.h>
/* NOTE(review): fragmentary extract -- only scattered lines of the page
 * zero/copy routines are visible here; annotated in place, not restructured. */
	movq	$PAGE_SIZE/8,%rcx		/* loop count: one page as 8-byte words */
/*
 * pagecopy(%rdi=from, %rsi=to)
 */
	movq	$PAGE_SIZE/8,%rcx		/* copy a full page as qwords */
/*
 * The loop takes 29 bytes.  Ensure that it doesn't cross a 32-byte
 * boundary (alignment intent of the original author -- TODO confirm
 * against the complete source).
 */
	movnti	%rax,(%rdi,%rdx)		/* non-temporal stores: fill the page */
	movnti	%rax,8(%rdi,%rdx)		/* without polluting the cache */
	movnti	%rax,16(%rdi,%rdx)
	movnti	%rax,24(%rdi,%rdx)
/*
 * memcmp(b1, b2, len)
 *
 * Fragment: each pair of loads zero-extends one byte from each buffer for
 * comparison; the compares/branches between the pairs are not visible in
 * this extract.
 */
	movzbl	(%rdi,%rcx,1),%eax	/* byte from b1, zero-extended */
	movzbl	(%rsi,%rcx,1),%r8d	/* byte from b2, zero-extended */
	movzbl	(%rdi,%rcx,1),%eax
	movzbl	(%rsi,%rcx,1),%r8d
	movzbl	(%rdi,%rcx,1),%eax
	movzbl	(%rsi,%rcx,1),%r8d
	movzbl	(%rdi,%rcx,1),%eax
	movzbl	(%rsi,%rcx,1),%r8d
/*
 * memmove(dst, src, cnt)
 *
 * Adapted from bcopy written by:
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 *
 * Register state at entry is supposed to be as follows:
 *
 * The macro possibly clobbers the above and: rcx, r8.
 * It does not clobber rax, r10 nor r11.
 *
 * NOTE(review): most of the macro body (labels, branches, .endm) is
 * missing from this extract; only representative lines remain.
 */
.macro MEMMOVE erms overlap begin end
	cmpq	%rcx,%r8 /* overlapping && src < dst? */
	shrq	$3,%rcx			/* copy by 64-bit words */
	andb	$7,%cl			/* any bytes left? */
	andq	$7,%rcx			/* any fractional bytes? */
	movq	%rdx,%rcx		/* copy remainder by 32-bit words */
	MEMMOVE erms=0 overlap=1 begin=MEMMOVE_BEGIN end=MEMMOVE_END
	MEMMOVE erms=1 overlap=1 begin=MEMMOVE_BEGIN end=MEMMOVE_END
/*
 * memcpy(dst, src, len)
 *
 * Note: memcpy does not support overlapping copies
 */
	MEMMOVE erms=0 overlap=0 begin=MEMMOVE_BEGIN end=MEMMOVE_END
	MEMMOVE erms=1 overlap=0 begin=MEMMOVE_BEGIN end=MEMMOVE_END
/*
 * memset(dst, c, len)
 */
	movabs	$0x0101010101010101,%r10 /* multiplier replicating the fill byte
					    into all 8 byte lanes of a qword */
	movq	%r10,-8(%rdi,%rdx)	/* presumably the (possibly overlapping)
					   final qword of the region -- confirm
					   against the complete source */
/* fillw(pat, base, cnt) */
/*       %rdi,%rsi, %rdx */
559 /*****************************************************************************/
560 /* copyout and fubyte family */
561 /*****************************************************************************/
563 * Access user memory from inside the kernel. These routines should be
564 * the only places that do this.
566 * These routines set curpcb->pcb_onfault for the time they execute. When a
567 * protection violation occurs inside the functions, the trap handler
568 * returns to *curpcb->pcb_onfault instead of the function.
/* NOTE(review): the bodies of these macros (stac/clac, bound checks,
 * branches, .endm) are largely missing from this extract. */
.macro SMAP_DISABLE smap		/* allow user access (stac) when SMAP on */
.macro SMAP_ENABLE smap			/* re-enable SMAP protection (clac) */
.macro COPYINOUT_BEGIN
	movq	%rax,PCB_ONFAULT(%r11)	/* arm fault handler for the copy */
.macro COPYINOUT_SMAP_END
/*
 * copyout(from_kernel, to_user, len)
 */
.macro COPYOUT smap erms
	movq	PCPU(CURPCB),%r11	/* r11 = curpcb */
	movq	$copy_fault,PCB_ONFAULT(%r11) /* faults unwind via copy_fault */
	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 */
	/*
	 * First, prevent address wrapping.
	 */
	/*
	 * XXX STOP USING VM_MAXUSER_ADDRESS.
	 * It is an end address, not a max, so every time it is used correctly it
	 * looks like there is an off by one error, and of course it caused an off
	 * by one error in several places.
	 */
	movq	$VM_MAXUSER_ADDRESS,%rcx	/* end of user address space */
	/*
	 * Set return value to zero. Remaining failure mode goes through
	 */
	/*
	 * Set up arguments for MEMMOVE.
	 */
	MEMMOVE erms=\erms overlap=0 begin=COPYINOUT_BEGIN end=COPYINOUT_SMAP_END
	MEMMOVE erms=\erms overlap=0 begin=COPYINOUT_BEGIN end=COPYINOUT_END
/*
 * copyout() variants: {nosmap,smap} x {std,erms}.  All share the COPYOUT
 * macro body; presumably the kernel selects one at boot based on CPU
 * features (SMAP, ERMS) -- confirm against the ifunc resolvers in the
 * complete source.
 */
ENTRY(copyout_nosmap_std)
	COPYOUT smap=0 erms=0		/* no SMAP bracketing, word-wise copy */
END(copyout_nosmap_std)

ENTRY(copyout_smap_std)
	COPYOUT smap=1 erms=0		/* SMAP stac/clac, word-wise copy */
END(copyout_smap_std)

ENTRY(copyout_nosmap_erms)
	COPYOUT smap=0 erms=1		/* no SMAP, rep movsb (ERMS) copy */
END(copyout_nosmap_erms)

ENTRY(copyout_smap_erms)
	COPYOUT smap=1 erms=1		/* SMAP bracketing + ERMS copy */
END(copyout_smap_erms)
/*
 * copyin(from_user, to_kernel, len)
 */
.macro COPYIN smap erms
	movq	PCPU(CURPCB),%r11	/* r11 = curpcb */
	movq	$copy_fault,PCB_ONFAULT(%r11) /* faults unwind via copy_fault */
	/*
	 * make sure address is valid
	 */
	movq	$VM_MAXUSER_ADDRESS,%rcx	/* end of user address space */
	MEMMOVE erms=\erms overlap=0 begin=COPYINOUT_BEGIN end=COPYINOUT_SMAP_END
	MEMMOVE erms=\erms overlap=0 begin=COPYINOUT_BEGIN end=COPYINOUT_END

/* NOTE(review): the COPYIN macro invocations between these ENTRY/END
 * pairs are missing from this extract. */
ENTRY(copyin_nosmap_std)
END(copyin_nosmap_std)

ENTRY(copyin_smap_std)

ENTRY(copyin_nosmap_erms)
END(copyin_nosmap_erms)

ENTRY(copyin_smap_erms)
END(copyin_smap_erms)

	/* Trap entry clears PSL.AC */
	movq	$0,PCB_ONFAULT(%r11)	/* fault path: disarm the handler */
/*
 * casueword32.  Compare and set user integer.  Returns -1 on fault,
 * 0 if access was successful.  Old value is written to *oldp.
 * dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
 */
ENTRY(casueword32_nosmap)
	movq	PCPU(CURPCB),%r8		/* r8 = curpcb */
	movq	$fusufault,PCB_ONFAULT(%r8)	/* faults go to fusufault */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* last valid addr for 4-byte access */
	cmpq	%rax,%rdi /* verify address is valid */

	movl	%esi,%eax /* old */
	cmpxchgl %ecx,(%rdi) /* new = %ecx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.  Save %eax into %esi to prepare the return
	 */
	movq	%rax,PCB_ONFAULT(%r8)	/* presumably %rax == 0 here (zeroed on
					   the elided success path), clearing
					   pcb_onfault -- confirm */

	/*
	 * Access the oldp after the pcb_onfault is cleared, to correctly
	 * catch corrupted pointer.
	 */
	movl	%esi,(%rdx) /* oldp = %rdx */
END(casueword32_nosmap)

ENTRY(casueword32_smap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi /* verify address is valid */

	movl	%esi,%eax /* old */
	cmpxchgl %ecx,(%rdi) /* new = %ecx; SMAP stac/clac bracketing elided
				in this extract */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.  Save %eax into %esi to prepare the return
	 */
	movq	%rax,PCB_ONFAULT(%r8)

	/*
	 * Access the oldp after the pcb_onfault is cleared, to correctly
	 * catch corrupted pointer.
	 */
	movl	%esi,(%rdx) /* oldp = %rdx */
END(casueword32_smap)
/*
 * casueword.  Compare and set user long.  Returns -1 on fault,
 * 0 if access was successful.  Old value is written to *oldp.
 * dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
 */
ENTRY(casueword_nosmap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)	/* faults go to fusufault */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* NOTE(review): cmpxchgq below is
						   an 8-byte access, so this bound
						   looks like it should be -8; a
						   straddling access presumably
						   still faults into fusufault, so
						   likely benign -- confirm */
	cmpq	%rax,%rdi /* verify address is valid */

	movq	%rsi,%rax /* old */
	cmpxchgq %rcx,(%rdi) /* new = %rcx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */
	movq	%rax,PCB_ONFAULT(%r8)	/* presumably %rax == 0 here -- confirm */
END(casueword_nosmap)

ENTRY(casueword_smap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* same -4 vs 8-byte access note
						   as casueword_nosmap above */
	cmpq	%rax,%rdi /* verify address is valid */

	movq	%rsi,%rax /* old */
	cmpxchgq %rcx,(%rdi) /* new = %rcx; SMAP bracketing elided in extract */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */
	movq	%rax,PCB_ONFAULT(%r8)
/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.
 * addr = %rdi, valp = %rsi
 */
ENTRY(fueword_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* faults go to fusufault */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* last valid addr for 8-byte load */
	cmpq	%rax,%rdi /* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)	/* presumably %rax == 0 here, clearing
					   pcb_onfault; the load itself is
					   elided in this extract -- confirm */

/* NOTE(review): ENTRY(fueword_smap) line missing from this extract */
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi /* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)
ENTRY(fueword32_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* faults go to fusufault */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* last valid addr for 4-byte load */
	cmpq	%rax,%rdi /* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)	/* presumably %rax == 0, clears onfault;
					   load elided in extract -- confirm */
END(fueword32_nosmap)

ENTRY(fueword32_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi /* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)
/* NOTE(review): several ENTRY/END lines for the smap and fubyte variants
 * are missing from this extract; fragments annotated in place. */
ENTRY(fuword16_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* faults go to fusufault */

	movq	$VM_MAXUSER_ADDRESS-2,%rax	/* last valid addr, 2-byte load */

	movq	$0,PCB_ONFAULT(%rcx)		/* disarm fault handler */

/* fuword16_smap fragment */
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax

	movq	$0,PCB_ONFAULT(%rcx)

/* fubyte_nosmap fragment */
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax	/* last valid addr, 1-byte load */

	movq	$0,PCB_ONFAULT(%rcx)

/* fubyte_smap fragment */
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax

	movq	$0,PCB_ONFAULT(%rcx)
/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.
 * addr = %rdi, value = %rsi
 */
ENTRY(suword_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* faults go to fusufault */

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* last valid addr, 8-byte store */
	cmpq	%rax,%rdi /* verify address validity */

	movq	PCPU(CURPCB),%rcx	/* reload curpcb (rcx was clobbered on
					   the elided store path -- confirm) */
	movq	%rax,PCB_ONFAULT(%rcx)	/* presumably %rax == 0, clears onfault */

/* NOTE(review): ENTRY(suword_smap) line missing from this extract */
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi /* verify address validity */

	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)
ENTRY(suword32_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* faults go to fusufault */

	movq	$VM_MAXUSER_ADDRESS-4,%rax	/* last valid addr, 4-byte store */
	cmpq	%rax,%rdi /* verify address validity */

	movq	PCPU(CURPCB),%rcx		/* reload curpcb */
	movq	%rax,PCB_ONFAULT(%rcx)		/* presumably %rax == 0 -- confirm */
END(suword32_nosmap)

ENTRY(suword32_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi /* verify address validity */

	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)
ENTRY(suword16_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* faults go to fusufault */

	movq	$VM_MAXUSER_ADDRESS-2,%rax	/* last valid addr, 2-byte store */
	cmpq	%rax,%rdi /* verify address validity */

	movq	PCPU(CURPCB),%rcx /* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)		/* presumably %rax == 0 -- confirm */
END(suword16_nosmap)

ENTRY(suword16_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax
	cmpq	%rax,%rdi /* verify address validity */

	movq	PCPU(CURPCB),%rcx /* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)
ENTRY(subyte_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)	/* faults go to fusufault */

	movq	$VM_MAXUSER_ADDRESS-1,%rax	/* last valid addr, 1-byte store */
	cmpq	%rax,%rdi /* verify address validity */

	movq	PCPU(CURPCB),%rcx /* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)		/* presumably %rax == 0 -- confirm */

/* NOTE(review): ENTRY(subyte_smap) line missing from this extract */
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax
	cmpq	%rax,%rdi /* verify address validity */

	movq	PCPU(CURPCB),%rcx /* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)

	/* Fault entry clears PSL.AC */
	/* fusufault: shared fault handler for the fu*/su* family */
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)	/* disarm handler before returning error */
/*
 * copyinstr(from, to, maxlen, int *lencopied)
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from 'from' to 'to', stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations.  If lencopied is non-zero,
 *	return the actual length in *lencopied.
 */
.macro COPYINSTR smap
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	PCPU(CURPCB),%r9		/* r9 = curpcb */
	movq	$cpystrflt,PCB_ONFAULT(%r9)	/* faults go to cpystrflt */

	movq	$VM_MAXUSER_ADDRESS,%rax	/* end of user address space */

	/* make sure 'from' is within bounds */

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */

	jz	copyinstr_toolong		/* maxlen exhausted (nosmap path) */
	jz	copyinstr_toolong_smap		/* maxlen exhausted (smap path) */

	/* Success -- 0 byte reached */

	/* set *lencopied and return %eax */
	movq	%rax,PCB_ONFAULT(%r9)	/* presumably %rax == 0 here -- confirm */

ENTRY(copyinstr_nosmap)
END(copyinstr_nosmap)

ENTRY(copyinstr_smap)

	/* Fault entry clears PSL.AC */
	/* set *lencopied and return %eax */
	movq	$0,PCB_ONFAULT(%r9)	/* cpystrflt: disarm handler */

copyinstr_toolong_smap:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAXUSER_ADDRESS,%rax	/* distinguish overrun past user VA
						   (EFAULT) from plain overflow */
	movl	$ENAMETOOLONG,%eax
/*
 * copystr(from, to, maxlen, int *lencopied)
 *         %rdi, %rsi, %rdx, %rcx
 *
 * Kernel-to-kernel variant; no user-address checks needed.
 */
	movq	%rdx,%r8			/* %r8 = maxlen */

	/* Success -- 0 byte reached */

	/* set *lencopied and return %rax */

	/* rdx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax
/*
 * Handling of special amd64 registers and descriptor tables etc
 */
/* void lgdt(struct region_descriptor *rdp); */
	/* reload the descriptor table */
	/* flush the prefetch q */
	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */
	/* reload code selector by turning return into intersegmental return */
/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

/* setjmp body fragment: saves the SysV callee-saved set plus rsp and the
 * return address into the jmp_buf at %rdi (ENTRY/ret lines elided). */
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
/* longjmp body fragment: restores the register state saved by setjmp and
 * rewrites the return address on the stack (ENTRY/ret lines elided). */
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
/*
 * Support for reading MSRs in the safe manner.  (Instead of panic on #gp,
 * the fault is caught via pcb_onfault and an error is returned.)
 */
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* #GP unwinds via msr_onfault */

	rdmsr			/* Read MSR pointed by %ecx.  Returns
				   high 32 bits in %edx, low in %eax. */
	salq	$32,%rdx	/* move the high half into the upper 32 bits */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */

	movq	%rax,PCB_ONFAULT(%r8)	/* presumably %rax holds the result;
					   original stores it via rax path --
					   confirm against complete source */
/*
 * Support for writing MSRs in the safe manner.  (Instead of panic on #gp,
 * the fault is caught via pcb_onfault and an error is returned.)
 */
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)	/* #GP unwinds via msr_onfault */

	wrmsr			/* Write MSR pointed by %ecx.  Accepts
				   high 32 bits in %edx, low in %eax. */

	movq	%rax,PCB_ONFAULT(%r8)	/* clear onfault on success */

/*
 * MSR operations fault handler
 */
	movq	$0,PCB_ONFAULT(%r8)	/* msr_onfault: disarm handler */
/*
 * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
 * Invalidates address space addressed by ucr3, then returns to kcr3.
 * Done in assembler to ensure no other memory accesses happen while
 * on the user page table (interrupt-disable lines elided in extract).
 */
ENTRY(pmap_pti_pcid_invalidate)
	movq	%rdi,%cr3	/* to user page table */
	movq	%rsi,%cr3	/* back to kernel */

/*
 * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
 * Invalidates virtual address va in address space ucr3, then returns to kcr3.
 */
ENTRY(pmap_pti_pcid_invlpg)
	movq	%rdi,%cr3	/* to user page table */
	movq	%rsi,%cr3	/* back to kernel */

/*
 * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
 *     vm_offset_t eva);
 * Invalidates virtual addresses between sva and eva in address space ucr3,
 * then returns to kcr3.
 */
ENTRY(pmap_pti_pcid_invlrng)
	movq	%rdi,%cr3	/* to user page table */
	addq	$PAGE_SIZE,%rdx	/* advance one page per loop iteration */
	movq	%rsi,%cr3	/* back to kernel */
/* Retpoline-style IBRS call/jump sequence generators; bodies elided. */
.macro ibrs_seq_label l
.macro ibrs_call_label l
.macro ibrs_seq count
	ibrs_call_label	%(ll)
	ibrs_seq_label	%(ll)

/* all callers already saved %rax, %rdx, and %rcx */
ENTRY(handle_ibrs_entry)
	cmpb	$0,hw_ibrs_active(%rip)		/* IBRS mitigation enabled? */
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	orl	$(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	orl	$(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx
	movb	$1,PCPU(IBPB_SET)		/* remember SPEC_CTRL bits are set */
	testl	$CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
END(handle_ibrs_entry)
ENTRY(handle_ibrs_exit)
	cmpb	$0,PCPU(IBPB_SET)		/* were the bits set on entry? */
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	andl	$~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	andl	$~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
	movb	$0,PCPU(IBPB_SET)		/* bits cleared */
END(handle_ibrs_exit)

/* registers-neutral version, but needs stack */
ENTRY(handle_ibrs_exit_rs)
	cmpb	$0,PCPU(IBPB_SET)
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	andl	$~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	andl	$~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
	movb	$0,PCPU(IBPB_SET)
END(handle_ibrs_exit_rs)
/*
 * Flush L1D cache.  Load enough of the data from the kernel text
 * to flush existing L1D content.
 *
 * N.B. The function does not follow ABI calling conventions, it corrupts %rbx.
 * The vmm.ko caller expects that only %rax, %rdx, %rbx, %rcx, %r9, and %rflags
 * registers are clobbered.  The NMI handler caller only needs %r13 preserved.
 */
#define	L1D_FLUSH_SIZE	(64 * 1024)	/* 64 KiB: sized to cover the L1D */
	movq	$-L1D_FLUSH_SIZE, %rcx	/* negative offset walks up to 0 */
	/*
	 * pass 1: Preload TLB.
	 * Kernel text is mapped using superpages.  TLB preload is
	 * done for the benefit of older CPUs which split 2M page
	 * into 4k TLB entries.
	 */
1:	movb	L1D_FLUSH_SIZE(%r9, %rcx), %al	/* touch one page */
	addq	$PAGE_SIZE, %rcx
	movq	$-L1D_FLUSH_SIZE, %rcx
	/* pass 2: Read each cache line. */
2:	movb	L1D_FLUSH_SIZE(%r9, %rcx), %al
#undef	L1D_FLUSH_SIZE

/* ABI-conforming wrapper; interior lines elided in this extract. */
ENTRY(flush_l1d_sw_abi)
END(flush_l1d_sw_abi)