2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <machine/asmacros.h>
36 #include <machine/specialreg.h>
37 #include <machine/pmap.h>
/*
 * NOTE(review): this is a sampled view of the file; instructions between
 * the visible fragments are missing here.  Comments describe only what
 * is visible.
 */
/* pagezero: load loop count = one page in 64-bit words (PAGE_SIZE/8) */
46 movq $PAGE_SIZE/8,%rcx
65 * pagecopy(%rdi=from, %rsi=to)
/* pagecopy: same qword count for a full-page copy */
69 movq $PAGE_SIZE/8,%rcx
87 * The loop takes 29 bytes. Ensure that it doesn't cross a 32-byte
/*
 * Non-temporal stores: write four qwords (32 bytes) per iteration via
 * movnti, bypassing the cache; %rdx indexes from base %rdi.
 * Presumably %rax was loaded from the source page above — not visible
 * in this view; confirm against the full file.
 */
92 movnti %rax,(%rdi,%rdx)
93 movnti %rax,8(%rdi,%rdx)
94 movnti %rax,16(%rdi,%rdx)
95 movnti %rax,24(%rdi,%rdx)
104 * memmove(dst, src, cnt)
106 * Adapted from bcopy written by:
107 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
/*
 * Overlap test: if dst - src < cnt and src < dst, a forward copy would
 * clobber unread source bytes, so the backwards path below is taken.
 */
116 cmpq %rcx,%rax /* overlapping && src < dst? */
/* Forward path: bulk-copy 8 bytes at a time, then mop up the tail. */
119 shrq $3,%rcx /* copy by 64-bit words */
123 andq $7,%rcx /* any bytes left? */
/* Backward path: advance pointers past the end and copy descending. */
137 addq %rcx,%rdi /* copy backwards */
141 andq $7,%rcx /* any fractional bytes? */
/*
 * NOTE(review): comment says "32-bit words" but surrounding code uses
 * 64-bit word copies — verify against the full file.
 */
145 movq %rdx,%rcx /* copy remainder by 32-bit words */
/* Second variant (ERMS?) repeats the overlap test and backward setup. */
164 cmpq %rcx,%rax /* overlapping && src < dst? */
174 addq %rcx,%rdi /* copy backwards */
188 * memcpy(dst, src, len)
191 * Note: memcpy does not support overlapping copies
/* Forward-only copy: qwords first, then the sub-8-byte remainder. */
197 shrq $3,%rcx /* copy by 64-bit words */
201 andq $7,%rcx /* any bytes left? */
223 * memset(dst, c, len)
/*
 * Replicate the fill byte into all 8 lanes of %rax by multiplying with
 * 0x0101010101010101 (each byte of the product equals the low byte).
 */
231 movabs $0x0101010101010101,%rax
262 /* fillw(pat, base, cnt) */
263 /* %rdi,%rsi, %rdx */
275 /*****************************************************************************/
276 /* copyout and fubyte family */
277 /*****************************************************************************/
279 * Access user memory from inside the kernel. These routines should be
280 * the only places that do this.
282 * These routines set curpcb->pcb_onfault for the time they execute. When a
283 * protection violation occurs inside the functions, the trap handler
284 * returns to *curpcb->pcb_onfault instead of the function.
288 * copyout(from_kernel, to_user, len)
291 ENTRY(copyout_nosmap)
/*
 * copyout(from_kernel, to_user, len) — non-SMAP variant.
 * Arm the per-thread fault handler: if a protection violation occurs
 * while touching user memory, the trap handler resumes at copyout_fault.
 */
293 movq PCPU(CURPCB),%rax
294 movq $copyout_fault,PCB_ONFAULT(%rax)
295 testq %rdx,%rdx /* anything to do? */
299 * Check explicitly for non-user addresses. This check is essential
300 * because it prevents usermode from writing into the kernel. We do
301 * not verify anywhere else that the user did not specify a rogue
305 * First, prevent address wrapping.
311 * XXX STOP USING VM_MAXUSER_ADDRESS.
312 * It is an end address, not a max, so every time it is used correctly it
313 * looks like there is an off by one error, and of course it caused an off
314 * by one error in several places.
/* Bound the destination range against the top of user space. */
316 movq $VM_MAXUSER_ADDRESS,%rcx
321 /* bcopy(%rsi, %rdi, %rdx) */
/*
 * copyout_smap variant: same contract; SMAP machines additionally
 * toggle PSL.AC (stac/clac, not visible in this view) around the copy.
 */
338 movq PCPU(CURPCB),%rax
339 /* Trap entry clears PSL.AC */
340 movq $copyout_fault,PCB_ONFAULT(%rax)
341 testq %rdx,%rdx /* anything to do? */
345 * Check explicitly for non-user addresses. If 486 write protection
346 * is being used, this check is essential because we are in kernel
347 * mode so the h/w does not provide any protection against writing
352 * First, prevent address wrapping.
358 * XXX STOP USING VM_MAXUSER_ADDRESS.
359 * It is an end address, not a max, so every time it is used correctly it
360 * looks like there is an off by one error, and of course it caused an off
361 * by one error in several places.
363 movq $VM_MAXUSER_ADDRESS,%rcx
368 /* bcopy(%rsi, %rdi, %rdx) */
/* Success path: clear pcb_onfault; %rax presumably holds 0 here. */
384 movq PCPU(CURPCB),%rdx
385 movq %rax,PCB_ONFAULT(%rdx)
/* copyout_fault: clear the handler; EFAULT return set elsewhere (not visible). */
391 movq PCPU(CURPCB),%rdx
392 movq $0,PCB_ONFAULT(%rdx)
399 * copyin(from_user, to_kernel, len)
/* Non-SMAP variant: arm copyin_fault as the user-access fault handler. */
404 movq PCPU(CURPCB),%rax
405 movq $copyin_fault,PCB_ONFAULT(%rax)
406 testq %rdx,%rdx /* anything to do? */
410 * make sure address is valid
/* Reject source addresses at/above the top of user space. */
415 movq $VM_MAXUSER_ADDRESS,%rcx
422 shrq $3,%rcx /* copy longword-wise */
426 andb $7,%cl /* copy remaining bytes */
/* SMAP variant: identical flow (stac/clac not visible in this view). */
436 movq PCPU(CURPCB),%rax
437 movq $copyin_fault,PCB_ONFAULT(%rax)
438 testq %rdx,%rdx /* anything to do? */
442 * make sure address is valid
447 movq $VM_MAXUSER_ADDRESS,%rcx
454 shrq $3,%rcx /* copy longword-wise */
459 andb $7,%cl /* copy remaining bytes */
/* Success: clear pcb_onfault (%rax presumably 0 here). */
467 movq PCPU(CURPCB),%rdx
468 movq %rax,PCB_ONFAULT(%rdx)
/* copyin_fault: disarm the handler; error return set elsewhere. */
475 movq PCPU(CURPCB),%rdx
476 movq $0,PCB_ONFAULT(%rdx)
482 * casueword32. Compare and set user integer. Returns -1 on fault,
483 * 0 if access was successful. Old value is written to *oldp.
484 * dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
486 ENTRY(casueword32_nosmap)
/* Arm fusufault so a bad user pointer returns -1 instead of panicking. */
488 movq PCPU(CURPCB),%r8
489 movq $fusufault,PCB_ONFAULT(%r8)
/* Bound check: a 4-byte access at %rdi must fit below user-space top. */
491 movq $VM_MAXUSER_ADDRESS-4,%rax
492 cmpq %rax,%rdi /* verify address is valid */
495 movl %esi,%eax /* old */
/* cmpxchgl compares (%rdi) with %eax and stores %ecx on match (lock prefix not visible here). */
499 cmpxchgl %ecx,(%rdi) /* new = %ecx */
502 * The old value is in %eax. If the store succeeded it will be the
503 * value we expected (old) from before the store, otherwise it will
504 * be the current value. Save %eax into %esi to prepare the return
/* %rax presumably 0 (success) here; clears pcb_onfault in one store. */
509 movq %rax,PCB_ONFAULT(%r8)
512 * Access the oldp after the pcb_onfault is cleared, to correctly
513 * catch corrupted pointer.
515 movl %esi,(%rdx) /* oldp = %rdx */
518 END(casueword32_nosmap)
520 ENTRY(casueword32_smap)
/* SMAP variant: same logic, with stac/clac around the user access (not visible). */
522 movq PCPU(CURPCB),%r8
523 movq $fusufault,PCB_ONFAULT(%r8)
525 movq $VM_MAXUSER_ADDRESS-4,%rax
526 cmpq %rax,%rdi /* verify address is valid */
529 movl %esi,%eax /* old */
534 cmpxchgl %ecx,(%rdi) /* new = %ecx */
538 * The old value is in %eax. If the store succeeded it will be the
539 * value we expected (old) from before the store, otherwise it will
540 * be the current value. Save %eax into %esi to prepare the return
545 movq %rax,PCB_ONFAULT(%r8)
548 * Access the oldp after the pcb_onfault is cleared, to correctly
549 * catch corrupted pointer.
551 movl %esi,(%rdx) /* oldp = %rdx */
554 END(casueword32_smap)
557 * casueword. Compare and set user long. Returns -1 on fault,
558 * 0 if access was successful. Old value is written to *oldp.
559 * dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
561 ENTRY(casueword_nosmap)
/* 64-bit CAS on a user address; fusufault armed for the duration. */
563 movq PCPU(CURPCB),%r8
564 movq $fusufault,PCB_ONFAULT(%r8)
/*
 * NOTE(review): bound uses -4 although the access below (cmpxchgq) is
 * 8 bytes wide — looks like it should be VM_MAXUSER_ADDRESS-8; verify
 * against the upstream file before changing.
 */
566 movq $VM_MAXUSER_ADDRESS-4,%rax
567 cmpq %rax,%rdi /* verify address is valid */
570 movq %rsi,%rax /* old */
574 cmpxchgq %rcx,(%rdi) /* new = %rcx */
577 * The old value is in %rax. If the store succeeded it will be the
578 * value we expected (old) from before the store, otherwise it will
579 * be the current value.
583 movq %rax,PCB_ONFAULT(%r8)
587 END(casueword_nosmap)
589 ENTRY(casueword_smap)
/* SMAP variant: same flow; stac/clac not visible in this view. */
591 movq PCPU(CURPCB),%r8
592 movq $fusufault,PCB_ONFAULT(%r8)
/* NOTE(review): same -4 vs. 8-byte-access question as above. */
594 movq $VM_MAXUSER_ADDRESS-4,%rax
595 cmpq %rax,%rdi /* verify address is valid */
598 movq %rsi,%rax /* old */
603 cmpxchgq %rcx,(%rdi) /* new = %rcx */
607 * The old value is in %rax. If the store succeeded it will be the
608 * value we expected (old) from before the store, otherwise it will
609 * be the current value.
613 movq %rax,PCB_ONFAULT(%r8)
620 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
621 * byte from user memory.
622 * addr = %rdi, valp = %rsi
625 ENTRY(fueword_nosmap)
/* 8-byte user fetch; fault handler armed, bound adjusted by access size. */
627 movq PCPU(CURPCB),%rcx
628 movq $fusufault,PCB_ONFAULT(%rcx)
630 movq $VM_MAXUSER_ADDRESS-8,%rax
631 cmpq %rax,%rdi /* verify address is valid */
/* Clear pcb_onfault on success (%rax presumably 0 here). */
636 movq %rax,PCB_ONFAULT(%rcx)
/* fueword_smap: same, with SMAP toggling (not visible in this view). */
644 movq PCPU(CURPCB),%rcx
645 movq $fusufault,PCB_ONFAULT(%rcx)
647 movq $VM_MAXUSER_ADDRESS-8,%rax
648 cmpq %rax,%rdi /* verify address is valid */
655 movq %rax,PCB_ONFAULT(%rcx)
661 ENTRY(fueword32_nosmap)
/* 4-byte user fetch; identical pattern with a -4 bound. */
663 movq PCPU(CURPCB),%rcx
664 movq $fusufault,PCB_ONFAULT(%rcx)
666 movq $VM_MAXUSER_ADDRESS-4,%rax
667 cmpq %rax,%rdi /* verify address is valid */
672 movq %rax,PCB_ONFAULT(%rcx)
676 END(fueword32_nosmap)
678 ENTRY(fueword32_smap)
680 movq PCPU(CURPCB),%rcx
681 movq $fusufault,PCB_ONFAULT(%rcx)
683 movq $VM_MAXUSER_ADDRESS-4,%rax
684 cmpq %rax,%rdi /* verify address is valid */
691 movq %rax,PCB_ONFAULT(%rcx)
697 ENTRY(fuword16_nosmap)
/* 2-byte user fetch; bound is top-of-user-space minus access size. */
699 movq PCPU(CURPCB),%rcx
700 movq $fusufault,PCB_ONFAULT(%rcx)
702 movq $VM_MAXUSER_ADDRESS-2,%rax
707 movq $0,PCB_ONFAULT(%rcx)
/* fuword16_smap variant (stac/clac not visible in this view). */
714 movq PCPU(CURPCB),%rcx
715 movq $fusufault,PCB_ONFAULT(%rcx)
717 movq $VM_MAXUSER_ADDRESS-2,%rax
724 movq $0,PCB_ONFAULT(%rcx)
/* fubyte variants: 1-byte fetch, -1 bound, same disarm-on-success. */
731 movq PCPU(CURPCB),%rcx
732 movq $fusufault,PCB_ONFAULT(%rcx)
734 movq $VM_MAXUSER_ADDRESS-1,%rax
739 movq $0,PCB_ONFAULT(%rcx)
746 movq PCPU(CURPCB),%rcx
747 movq $fusufault,PCB_ONFAULT(%rcx)
749 movq $VM_MAXUSER_ADDRESS-1,%rax
756 movq $0,PCB_ONFAULT(%rcx)
/* fusufault: shared fault landing pad for the fu*/su* family. */
762 /* Fault entry clears PSL.AC */
764 movq PCPU(CURPCB),%rcx
766 movq %rax,PCB_ONFAULT(%rcx)
772 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
774 * addr = %rdi, value = %rsi
/* suword (8-byte store): fault handler armed, bound minus access size. */
778 movq PCPU(CURPCB),%rcx
779 movq $fusufault,PCB_ONFAULT(%rcx)
781 movq $VM_MAXUSER_ADDRESS-8,%rax
782 cmpq %rax,%rdi /* verify address validity */
/* Re-fetch curpcb: %rcx may have been clobbered by the store sequence. */
787 movq PCPU(CURPCB),%rcx
788 movq %rax,PCB_ONFAULT(%rcx)
/* suword_smap variant (stac/clac not visible in this view). */
795 movq PCPU(CURPCB),%rcx
796 movq $fusufault,PCB_ONFAULT(%rcx)
798 movq $VM_MAXUSER_ADDRESS-8,%rax
799 cmpq %rax,%rdi /* verify address validity */
806 movq PCPU(CURPCB),%rcx
807 movq %rax,PCB_ONFAULT(%rcx)
812 ENTRY(suword32_nosmap)
/* 4-byte store variants. */
814 movq PCPU(CURPCB),%rcx
815 movq $fusufault,PCB_ONFAULT(%rcx)
817 movq $VM_MAXUSER_ADDRESS-4,%rax
818 cmpq %rax,%rdi /* verify address validity */
823 movq PCPU(CURPCB),%rcx
824 movq %rax,PCB_ONFAULT(%rcx)
831 movq PCPU(CURPCB),%rcx
832 movq $fusufault,PCB_ONFAULT(%rcx)
834 movq $VM_MAXUSER_ADDRESS-4,%rax
835 cmpq %rax,%rdi /* verify address validity */
842 movq PCPU(CURPCB),%rcx
843 movq %rax,PCB_ONFAULT(%rcx)
848 ENTRY(suword16_nosmap)
/* 2-byte store variants. */
850 movq PCPU(CURPCB),%rcx
851 movq $fusufault,PCB_ONFAULT(%rcx)
853 movq $VM_MAXUSER_ADDRESS-2,%rax
854 cmpq %rax,%rdi /* verify address validity */
859 movq PCPU(CURPCB),%rcx /* restore trashed register */
860 movq %rax,PCB_ONFAULT(%rcx)
867 movq PCPU(CURPCB),%rcx
868 movq $fusufault,PCB_ONFAULT(%rcx)
870 movq $VM_MAXUSER_ADDRESS-2,%rax
871 cmpq %rax,%rdi /* verify address validity */
878 movq PCPU(CURPCB),%rcx /* restore trashed register */
879 movq %rax,PCB_ONFAULT(%rcx)
/* subyte variants: 1-byte store, -1 bound. */
886 movq PCPU(CURPCB),%rcx
887 movq $fusufault,PCB_ONFAULT(%rcx)
889 movq $VM_MAXUSER_ADDRESS-1,%rax
890 cmpq %rax,%rdi /* verify address validity */
896 movq PCPU(CURPCB),%rcx /* restore trashed register */
897 movq %rax,PCB_ONFAULT(%rcx)
904 movq PCPU(CURPCB),%rcx
905 movq $fusufault,PCB_ONFAULT(%rcx)
907 movq $VM_MAXUSER_ADDRESS-1,%rax
908 cmpq %rax,%rdi /* verify address validity */
916 movq PCPU(CURPCB),%rcx /* restore trashed register */
917 movq %rax,PCB_ONFAULT(%rcx)
923 * copyinstr(from, to, maxlen, int *lencopied)
924 * %rdi, %rsi, %rdx, %rcx
926 * copy a string from 'from' to 'to', stop when a 0 character is reached.
927 * return ENAMETOOLONG if string is longer than maxlen, and
928 * EFAULT on protection violations. If lencopied is non-zero,
929 * return the actual length in *lencopied.
931 ENTRY(copyinstr_nosmap)
/* Stash args that %rdx/%rcx will be clobbered for, then swap src/dst. */
933 movq %rdx,%r8 /* %r8 = maxlen */
934 movq %rcx,%r9 /* %r9 = *len */
935 xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
936 movq PCPU(CURPCB),%rcx
937 movq $cpystrflt,PCB_ONFAULT(%rcx)
939 movq $VM_MAXUSER_ADDRESS,%rax
941 /* make sure 'from' is within bounds */
/* Clamp maxlen so the scan cannot run past the end of user space. */
945 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
963 END(copyinstr_nosmap)
965 ENTRY(copyinstr_smap)
/* SMAP variant: same setup; stac/clac not visible in this view. */
967 movq %rdx,%r8 /* %r8 = maxlen */
968 movq %rcx,%r9 /* %r9 = *len */
969 xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
970 movq PCPU(CURPCB),%rcx
971 movq $cpystrflt,PCB_ONFAULT(%rcx)
973 movq $VM_MAXUSER_ADDRESS,%rax
975 /* make sure 'from' is within bounds */
981 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
/* Clamped length exhausted before the copy even starts. */
991 jz copyinstr_toolong_smap
1001 /* Success -- 0 byte reached */
1006 /* set *lencopied and return %eax */
1007 movq PCPU(CURPCB),%rcx
1008 movq $0,PCB_ONFAULT(%rcx)
1017 /* Fault entry clears PSL.AC */
1022 copyinstr_toolong_smap:
/*
 * Disambiguate: if the clamp (not the caller's maxlen) stopped the
 * scan, the string extended past user space -> EFAULT, else ENAMETOOLONG.
 */
1025 /* rdx is zero - return ENAMETOOLONG or EFAULT */
1026 movq $VM_MAXUSER_ADDRESS,%rax
1029 movq $ENAMETOOLONG,%rax
1035 * copystr(from, to, maxlen, int *lencopied)
1036 * %rdi, %rsi, %rdx, %rcx
/* Kernel-to-kernel string copy: no fault handler or bound check needed. */
1040 movq %rdx,%r8 /* %r8 = maxlen */
1052 /* Success -- 0 byte reached */
1057 /* rdx is zero -- return ENAMETOOLONG */
1058 movq $ENAMETOOLONG,%rax
1064 /* set *lencopied and return %rax */
1073 * Handling of special amd64 registers and descriptor tables etc
1075 /* void lgdt(struct region_descriptor *rdp); */
1077 /* reload the descriptor table */
1080 /* flush the prefetch q */
/* Data segments reloaded; on amd64 the %fs/%gs bases live in MSRs. */
1087 movl %eax,%fs /* Beware, use wrmsr to set 64 bit base */
1091 /* reload code selector by turning return into intersegmental return */
1099 /*****************************************************************************/
1100 /* setjump, longjump */
1101 /*****************************************************************************/
/*
 * setjmp: save the callee-saved register set and the return address
 * into the jmp_buf at %rdi (eight consecutive qwords), return 0.
 */
1104 movq %rbx,0(%rdi) /* save rbx */
1105 movq %rsp,8(%rdi) /* save rsp */
1106 movq %rbp,16(%rdi) /* save rbp */
1107 movq %r12,24(%rdi) /* save r12 */
1108 movq %r13,32(%rdi) /* save r13 */
1109 movq %r14,40(%rdi) /* save r14 */
1110 movq %r15,48(%rdi) /* save r15 */
/* Return address sits at the top of the stack in a frameless function. */
1111 movq 0(%rsp),%rdx /* get rta */
1112 movq %rdx,56(%rdi) /* save rip */
1113 xorl %eax,%eax /* return(0); */
/*
 * longjmp: restore the register set saved by setjmp from the jmp_buf at
 * %rdi and resume at the saved %rip by rewriting the return slot.
 * NOTE(review): visible code zeroes %eax ("return(1)" per the comment);
 * the increment to 1 is presumably in a line not visible here — confirm.
 */
1118 movq 0(%rdi),%rbx /* restore rbx */
1119 movq 8(%rdi),%rsp /* restore rsp */
1120 movq 16(%rdi),%rbp /* restore rbp */
1121 movq 24(%rdi),%r12 /* restore r12 */
1122 movq 32(%rdi),%r13 /* restore r13 */
1123 movq 40(%rdi),%r14 /* restore r14 */
1124 movq 48(%rdi),%r15 /* restore r15 */
1125 movq 56(%rdi),%rdx /* get rta */
1126 movq %rdx,0(%rsp) /* put in return frame */
1127 xorl %eax,%eax /* return(1); */
1133 * Support for reading MSRs in the safe manner. (Instead of panic on #gp,
1137 /* int rdmsr_safe(u_int msr, uint64_t *data) */
/* Arm msr_onfault so an invalid MSR read (#GP) returns an error. */
1139 movq PCPU(CURPCB),%r8
1140 movq $msr_onfault,PCB_ONFAULT(%r8)
1142 rdmsr /* Read MSR pointed by %ecx. Returns
1143 hi byte in edx, lo in %eax */
/* Assemble 64-bit result: high half shifted up, low half zero-extended. */
1144 salq $32,%rdx /* sign-shift %rdx left */
1145 movl %eax,%eax /* zero-extend %eax -> %rax */
1149 movq %rax,PCB_ONFAULT(%r8)
1154 * Support for writing MSRs in the safe manner. (Instead of panic on #gp,
1158 /* int wrmsr_safe(u_int msr, uint64_t data) */
/* Arm msr_onfault so an invalid MSR write (#GP) returns an error. */
1160 movq PCPU(CURPCB),%r8
1161 movq $msr_onfault,PCB_ONFAULT(%r8)
1166 wrmsr /* Write MSR pointed by %ecx. Accepts
1167 hi byte in edx, lo in %eax. */
1169 movq %rax,PCB_ONFAULT(%r8)
1174 * MSR operations fault handler
/* msr_onfault: disarm the handler; error return set in lines not visible. */
1178 movq $0,PCB_ONFAULT(%r8)
1184 * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
1185 * Invalidates address space addressed by ucr3, then returns to kcr3.
1186 * Done in assembler to ensure no other memory accesses happen while
1190 ENTRY(pmap_pti_pcid_invalidate)
/* Switch to the user page table, then immediately back to the kernel's. */
1193 movq %rdi,%cr3 /* to user page table */
1194 movq %rsi,%cr3 /* back to kernel */
1199 * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
1200 * Invalidates virtual address va in address space ucr3, then returns to kcr3.
1203 ENTRY(pmap_pti_pcid_invlpg)
/* Same switch pattern; the invlpg itself is in a line not visible here. */
1206 movq %rdi,%cr3 /* to user page table */
1208 movq %rsi,%cr3 /* back to kernel */
1213 * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
1215 * Invalidates virtual addresses between sva and eva in address space ucr3,
1216 * then returns to kcr3.
1219 ENTRY(pmap_pti_pcid_invlrng)
1222 movq %rdi,%cr3 /* to user page table */
/* Walk the range one page at a time (loop branch not visible here). */
1224 addq $PAGE_SIZE,%rdx
1227 movq %rsi,%cr3 /* back to kernel */
/* Assembler macros generating uniquely-numbered labels for IBRS retpoline-style call/return sequences. */
1232 .macro ibrs_seq_label l
1235 .macro ibrs_call_label l
1238 .macro ibrs_seq count
1241 ibrs_call_label %(ll)
1243 ibrs_seq_label %(ll)
1249 /* all callers already saved %rax, %rdx, and %rcx */
1250 ENTRY(handle_ibrs_entry)
/* Skip the MSR write entirely when IBRS mitigation is disabled. */
1251 cmpb $0,hw_ibrs_active(%rip)
1253 movl $MSR_IA32_SPEC_CTRL,%ecx
/* Set IBRS|STIBP in the 64-bit MSR value split across %edx:%eax. */
1255 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1256 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx
/* Remember the MSR is armed so handle_ibrs_exit knows to clear it. */
1258 movb $1,PCPU(IBPB_SET)
1259 testl $CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
1263 END(handle_ibrs_entry)
1265 ENTRY(handle_ibrs_exit)
1266 cmpb $0,PCPU(IBPB_SET)
1268 movl $MSR_IA32_SPEC_CTRL,%ecx
/* Clear the bits set on entry before returning to user mode. */
1270 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1271 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
1273 movb $0,PCPU(IBPB_SET)
1275 END(handle_ibrs_exit)
1277 /* registers-neutral version, but needs stack */
1278 ENTRY(handle_ibrs_exit_rs)
/* Same as handle_ibrs_exit but preserves %rax/%rdx/%rcx on the stack (pushes/pops not visible here). */
1279 cmpb $0,PCPU(IBPB_SET)
1284 movl $MSR_IA32_SPEC_CTRL,%ecx
1286 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1287 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
1292 movb $0,PCPU(IBPB_SET)
1294 END(handle_ibrs_exit_rs)
1299 * Flush L1D cache. Load enough of the data from the kernel text
1300 * to flush existing L1D content.
1302 * N.B. The function does not follow ABI calling conventions, it corrupts %rbx.
1303 * The vmm.ko caller expects that only %rax, %rdx, %rbx, %rcx, %r9, and %rflags
1304 * registers are clobbered. The NMI handler caller only needs %r13 preserved.
/* 64 KiB read window — sized to displace typical L1D contents. */
1307 #define L1D_FLUSH_SIZE (64 * 1024)
/* Negative index counts up toward zero; effective address = %r9 + (L1D_FLUSH_SIZE + %rcx). */
1309 movq $-L1D_FLUSH_SIZE, %rcx
1311 * pass 1: Preload TLB.
1312 * Kernel text is mapped using superpages. TLB preload is
1313 * done for the benefit of older CPUs which split 2M page
1314 * into 4k TLB entries.
1316 1: movb L1D_FLUSH_SIZE(%r9, %rcx), %al
/* One touch per page suffices for TLB warm-up. */
1317 addq $PAGE_SIZE, %rcx
1321 movq $-L1D_FLUSH_SIZE, %rcx
1322 /* pass 2: Read each cache line. */
1323 2: movb L1D_FLUSH_SIZE(%r9, %rcx), %al
1328 #undef L1D_FLUSH_SIZE