2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <machine/asmacros.h>
36 #include <machine/specialreg.h>
37 #include <machine/pmap.h>
/*
 * NOTE(review): fragments of the page zero/copy primitives; loop labels and
 * control flow are elided from this view.
 */
46 movq $PAGE_SIZE/8,%rcx		/* %rcx = 8-byte words in one page */
55 * pagecopy(%rdi=from, %rsi=to)
59 movq $PAGE_SIZE/8,%rcx		/* word count for a full page copy */
77 * The loop takes 29 bytes. Ensure that it doesn't cross a 32-byte
82 movnti %rax,(%rdi,%rdx)		/* non-temporal store, bypasses cache */
83 movnti %rax,8(%rdi,%rdx)
84 movnti %rax,16(%rdi,%rdx)
85 movnti %rax,24(%rdi,%rdx)		/* 4 x 8 = 32 bytes per iteration */
34 * memmove(dst, src, cnt)
35 * Adapted from bcopy written by:
36 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
/*
 * NOTE(review): only scattered lines of memmove/memcpy are visible here;
 * the overlap test below selects a backwards copy when dst overlaps src.
 */
37 cmpq %rcx,%rax /* overlapping && src < dst? */
38 shrq $3,%rcx /* copy by 64-bit words */
39 andq $7,%rcx /* any bytes left? */
40 addq %rcx,%rdi /* copy backwards */
41 andq $7,%rcx /* any fractional bytes? */
42 movq %rdx,%rcx /* copy remainder by 32-bit words */
43 cmpq %rcx,%rax /* overlapping && src < dst? */
44 addq %rcx,%rdi /* copy backwards */
45 * memcpy(dst, src, len)
46 * Note: memcpy does not support overlapping copies
47 shrq $3,%rcx /* copy by 64-bit words */
48 andq $7,%rcx /* any bytes left? */
49 * memset(dst, c, len)
/* replicate the fill byte into all 8 lanes of %rax via multiply-by-ones */
50 movabs $0x0101010101010101,%rax
51 /* fillw(pat, base, cnt) */
52 /* %rdi,%rsi, %rdx */
53 /*****************************************************************************/
54 /* copyout and fubyte family */
55 /*****************************************************************************/
56 * Access user memory from inside the kernel. These routines should be
57 * the only places that do this.
58 * These routines set curpcb->pcb_onfault for the time they execute. When a
59 * protection violation occurs inside the functions, the trap handler
60 * returns to *curpcb->pcb_onfault instead of the function.
61 * copyout(from_kernel, to_user, len)
/*
 * copyout_nosmap: variant used when SMAP is unavailable (no stac/clac).
 * Arms pcb_onfault so a user-page fault resumes at copyout_fault.
 */
62 ENTRY(copyout_nosmap)
63 movq PCPU(CURPCB),%rax			/* %rax = current pcb */
64 movq $copyout_fault,PCB_ONFAULT(%rax)	/* arm fault recovery */
65 testq %rdx,%rdx /* anything to do? */
66 * Check explicitly for non-user addresses. This check is essential
67 * because it prevents usermode from writing into the kernel. We do
68 * not verify anywhere else that the user did not specify a rogue
69 * First, prevent address wrapping.
70 * XXX STOP USING VM_MAXUSER_ADDRESS.
71 * It is an end address, not a max, so every time it is used correctly it
72 * looks like there is an off by one error, and of course it caused an off
73 * by one error in several places.
74 movq $VM_MAXUSER_ADDRESS,%rcx		/* user address-space limit */
75 /* bcopy(%rsi, %rdi, %rdx) */
/*
 * copyout_smap variant (ENTRY elided): same contract, with PSL.AC managed
 * around the user access — presumably via stac/clac; the instructions
 * themselves are elided from this view.
 */
76 movq PCPU(CURPCB),%rax
77 /* Trap entry clears PSL.AC */
78 movq $copyout_fault,PCB_ONFAULT(%rax)
79 testq %rdx,%rdx /* anything to do? */
80 * Check explicitly for non-user addresses. If 486 write protection
81 * is being used, this check is essential because we are in kernel
82 * mode so the h/w does not provide any protection against writing
83 * First, prevent address wrapping.
84 * XXX STOP USING VM_MAXUSER_ADDRESS.
85 * It is an end address, not a max, so every time it is used correctly it
86 * looks like there is an off by one error, and of course it caused an off
87 * by one error in several places.
88 movq $VM_MAXUSER_ADDRESS,%rcx
89 /* bcopy(%rsi, %rdi, %rdx) */
/* success path: clear pcb_onfault (%rax holds the return value) */
90 movq PCPU(CURPCB),%rdx
91 movq %rax,PCB_ONFAULT(%rdx)
/* copyout_fault path (label elided): disarm pcb_onfault before returning */
92 movq PCPU(CURPCB),%rdx
93 movq $0,PCB_ONFAULT(%rdx)
94 * copyin(from_user, to_kernel, len)
/*
 * copyin_nosmap (ENTRY elided): copy len bytes from user space into the
 * kernel; faults resume at copyin_fault via pcb_onfault.
 */
95 movq PCPU(CURPCB),%rax			/* %rax = current pcb */
96 movq $copyin_fault,PCB_ONFAULT(%rax)	/* arm fault recovery */
97 testq %rdx,%rdx /* anything to do? */
98 * make sure address is valid
99 movq $VM_MAXUSER_ADDRESS,%rcx		/* user address-space limit */
100 shrq $3,%rcx /* copy longword-wise */
101 andb $7,%cl /* copy remaining bytes */
/* copyin_smap variant (ENTRY elided): same logic with SMAP gating */
102 movq PCPU(CURPCB),%rax
103 movq $copyin_fault,PCB_ONFAULT(%rax)
104 testq %rdx,%rdx /* anything to do? */
105 * make sure address is valid
106 movq $VM_MAXUSER_ADDRESS,%rcx
107 shrq $3,%rcx /* copy longword-wise */
108 andb $7,%cl /* copy remaining bytes */
/* success path: store return value and disarm pcb_onfault */
109 movq PCPU(CURPCB),%rdx
110 movq %rax,PCB_ONFAULT(%rdx)
/* copyin_fault path (label elided): clear pcb_onfault */
111 movq PCPU(CURPCB),%rdx
112 movq $0,PCB_ONFAULT(%rdx)
113 * casueword32. Compare and set user integer. Returns -1 on fault,
114 * 0 if access was successful. Old value is written to *oldp.
115 * dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
116 ENTRY(casueword32_nosmap)
117 movq PCPU(CURPCB),%r8			/* %r8 = current pcb */
118 movq $fusufault,PCB_ONFAULT(%r8)	/* arm fault recovery */
119 movq $VM_MAXUSER_ADDRESS-4,%rax	/* last valid start for a 4-byte access */
120 cmpq %rax,%rdi /* verify address is valid */
121 movl %esi,%eax /* old */
/* lock prefix presumably elided; cmpxchg requires old in %eax */
122 cmpxchgl %ecx,(%rdi) /* new = %ecx */
123 * The old value is in %eax. If the store succeeded it will be the
124 * value we expected (old) from before the store, otherwise it will
125 * be the current value. Save %eax into %esi to prepare the return
126 movq %rax,PCB_ONFAULT(%r8)		/* disarm (%rax is 0 here — elided setup) */
127 * Access the oldp after the pcb_onfault is cleared, to correctly
128 * catch corrupted pointer.
129 movl %esi,(%rdx) /* oldp = %rdx */
130 END(casueword32_nosmap)
131 ENTRY(casueword32_smap)
/* identical to the nosmap variant apart from SMAP gating (stac/clac elided) */
132 movq PCPU(CURPCB),%r8
133 movq $fusufault,PCB_ONFAULT(%r8)
134 movq $VM_MAXUSER_ADDRESS-4,%rax
135 cmpq %rax,%rdi /* verify address is valid */
136 movl %esi,%eax /* old */
137 cmpxchgl %ecx,(%rdi) /* new = %ecx */
138 * The old value is in %eax. If the store succeeded it will be the
139 * value we expected (old) from before the store, otherwise it will
140 * be the current value. Save %eax into %esi to prepare the return
141 movq %rax,PCB_ONFAULT(%r8)
142 * Access the oldp after the pcb_onfault is cleared, to correctly
143 * catch corrupted pointer.
144 movl %esi,(%rdx) /* oldp = %rdx */
145 END(casueword32_smap)
146 * casueword. Compare and set user long. Returns -1 on fault,
147 * 0 if access was successful. Old value is written to *oldp.
148 * dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
149 ENTRY(casueword_nosmap)
150 movq PCPU(CURPCB),%r8
151 movq $fusufault,PCB_ONFAULT(%r8)
/* NOTE(review): limit uses -4 for an 8-byte cmpxchgq; looks like it should
 * be VM_MAXUSER_ADDRESS-8 — confirm against upstream support.S */
152 movq $VM_MAXUSER_ADDRESS-4,%rax
153 cmpq %rax,%rdi /* verify address is valid */
154 movq %rsi,%rax /* old */
155 cmpxchgq %rcx,(%rdi) /* new = %rcx */
156 * The old value is in %rax. If the store succeeded it will be the
157 * value we expected (old) from before the store, otherwise it will
158 * be the current value.
159 movq %rax,PCB_ONFAULT(%r8)
160 END(casueword_nosmap)
161 ENTRY(casueword_smap)
162 movq PCPU(CURPCB),%r8
163 movq $fusufault,PCB_ONFAULT(%r8)
164 movq $VM_MAXUSER_ADDRESS-4,%rax
165 cmpq %rax,%rdi /* verify address is valid */
166 movq %rsi,%rax /* old */
167 cmpxchgq %rcx,(%rdi) /* new = %rcx */
168 * The old value is in %rax. If the store succeeded it will be the
169 * value we expected (old) from before the store, otherwise it will
170 * be the current value.
171 movq %rax,PCB_ONFAULT(%r8)
172 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
173 * byte from user memory.
174 * addr = %rdi, valp = %rsi
175 ENTRY(fueword_nosmap)
176 movq PCPU(CURPCB),%rcx			/* %rcx = current pcb */
177 movq $fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
178 movq $VM_MAXUSER_ADDRESS-8,%rax	/* last valid start for 8-byte load */
179 cmpq %rax,%rdi /* verify address is valid */
180 movq %rax,PCB_ONFAULT(%rcx)		/* disarm on success path */
/* fueword_smap (ENTRY elided): same with SMAP gating */
181 movq PCPU(CURPCB),%rcx
182 movq $fusufault,PCB_ONFAULT(%rcx)
183 movq $VM_MAXUSER_ADDRESS-8,%rax
184 cmpq %rax,%rdi /* verify address is valid */
185 movq %rax,PCB_ONFAULT(%rcx)
186 ENTRY(fueword32_nosmap)
187 movq PCPU(CURPCB),%rcx
188 movq $fusufault,PCB_ONFAULT(%rcx)
189 movq $VM_MAXUSER_ADDRESS-4,%rax	/* 4-byte access bound */
190 cmpq %rax,%rdi /* verify address is valid */
191 movq %rax,PCB_ONFAULT(%rcx)
192 END(fueword32_nosmap)
193 ENTRY(fueword32_smap)
194 movq PCPU(CURPCB),%rcx
195 movq $fusufault,PCB_ONFAULT(%rcx)
196 movq $VM_MAXUSER_ADDRESS-4,%rax
197 cmpq %rax,%rdi /* verify address is valid */
198 movq %rax,PCB_ONFAULT(%rcx)
/* fuword16_nosmap: fetch a 16-bit word from user memory */
199 ENTRY(fuword16_nosmap)
200 movq PCPU(CURPCB),%rcx
201 movq $fusufault,PCB_ONFAULT(%rcx)
202 movq $VM_MAXUSER_ADDRESS-2,%rax	/* 2-byte access bound */
203 movq $0,PCB_ONFAULT(%rcx)
/* fuword16_smap (ENTRY elided) */
204 movq PCPU(CURPCB),%rcx
205 movq $fusufault,PCB_ONFAULT(%rcx)
206 movq $VM_MAXUSER_ADDRESS-2,%rax
207 movq $0,PCB_ONFAULT(%rcx)
/* fubyte_nosmap (ENTRY elided): fetch a single byte */
208 movq PCPU(CURPCB),%rcx
209 movq $fusufault,PCB_ONFAULT(%rcx)
210 movq $VM_MAXUSER_ADDRESS-1,%rax	/* 1-byte access bound */
211 movq $0,PCB_ONFAULT(%rcx)
/* fubyte_smap (ENTRY elided) */
212 movq PCPU(CURPCB),%rcx
213 movq $fusufault,PCB_ONFAULT(%rcx)
214 movq $VM_MAXUSER_ADDRESS-1,%rax
215 movq $0,PCB_ONFAULT(%rcx)
/* fusufault: common fault handler for the fu*/su* family */
216 /* Fault entry clears PSL.AC */
217 movq PCPU(CURPCB),%rcx
218 movq %rax,PCB_ONFAULT(%rcx)		/* disarm before returning error */
219 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
220 * addr = %rdi, value = %rsi
/* suword_nosmap (ENTRY elided): store a 64-bit word to user memory */
221 movq PCPU(CURPCB),%rcx			/* %rcx = current pcb */
222 movq $fusufault,PCB_ONFAULT(%rcx)	/* arm fault recovery */
223 movq $VM_MAXUSER_ADDRESS-8,%rax	/* 8-byte access bound */
224 cmpq %rax,%rdi /* verify address validity */
225 movq PCPU(CURPCB),%rcx
226 movq %rax,PCB_ONFAULT(%rcx)		/* disarm on success path */
/* suword_smap (ENTRY elided): same with SMAP gating */
227 movq PCPU(CURPCB),%rcx
228 movq $fusufault,PCB_ONFAULT(%rcx)
229 movq $VM_MAXUSER_ADDRESS-8,%rax
230 cmpq %rax,%rdi /* verify address validity */
231 movq PCPU(CURPCB),%rcx
232 movq %rax,PCB_ONFAULT(%rcx)
233 ENTRY(suword32_nosmap)
234 movq PCPU(CURPCB),%rcx
235 movq $fusufault,PCB_ONFAULT(%rcx)
236 movq $VM_MAXUSER_ADDRESS-4,%rax	/* 4-byte access bound */
237 cmpq %rax,%rdi /* verify address validity */
238 movq PCPU(CURPCB),%rcx
239 movq %rax,PCB_ONFAULT(%rcx)
/* suword32_smap (ENTRY elided) */
240 movq PCPU(CURPCB),%rcx
241 movq $fusufault,PCB_ONFAULT(%rcx)
242 movq $VM_MAXUSER_ADDRESS-4,%rax
243 cmpq %rax,%rdi /* verify address validity */
244 movq PCPU(CURPCB),%rcx
245 movq %rax,PCB_ONFAULT(%rcx)
246 ENTRY(suword16_nosmap)
247 movq PCPU(CURPCB),%rcx
248 movq $fusufault,PCB_ONFAULT(%rcx)
249 movq $VM_MAXUSER_ADDRESS-2,%rax	/* 2-byte access bound */
250 cmpq %rax,%rdi /* verify address validity */
251 movq PCPU(CURPCB),%rcx /* restore trashed register */
252 movq %rax,PCB_ONFAULT(%rcx)
/* suword16_smap (ENTRY elided) */
253 movq PCPU(CURPCB),%rcx
254 movq $fusufault,PCB_ONFAULT(%rcx)
255 movq $VM_MAXUSER_ADDRESS-2,%rax
256 cmpq %rax,%rdi /* verify address validity */
257 movq PCPU(CURPCB),%rcx /* restore trashed register */
258 movq %rax,PCB_ONFAULT(%rcx)
/* subyte_nosmap (ENTRY elided): store a single byte */
259 movq PCPU(CURPCB),%rcx
260 movq $fusufault,PCB_ONFAULT(%rcx)
261 movq $VM_MAXUSER_ADDRESS-1,%rax	/* 1-byte access bound */
262 cmpq %rax,%rdi /* verify address validity */
263 movq PCPU(CURPCB),%rcx /* restore trashed register */
264 movq %rax,PCB_ONFAULT(%rcx)
/* subyte_smap (ENTRY elided) */
265 movq PCPU(CURPCB),%rcx
266 movq $fusufault,PCB_ONFAULT(%rcx)
267 movq $VM_MAXUSER_ADDRESS-1,%rax
268 cmpq %rax,%rdi /* verify address validity */
269 movq PCPU(CURPCB),%rcx /* restore trashed register */
270 movq %rax,PCB_ONFAULT(%rcx)
271 * copyinstr(from, to, maxlen, int *lencopied)
272 * %rdi, %rsi, %rdx, %rcx
273 * copy a string from 'from' to 'to', stop when a 0 character is reached.
274 * return ENAMETOOLONG if string is longer than maxlen, and
275 * EFAULT on protection violations. If lencopied is non-zero,
276 * return the actual length in *lencopied.
277 ENTRY(copyinstr_nosmap)
278 movq %rdx,%r8 /* %r8 = maxlen */
279 movq %rcx,%r9 /* %r9 = *len */
280 xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
281 movq PCPU(CURPCB),%rcx			/* %rcx = current pcb */
282 movq $cpystrflt,PCB_ONFAULT(%rcx)	/* arm fault recovery */
283 movq $VM_MAXUSER_ADDRESS,%rax		/* user address-space limit */
284 /* make sure 'from' is within bounds */
285 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
286 END(copyinstr_nosmap)
287 ENTRY(copyinstr_smap)
/* identical to the nosmap variant apart from SMAP gating (stac/clac elided) */
288 movq %rdx,%r8 /* %r8 = maxlen */
289 movq %rcx,%r9 /* %r9 = *len */
290 xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
291 movq PCPU(CURPCB),%rcx
292 movq $cpystrflt,PCB_ONFAULT(%rcx)
293 movq $VM_MAXUSER_ADDRESS,%rax
294 /* make sure 'from' is within bounds */
295 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
296 jz copyinstr_toolong_smap		/* maxlen exhausted before NUL */
297 /* Success -- 0 byte reached */
298 /* set *lencopied and return %eax */
299 movq PCPU(CURPCB),%rcx
300 movq $0,PCB_ONFAULT(%rcx)		/* disarm fault recovery */
/* cpystrflt fault path (label elided) */
301 /* Fault entry clears PSL.AC */
302 copyinstr_toolong_smap:
303 /* rdx is zero - return ENAMETOOLONG or EFAULT */
304 movq $VM_MAXUSER_ADDRESS,%rax
305 movq $ENAMETOOLONG,%rax
306 * copystr(from, to, maxlen, int *lencopied)
307 * %rdi, %rsi, %rdx, %rcx
/* copystr: kernel-to-kernel variant, no user-address checks needed */
308 movq %rdx,%r8 /* %r8 = maxlen */
309 /* Success -- 0 byte reached */
310 /* rdx is zero -- return ENAMETOOLONG */
311 movq $ENAMETOOLONG,%rax
312 /* set *lencopied and return %rax */
313 * Handling of special amd64 registers and descriptor tables etc
314 /* void lgdt(struct region_descriptor *rdp); */
315 /* reload the descriptor table */
316 /* flush the prefetch q */
/* data/fs segment reloads; 64-bit FS base must go through wrmsr, not %fs */
317 movl %eax,%fs /* Beware, use wrmsr to set 64 bit base */
318 /* reload code selector by turning return into intersegmental return */
319 /*****************************************************************************/
320 /* setjump, longjump */
321 /*****************************************************************************/
/*
 * setjmp(%rdi = jmp_buf): save callee-saved registers, stack pointer and
 * return address into the buffer; returns 0 on the direct path.
 */
322 movq %rbx,0(%rdi) /* save rbx */
323 movq %rsp,8(%rdi) /* save rsp */
324 movq %rbp,16(%rdi) /* save rbp */
325 movq %r12,24(%rdi) /* save r12 */
326 movq %r13,32(%rdi) /* save r13 */
327 movq %r14,40(%rdi) /* save r14 */
328 movq %r15,48(%rdi) /* save r15 */
329 movq 0(%rsp),%rdx /* get rta */
330 movq %rdx,56(%rdi) /* save rip */
331 xorl %eax,%eax /* return(0); */
/*
 * longjmp(%rdi = jmp_buf): restore the saved context and resume at the
 * saved return address; "returns" 1 from the corresponding setjmp.
 * NOTE(review): comment on the last line says return(1) but the visible
 * instruction zeroes %eax — the increment presumably sits in an elided line.
 */
332 movq 0(%rdi),%rbx /* restore rbx */
333 movq 8(%rdi),%rsp /* restore rsp */
334 movq 16(%rdi),%rbp /* restore rbp */
335 movq 24(%rdi),%r12 /* restore r12 */
336 movq 32(%rdi),%r13 /* restore r13 */
337 movq 40(%rdi),%r14 /* restore r14 */
338 movq 48(%rdi),%r15 /* restore r15 */
339 movq 56(%rdi),%rdx /* get rta */
340 movq %rdx,0(%rsp) /* put in return frame */
341 xorl %eax,%eax /* return(1); */
342 * Support for reading MSRs in the safe manner. (Instead of panic on #gp,
343 /* int rdmsr_safe(u_int msr, uint64_t *data) */
344 movq PCPU(CURPCB),%r8			/* %r8 = current pcb */
345 movq $msr_onfault,PCB_ONFAULT(%r8)	/* catch #GP from invalid MSR */
346 rdmsr /* Read MSR pointed by %ecx. Returns
347 hi byte in edx, lo in %eax */
/* merge edx:eax into a single 64-bit value in %rax */
348 salq $32,%rdx /* sign-shift %rdx left */
349 movl %eax,%eax /* zero-extend %eax -> %rax */
350 movq %rax,PCB_ONFAULT(%r8)		/* disarm fault handler */
351 * Support for writing MSRs in the safe manner. (Instead of panic on #gp,
352 /* int wrmsr_safe(u_int msr, uint64_t data) */
353 movq PCPU(CURPCB),%r8
354 movq $msr_onfault,PCB_ONFAULT(%r8)
355 wrmsr /* Write MSR pointed by %ecx. Accepts
356 hi byte in edx, lo in %eax. */
357 movq %rax,PCB_ONFAULT(%r8)
358 * MSR operations fault handler
/* msr_onfault (label elided): clear pcb_onfault, error return follows */
359 movq $0,PCB_ONFAULT(%r8)
360 * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
361 * Invalidates address space addressed by ucr3, then returns to kcr3.
362 * Done in assembler to ensure no other memory accesses happen while
363 ENTRY(pmap_pti_pcid_invalidate)
364 movq %rdi,%cr3 /* to user page table */
365 movq %rsi,%cr3 /* back to kernel */
366 * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
367 * Invalidates virtual address va in address space ucr3, then returns to kcr3.
368 ENTRY(pmap_pti_pcid_invlpg)
369 movq %rdi,%cr3 /* to user page table */
/* invlpg of %rdx presumably sits in an elided line between these two */
370 movq %rsi,%cr3 /* back to kernel */
371 * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
372 * Invalidates virtual addresses between sva and eva in address space ucr3,
373 * then returns to kcr3.
374 ENTRY(pmap_pti_pcid_invlrng)
375 movq %rdi,%cr3 /* to user page table */
376 addq $PAGE_SIZE,%rdx			/* advance to next page in range */
377 movq %rsi,%cr3 /* back to kernel */
/* assembler macros that build the IBRS retpoline-style call/return sequence */
378 .macro ibrs_seq_label l
379 .macro ibrs_call_label l
380 .macro ibrs_seq count
381 ibrs_call_label %(ll)
382 ibrs_seq_label %(ll)
383 /* all callers already saved %rax, %rdx, and %rcx */
/*
 * handle_ibrs_entry: on kernel entry, set the IBRS/STIBP bits in
 * MSR_IA32_SPEC_CTRL when the hw_ibrs_active knob is enabled.
 */
384 ENTRY(handle_ibrs_entry)
385 cmpb $0,hw_ibrs_active(%rip)		/* IBRS mitigation enabled? */
386 movl $MSR_IA32_SPEC_CTRL,%ecx		/* MSR index for wrmsr */
387 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
388 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx
389 movb $1,PCPU(IBPB_SET)			/* remember mitigation is armed */
390 testl $CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
391 END(handle_ibrs_entry)
/*
 * handle_ibrs_exit: on return to user mode, clear the IBRS/STIBP bits
 * if they were set on entry (PCPU(IBPB_SET) non-zero).
 */
392 ENTRY(handle_ibrs_exit)
393 cmpb $0,PCPU(IBPB_SET)			/* anything to undo? */
394 movl $MSR_IA32_SPEC_CTRL,%ecx
395 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
396 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
397 movb $0,PCPU(IBPB_SET)
398 END(handle_ibrs_exit)
399 /* registers-neutral version, but needs stack */
400 ENTRY(handle_ibrs_exit_rs)
401 cmpb $0,PCPU(IBPB_SET)
/* %rax/%rcx/%rdx presumably saved on the stack in elided lines */
402 movl $MSR_IA32_SPEC_CTRL,%ecx
403 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
404 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
405 movb $0,PCPU(IBPB_SET)
406 END(handle_ibrs_exit_rs)
407 * Flush L1D cache. Load enough of the data from the kernel text
408 * to flush existing L1D content.
409 * N.B. The function does not follow ABI calling conventions, it corrupts %rbx.
410 * The vmm.ko caller expects that only %rax, %rdx, %rbx, %rcx, %r9, and %rflags
411 * registers are clobbered. The NMI handler caller only needs %r13 preserved.
412 #define L1D_FLUSH_SIZE (64 * 1024)
413 movq $-L1D_FLUSH_SIZE, %rcx		/* negative offset walks up to 0 */
414 * pass 1: Preload TLB.
415 * Kernel text is mapped using superpages. TLB preload is
416 * done for the benefit of older CPUs which split 2M page
417 * into 4k TLB entries.
418 1: movb L1D_FLUSH_SIZE(%r9, %rcx), %al	/* touch one byte per page */
419 addq $PAGE_SIZE, %rcx
420 movq $-L1D_FLUSH_SIZE, %rcx		/* reset cursor for pass 2 */
421 /* pass 2: Read each cache line. */
422 2: movb L1D_FLUSH_SIZE(%r9, %rcx), %al
423 #undef L1D_FLUSH_SIZE