2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <machine/asmacros.h>
36 #include <machine/specialreg.h>
37 #include <machine/pmap.h>
46 movq $PAGE_SIZE/8,%rcx
65 * pagecopy(%rdi=from, %rsi=to)
69 movq $PAGE_SIZE/8,%rcx
87 * The loop takes 29 bytes. Ensure that it doesn't cross a 32-byte
92 movnti %rax,(%rdi,%rdx)
93 movnti %rax,8(%rdi,%rdx)
94 movnti %rax,16(%rdi,%rdx)
95 movnti %rax,24(%rdi,%rdx)
104 * memmove(dst, src, cnt)
106 * Adapted from bcopy written by:
107 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
116 cmpq %rcx,%r8 /* overlapping && src < dst? */
119 shrq $3,%rcx /* copy by 64-bit words */
123 andq $7,%rcx /* any bytes left? */
135 addq %rcx,%rdi /* copy backwards */
140 andq $7,%rcx /* any fractional bytes? */
145 movq %rdx,%rcx /* copy remainder by 32-bit words */
163 cmpq %rcx,%r8 /* overlapping && src < dst? */
172 addq %rcx,%rdi /* copy backwards */
185 * memcpy(dst, src, len)
188 * Note: memcpy does not support overlapping copies
194 shrq $3,%rcx /* copy by 64-bit words */
198 andq $7,%rcx /* any bytes left? */
220 * memset(dst, c, len)
228 movabs $0x0101010101010101,%rax
259 /* fillw(pat, base, cnt) */
260 /* %rdi,%rsi, %rdx */
272 /*****************************************************************************/
273 /* copyout and fubyte family */
274 /*****************************************************************************/
276 * Access user memory from inside the kernel. These routines should be
277 * the only places that do this.
279 * These routines set curpcb->pcb_onfault for the time they execute. When a
280 * protection violation occurs inside the functions, the trap handler
281 * returns to *curpcb->pcb_onfault instead of the function.
285 * copyout(from_kernel, to_user, len)
288 ENTRY(copyout_nosmap)
290 movq PCPU(CURPCB),%rax
291 movq $copyout_fault,PCB_ONFAULT(%rax)
292 testq %rdx,%rdx /* anything to do? */
296 * Check explicitly for non-user addresses. This check is essential
297 * because it prevents usermode from writing into the kernel. We do
298 * not verify anywhere else that the user did not specify a rogue
302 * First, prevent address wrapping.
308 * XXX STOP USING VM_MAXUSER_ADDRESS.
309 * It is an end address, not a max, so every time it is used correctly it
310 * looks like there is an off by one error, and of course it caused an off
311 * by one error in several places.
313 movq $VM_MAXUSER_ADDRESS,%rcx
318 /* bcopy(%rsi, %rdi, %rdx) */
335 movq PCPU(CURPCB),%rax
336 /* Trap entry clears PSL.AC */
337 movq $copyout_fault,PCB_ONFAULT(%rax)
338 testq %rdx,%rdx /* anything to do? */
342 * Check explicitly for non-user addresses. If 486 write protection
343 * is being used, this check is essential because we are in kernel
344 * mode so the h/w does not provide any protection against writing
349 * First, prevent address wrapping.
355 * XXX STOP USING VM_MAXUSER_ADDRESS.
356 * It is an end address, not a max, so every time it is used correctly it
357 * looks like there is an off by one error, and of course it caused an off
358 * by one error in several places.
360 movq $VM_MAXUSER_ADDRESS,%rcx
365 /* bcopy(%rsi, %rdi, %rdx) */
381 movq PCPU(CURPCB),%rdx
382 movq %rax,PCB_ONFAULT(%rdx)
388 movq PCPU(CURPCB),%rdx
389 movq $0,PCB_ONFAULT(%rdx)
396 * copyin(from_user, to_kernel, len)
401 movq PCPU(CURPCB),%rax
402 movq $copyin_fault,PCB_ONFAULT(%rax)
403 testq %rdx,%rdx /* anything to do? */
407 * make sure address is valid
412 movq $VM_MAXUSER_ADDRESS,%rcx
419 shrq $3,%rcx /* copy longword-wise */
423 andb $7,%cl /* copy remaining bytes */
433 movq PCPU(CURPCB),%rax
434 movq $copyin_fault,PCB_ONFAULT(%rax)
435 testq %rdx,%rdx /* anything to do? */
439 * make sure address is valid
444 movq $VM_MAXUSER_ADDRESS,%rcx
451 shrq $3,%rcx /* copy longword-wise */
456 andb $7,%cl /* copy remaining bytes */
464 movq PCPU(CURPCB),%rdx
465 movq %rax,PCB_ONFAULT(%rdx)
472 movq PCPU(CURPCB),%rdx
473 movq $0,PCB_ONFAULT(%rdx)
479 * casueword32. Compare and set user integer. Returns -1 on fault,
480 * 0 if access was successful. Old value is written to *oldp.
481 * dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
483 ENTRY(casueword32_nosmap)
/*
 * casueword32, non-SMAP variant: compare-and-set a 32-bit word in user
 * memory.  Per the header comment above: dst = %rdi, old = %esi,
 * oldp = %rdx, new = %ecx; returns -1 on fault, 0 if the access was
 * successful, and the previous value of *dst is written to *oldp.
 * NOTE(review): this excerpt elides interior lines (frame setup, the
 * fault branch, lock prefix, and return sequence); comments below
 * describe only the visible instructions.
 */
/* Arm the fault handler: a trap in this window resumes at fusufault. */
485 movq PCPU(CURPCB),%r8
486 movq $fusufault,PCB_ONFAULT(%r8)
/* Bound-check the user address for a 4-byte access (branch elided here). */
488 movq $VM_MAXUSER_ADDRESS-4,%rax
489 cmpq %rax,%rdi /* verify address is valid */
/* cmpxchg compares (%rdi) against %eax, so the expected value goes there. */
492 movl %esi,%eax /* old */
496 cmpxchgl %ecx,(%rdi) /* new = %ecx */
499 * The old value is in %eax. If the store succeeded it will be the
500 * value we expected (old) from before the store, otherwise it will
501 * be the current value. Save %eax into %esi to prepare the return
/* Disarm pcb_onfault; %rax was set by elided lines — presumably zeroed. */
506 movq %rax,PCB_ONFAULT(%r8)
509 * Access the oldp after the pcb_onfault is cleared, to correctly
510 * catch corrupted pointer.
512 movl %esi,(%rdx) /* oldp = %rdx */
515 END(casueword32_nosmap)
517 ENTRY(casueword32_smap)
/*
 * casueword32, SMAP variant: identical contract to casueword32_nosmap
 * (dst = %rdi, old = %esi, oldp = %rdx, new = %ecx; -1 on fault, 0 on
 * success, previous value stored via oldp).  The SMAP open/close
 * instructions (stac/clac) are among the lines elided from this excerpt.
 */
/* Arm the fault handler for the user-memory access window. */
519 movq PCPU(CURPCB),%r8
520 movq $fusufault,PCB_ONFAULT(%r8)
/* Bound-check the user address for a 4-byte access (branch elided here). */
522 movq $VM_MAXUSER_ADDRESS-4,%rax
523 cmpq %rax,%rdi /* verify address is valid */
/* cmpxchg compares (%rdi) against %eax, so the expected value goes there. */
526 movl %esi,%eax /* old */
531 cmpxchgl %ecx,(%rdi) /* new = %ecx */
535 * The old value is in %eax. If the store succeeded it will be the
536 * value we expected (old) from before the store, otherwise it will
537 * be the current value. Save %eax into %esi to prepare the return
/* Disarm pcb_onfault; %rax was set by elided lines — presumably zeroed. */
542 movq %rax,PCB_ONFAULT(%r8)
545 * Access the oldp after the pcb_onfault is cleared, to correctly
546 * catch corrupted pointer.
548 movl %esi,(%rdx) /* oldp = %rdx */
551 END(casueword32_smap)
554 * casueword. Compare and set user long. Returns -1 on fault,
555 * 0 if access was successful. Old value is written to *oldp.
556 * dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
558 ENTRY(casueword_nosmap)
/*
 * casueword, non-SMAP variant: compare-and-set a 64-bit word in user
 * memory.  Per the header comment above: dst = %rdi, old = %rsi,
 * oldp = %rdx, new = %rcx; returns -1 on fault, 0 on success; the
 * previous value of *dst is written to *oldp (store elided from this
 * excerpt).
 */
/* Arm the fault handler for the user-memory access window. */
560 movq PCPU(CURPCB),%r8
561 movq $fusufault,PCB_ONFAULT(%r8)
/*
 * NOTE(review): bound uses VM_MAXUSER_ADDRESS-4 although cmpxchgq below
 * performs an 8-byte access — looks like it should be -8; confirm
 * against upstream before changing.
 */
563 movq $VM_MAXUSER_ADDRESS-4,%rax
564 cmpq %rax,%rdi /* verify address is valid */
/* cmpxchgq compares (%rdi) against %rax, so the expected value goes there. */
567 movq %rsi,%rax /* old */
571 cmpxchgq %rcx,(%rdi) /* new = %rcx */
574 * The old value is in %rax. If the store succeeded it will be the
575 * value we expected (old) from before the store, otherwise it will
576 * be the current value.
/* Disarm pcb_onfault; %rax was set by elided lines — presumably zeroed. */
580 movq %rax,PCB_ONFAULT(%r8)
584 END(casueword_nosmap)
586 ENTRY(casueword_smap)
588 movq PCPU(CURPCB),%r8
589 movq $fusufault,PCB_ONFAULT(%r8)
591 movq $VM_MAXUSER_ADDRESS-4,%rax
592 cmpq %rax,%rdi /* verify address is valid */
595 movq %rsi,%rax /* old */
600 cmpxchgq %rcx,(%rdi) /* new = %rcx */
604 * The old value is in %rax. If the store succeeded it will be the
605 * value we expected (old) from before the store, otherwise it will
606 * be the current value.
610 movq %rax,PCB_ONFAULT(%r8)
617 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
618 * byte from user memory.
619 * addr = %rdi, valp = %rsi
622 ENTRY(fueword_nosmap)
624 movq PCPU(CURPCB),%rcx
625 movq $fusufault,PCB_ONFAULT(%rcx)
627 movq $VM_MAXUSER_ADDRESS-8,%rax
628 cmpq %rax,%rdi /* verify address is valid */
633 movq %rax,PCB_ONFAULT(%rcx)
641 movq PCPU(CURPCB),%rcx
642 movq $fusufault,PCB_ONFAULT(%rcx)
644 movq $VM_MAXUSER_ADDRESS-8,%rax
645 cmpq %rax,%rdi /* verify address is valid */
652 movq %rax,PCB_ONFAULT(%rcx)
658 ENTRY(fueword32_nosmap)
/*
 * fueword32, non-SMAP variant: fetch a 32-bit word from user memory.
 * Per the family header above: addr = %rdi, valp = %rsi.  The actual
 * load and the return sequence are among the lines elided from this
 * excerpt.
 */
/* Arm the fault handler for the user-memory access window. */
660 movq PCPU(CURPCB),%rcx
661 movq $fusufault,PCB_ONFAULT(%rcx)
/* Bound-check the user address for a 4-byte access (branch elided here). */
663 movq $VM_MAXUSER_ADDRESS-4,%rax
664 cmpq %rax,%rdi /* verify address is valid */
/* Disarm pcb_onfault; %rax was set by elided lines — presumably zeroed. */
669 movq %rax,PCB_ONFAULT(%rcx)
673 END(fueword32_nosmap)
675 ENTRY(fueword32_smap)
677 movq PCPU(CURPCB),%rcx
678 movq $fusufault,PCB_ONFAULT(%rcx)
680 movq $VM_MAXUSER_ADDRESS-4,%rax
681 cmpq %rax,%rdi /* verify address is valid */
688 movq %rax,PCB_ONFAULT(%rcx)
694 ENTRY(fuword16_nosmap)
696 movq PCPU(CURPCB),%rcx
697 movq $fusufault,PCB_ONFAULT(%rcx)
699 movq $VM_MAXUSER_ADDRESS-2,%rax
704 movq $0,PCB_ONFAULT(%rcx)
711 movq PCPU(CURPCB),%rcx
712 movq $fusufault,PCB_ONFAULT(%rcx)
714 movq $VM_MAXUSER_ADDRESS-2,%rax
721 movq $0,PCB_ONFAULT(%rcx)
728 movq PCPU(CURPCB),%rcx
729 movq $fusufault,PCB_ONFAULT(%rcx)
731 movq $VM_MAXUSER_ADDRESS-1,%rax
736 movq $0,PCB_ONFAULT(%rcx)
743 movq PCPU(CURPCB),%rcx
744 movq $fusufault,PCB_ONFAULT(%rcx)
746 movq $VM_MAXUSER_ADDRESS-1,%rax
753 movq $0,PCB_ONFAULT(%rcx)
759 /* Fault entry clears PSL.AC */
761 movq PCPU(CURPCB),%rcx
763 movq %rax,PCB_ONFAULT(%rcx)
769 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
771 * addr = %rdi, value = %rsi
775 movq PCPU(CURPCB),%rcx
776 movq $fusufault,PCB_ONFAULT(%rcx)
778 movq $VM_MAXUSER_ADDRESS-8,%rax
779 cmpq %rax,%rdi /* verify address validity */
784 movq PCPU(CURPCB),%rcx
785 movq %rax,PCB_ONFAULT(%rcx)
792 movq PCPU(CURPCB),%rcx
793 movq $fusufault,PCB_ONFAULT(%rcx)
795 movq $VM_MAXUSER_ADDRESS-8,%rax
796 cmpq %rax,%rdi /* verify address validity */
803 movq PCPU(CURPCB),%rcx
804 movq %rax,PCB_ONFAULT(%rcx)
809 ENTRY(suword32_nosmap)
811 movq PCPU(CURPCB),%rcx
812 movq $fusufault,PCB_ONFAULT(%rcx)
814 movq $VM_MAXUSER_ADDRESS-4,%rax
815 cmpq %rax,%rdi /* verify address validity */
820 movq PCPU(CURPCB),%rcx
821 movq %rax,PCB_ONFAULT(%rcx)
828 movq PCPU(CURPCB),%rcx
829 movq $fusufault,PCB_ONFAULT(%rcx)
831 movq $VM_MAXUSER_ADDRESS-4,%rax
832 cmpq %rax,%rdi /* verify address validity */
839 movq PCPU(CURPCB),%rcx
840 movq %rax,PCB_ONFAULT(%rcx)
845 ENTRY(suword16_nosmap)
847 movq PCPU(CURPCB),%rcx
848 movq $fusufault,PCB_ONFAULT(%rcx)
850 movq $VM_MAXUSER_ADDRESS-2,%rax
851 cmpq %rax,%rdi /* verify address validity */
856 movq PCPU(CURPCB),%rcx /* restore trashed register */
857 movq %rax,PCB_ONFAULT(%rcx)
864 movq PCPU(CURPCB),%rcx
865 movq $fusufault,PCB_ONFAULT(%rcx)
867 movq $VM_MAXUSER_ADDRESS-2,%rax
868 cmpq %rax,%rdi /* verify address validity */
875 movq PCPU(CURPCB),%rcx /* restore trashed register */
876 movq %rax,PCB_ONFAULT(%rcx)
883 movq PCPU(CURPCB),%rcx
884 movq $fusufault,PCB_ONFAULT(%rcx)
886 movq $VM_MAXUSER_ADDRESS-1,%rax
887 cmpq %rax,%rdi /* verify address validity */
893 movq PCPU(CURPCB),%rcx /* restore trashed register */
894 movq %rax,PCB_ONFAULT(%rcx)
901 movq PCPU(CURPCB),%rcx
902 movq $fusufault,PCB_ONFAULT(%rcx)
904 movq $VM_MAXUSER_ADDRESS-1,%rax
905 cmpq %rax,%rdi /* verify address validity */
913 movq PCPU(CURPCB),%rcx /* restore trashed register */
914 movq %rax,PCB_ONFAULT(%rcx)
920 * copyinstr(from, to, maxlen, int *lencopied)
921 * %rdi, %rsi, %rdx, %rcx
923 * copy a string from 'from' to 'to', stop when a 0 character is reached.
924 * return ENAMETOOLONG if string is longer than maxlen, and
925 * EFAULT on protection violations. If lencopied is non-zero,
926 * return the actual length in *lencopied.
928 ENTRY(copyinstr_nosmap)
/*
 * copyinstr, non-SMAP variant: copy a NUL-terminated string from user
 * space to the kernel.  Per the header above: from = %rdi, to = %rsi,
 * maxlen = %rdx, lencopied = %rcx.  Returns ENAMETOOLONG / EFAULT as
 * described in the header comment; the copy loop itself is among the
 * lines elided from this excerpt.
 */
/* Stash maxlen and the lencopied pointer before %rdx/%rcx are reused. */
930 movq %rdx,%r8 /* %r8 = maxlen */
931 movq %rcx,%r9 /* %r9 = *len */
/* Swap so the string instructions see %rsi = from (lods) and %rdi = to (stos). */
932 xchgq %rdi,%rsi /* %rsi = from, %rdi = to */
/* Arm the string-copy fault handler. */
933 movq PCPU(CURPCB),%rcx
934 movq $cpystrflt,PCB_ONFAULT(%rcx)
936 movq $VM_MAXUSER_ADDRESS,%rax
938 /* make sure 'from' is within bounds */
942 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
960 END(copyinstr_nosmap)
962 ENTRY(copyinstr_smap)
964 movq %rdx,%r8 /* %r8 = maxlen */
965 movq %rcx,%r9 /* %r9 = *len */
966 xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
967 movq PCPU(CURPCB),%rcx
968 movq $cpystrflt,PCB_ONFAULT(%rcx)
970 movq $VM_MAXUSER_ADDRESS,%rax
972 /* make sure 'from' is within bounds */
978 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
988 jz copyinstr_toolong_smap
998 /* Success -- 0 byte reached */
1003 /* set *lencopied and return %eax */
1004 movq PCPU(CURPCB),%rcx
1005 movq $0,PCB_ONFAULT(%rcx)
1014 /* Fault entry clears PSL.AC */
1019 copyinstr_toolong_smap:
1022 /* rdx is zero - return ENAMETOOLONG or EFAULT */
1023 movq $VM_MAXUSER_ADDRESS,%rax
1026 movq $ENAMETOOLONG,%rax
1032 * copystr(from, to, maxlen, int *lencopied)
1033 * %rdi, %rsi, %rdx, %rcx
1037 movq %rdx,%r8 /* %r8 = maxlen */
1049 /* Success -- 0 byte reached */
1054 /* rdx is zero -- return ENAMETOOLONG */
1055 movq $ENAMETOOLONG,%rax
1061 /* set *lencopied and return %rax */
1070 * Handling of special amd64 registers and descriptor tables etc
1072 /* void lgdt(struct region_descriptor *rdp); */
1074 /* reload the descriptor table */
1077 /* flush the prefetch q */
1084 movl %eax,%fs /* Beware, use wrmsr to set 64 bit base */
1088 /* reload code selector by turning return into intersegmental return */
1096 /*****************************************************************************/
1097 /* setjump, longjump */
1098 /*****************************************************************************/
1101 movq %rbx,0(%rdi) /* save rbx */
1102 movq %rsp,8(%rdi) /* save rsp */
1103 movq %rbp,16(%rdi) /* save rbp */
1104 movq %r12,24(%rdi) /* save r12 */
1105 movq %r13,32(%rdi) /* save r13 */
1106 movq %r14,40(%rdi) /* save r14 */
1107 movq %r15,48(%rdi) /* save r15 */
1108 movq 0(%rsp),%rdx /* get rta */
1109 movq %rdx,56(%rdi) /* save rip */
1110 xorl %eax,%eax /* return(0); */
1115 movq 0(%rdi),%rbx /* restore rbx */
1116 movq 8(%rdi),%rsp /* restore rsp */
1117 movq 16(%rdi),%rbp /* restore rbp */
1118 movq 24(%rdi),%r12 /* restore r12 */
1119 movq 32(%rdi),%r13 /* restore r13 */
1120 movq 40(%rdi),%r14 /* restore r14 */
1121 movq 48(%rdi),%r15 /* restore r15 */
1122 movq 56(%rdi),%rdx /* get rta */
1123 movq %rdx,0(%rsp) /* put in return frame */
1124 xorl %eax,%eax /* return(1); */
1130 * Support for reading MSRs in the safe manner. (Instead of panic on #gp,
1134 /* int rdmsr_safe(u_int msr, uint64_t *data) */
1136 movq PCPU(CURPCB),%r8
1137 movq $msr_onfault,PCB_ONFAULT(%r8)
1139 rdmsr /* Read MSR pointed by %ecx. Returns
1140 high 32 bits in %edx, low 32 bits in %eax */
1141 salq $32,%rdx /* sign-shift %rdx left */
1142 movl %eax,%eax /* zero-extend %eax -> %rax */
1146 movq %rax,PCB_ONFAULT(%r8)
1151 * Support for writing MSRs in the safe manner. (Instead of panic on #gp,
1155 /* int wrmsr_safe(u_int msr, uint64_t data) */
1157 movq PCPU(CURPCB),%r8
1158 movq $msr_onfault,PCB_ONFAULT(%r8)
1163 wrmsr /* Write MSR pointed by %ecx. Accepts
1164 high 32 bits in %edx, low 32 bits in %eax. */
1166 movq %rax,PCB_ONFAULT(%r8)
1171 * MSR operations fault handler
1175 movq $0,PCB_ONFAULT(%r8)
1181 * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
1182 * Invalidates address space addressed by ucr3, then returns to kcr3.
1183 * Done in assembler to ensure no other memory accesses happen while
1187 ENTRY(pmap_pti_pcid_invalidate)
1190 movq %rdi,%cr3 /* to user page table */
1191 movq %rsi,%cr3 /* back to kernel */
1196 * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
1197 * Invalidates virtual address va in address space ucr3, then returns to kcr3.
1200 ENTRY(pmap_pti_pcid_invlpg)
1203 movq %rdi,%cr3 /* to user page table */
1205 movq %rsi,%cr3 /* back to kernel */
1210 * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
1212 * Invalidates virtual addresses between sva and eva in address space ucr3,
1213 * then returns to kcr3.
1216 ENTRY(pmap_pti_pcid_invlrng)
1219 movq %rdi,%cr3 /* to user page table */
1221 addq $PAGE_SIZE,%rdx
1224 movq %rsi,%cr3 /* back to kernel */
1229 .macro ibrs_seq_label l
1232 .macro ibrs_call_label l
1235 .macro ibrs_seq count
1238 ibrs_call_label %(ll)
1240 ibrs_seq_label %(ll)
1246 /* all callers already saved %rax, %rdx, and %rcx */
1247 ENTRY(handle_ibrs_entry)
/*
 * Kernel-entry IBRS handling (Spectre v2 mitigation).  Per the comment
 * above, all callers have already saved %rax, %rdx, and %rcx.  The
 * rdmsr/wrmsr pair and branch targets are among the lines elided from
 * this excerpt.
 */
/* Skip the MSR write entirely when IBRS is administratively disabled. */
1248 cmpb $0,hw_ibrs_active(%rip)
/* Select IA32_SPEC_CTRL for the (elided) rdmsr/wrmsr sequence. */
1250 movl $MSR_IA32_SPEC_CTRL,%ecx
/* Set IBRS|STIBP in the EDX:EAX halves of the MSR value. */
1252 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1253 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx
/* Record per-CPU that the barrier state is set, for handle_ibrs_exit. */
1255 movb $1,PCPU(IBPB_SET)
/* SMEP presence influences the (elided) follow-up path — see upstream. */
1256 testl $CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
1260 END(handle_ibrs_entry)
1262 ENTRY(handle_ibrs_exit)
/*
 * Kernel-exit counterpart of handle_ibrs_entry: clear IBRS/STIBP in
 * IA32_SPEC_CTRL before returning to user mode.  The rdmsr/wrmsr pair
 * and branches are among the lines elided from this excerpt.
 */
/* Nothing to undo if entry never set the per-CPU flag. */
1263 cmpb $0,PCPU(IBPB_SET)
1265 movl $MSR_IA32_SPEC_CTRL,%ecx
/* Clear IBRS|STIBP in the EDX:EAX halves of the MSR value. */
1267 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1268 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
1270 movb $0,PCPU(IBPB_SET)
1272 END(handle_ibrs_exit)
1274 /* registers-neutral version, but needs stack */
1275 ENTRY(handle_ibrs_exit_rs)
/*
 * Register-neutral variant of handle_ibrs_exit (per the comment above):
 * preserves caller registers but needs stack space; the save/restore
 * and rdmsr/wrmsr instructions are among the lines elided from this
 * excerpt.
 */
/* Nothing to undo if entry never set the per-CPU flag. */
1276 cmpb $0,PCPU(IBPB_SET)
1281 movl $MSR_IA32_SPEC_CTRL,%ecx
/* Clear IBRS|STIBP in the EDX:EAX halves of the MSR value. */
1283 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1284 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
1289 movb $0,PCPU(IBPB_SET)
1291 END(handle_ibrs_exit_rs)
1296 * Flush L1D cache. Load enough of the data from the kernel text
1297 * to flush existing L1D content.
1299 * N.B. The function does not follow ABI calling conventions, it corrupts %rbx.
1300 * The vmm.ko caller expects that only %rax, %rdx, %rbx, %rcx, %r9, and %rflags
1301 * registers are clobbered. The NMI handler caller only needs %r13 preserved.
1304 #define L1D_FLUSH_SIZE (64 * 1024)
1306 movq $-L1D_FLUSH_SIZE, %rcx
1308 * pass 1: Preload TLB.
1309 * Kernel text is mapped using superpages. TLB preload is
1310 * done for the benefit of older CPUs which split 2M page
1311 * into 4k TLB entries.
1313 1: movb L1D_FLUSH_SIZE(%r9, %rcx), %al
1314 addq $PAGE_SIZE, %rcx
1318 movq $-L1D_FLUSH_SIZE, %rcx
1319 /* pass 2: Read each cache line. */
1320 2: movb L1D_FLUSH_SIZE(%r9, %rcx), %al
1325 #undef L1D_FLUSH_SIZE