2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <machine/asmacros.h>
36 #include <machine/specialreg.h>
37 #include <machine/pmap.h>
/*
 * Page zero/copy helpers (fragment; loop bodies and labels elided).
 * PAGE_SIZE/8 = number of 64-bit words in one page, used as the
 * word count in %rcx for a rep-string/word-at-a-time loop.
 */
46 movq $PAGE_SIZE/8,%rcx
/*
 * pagecopy: copy one page.  Note the register comment below: %rdi is
 * the SOURCE and %rsi the DESTINATION — reversed from memcpy order.
 */
55 * pagecopy(%rdi=from, %rsi=to)
59 movq $PAGE_SIZE/8,%rcx
/*
 * Non-temporal fill loop: four movnti stores write 32 bytes of %rax
 * per iteration, bypassing the cache (the data will not be re-read
 * soon).  The alignment note explains why the 29-byte loop must not
 * straddle a 32-byte boundary.
 * NOTE(review): the counter update and branch are not visible in this
 * excerpt — presumably %rdx advances by 32 until it reaches 0.
 */
77 * The loop takes 29 bytes. Ensure that it doesn't cross a 32-byte
82 movnti %rax,(%rdi,%rdx)
83 movnti %rax,8(%rdi,%rdx)
84 movnti %rax,16(%rdi,%rdx)
85 movnti %rax,24(%rdi,%rdx)
/*
 * memmove(dst=%rdi, src=%rsi, cnt=%rdx) — overlap-safe copy (fragment).
 * The visible skeleton: detect dst/src overlap with src < dst; if so,
 * copy backwards, otherwise copy forwards 8 bytes at a time with a
 * byte tail.  Branch targets and the rep-string instructions between
 * these lines are elided in this excerpt.
 */
94 * memmove(dst, src, cnt)
96 * Adapted from bcopy written by:
97 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
/* Forward path: overlap test, then bulk 64-bit words + byte remainder. */
106 cmpq %rcx,%rax /* overlapping && src < dst? */
109 shrq $3,%rcx /* copy by 64-bit words */
113 andq $7,%rcx /* any bytes left? */
/* Backward path: point past the end, then copy descending. */
127 addq %rcx,%rdi /* copy backwards */
131 andq $7,%rcx /* any fractional bytes? */
135 movq %rdx,%rcx /* copy remainder by 32-bit words */
/*
 * memcpy(dst=%rdi, src=%rsi, len=%rdx) — non-overlapping copy
 * (fragment): bulk 64-bit words, then up-to-7-byte tail.
 */
148 * memcpy(dst, src, len)
151 * Note: memcpy does not support overlapping copies
157 shrq $3,%rcx /* copy by 64-bit words */
161 andq $7,%rcx /* any bytes left? */
/*
 * memset(dst=%rdi, c=%esi, len=%rdx) fragment: replicate the fill
 * byte into all 8 lanes of %rax by multiplying with 0x0101010101010101.
 */
173 * memset(dst, c, len)
181 movabs $0x0101010101010101,%rax
/* fillw: fill cnt 16-bit words at base with pat (body elided). */
200 /* fillw(pat, base, cnt) */
201 /* %rdi,%rsi, %rdx */
213 /*****************************************************************************/
214 /* copyout and fubyte family */
215 /*****************************************************************************/
217 * Access user memory from inside the kernel. These routines should be
218 * the only places that do this.
220 * These routines set curpcb->pcb_onfault for the time they execute. When a
221 * protection violation occurs inside the functions, the trap handler
222 * returns to *curpcb->pcb_onfault instead of the function.
/*
 * copyout(from_kernel=%rdi, to_user=%rsi, len=%rdx) — non-SMAP variant.
 * Arms pcb_onfault so any page fault during the copy unwinds to
 * copyout_fault (which returns EFAULT) instead of panicking.
 */
226 * copyout(from_kernel, to_user, len)
229 ENTRY(copyout_nosmap)
231 movq PCPU(CURPCB),%rax
232 movq $copyout_fault,PCB_ONFAULT(%rax)
233 testq %rdx,%rdx /* anything to do? */
237 * Check explicitly for non-user addresses. This check is essential
238 * because it prevents usermode from writing into the kernel. We do
239 * not verify anywhere else that the user did not specify a rogue
243 * First, prevent address wrapping.
249 * XXX STOP USING VM_MAXUSER_ADDRESS.
250 * It is an end address, not a max, so every time it is used correctly it
251 * looks like there is an off by one error, and of course it caused an off
252 * by one error in several places.
/* Upper bound for the destination range check (end of user VA space). */
254 movq $VM_MAXUSER_ADDRESS,%rcx
/* Body performs the actual copy; note bcopy-style register order. */
259 /* bcopy(%rsi, %rdi, %rdx) */
/*
 * copyout — SMAP variant (fragment).  Same contract as copyout_nosmap;
 * the stac/clac instructions that open/close the user-access window
 * are elided in this excerpt.  A trap while PSL.AC is set clears it on
 * entry to the kernel, so the fault path need not clac.
 */
276 movq PCPU(CURPCB),%rax
277 /* Trap entry clears PSL.AC */
278 movq $copyout_fault,PCB_ONFAULT(%rax)
279 testq %rdx,%rdx /* anything to do? */
283 * Check explicitly for non-user addresses. If 486 write protection
284 * is being used, this check is essential because we are in kernel
285 * mode so the h/w does not provide any protection against writing
290 * First, prevent address wrapping.
296 * XXX STOP USING VM_MAXUSER_ADDRESS.
297 * It is an end address, not a max, so every time it is used correctly it
298 * looks like there is an off by one error, and of course it caused an off
299 * by one error in several places.
301 movq $VM_MAXUSER_ADDRESS,%rcx
306 /* bcopy(%rsi, %rdi, %rdx) */
/*
 * Common epilogue: disarm pcb_onfault.  %rax here is presumably the
 * return value (0 on success) — the zeroing instruction is elided.
 */
322 movq PCPU(CURPCB),%rdx
323 movq %rax,PCB_ONFAULT(%rdx)
/* copyout_fault path: clear pcb_onfault; EFAULT return is elided. */
329 movq PCPU(CURPCB),%rdx
330 movq $0,PCB_ONFAULT(%rdx)
/*
 * copyin(from_user=%rdi, to_kernel=%rsi, len=%rdx) — both variants,
 * fragment.  Mirror image of copyout: arm pcb_onfault -> copyin_fault,
 * bounds-check the user source against VM_MAXUSER_ADDRESS, then copy
 * 8-byte words plus a byte tail.
 */
337 * copyin(from_user, to_kernel, len)
342 movq PCPU(CURPCB),%rax
343 movq $copyin_fault,PCB_ONFAULT(%rax)
344 testq %rdx,%rdx /* anything to do? */
348 * make sure address is valid
353 movq $VM_MAXUSER_ADDRESS,%rcx
360 shrq $3,%rcx /* copy longword-wise */
364 andb $7,%cl /* copy remaining bytes */
/* SMAP variant (stac/clac elided in this excerpt). */
374 movq PCPU(CURPCB),%rax
375 movq $copyin_fault,PCB_ONFAULT(%rax)
376 testq %rdx,%rdx /* anything to do? */
380 * make sure address is valid
385 movq $VM_MAXUSER_ADDRESS,%rcx
392 shrq $3,%rcx /* copy longword-wise */
397 andb $7,%cl /* copy remaining bytes */
/* Success epilogue: store return value (%rax) and disarm onfault. */
405 movq PCPU(CURPCB),%rdx
406 movq %rax,PCB_ONFAULT(%rdx)
/* copyin_fault: disarm onfault; EFAULT return elided. */
413 movq PCPU(CURPCB),%rdx
414 movq $0,PCB_ONFAULT(%rdx)
420 * casueword32. Compare and set user integer. Returns -1 on fault,
421 * 0 if access was successful. Old value is written to *oldp.
422 * dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
/*
 * Non-SMAP variant.  Arms pcb_onfault -> fusufault, verifies the user
 * address leaves room for a 4-byte access, then performs a cmpxchg on
 * the user word.  NOTE(review): the lock prefix, the ja-to-fusufault
 * branch, and the final ret are elided in this excerpt.
 */
424 ENTRY(casueword32_nosmap)
426 movq PCPU(CURPCB),%r8
427 movq $fusufault,PCB_ONFAULT(%r8)
/* -4: highest address at which a 4-byte access stays in user space. */
429 movq $VM_MAXUSER_ADDRESS-4,%rax
430 cmpq %rax,%rdi /* verify address is valid */
433 movl %esi,%eax /* old */
437 cmpxchgl %ecx,(%rdi) /* new = %ecx */
440 * The old value is in %eax. If the store succeeded it will be the
441 * value we expected (old) from before the store, otherwise it will
442 * be the current value. Save %eax into %esi to prepare the return
/* Disarm onfault; %rax presumably holds the 0 success return here. */
447 movq %rax,PCB_ONFAULT(%r8)
450 * Access the oldp after the pcb_onfault is cleared, to correctly
451 * catch corrupted pointer.
453 movl %esi,(%rdx) /* oldp = %rdx */
456 END(casueword32_nosmap)
/*
 * casueword32 — SMAP variant.  Identical to casueword32_nosmap except
 * for the stac/clac pair around the user access (elided in this
 * excerpt).  Contract: returns -1 on fault, 0 on success; old value
 * stored to *oldp (%rdx).
 */
458 ENTRY(casueword32_smap)
460 movq PCPU(CURPCB),%r8
461 movq $fusufault,PCB_ONFAULT(%r8)
463 movq $VM_MAXUSER_ADDRESS-4,%rax
464 cmpq %rax,%rdi /* verify address is valid */
467 movl %esi,%eax /* old */
472 cmpxchgl %ecx,(%rdi) /* new = %ecx */
476 * The old value is in %eax. If the store succeeded it will be the
477 * value we expected (old) from before the store, otherwise it will
478 * be the current value. Save %eax into %esi to prepare the return
483 movq %rax,PCB_ONFAULT(%r8)
486 * Access the oldp after the pcb_onfault is cleared, to correctly
487 * catch corrupted pointer.
489 movl %esi,(%rdx) /* oldp = %rdx */
492 END(casueword32_smap)
495 * casueword. Compare and set user long. Returns -1 on fault,
496 * 0 if access was successful. Old value is written to *oldp.
497 * dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
/*
 * 64-bit compare-and-set on a user word, non-SMAP variant.
 * NOTE(review): the bound below is VM_MAXUSER_ADDRESS-4 although the
 * cmpxchgq accesses 8 bytes; an access straddling the end of user
 * space would still be caught by the fusufault handler, but -8 would
 * match the access width — confirm whether this is intentional.
 */
499 ENTRY(casueword_nosmap)
501 movq PCPU(CURPCB),%r8
502 movq $fusufault,PCB_ONFAULT(%r8)
504 movq $VM_MAXUSER_ADDRESS-4,%rax
505 cmpq %rax,%rdi /* verify address is valid */
508 movq %rsi,%rax /* old */
512 cmpxchgq %rcx,(%rdi) /* new = %rcx */
515 * The old value is in %rax. If the store succeeded it will be the
516 * value we expected (old) from before the store, otherwise it will
517 * be the current value.
/* Disarm onfault; store of the old value to *oldp is elided here. */
521 movq %rax,PCB_ONFAULT(%r8)
525 END(casueword_nosmap)
/*
 * casueword — SMAP variant (stac/clac elided in this excerpt).
 * Same contract and same -4 bound question as casueword_nosmap.
 */
527 ENTRY(casueword_smap)
529 movq PCPU(CURPCB),%r8
530 movq $fusufault,PCB_ONFAULT(%r8)
532 movq $VM_MAXUSER_ADDRESS-4,%rax
533 cmpq %rax,%rdi /* verify address is valid */
536 movq %rsi,%rax /* old */
541 cmpxchgq %rcx,(%rdi) /* new = %rcx */
545 * The old value is in %rax. If the store succeeded it will be the
546 * value we expected (old) from before the store, otherwise it will
547 * be the current value.
551 movq %rax,PCB_ONFAULT(%r8)
/*
 * fueword family: fetch a user word of the given width into *valp.
 */
558 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
559 * byte from user memory.
560 * addr = %rdi, valp = %rsi
/*
 * fueword — fetch a 64-bit user word (fragment).  Bound is
 * VM_MAXUSER_ADDRESS-8 to keep the whole 8-byte load inside user
 * space.  The actual load, the store to *valp, and ret are elided.
 */
563 ENTRY(fueword_nosmap)
565 movq PCPU(CURPCB),%rcx
566 movq $fusufault,PCB_ONFAULT(%rcx)
568 movq $VM_MAXUSER_ADDRESS-8,%rax
569 cmpq %rax,%rdi /* verify address is valid */
/* Disarm onfault; %rax presumably carries the 0 success return. */
574 movq %rax,PCB_ONFAULT(%rcx)
/* SMAP variant (ENTRY and stac/clac elided in this excerpt). */
582 movq PCPU(CURPCB),%rcx
583 movq $fusufault,PCB_ONFAULT(%rcx)
585 movq $VM_MAXUSER_ADDRESS-8,%rax
586 cmpq %rax,%rdi /* verify address is valid */
593 movq %rax,PCB_ONFAULT(%rcx)
/*
 * fueword32 — fetch a 32-bit user word; bound -4 matches the access
 * width.  Load/store-to-valp/ret lines are elided in this excerpt.
 */
599 ENTRY(fueword32_nosmap)
601 movq PCPU(CURPCB),%rcx
602 movq $fusufault,PCB_ONFAULT(%rcx)
604 movq $VM_MAXUSER_ADDRESS-4,%rax
605 cmpq %rax,%rdi /* verify address is valid */
610 movq %rax,PCB_ONFAULT(%rcx)
614 END(fueword32_nosmap)
/* SMAP variant — identical apart from the (elided) stac/clac pair. */
616 ENTRY(fueword32_smap)
618 movq PCPU(CURPCB),%rcx
619 movq $fusufault,PCB_ONFAULT(%rcx)
621 movq $VM_MAXUSER_ADDRESS-4,%rax
622 cmpq %rax,%rdi /* verify address is valid */
629 movq %rax,PCB_ONFAULT(%rcx)
/*
 * fuword16 / fubyte — fetch a 16-bit word / single byte from user
 * space (fragments).  Bounds -2 and -1 match the access widths.
 * These return the fetched value directly (-1 on fault), hence the
 * explicit $0 store to pcb_onfault rather than reusing %rax.
 */
635 ENTRY(fuword16_nosmap)
637 movq PCPU(CURPCB),%rcx
638 movq $fusufault,PCB_ONFAULT(%rcx)
640 movq $VM_MAXUSER_ADDRESS-2,%rax
645 movq $0,PCB_ONFAULT(%rcx)
/* fuword16_smap (ENTRY and stac/clac elided). */
652 movq PCPU(CURPCB),%rcx
653 movq $fusufault,PCB_ONFAULT(%rcx)
655 movq $VM_MAXUSER_ADDRESS-2,%rax
662 movq $0,PCB_ONFAULT(%rcx)
/* fubyte_nosmap (ENTRY elided). */
669 movq PCPU(CURPCB),%rcx
670 movq $fusufault,PCB_ONFAULT(%rcx)
672 movq $VM_MAXUSER_ADDRESS-1,%rax
677 movq $0,PCB_ONFAULT(%rcx)
/* fubyte_smap (ENTRY and stac/clac elided). */
684 movq PCPU(CURPCB),%rcx
685 movq $fusufault,PCB_ONFAULT(%rcx)
687 movq $VM_MAXUSER_ADDRESS-1,%rax
694 movq $0,PCB_ONFAULT(%rcx)
/*
 * fusufault — common fault target for the fu*/su*/casueword family
 * (fragment).  Disarms pcb_onfault; the -1 return value setup is
 * elided in this excerpt.
 */
700 /* Fault entry clears PSL.AC */
702 movq PCPU(CURPCB),%rcx
704 movq %rax,PCB_ONFAULT(%rcx)
/* suword family: store a word of the given width to user memory. */
710 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
712 * addr = %rdi, value = %rsi
/*
 * suword — store a 64-bit word to user space (fragments; ENTRY, the
 * store itself, and ret are elided).  Bound -8 keeps the full 8-byte
 * store inside user space.  Returns 0 on success, -1 via fusufault.
 */
716 movq PCPU(CURPCB),%rcx
717 movq $fusufault,PCB_ONFAULT(%rcx)
719 movq $VM_MAXUSER_ADDRESS-8,%rax
720 cmpq %rax,%rdi /* verify address validity */
/* Success epilogue: reload curpcb (clobbered above) and disarm. */
725 movq PCPU(CURPCB),%rcx
726 movq %rax,PCB_ONFAULT(%rcx)
/* suword_smap (stac/clac elided). */
733 movq PCPU(CURPCB),%rcx
734 movq $fusufault,PCB_ONFAULT(%rcx)
736 movq $VM_MAXUSER_ADDRESS-8,%rax
737 cmpq %rax,%rdi /* verify address validity */
744 movq PCPU(CURPCB),%rcx
745 movq %rax,PCB_ONFAULT(%rcx)
/*
 * suword32 — store a 32-bit word to user space (fragments).  Bound -4
 * matches the access width; store instruction and ret are elided.
 */
750 ENTRY(suword32_nosmap)
752 movq PCPU(CURPCB),%rcx
753 movq $fusufault,PCB_ONFAULT(%rcx)
755 movq $VM_MAXUSER_ADDRESS-4,%rax
756 cmpq %rax,%rdi /* verify address validity */
761 movq PCPU(CURPCB),%rcx
762 movq %rax,PCB_ONFAULT(%rcx)
/* suword32_smap (ENTRY and stac/clac elided). */
769 movq PCPU(CURPCB),%rcx
770 movq $fusufault,PCB_ONFAULT(%rcx)
772 movq $VM_MAXUSER_ADDRESS-4,%rax
773 cmpq %rax,%rdi /* verify address validity */
780 movq PCPU(CURPCB),%rcx
781 movq %rax,PCB_ONFAULT(%rcx)
/*
 * suword16 — store a 16-bit word to user space (fragments).  Bound -2
 * matches the access width; store and ret are elided.
 */
786 ENTRY(suword16_nosmap)
788 movq PCPU(CURPCB),%rcx
789 movq $fusufault,PCB_ONFAULT(%rcx)
791 movq $VM_MAXUSER_ADDRESS-2,%rax
792 cmpq %rax,%rdi /* verify address validity */
797 movq PCPU(CURPCB),%rcx /* restore trashed register */
798 movq %rax,PCB_ONFAULT(%rcx)
/* suword16_smap (ENTRY and stac/clac elided). */
805 movq PCPU(CURPCB),%rcx
806 movq $fusufault,PCB_ONFAULT(%rcx)
808 movq $VM_MAXUSER_ADDRESS-2,%rax
809 cmpq %rax,%rdi /* verify address validity */
816 movq PCPU(CURPCB),%rcx /* restore trashed register */
817 movq %rax,PCB_ONFAULT(%rcx)
/*
 * subyte — store a single byte to user space (fragments; ENTRY, the
 * byte store, and ret are elided).  Bound -1 matches the 1-byte
 * access.  Returns 0 on success, -1 via fusufault.
 */
824 movq PCPU(CURPCB),%rcx
825 movq $fusufault,PCB_ONFAULT(%rcx)
827 movq $VM_MAXUSER_ADDRESS-1,%rax
828 cmpq %rax,%rdi /* verify address validity */
834 movq PCPU(CURPCB),%rcx /* restore trashed register */
835 movq %rax,PCB_ONFAULT(%rcx)
/* subyte_smap (ENTRY and stac/clac elided). */
842 movq PCPU(CURPCB),%rcx
843 movq $fusufault,PCB_ONFAULT(%rcx)
845 movq $VM_MAXUSER_ADDRESS-1,%rax
846 cmpq %rax,%rdi /* verify address validity */
854 movq PCPU(CURPCB),%rcx /* restore trashed register */
855 movq %rax,PCB_ONFAULT(%rcx)
861 * copyinstr(from, to, maxlen, int *lencopied)
862 * %rdi, %rsi, %rdx, %rcx
864 * copy a string from 'from' to 'to', stop when a 0 character is reached.
865 * return ENAMETOOLONG if string is longer than maxlen, and
866 * EFAULT on protection violations. If lencopied is non-zero,
867 * return the actual length in *lencopied.
/*
 * Non-SMAP variant (fragment).  Stashes maxlen in %r8 and the
 * lencopied pointer in %r9, swaps %rdi/%rsi into bcopy-style order,
 * arms pcb_onfault -> cpystrflt, then (elided) clamps maxlen so the
 * scan cannot run past the end of user space.
 */
869 ENTRY(copyinstr_nosmap)
871 movq %rdx,%r8 /* %r8 = maxlen */
872 movq %rcx,%r9 /* %r9 = *len */
873 xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
874 movq PCPU(CURPCB),%rcx
875 movq $cpystrflt,PCB_ONFAULT(%rcx)
877 movq $VM_MAXUSER_ADDRESS,%rax
879 /* make sure 'from' is within bounds */
883 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
901 END(copyinstr_nosmap)
/*
 * copyinstr — SMAP variant (fragment); same contract as the non-SMAP
 * version, with stac/clac around the user scan (elided here).
 */
903 ENTRY(copyinstr_smap)
905 movq %rdx,%r8 /* %r8 = maxlen */
906 movq %rcx,%r9 /* %r9 = *len */
907 xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
908 movq PCPU(CURPCB),%rcx
909 movq $cpystrflt,PCB_ONFAULT(%rcx)
911 movq $VM_MAXUSER_ADDRESS,%rax
913 /* make sure 'from' is within bounds */
919 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
/* Count exhausted before NUL: distinguish too-long from fault below. */
929 jz copyinstr_toolong_smap
939 /* Success -- 0 byte reached */
944 /* set *lencopied and return %eax */
945 movq PCPU(CURPCB),%rcx
946 movq $0,PCB_ONFAULT(%rcx)
/* cpystrflt: fault target for both variants (EFAULT return elided). */
955 /* Fault entry clears PSL.AC */
960 copyinstr_toolong_smap:
/*
 * Ran out of count: if the scan stopped exactly at the end of user
 * space it was really a fault (EFAULT), otherwise ENAMETOOLONG.
 */
963 /* rdx is zero - return ENAMETOOLONG or EFAULT */
964 movq $VM_MAXUSER_ADDRESS,%rax
967 movq $ENAMETOOLONG,%rax
/*
 * copystr — kernel-to-kernel string copy with the same result
 * convention as copyinstr, but no user-access window or fault
 * handling needed (fragment; scan loop elided).
 */
973 * copystr(from, to, maxlen, int *lencopied)
974 * %rdi, %rsi, %rdx, %rcx
978 movq %rdx,%r8 /* %r8 = maxlen */
990 /* Success -- 0 byte reached */
995 /* rdx is zero -- return ENAMETOOLONG */
996 movq $ENAMETOOLONG,%rax
1002 /* set *lencopied and return %rax */
/*
 * lgdt(rdp) — reload the GDT and refresh segment registers
 * (fragment).  The lgdt instruction itself, the data-segment reloads,
 * and the far-return sequence that reloads %cs are elided; only the
 * %fs note and the retq-based code-segment reload comment remain.
 */
1011 * Handling of special amd64 registers and descriptor tables etc
1013 /* void lgdt(struct region_descriptor *rdp); */
1015 /* reload the descriptor table */
1018 /* flush the prefetch q */
1025 movl %eax,%fs /* Beware, use wrmsr to set 64 bit base */
1029 /* reload code selector by turning return into intersegmental return */
1037 /*****************************************************************************/
1038 /* setjump, longjump */
1039 /*****************************************************************************/
/*
 * setjmp(buf=%rdi): save the callee-saved register set plus %rsp and
 * the return address into the 8-slot jump buffer, then return 0.
 * longjmp restores the same slots and returns 1 from this call site.
 */
1042 movq %rbx,0(%rdi) /* save rbx */
1043 movq %rsp,8(%rdi) /* save rsp */
1044 movq %rbp,16(%rdi) /* save rbp */
1045 movq %r12,24(%rdi) /* save r12 */
1046 movq %r13,32(%rdi) /* save r13 */
1047 movq %r14,40(%rdi) /* save r14 */
1048 movq %r15,48(%rdi) /* save r15 */
1049 movq 0(%rsp),%rdx /* get rta */
1050 movq %rdx,56(%rdi) /* save rip */
1051 xorl %eax,%eax /* return(0); */
/*
 * longjmp(buf=%rdi): restore the registers saved by setjmp, rewrite
 * the return address on the restored stack with the saved %rip, and
 * return 1 — resuming execution as if setjmp had just returned.
 * NOTE(review): returning 1 via "xorl %eax,%eax" alone would yield 0;
 * the increment that makes it 1 is elided in this excerpt.
 */
1056 movq 0(%rdi),%rbx /* restore rbx */
1057 movq 8(%rdi),%rsp /* restore rsp */
1058 movq 16(%rdi),%rbp /* restore rbp */
1059 movq 24(%rdi),%r12 /* restore r12 */
1060 movq 32(%rdi),%r13 /* restore r13 */
1061 movq 40(%rdi),%r14 /* restore r14 */
1062 movq 48(%rdi),%r15 /* restore r15 */
1063 movq 56(%rdi),%rdx /* get rta */
1064 movq %rdx,0(%rsp) /* put in return frame */
1065 xorl %eax,%eax /* return(1); */
/*
 * rdmsr_safe / wrmsr_safe: access MSRs with pcb_onfault armed so a #GP
 * on an unimplemented MSR is caught by msr_onfault instead of
 * panicking.  Returns 0 on success (error return elided).
 */
1071 * Support for reading MSRs in the safe manner. (Instead of panic on #gp,
1075 /* int rdmsr_safe(u_int msr, uint64_t *data) */
1077 movq PCPU(CURPCB),%r8
1078 movq $msr_onfault,PCB_ONFAULT(%r8)
1080 rdmsr /* Read MSR pointed by %ecx. Returns
1081 hi byte in edx, lo in %eax */
/* Merge edx:eax into one 64-bit value in %rax. */
1082 salq $32,%rdx /* sign-shift %rdx left */
1083 movl %eax,%eax /* zero-extend %eax -> %rax */
1087 movq %rax,PCB_ONFAULT(%r8)
1092 * Support for writing MSRs in the safe manner. (Instead of panic on #gp,
1096 /* int wrmsr_safe(u_int msr, uint64_t data) */
1098 movq PCPU(CURPCB),%r8
1099 movq $msr_onfault,PCB_ONFAULT(%r8)
/* (Split of the 64-bit data into edx:eax is elided in this excerpt.) */
1104 wrmsr /* Write MSR pointed by %ecx. Accepts
1105 hi byte in edx, lo in %eax. */
1107 movq %rax,PCB_ONFAULT(%r8)
/* msr_onfault: #GP landing pad — disarm onfault (error return elided). */
1112 * MSR operations fault handler
1116 movq $0,PCB_ONFAULT(%r8)
1122 * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
1123 * Invalidates address space addressed by ucr3, then returns to kcr3.
1124 * Done in assembler to ensure no other memory accesses happen while
/*
 * PTI/PCID helpers: each switches %cr3 to the user page table, does
 * the required invalidation, and switches back.  Written in assembly
 * because no memory may be touched while the user %cr3 is live.
 */
1128 ENTRY(pmap_pti_pcid_invalidate)
1131 movq %rdi,%cr3 /* to user page table */
1132 movq %rsi,%cr3 /* back to kernel */
1137 * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
1138 * Invalidates virtual address va in address space ucr3, then returns to kcr3.
/* Single-page variant: the invlpg on %rdx sits between the switches. */
1141 ENTRY(pmap_pti_pcid_invlpg)
1144 movq %rdi,%cr3 /* to user page table */
1146 movq %rsi,%cr3 /* back to kernel */
1151 * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
1153 * Invalidates virtual addresses between sva and eva in address space ucr3,
1154 * then returns to kcr3.
/* Range variant: loops invlpg over [sva, eva) one page at a time. */
1157 ENTRY(pmap_pti_pcid_invlrng)
1160 movq %rdi,%cr3 /* to user page table */
1162 addq $PAGE_SIZE,%rdx
1165 movq %rsi,%cr3 /* back to kernel */
/*
 * Assembler macros that emit numbered labels and calls for the IBRS
 * retpoline-style call/pause sequence (bodies elided in this excerpt;
 * \count iterations, each generating a label/call pair).
 */
1170 .macro ibrs_seq_label l
1173 .macro ibrs_call_label l
1176 .macro ibrs_seq count
1179 ibrs_call_label %(ll)
1181 ibrs_seq_label %(ll)
/*
 * handle_ibrs_entry: on kernel entry, if IBRS mitigation is active,
 * set IBRS|STIBP in the IA32_SPEC_CTRL MSR (wrmsr itself elided) and
 * record that in the per-CPU IBPB_SET flag.  The SMEP test selects a
 * follow-up path (elided).
 */
1187 /* all callers already saved %rax, %rdx, and %rcx */
1188 ENTRY(handle_ibrs_entry)
1189 cmpb $0,hw_ibrs_active(%rip)
1191 movl $MSR_IA32_SPEC_CTRL,%ecx
1193 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
/* High half of the MSR goes in %edx, hence the >>32 of the same mask. */
1194 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx
1196 movb $1,PCPU(IBPB_SET)
1197 testl $CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
1201 END(handle_ibrs_entry)
/*
 * handle_ibrs_exit: inverse of entry — clear IBRS|STIBP on return to
 * user mode if this CPU set them (rdmsr/wrmsr elided).
 */
1203 ENTRY(handle_ibrs_exit)
1204 cmpb $0,PCPU(IBPB_SET)
1206 movl $MSR_IA32_SPEC_CTRL,%ecx
1208 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1209 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
1211 movb $0,PCPU(IBPB_SET)
1213 END(handle_ibrs_exit)
/*
 * handle_ibrs_exit_rs: same as handle_ibrs_exit but preserves all
 * registers by saving scratch regs on the stack (pushes/pops elided).
 */
1215 /* registers-neutral version, but needs stack */
1216 ENTRY(handle_ibrs_exit_rs)
1217 cmpb $0,PCPU(IBPB_SET)
1222 movl $MSR_IA32_SPEC_CTRL,%ecx
1224 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1225 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
1230 movb $0,PCPU(IBPB_SET)
1232 END(handle_ibrs_exit_rs)
1237 * Flush L1D cache. Load enough of the data from the kernel text
1238 * to flush existing L1D content.
1240 * N.B. The function does not follow ABI calling conventions, it corrupts %rbx.
1241 * The vmm.ko caller expects that only %rax, %rdx, %rbx, %rcx, %r9, and %rflags
1242 * registers are clobbered. The NMI handler caller only needs %r13 preserved.
/*
 * Software L1D flush: read 64 KiB of kernel text (%r9 presumably
 * holds the base, set up in elided code) to evict all L1D lines.
 * %rcx runs from -L1D_FLUSH_SIZE up toward 0, so L1D_FLUSH_SIZE(%r9,%rcx)
 * walks [base, base+64K).
 */
1245 #define L1D_FLUSH_SIZE (64 * 1024)
1247 movq $-L1D_FLUSH_SIZE, %rcx
1249 * pass 1: Preload TLB.
1250 * Kernel text is mapped using superpages. TLB preload is
1251 * done for the benefit of older CPUs which split 2M page
1252 * into 4k TLB entries.
/* One byte per page is enough to populate each TLB entry. */
1254 1: movb L1D_FLUSH_SIZE(%r9, %rcx), %al
1255 addq $PAGE_SIZE, %rcx
/* Pass 2 steps by cache line (increment elided) to touch every line. */
1259 movq $-L1D_FLUSH_SIZE, %rcx
1260 /* pass 2: Read each cache line. */
1261 2: movb L1D_FLUSH_SIZE(%r9, %rcx), %al
1266 #undef L1D_FLUSH_SIZE