2 * Copyright (c) 2003 Peter Wemm.
3 * Copyright (c) 1993 The Regents of the University of California.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of the University nor the names of its contributors
15 * may be used to endorse or promote products derived from this software
16 * without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 #include <machine/asmacros.h>
36 #include <machine/specialreg.h>
37 #include <machine/pmap.h>
46 movq $PAGE_SIZE/8,%rcx
65 * pagecopy(%rdi=from, %rsi=to)
69 movq $PAGE_SIZE/8,%rcx
87 * The loop takes 29 bytes. Ensure that it doesn't cross a 32-byte
92 movnti %rax,(%rdi,%rdx)
93 movnti %rax,8(%rdi,%rdx)
94 movnti %rax,16(%rdi,%rdx)
95 movnti %rax,24(%rdi,%rdx)
104 * memmove(dst, src, cnt)
106 * Adapted from bcopy written by:
107 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
116 cmpq %rcx,%r8 /* overlapping && src < dst? */
119 shrq $3,%rcx /* copy by 64-bit words */
123 andq $7,%rcx /* any bytes left? */
135 addq %rcx,%rdi /* copy backwards */
140 andq $7,%rcx /* any fractional bytes? */
145 movq %rdx,%rcx /* copy remainder by 32-bit words */
163 cmpq %rcx,%r8 /* overlapping && src < dst? */
172 addq %rcx,%rdi /* copy backwards */
185 * memcpy(dst, src, len)
188 * Note: memcpy does not support overlapping copies
194 shrq $3,%rcx /* copy by 64-bit words */
198 andq $7,%rcx /* any bytes left? */
220 * memset(dst, c, len)
228 movabs $0x0101010101010101,%rax
259 /* fillw(pat, base, cnt) */
260 /* %rdi,%rsi, %rdx */
272 /*****************************************************************************/
273 /* copyout and fubyte family */
274 /*****************************************************************************/
276 * Access user memory from inside the kernel. These routines should be
277 * the only places that do this.
279 * These routines set curpcb->pcb_onfault for the time they execute. When a
280 * protection violation occurs inside the functions, the trap handler
281 * returns to *curpcb->pcb_onfault instead of the function.
284 .macro SMAP_DISABLE smap
291 .macro SMAP_ENABLE smap
298 * copyout(from_kernel, to_user, len)
301 .macro COPYOUT smap erms
303 movq PCPU(CURPCB),%rax
304 /* Trap entry clears PSL.AC */
305 movq $copyout_fault,PCB_ONFAULT(%rax)
306 testq %rdx,%rdx /* anything to do? */
310 * Check explicitly for non-user addresses. If 486 write protection
311 * is being used, this check is essential because we are in kernel
312 * mode so the h/w does not provide any protection against writing
317 * First, prevent address wrapping.
323 * XXX STOP USING VM_MAXUSER_ADDRESS.
324 * It is an end address, not a max, so every time it is used correctly it
325 * looks like there is an off by one error, and of course it caused an off
326 * by one error in several places.
328 movq $VM_MAXUSER_ADDRESS,%rcx
333 /* bcopy(%rsi, %rdi, %rdx) */
351 movq PCPU(CURPCB),%rdx
352 movq %rax,PCB_ONFAULT(%rdx)
/*
 * copyout variant: expands the COPYOUT macro (defined above, taking
 * smap/erms flags) with smap=0 and erms=0 — i.e. no SMAP
 * stac/clac bracketing and the non-ERMS word-wise copy loop.
 * Selected at boot per CPU feature flags — TODO confirm against the
 * ifunc/selector not visible in this view.
 */
357 ENTRY(copyout_nosmap_std)
358 	COPYOUT smap=0 erms=0
359 END(copyout_nosmap_std)
/*
 * copyout variant: COPYOUT macro with smap=1, erms=0 — SMAP-aware
 * (the macro's SMAP_DISABLE/SMAP_ENABLE bracketing is used) with the
 * standard, non-ERMS copy loop.
 */
361 ENTRY(copyout_smap_std)
362 	COPYOUT smap=1 erms=0
363 END(copyout_smap_std)
/*
 * copyout variant: COPYOUT macro with smap=0, erms=1 — no SMAP
 * bracketing, ERMS path enabled (presumably rep movsb on CPUs
 * advertising Enhanced REP MOVSB — confirm in the macro body).
 */
365 ENTRY(copyout_nosmap_erms)
366 	COPYOUT smap=0 erms=1
367 END(copyout_nosmap_erms)
/*
 * copyout variant: COPYOUT macro with smap=1, erms=1 — both SMAP
 * bracketing and the ERMS copy path enabled; the fully-featured
 * variant for modern CPUs.
 */
369 ENTRY(copyout_smap_erms)
370 	COPYOUT smap=1 erms=1
371 END(copyout_smap_erms)
375 movq PCPU(CURPCB),%rdx
376 movq $0,PCB_ONFAULT(%rdx)
382 * copyin(from_user, to_kernel, len)
385 .macro COPYIN smap erms
387 movq PCPU(CURPCB),%rax
388 movq $copyin_fault,PCB_ONFAULT(%rax)
389 testq %rdx,%rdx /* anything to do? */
393 * make sure address is valid
398 movq $VM_MAXUSER_ADDRESS,%rcx
408 shrq $3,%rcx /* copy longword-wise */
412 andb $7,%cl /* copy remaining bytes */
422 movq PCPU(CURPCB),%rdx
423 movq %rax,PCB_ONFAULT(%rdx)
428 ENTRY(copyin_nosmap_std)
430 END(copyin_nosmap_std)
432 ENTRY(copyin_smap_std)
436 ENTRY(copyin_nosmap_erms)
438 END(copyin_nosmap_erms)
440 ENTRY(copyin_smap_erms)
442 END(copyin_smap_erms)
446 movq PCPU(CURPCB),%rdx
447 movq $0,PCB_ONFAULT(%rdx)
453 * casueword32. Compare and set user integer. Returns -1 on fault,
454 * 0 if access was successful. Old value is written to *oldp.
455 * dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
457 ENTRY(casueword32_nosmap)
459 movq PCPU(CURPCB),%r8
460 movq $fusufault,PCB_ONFAULT(%r8)
462 movq $VM_MAXUSER_ADDRESS-4,%rax
463 cmpq %rax,%rdi /* verify address is valid */
466 movl %esi,%eax /* old */
470 cmpxchgl %ecx,(%rdi) /* new = %ecx */
473 * The old value is in %eax. If the store succeeded it will be the
474 * value we expected (old) from before the store, otherwise it will
475 * be the current value. Save %eax into %esi to prepare the return
480 movq %rax,PCB_ONFAULT(%r8)
483 * Access the oldp after the pcb_onfault is cleared, to correctly
484 * catch corrupted pointer.
486 movl %esi,(%rdx) /* oldp = %rdx */
489 END(casueword32_nosmap)
491 ENTRY(casueword32_smap)
493 movq PCPU(CURPCB),%r8
494 movq $fusufault,PCB_ONFAULT(%r8)
496 movq $VM_MAXUSER_ADDRESS-4,%rax
497 cmpq %rax,%rdi /* verify address is valid */
500 movl %esi,%eax /* old */
505 cmpxchgl %ecx,(%rdi) /* new = %ecx */
509 * The old value is in %eax. If the store succeeded it will be the
510 * value we expected (old) from before the store, otherwise it will
511 * be the current value. Save %eax into %esi to prepare the return
516 movq %rax,PCB_ONFAULT(%r8)
519 * Access the oldp after the pcb_onfault is cleared, to correctly
520 * catch corrupted pointer.
522 movl %esi,(%rdx) /* oldp = %rdx */
525 END(casueword32_smap)
528 * casueword. Compare and set user long. Returns -1 on fault,
529 * 0 if access was successful. Old value is written to *oldp.
530 * dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
532 ENTRY(casueword_nosmap)
534 movq PCPU(CURPCB),%r8
535 movq $fusufault,PCB_ONFAULT(%r8)
537 movq $VM_MAXUSER_ADDRESS-4,%rax
538 cmpq %rax,%rdi /* verify address is valid */
541 movq %rsi,%rax /* old */
545 cmpxchgq %rcx,(%rdi) /* new = %rcx */
548 * The old value is in %rax. If the store succeeded it will be the
549 * value we expected (old) from before the store, otherwise it will
550 * be the current value.
554 movq %rax,PCB_ONFAULT(%r8)
558 END(casueword_nosmap)
560 ENTRY(casueword_smap)
562 movq PCPU(CURPCB),%r8
563 movq $fusufault,PCB_ONFAULT(%r8)
565 movq $VM_MAXUSER_ADDRESS-4,%rax
566 cmpq %rax,%rdi /* verify address is valid */
569 movq %rsi,%rax /* old */
574 cmpxchgq %rcx,(%rdi) /* new = %rcx */
578 * The old value is in %rax. If the store succeeded it will be the
579 * value we expected (old) from before the store, otherwise it will
580 * be the current value.
584 movq %rax,PCB_ONFAULT(%r8)
591 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
592 * byte from user memory.
593 * addr = %rdi, valp = %rsi
596 ENTRY(fueword_nosmap)
598 movq PCPU(CURPCB),%rcx
599 movq $fusufault,PCB_ONFAULT(%rcx)
601 movq $VM_MAXUSER_ADDRESS-8,%rax
602 cmpq %rax,%rdi /* verify address is valid */
607 movq %rax,PCB_ONFAULT(%rcx)
615 movq PCPU(CURPCB),%rcx
616 movq $fusufault,PCB_ONFAULT(%rcx)
618 movq $VM_MAXUSER_ADDRESS-8,%rax
619 cmpq %rax,%rdi /* verify address is valid */
626 movq %rax,PCB_ONFAULT(%rcx)
632 ENTRY(fueword32_nosmap)
634 movq PCPU(CURPCB),%rcx
635 movq $fusufault,PCB_ONFAULT(%rcx)
637 movq $VM_MAXUSER_ADDRESS-4,%rax
638 cmpq %rax,%rdi /* verify address is valid */
643 movq %rax,PCB_ONFAULT(%rcx)
647 END(fueword32_nosmap)
649 ENTRY(fueword32_smap)
651 movq PCPU(CURPCB),%rcx
652 movq $fusufault,PCB_ONFAULT(%rcx)
654 movq $VM_MAXUSER_ADDRESS-4,%rax
655 cmpq %rax,%rdi /* verify address is valid */
662 movq %rax,PCB_ONFAULT(%rcx)
668 ENTRY(fuword16_nosmap)
670 movq PCPU(CURPCB),%rcx
671 movq $fusufault,PCB_ONFAULT(%rcx)
673 movq $VM_MAXUSER_ADDRESS-2,%rax
678 movq $0,PCB_ONFAULT(%rcx)
685 movq PCPU(CURPCB),%rcx
686 movq $fusufault,PCB_ONFAULT(%rcx)
688 movq $VM_MAXUSER_ADDRESS-2,%rax
695 movq $0,PCB_ONFAULT(%rcx)
702 movq PCPU(CURPCB),%rcx
703 movq $fusufault,PCB_ONFAULT(%rcx)
705 movq $VM_MAXUSER_ADDRESS-1,%rax
710 movq $0,PCB_ONFAULT(%rcx)
717 movq PCPU(CURPCB),%rcx
718 movq $fusufault,PCB_ONFAULT(%rcx)
720 movq $VM_MAXUSER_ADDRESS-1,%rax
727 movq $0,PCB_ONFAULT(%rcx)
733 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
735 * addr = %rdi, value = %rsi
739 movq PCPU(CURPCB),%rcx
740 movq $fusufault,PCB_ONFAULT(%rcx)
742 movq $VM_MAXUSER_ADDRESS-8,%rax
743 cmpq %rax,%rdi /* verify address validity */
748 movq PCPU(CURPCB),%rcx
749 movq %rax,PCB_ONFAULT(%rcx)
756 movq PCPU(CURPCB),%rcx
757 movq $fusufault,PCB_ONFAULT(%rcx)
759 movq $VM_MAXUSER_ADDRESS-8,%rax
760 cmpq %rax,%rdi /* verify address validity */
767 movq PCPU(CURPCB),%rcx
768 movq %rax,PCB_ONFAULT(%rcx)
773 ENTRY(suword32_nosmap)
775 movq PCPU(CURPCB),%rcx
776 movq $fusufault,PCB_ONFAULT(%rcx)
778 movq $VM_MAXUSER_ADDRESS-4,%rax
779 cmpq %rax,%rdi /* verify address validity */
784 movq PCPU(CURPCB),%rcx
785 movq %rax,PCB_ONFAULT(%rcx)
792 movq PCPU(CURPCB),%rcx
793 movq $fusufault,PCB_ONFAULT(%rcx)
795 movq $VM_MAXUSER_ADDRESS-4,%rax
796 cmpq %rax,%rdi /* verify address validity */
803 movq PCPU(CURPCB),%rcx
804 movq %rax,PCB_ONFAULT(%rcx)
809 ENTRY(suword16_nosmap)
811 movq PCPU(CURPCB),%rcx
812 movq $fusufault,PCB_ONFAULT(%rcx)
814 movq $VM_MAXUSER_ADDRESS-2,%rax
815 cmpq %rax,%rdi /* verify address validity */
820 movq PCPU(CURPCB),%rcx /* restore trashed register */
821 movq %rax,PCB_ONFAULT(%rcx)
828 movq PCPU(CURPCB),%rcx
829 movq $fusufault,PCB_ONFAULT(%rcx)
831 movq $VM_MAXUSER_ADDRESS-2,%rax
832 cmpq %rax,%rdi /* verify address validity */
839 movq PCPU(CURPCB),%rcx /* restore trashed register */
840 movq %rax,PCB_ONFAULT(%rcx)
847 movq PCPU(CURPCB),%rcx
848 movq $fusufault,PCB_ONFAULT(%rcx)
850 movq $VM_MAXUSER_ADDRESS-1,%rax
851 cmpq %rax,%rdi /* verify address validity */
857 movq PCPU(CURPCB),%rcx /* restore trashed register */
858 movq %rax,PCB_ONFAULT(%rcx)
865 movq PCPU(CURPCB),%rcx
866 movq $fusufault,PCB_ONFAULT(%rcx)
868 movq $VM_MAXUSER_ADDRESS-1,%rax
869 cmpq %rax,%rdi /* verify address validity */
877 movq PCPU(CURPCB),%rcx /* restore trashed register */
878 movq %rax,PCB_ONFAULT(%rcx)
884 /* Fault entry clears PSL.AC */
886 movq PCPU(CURPCB),%rcx
888 movq %rax,PCB_ONFAULT(%rcx)
894 * copyinstr(from, to, maxlen, int *lencopied)
895 * %rdi, %rsi, %rdx, %rcx
897 * copy a string from 'from' to 'to', stop when a 0 character is reached.
898 * return ENAMETOOLONG if string is longer than maxlen, and
899 * EFAULT on protection violations. If lencopied is non-zero,
900 * return the actual length in *lencopied.
902 ENTRY(copyinstr_nosmap)
904 movq %rdx,%r8 /* %r8 = maxlen */
905 movq %rcx,%r9 /* %r9 = *len */
906 xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
907 movq PCPU(CURPCB),%rcx
908 movq $cpystrflt,PCB_ONFAULT(%rcx)
910 movq $VM_MAXUSER_ADDRESS,%rax
912 /* make sure 'from' is within bounds */
916 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
934 END(copyinstr_nosmap)
936 ENTRY(copyinstr_smap)
938 movq %rdx,%r8 /* %r8 = maxlen */
939 movq %rcx,%r9 /* %r9 = *len */
940 xchgq %rdi,%rsi /* %rdi = from, %rsi = to */
941 movq PCPU(CURPCB),%rcx
942 movq $cpystrflt,PCB_ONFAULT(%rcx)
944 movq $VM_MAXUSER_ADDRESS,%rax
946 /* make sure 'from' is within bounds */
952 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
962 jz copyinstr_toolong_smap
972 /* Success -- 0 byte reached */
977 /* set *lencopied and return %eax */
978 movq PCPU(CURPCB),%rcx
979 movq $0,PCB_ONFAULT(%rcx)
988 /* Fault entry clears PSL.AC */
993 copyinstr_toolong_smap:
996 /* rdx is zero - return ENAMETOOLONG or EFAULT */
997 movq $VM_MAXUSER_ADDRESS,%rax
1000 movq $ENAMETOOLONG,%rax
1006 * copystr(from, to, maxlen, int *lencopied)
1007 * %rdi, %rsi, %rdx, %rcx
1011 movq %rdx,%r8 /* %r8 = maxlen */
1023 /* Success -- 0 byte reached */
1028 /* rdx is zero -- return ENAMETOOLONG */
1029 movq $ENAMETOOLONG,%rax
1035 /* set *lencopied and return %rax */
1044 * Handling of special amd64 registers and descriptor tables etc
1046 /* void lgdt(struct region_descriptor *rdp); */
1048 /* reload the descriptor table */
1051 /* flush the prefetch q */
1058 movl %eax,%fs /* Beware, use wrmsr to set 64 bit base */
1062 /* reload code selector by turning return into intersegmental return */
1070 /*****************************************************************************/
1071 /* setjump, longjump */
1072 /*****************************************************************************/
1075 movq %rbx,0(%rdi) /* save rbx */
1076 movq %rsp,8(%rdi) /* save rsp */
1077 movq %rbp,16(%rdi) /* save rbp */
1078 movq %r12,24(%rdi) /* save r12 */
1079 movq %r13,32(%rdi) /* save r13 */
1080 movq %r14,40(%rdi) /* save r14 */
1081 movq %r15,48(%rdi) /* save r15 */
1082 movq 0(%rsp),%rdx /* get rta */
1083 movq %rdx,56(%rdi) /* save rip */
1084 xorl %eax,%eax /* return(0); */
1089 movq 0(%rdi),%rbx /* restore rbx */
1090 movq 8(%rdi),%rsp /* restore rsp */
1091 movq 16(%rdi),%rbp /* restore rbp */
1092 movq 24(%rdi),%r12 /* restore r12 */
1093 movq 32(%rdi),%r13 /* restore r13 */
1094 movq 40(%rdi),%r14 /* restore r14 */
1095 movq 48(%rdi),%r15 /* restore r15 */
1096 movq 56(%rdi),%rdx /* get rta */
1097 movq %rdx,0(%rsp) /* put in return frame */
1098 xorl %eax,%eax /* return(1); */
1104 * Support for reading MSRs in the safe manner. (Instead of panic on #gp,
1108 /* int rdmsr_safe(u_int msr, uint64_t *data) */
1110 movq PCPU(CURPCB),%r8
1111 movq $msr_onfault,PCB_ONFAULT(%r8)
1113 rdmsr /* Read MSR pointed by %ecx. Returns
1114 hi byte in edx, lo in %eax */
1115 salq $32,%rdx /* sign-shift %rdx left */
1116 movl %eax,%eax /* zero-extend %eax -> %rax */
1120 movq %rax,PCB_ONFAULT(%r8)
1125 * Support for writing MSRs in the safe manner. (Instead of panic on #gp,
1129 /* int wrmsr_safe(u_int msr, uint64_t data) */
1131 movq PCPU(CURPCB),%r8
1132 movq $msr_onfault,PCB_ONFAULT(%r8)
1137 wrmsr /* Write MSR pointed by %ecx. Accepts
1138 hi byte in edx, lo in %eax. */
1140 movq %rax,PCB_ONFAULT(%r8)
1145 * MSR operations fault handler
1149 movq $0,PCB_ONFAULT(%r8)
1155 * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
1156 * Invalidates address space addressed by ucr3, then returns to kcr3.
1157 * Done in assembler to ensure no other memory accesses happen while
1161 ENTRY(pmap_pti_pcid_invalidate)
1164 movq %rdi,%cr3 /* to user page table */
1165 movq %rsi,%cr3 /* back to kernel */
1170 * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
1171 * Invalidates virtual address va in address space ucr3, then returns to kcr3.
1174 ENTRY(pmap_pti_pcid_invlpg)
1177 movq %rdi,%cr3 /* to user page table */
1179 movq %rsi,%cr3 /* back to kernel */
1184 * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
1186 * Invalidates virtual addresses between sva and eva in address space ucr3,
1187 * then returns to kcr3.
1190 ENTRY(pmap_pti_pcid_invlrng)
1193 movq %rdi,%cr3 /* to user page table */
1195 addq $PAGE_SIZE,%rdx
1198 movq %rsi,%cr3 /* back to kernel */
1203 .macro ibrs_seq_label l
1206 .macro ibrs_call_label l
1209 .macro ibrs_seq count
1212 ibrs_call_label %(ll)
1214 ibrs_seq_label %(ll)
1220 /* all callers already saved %rax, %rdx, and %rcx */
1221 ENTRY(handle_ibrs_entry)
1222 cmpb $0,hw_ibrs_active(%rip)
1224 movl $MSR_IA32_SPEC_CTRL,%ecx
1226 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1227 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx
1229 movb $1,PCPU(IBPB_SET)
1230 testl $CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
1234 END(handle_ibrs_entry)
1236 ENTRY(handle_ibrs_exit)
1237 cmpb $0,PCPU(IBPB_SET)
1239 movl $MSR_IA32_SPEC_CTRL,%ecx
1241 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1242 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
1244 movb $0,PCPU(IBPB_SET)
1246 END(handle_ibrs_exit)
1248 /* registers-neutral version, but needs stack */
1249 ENTRY(handle_ibrs_exit_rs)
1250 cmpb $0,PCPU(IBPB_SET)
1255 movl $MSR_IA32_SPEC_CTRL,%ecx
1257 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
1258 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
1263 movb $0,PCPU(IBPB_SET)
1265 END(handle_ibrs_exit_rs)
1270 * Flush L1D cache. Load enough of the data from the kernel text
1271 * to flush existing L1D content.
1273 * N.B. The function does not follow ABI calling conventions, it corrupts %rbx.
1274 * The vmm.ko caller expects that only %rax, %rdx, %rbx, %rcx, %r9, and %rflags
1275 * registers are clobbered. The NMI handler caller only needs %r13 preserved.
1278 #define L1D_FLUSH_SIZE (64 * 1024)
1280 movq $-L1D_FLUSH_SIZE, %rcx
1282 * pass 1: Preload TLB.
1283 * Kernel text is mapped using superpages. TLB preload is
1284 * done for the benefit of older CPUs which split 2M page
1285 * into 4k TLB entries.
1287 1: movb L1D_FLUSH_SIZE(%r9, %rcx), %al
1288 addq $PAGE_SIZE, %rcx
1292 movq $-L1D_FLUSH_SIZE, %rcx
1293 /* pass 2: Read each cache line. */
1294 2: movb L1D_FLUSH_SIZE(%r9, %rcx), %al
1299 #undef L1D_FLUSH_SIZE