/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <machine/asmacros.h>
#include <machine/specialreg.h>
#include <machine/pmap.h>
	movq	$PAGE_SIZE/8,%rcx

/*
 * pagecopy(%rdi=from, %rsi=to)
 */
	movq	$PAGE_SIZE/8,%rcx
	/*
	 * The loop takes 29 bytes.  Ensure that it doesn't cross a 32-byte
	 * cache line.
	 */
	movnti	%rax,(%rdi,%rdx)
	movnti	%rax,8(%rdi,%rdx)
	movnti	%rax,16(%rdi,%rdx)
	movnti	%rax,24(%rdi,%rdx)
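	/*
	 * Note on the stores above: movnti is a non-temporal
	 * (write-combining) store, so zeroing the page does not displace
	 * useful data from the cache.  Non-temporal stores are weakly
	 * ordered; the usual pairing is an sfence before the zeroed page
	 * is handed back to the rest of the kernel.
	 */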
/*
 * memmove(dst, src, cnt)
 *
 * Adapted from bcopy written by:
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
	cmpq	%rcx,%r8			/* overlapping && src < dst? */
	shrq	$3,%rcx				/* copy by 64-bit words */
	andq	$7,%rcx				/* any bytes left? */
	addq	%rcx,%rdi			/* copy backwards */
	andq	$7,%rcx				/* any fractional bytes? */
	movq	%rdx,%rcx			/* copy remainder by 32-bit words */
	cmpq	%rcx,%r8			/* overlapping && src < dst? */
	addq	%rcx,%rdi			/* copy backwards */
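	/*
	 * Why the backward copy: when the buffers overlap and src < dst,
	 * a forward copy would overwrite source bytes before they are
	 * read.  Starting at the top of both buffers and moving down
	 * copies every byte exactly once; memmove(p + 1, p, n) only
	 * works this way.
	 */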
/*
 * memcpy(dst, src, len)
 *
 * Note: memcpy does not support overlapping copies
 */
	shrq	$3,%rcx				/* copy by 64-bit words */
	andq	$7,%rcx				/* any bytes left? */
/*
 * memset(dst, c, len)
 */
	movabs	$0x0101010101010101,%rax
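	/*
	 * The constant above is the byte-splat multiplier: multiplying
	 * the zero-extended fill byte by 0x0101010101010101 replicates
	 * it into all eight byte lanes (0xab * 0x0101010101010101 ==
	 * 0xabababababababab), so the fill can proceed with full 64-bit
	 * stores.
	 */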
/* fillw(pat, base, cnt) */
/*       %rdi, %rsi, %rdx */
/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel.  These routines should be
 * the only places that do this.
 *
 * These routines set curpcb->pcb_onfault for the time they execute.  When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->pcb_onfault instead of the function.
 */
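/*
 * The pattern used throughout this family: load curpcb, store the
 * address of a local fault label into pcb_onfault, touch user memory,
 * then clear pcb_onfault on the way out.  A page or protection fault
 * during the access is thus turned into a controlled error return
 * instead of a fatal kernel fault.
 */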
.macro	SMAP_DISABLE smap
.macro	SMAP_ENABLE smap
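/*
 * These macros presumably expand to stac/clac guarded by the \smap
 * argument: stac sets RFLAGS.AC, which on SMAP-capable CPUs temporarily
 * permits kernel-mode access to user pages, and clac clears it again so
 * stray user-memory dereferences keep faulting.
 */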
/*
 * copyout(from_kernel, to_user, len)
 */
.macro	COPYOUT smap erms
	movq	PCPU(CURPCB),%r9
	/* Trap entry clears PSL.AC */
	movq	$copy_fault,PCB_ONFAULT(%r9)
	testq	%rdx,%rdx			/* anything to do? */
	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 *
	 * First, prevent address wrapping.
	 */
	/*
	 * XXX STOP USING VM_MAXUSER_ADDRESS.
	 * It is an end address, not a max, so every time it is used correctly
	 * it looks like there is an off-by-one error, and of course it caused
	 * an off-by-one error in several places.
	 */
	movq	$VM_MAXUSER_ADDRESS,%rcx
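	/*
	 * The bounds check therefore works on an exclusive end address:
	 * a copy of len bytes to user address u is acceptable only when
	 * u + len does not wrap and u + len <= VM_MAXUSER_ADDRESS, i.e.
	 * the last byte written is at most VM_MAXUSER_ADDRESS - 1.
	 */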
	/* bcopy(%rsi, %rdi, %rdx) */
	movq	%rax,PCB_ONFAULT(%r9)
	movq	%rax,PCB_ONFAULT(%r9)

ENTRY(copyout_nosmap_std)
	COPYOUT smap=0 erms=0
END(copyout_nosmap_std)

ENTRY(copyout_smap_std)
	COPYOUT smap=1 erms=0
END(copyout_smap_std)

ENTRY(copyout_nosmap_erms)
	COPYOUT smap=0 erms=1
END(copyout_nosmap_erms)

ENTRY(copyout_smap_erms)
	COPYOUT smap=1 erms=1
END(copyout_smap_erms)
/*
 * copyin(from_user, to_kernel, len)
 */
.macro	COPYIN smap erms
	movq	PCPU(CURPCB),%r9
	movq	$copy_fault,PCB_ONFAULT(%r9)
	testq	%rdx,%rdx			/* anything to do? */

	/*
	 * make sure address is valid
	 */
	movq	$VM_MAXUSER_ADDRESS,%rcx
	shrq	$3,%rcx				/* copy longword-wise */
	andb	$7,%cl				/* copy remaining bytes */
	movq	%rax,PCB_ONFAULT(%r9)
	movq	%rax,PCB_ONFAULT(%r9)

ENTRY(copyin_nosmap_std)
	COPYIN smap=0 erms=0
END(copyin_nosmap_std)

ENTRY(copyin_smap_std)
	COPYIN smap=1 erms=0
END(copyin_smap_std)

ENTRY(copyin_nosmap_erms)
	COPYIN smap=0 erms=1
END(copyin_nosmap_erms)

ENTRY(copyin_smap_erms)
	COPYIN smap=1 erms=1
END(copyin_smap_erms)
	movq	PCPU(CURPCB),%rdx
	movq	$0,PCB_ONFAULT(%rdx)
/*
 * casueword32.  Compare and set user integer.  Returns -1 on fault,
 * 0 if access was successful.  Old value is written to *oldp.
 * dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
 */
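/*
 * The heavy lifting below is done by cmpxchgl: it compares (%rdi) with
 * %eax (the expected old value); on a match it stores %ecx and sets ZF,
 * otherwise it loads the current memory value into %eax.  On SMP the
 * instruction needs a lock prefix to make the compare-and-store atomic
 * with respect to other CPUs.
 */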
ENTRY(casueword32_nosmap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movl	%esi,%eax			/* old */
	cmpxchgl %ecx,(%rdi)			/* new = %ecx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.  Save %eax into %esi to prepare the return
	 * value.
	 */
	movq	%rax,PCB_ONFAULT(%r8)
	/*
	 * Access the oldp after the pcb_onfault is cleared, to correctly
	 * catch a corrupted pointer.
	 */
	movl	%esi,(%rdx)			/* oldp = %rdx */
END(casueword32_nosmap)
ENTRY(casueword32_smap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movl	%esi,%eax			/* old */
	cmpxchgl %ecx,(%rdi)			/* new = %ecx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.  Save %eax into %esi to prepare the return
	 * value.
	 */
	movq	%rax,PCB_ONFAULT(%r8)
	/*
	 * Access the oldp after the pcb_onfault is cleared, to correctly
	 * catch a corrupted pointer.
	 */
	movl	%esi,(%rdx)			/* oldp = %rdx */
END(casueword32_smap)
/*
 * casueword.  Compare and set user long.  Returns -1 on fault,
 * 0 if access was successful.  Old value is written to *oldp.
 * dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
 */
ENTRY(casueword_nosmap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rsi,%rax			/* old */
	cmpxchgq %rcx,(%rdi)			/* new = %rcx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */
	movq	%rax,PCB_ONFAULT(%r8)
END(casueword_nosmap)
ENTRY(casueword_smap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rsi,%rax			/* old */
	cmpxchgq %rcx,(%rdi)			/* new = %rcx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */
	movq	%rax,PCB_ONFAULT(%r8)
/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.
 * addr = %rdi, valp = %rsi
 */
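/*
 * Unlike the historical fuword()-style interface, which returned the
 * fetched value directly and thus could not distinguish a legitimately
 * stored -1 from a fault, fueword() writes the datum through valp and
 * keeps the return value purely as a status: -1 on fault, 0 on success.
 */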
ENTRY(fueword_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)
END(fueword_nosmap)

ENTRY(fueword_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)
END(fueword_smap)

ENTRY(fueword32_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)
END(fueword32_nosmap)

ENTRY(fueword32_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)
END(fueword32_smap)

ENTRY(fuword16_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax
	movq	$0,PCB_ONFAULT(%rcx)
END(fuword16_nosmap)

ENTRY(fuword16_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax
	movq	$0,PCB_ONFAULT(%rcx)
END(fuword16_smap)

ENTRY(fubyte_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax
	movq	$0,PCB_ONFAULT(%rcx)
END(fubyte_nosmap)

ENTRY(fubyte_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax
	movq	$0,PCB_ONFAULT(%rcx)
END(fubyte_smap)
/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.
 * addr = %rdi, value = %rsi
 */
ENTRY(suword_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)
END(suword_nosmap)

ENTRY(suword_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)
END(suword_smap)

ENTRY(suword32_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)
END(suword32_nosmap)

ENTRY(suword32_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)
END(suword32_smap)

ENTRY(suword16_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)
END(suword16_nosmap)

ENTRY(suword16_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)
END(suword16_smap)

ENTRY(subyte_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)
END(subyte_nosmap)

ENTRY(subyte_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)
END(subyte_smap)

	/* Fault entry clears PSL.AC */
fusufault:
	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)
/*
 * copyinstr(from, to, maxlen, int *lencopied)
 *           %rdi, %rsi, %rdx, %rcx
 *
 *	copy a string from 'from' to 'to', stop when a 0 character is reached.
 *	return ENAMETOOLONG if string is longer than maxlen, and
 *	EFAULT on protection violations. If lencopied is non-zero,
 *	return the actual length in *lencopied.
 */
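/*
 * In C terms the contract is roughly (a sketch; names are illustrative):
 *
 *	int error = copyinstr(uaddr, kbuf, sizeof(kbuf), &done);
 *
 * where error is 0, EFAULT, or ENAMETOOLONG, and done (if the pointer
 * is non-NULL) receives the number of bytes copied, including the
 * terminating NUL.
 */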
ENTRY(copyinstr_nosmap)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rdi = from, %rsi = to */
	movq	PCPU(CURPCB),%rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS,%rax

	/* make sure 'from' is within bounds */

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
END(copyinstr_nosmap)
ENTRY(copyinstr_smap)
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	%rcx,%r9			/* %r9 = *len */
	xchgq	%rdi,%rsi			/* %rdi = from, %rsi = to */
	movq	PCPU(CURPCB),%rcx
	movq	$cpystrflt,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS,%rax

	/* make sure 'from' is within bounds */

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
	jz	copyinstr_toolong_smap

	/* Success -- 0 byte reached */

	/* set *lencopied and return %eax */
	movq	PCPU(CURPCB),%rcx
	movq	$0,PCB_ONFAULT(%rcx)

	/* Fault entry clears PSL.AC */
copyinstr_toolong_smap:
	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAXUSER_ADDRESS,%rax
	movq	$ENAMETOOLONG,%rax
/*
 * copystr(from, to, maxlen, int *lencopied)
 *         %rdi, %rsi, %rdx, %rcx
 */
	movq	%rdx,%r8			/* %r8 = maxlen */

	/* Success -- 0 byte reached */

	/* rdx is zero -- return ENAMETOOLONG */
	movq	$ENAMETOOLONG,%rax

	/* set *lencopied and return %rax */
/*
 * Handling of special amd64 registers and descriptor tables etc.
 */
/* void lgdt(struct region_descriptor *rdp); */

	/* reload the descriptor table */

	/* flush the prefetch queue */

	movl	%eax,%fs	/* Beware, use wrmsr to set 64-bit base */

	/* reload code selector by turning return into intersegmental return */
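	/*
	 * Reloading %cs cannot be done with a plain mov; the usual trick,
	 * which the comment above refers to, is to push the kernel code
	 * selector and a return address and execute a far return (lretq),
	 * which pops both and thereby forces the new selector into %cs.
	 */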
/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
	incl	%eax
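/*
 * Only the SysV AMD64 callee-saved registers (%rbx, %rbp, %r12-%r15)
 * plus %rsp and the saved %rip go into the jump buffer, eight 64-bit
 * slots in all.  Any caller of setjmp must assume the caller-saved
 * registers are clobbered across an ordinary call anyway, so this
 * minimal context is sufficient.
 */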
/*
 * Support for reading MSRs in a safe manner.  (Instead of a panic on #gp,
 * the fault is caught and an error is returned.)
 */
/* int rdmsr_safe(u_int msr, uint64_t *data) */
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	rdmsr			/* Read MSR selected by %ecx.  Returns
				   high 32 bits in %edx, low 32 in %eax */
	salq	$32,%rdx	/* shift high half into bits 63:32 */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */
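	/*
	 * rdmsr splits the 64-bit MSR value across two 32-bit halves, so
	 * the reassembly is value = ((uint64_t)edx << 32) | eax; the movl
	 * above clears bits 63:32 of %rax so the two halves can then be
	 * OR-combined into a single 64-bit result.
	 */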
	movq	%rax,PCB_ONFAULT(%r8)
/*
 * Support for writing MSRs in a safe manner.  (Instead of a panic on #gp,
 * the fault is caught and an error is returned.)
 */
/* int wrmsr_safe(u_int msr, uint64_t data) */
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)
	wrmsr			/* Write MSR selected by %ecx.  Accepts
				   high 32 bits in %edx, low 32 in %eax. */
	movq	%rax,PCB_ONFAULT(%r8)
/*
 * MSR operations fault handler
 */
	movq	$0,PCB_ONFAULT(%r8)
/*
 * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
 * Invalidates address space addressed by ucr3, then returns to kcr3.
 * Done in assembler to ensure no other memory accesses happen while
 * on the user page table.
 */
ENTRY(pmap_pti_pcid_invalidate)
	movq	%rdi,%cr3	/* to user page table */
	movq	%rsi,%cr3	/* back to kernel */
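	/*
	 * With PCID enabled, a mov to %cr3 whose bit 63 is clear flushes
	 * the TLB entries tagged with the target PCID, so simply
	 * switching to ucr3 and back performs the invalidation.  The
	 * callers presumably pass ucr3/kcr3 values with that bit clear
	 * and run with interrupts disabled so nothing else touches
	 * memory in between.
	 */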
/*
 * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
 * Invalidates virtual address va in address space ucr3, then returns to kcr3.
 */
ENTRY(pmap_pti_pcid_invlpg)
	movq	%rdi,%cr3	/* to user page table */
	movq	%rsi,%cr3	/* back to kernel */

/*
 * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
 *     vm_offset_t eva);
 * Invalidates virtual addresses between sva and eva in address space ucr3,
 * then returns to kcr3.
 */
ENTRY(pmap_pti_pcid_invlrng)
	movq	%rdi,%cr3	/* to user page table */
	addq	$PAGE_SIZE,%rdx
	movq	%rsi,%cr3	/* back to kernel */
.macro	ibrs_seq_label l
.macro	ibrs_call_label l
.macro	ibrs_seq count
	ibrs_call_label	%(ll)
	ibrs_seq_label %(ll)
/* all callers already saved %rax, %rdx, and %rcx */
ENTRY(handle_ibrs_entry)
	cmpb	$0,hw_ibrs_active(%rip)
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	orl	$(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	orl	$((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
	movb	$1,PCPU(IBPB_SET)
	testl	$CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)
END(handle_ibrs_entry)
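/*
 * MSR_IA32_SPEC_CTRL is treated as a read-modify-write target: the orl
 * pair sets the IBRS (restrict indirect-branch speculation) and STIBP
 * (single-thread indirect branch predictors) bits in the low and high
 * halves of the MSR value held in %eax/%edx, pairing with rdmsr/wrmsr
 * on kernel entry; the exit paths below clear the same bits.
 */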
ENTRY(handle_ibrs_exit)
	cmpb	$0,PCPU(IBPB_SET)
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	andl	$~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	andl	$~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
	movb	$0,PCPU(IBPB_SET)
END(handle_ibrs_exit)
/* registers-neutral version, but needs stack */
ENTRY(handle_ibrs_exit_rs)
	cmpb	$0,PCPU(IBPB_SET)
	movl	$MSR_IA32_SPEC_CTRL,%ecx
	andl	$~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	andl	$~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx
	movb	$0,PCPU(IBPB_SET)
END(handle_ibrs_exit_rs)
/*
 * Flush L1D cache.  Load enough of the data from the kernel text
 * to flush existing L1D content.
 *
 * N.B.  The function does not follow ABI calling conventions, it corrupts
 * %rbx.  The vmm.ko caller expects that only %rax, %rdx, %rbx, %rcx, %r9,
 * and %rflags registers are clobbered.  The NMI handler caller only needs
 * %r13 preserved.
 */
#define	L1D_FLUSH_SIZE	(64 * 1024)
	movq	$-L1D_FLUSH_SIZE,%rcx
	/*
	 * pass 1: Preload TLB.
	 * Kernel text is mapped using superpages.  TLB preload is
	 * done for the benefit of older CPUs which split 2M page
	 * into 4k TLB entries.
	 */
1:	movb	L1D_FLUSH_SIZE(%r9,%rcx),%al
	addq	$PAGE_SIZE,%rcx

	movq	$-L1D_FLUSH_SIZE,%rcx
	/* pass 2: Read each cache line. */
2:	movb	L1D_FLUSH_SIZE(%r9,%rcx),%al
#undef	L1D_FLUSH_SIZE
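/*
 * Sizing note: 64KB is twice the 32KB L1D found on the affected Intel
 * parts, so reading the span once is presumably enough to displace all
 * previously cached lines.  The negative-offset addressing lets both
 * loops terminate when %rcx reaches zero, without a separate counter.
 */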