/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1993 The Regents of the University of California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <machine/asmacros.h>
#include <machine/specialreg.h>
#include <machine/pmap.h>
	movq	$PAGE_SIZE/8,%rcx
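	/*
	 * A sketch of the intent, assuming the usual "xorl %eax,%eax;
	 * rep stosq" body: with %rcx = PAGE_SIZE/8, rep stosq stores that
	 * many zero quadwords at (%rdi), i.e. roughly
	 *
	 *	void
	 *	pagezero(void *p)
	 *	{
	 *		uint64_t *q = p;
	 *		size_t i;
	 *
	 *		for (i = 0; i < PAGE_SIZE / 8; i++)
	 *			q[i] = 0;
	 *	}
	 */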
/*
 * pagecopy(%rdi=from, %rsi=to)
 */
	movq	$PAGE_SIZE/8,%rcx
	/*
	 * The loop takes 29 bytes.  Ensure that it doesn't cross a 32-byte
	 * cache line.
	 */
	movnti	%rax,(%rdi,%rdx)
	movnti	%rax,8(%rdi,%rdx)
	movnti	%rax,16(%rdi,%rdx)
	movnti	%rax,24(%rdi,%rdx)
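	/*
	 * movnti is a non-temporal store: the data goes to memory through
	 * the write-combining buffers and bypasses the caches, so zeroing
	 * a page does not evict useful cache lines.  Non-temporal stores
	 * are weakly ordered, which is why such a loop is normally
	 * followed by sfence before the function returns.
	 */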
/*
 * memcmp(b1, b2, len)
 */
	movzbl	(%rdi,%rcx,1),%eax
	movzbl	(%rsi,%rcx,1),%r8d

	movzbl	(%rdi,%rcx,1),%eax
	movzbl	(%rsi,%rcx,1),%r8d

	movzbl	(%rdi,%rcx,1),%eax
	movzbl	(%rsi,%rcx,1),%r8d

	movzbl	(%rdi,%rcx,1),%eax
	movzbl	(%rsi,%rcx,1),%r8d
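	/*
	 * Each byte is loaded zero-extended into a 32-bit register so the
	 * pair can be subtracted, yielding the negative/zero/positive
	 * contract of memcmp(3) directly.  The idea in C (illustrative,
	 * not the exact unrolling used here):
	 *
	 *	int
	 *	memcmp(const void *b1, const void *b2, size_t len)
	 *	{
	 *		const unsigned char *p1 = b1, *p2 = b2;
	 *
	 *		for (; len != 0; len--, p1++, p2++)
	 *			if (*p1 != *p2)
	 *				return ((int)*p1 - (int)*p2);
	 *		return (0);
	 *	}
	 */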
/*
 * memmove(dst, src, cnt)
 *
 * Adapted from bcopy written by:
 *  ws@tools.de     (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
 */
	cmpq	%rcx,%r8			/* overlapping && src < dst? */

	shrq	$3,%rcx				/* copy by 64-bit words */

	andq	$7,%rcx				/* any bytes left? */

	addq	%rcx,%rdi			/* copy backwards */

	andq	$7,%rcx				/* any fractional bytes? */

	movq	%rdx,%rcx			/* copy remainder by 32-bit words */

	cmpq	%rcx,%r8			/* overlapping && src < dst? */

	addq	%rcx,%rdi			/* copy backwards */
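	/*
	 * When the regions overlap and src < dst, a forward copy would
	 * clobber source bytes that have not been read yet, so the copy is
	 * done from the high addresses down (std; movs; cld).  In all
	 * other cases a plain forward copy is safe, and faster.
	 */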
/*
 * memcpy(dst, src, len)
 *
 * Note: memcpy does not support overlapping copies
 */

	shrq	$3,%rcx				/* copy by 64-bit words */

	andq	$7,%rcx				/* any bytes left? */
/*
 * memset(dst, c, len)
 */

	movabs	$0x0101010101010101,%rax
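	/*
	 * Multiplying the fill byte by 0x0101010101010101 replicates it
	 * into all eight bytes of a 64-bit register, so the fill can then
	 * proceed by quadwords.  The same trick in C (a sketch, assuming
	 * c has been masked to 8 bits):
	 *
	 *	uint64_t pat = (uint64_t)(c & 0xff) * 0x0101010101010101ULL;
	 */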
/* fillw(pat, base, cnt) */
/*       %rdi,%rsi, %rdx */

/*****************************************************************************/
/* copyout and fubyte family                                                 */
/*****************************************************************************/
/*
 * Access user memory from inside the kernel.  These routines should be
 * the only places that do this.
 *
 * These routines set curpcb->pcb_onfault for the time they execute.  When a
 * protection violation occurs inside the functions, the trap handler
 * returns to *curpcb->pcb_onfault instead of the function.
 */
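/*
 * The recovery pattern, sketched as C-like pseudocode (only pcb_onfault is
 * from this file; the control flow shown belongs to the trap handler,
 * simplified):
 *
 *	curpcb->pcb_onfault = copy_fault;	// arm fault recovery
 *	... load/store user memory ...		// may take a page fault
 *	curpcb->pcb_onfault = NULL;		// disarm on success
 *
 * If a protection fault occurs while armed, the trap handler does not
 * deliver a signal or panic; it resumes execution at copy_fault, which
 * clears pcb_onfault and makes the routine return EFAULT (or -1, for the
 * fetch/store family) to its caller.
 */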
.macro	SMAP_DISABLE smap
.if	\smap
	stac
.endif
.endm

.macro	SMAP_ENABLE smap
.if	\smap
	clac
.endif
.endm
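/*
 * With SMAP (Supervisor Mode Access Prevention) enabled, kernel-mode
 * accesses to user pages fault unless EFLAGS.AC is set.  The macros above
 * bracket user accesses with stac (set AC, permit the access) and clac
 * (clear AC, re-arm SMAP) when the smap argument is non-zero.
 */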
/*
 * copyout(from_kernel, to_user, len)
 */
.macro	COPYOUT smap erms
	movq	PCPU(CURPCB),%r9
	movq	$copy_fault,PCB_ONFAULT(%r9)
	/*
	 * Check explicitly for non-user addresses.  If 486 write protection
	 * is being used, this check is essential because we are in kernel
	 * mode so the h/w does not provide any protection against writing
	 * kernel addresses.
	 */
	/*
	 * First, prevent address wrapping.
	 */

	/*
	 * XXX STOP USING VM_MAXUSER_ADDRESS.
	 * It is an end address, not a max, so every time it is used correctly
	 * it looks like there is an off-by-one error, and of course it caused
	 * an off-by-one error in several places.
	 */
	movq	$VM_MAXUSER_ADDRESS,%rcx
	/*
	 * Set up arguments for rep movs*.
	 */

	/*
	 * Set return value to zero.  Remaining failure mode goes through
	 * copy_fault.
	 */
	movq	%rax,PCB_ONFAULT(%r9)

	movq	%rax,PCB_ONFAULT(%r9)
ENTRY(copyout_nosmap_std)
	COPYOUT smap=0 erms=0
END(copyout_nosmap_std)

ENTRY(copyout_smap_std)
	COPYOUT smap=1 erms=0
END(copyout_smap_std)

ENTRY(copyout_nosmap_erms)
	COPYOUT smap=0 erms=1
END(copyout_nosmap_erms)

ENTRY(copyout_smap_erms)
	COPYOUT smap=1 erms=1
END(copyout_smap_erms)
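/*
 * Four variants are generated from one macro: with and without SMAP
 * (stac/clac bracketing) and with and without ERMS ("Enhanced REP MOVSB",
 * where a plain rep movsb is the fastest copy).  The kernel is expected to
 * pick the variant matching the boot CPU's CPUID features once, at boot
 * time, rather than branching on every call; in FreeBSD this selection is
 * done with ifunc resolvers on the C side of the tree.
 */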
/*
 * copyin(from_user, to_kernel, len)
 */
.macro	COPYIN smap erms
	movq	PCPU(CURPCB),%r9
	movq	$copy_fault,PCB_ONFAULT(%r9)

	/*
	 * make sure address is valid
	 */
	movq	$VM_MAXUSER_ADDRESS,%rcx

	shrq	$3,%rcx				/* copy longword-wise */

	andb	$7,%cl				/* copy remaining bytes */

	movq	%rax,PCB_ONFAULT(%r9)

	movq	%rax,PCB_ONFAULT(%r9)
ENTRY(copyin_nosmap_std)
	COPYIN smap=0 erms=0
END(copyin_nosmap_std)

ENTRY(copyin_smap_std)
	COPYIN smap=1 erms=0
END(copyin_smap_std)

ENTRY(copyin_nosmap_erms)
	COPYIN smap=0 erms=1
END(copyin_nosmap_erms)

ENTRY(copyin_smap_erms)
	COPYIN smap=1 erms=1
END(copyin_smap_erms)
	/* Trap entry clears PSL.AC */
copy_fault:
	movq	$0,PCB_ONFAULT(%r9)
/*
 * casueword32.  Compare and set user integer.  Returns -1 on fault,
 * 0 if access was successful.  Old value is written to *oldp.
 * dst = %rdi, old = %esi, oldp = %rdx, new = %ecx
 */
ENTRY(casueword32_nosmap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movl	%esi,%eax			/* old */

	cmpxchgl %ecx,(%rdi)			/* new = %ecx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.  Save %eax into %esi to prepare the return
	 * value.
	 */

	movq	%rax,PCB_ONFAULT(%r8)

	/*
	 * Access the oldp after the pcb_onfault is cleared, to correctly
	 * catch a corrupted pointer.
	 */
	movl	%esi,(%rdx)			/* oldp = %rdx */

END(casueword32_nosmap)
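/*
 * cmpxchg compares %eax (or %rax) with the destination; on a match it
 * stores the new value, otherwise it loads the current value into %eax.
 * With a lock prefix this is the primitive behind atomic compare-and-set.
 * Caller-side use, sketched per the contract stated above (names other
 * than casueword32 are illustrative):
 *
 *	uint32_t old = expected;
 *
 *	if (casueword32(addr, old, &old, newval) == -1)
 *		return (EFAULT);	// bad user address
 *	// old now holds the value observed at *addr; the store took
 *	// effect iff that value equalled the expected one.
 */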
ENTRY(casueword32_smap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movl	%esi,%eax			/* old */

	cmpxchgl %ecx,(%rdi)			/* new = %ecx */

	/*
	 * The old value is in %eax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.  Save %eax into %esi to prepare the return
	 * value.
	 */

	movq	%rax,PCB_ONFAULT(%r8)

	/*
	 * Access the oldp after the pcb_onfault is cleared, to correctly
	 * catch a corrupted pointer.
	 */
	movl	%esi,(%rdx)			/* oldp = %rdx */

END(casueword32_smap)
/*
 * casueword.  Compare and set user long.  Returns -1 on fault,
 * 0 if access was successful.  Old value is written to *oldp.
 * dst = %rdi, old = %rsi, oldp = %rdx, new = %rcx
 */
ENTRY(casueword_nosmap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* 8-byte access */
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rsi,%rax			/* old */

	cmpxchgq %rcx,(%rdi)			/* new = %rcx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	%rax,PCB_ONFAULT(%r8)

END(casueword_nosmap)
ENTRY(casueword_smap)
	movq	PCPU(CURPCB),%r8
	movq	$fusufault,PCB_ONFAULT(%r8)

	movq	$VM_MAXUSER_ADDRESS-8,%rax	/* 8-byte access */
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rsi,%rax			/* old */

	cmpxchgq %rcx,(%rdi)			/* new = %rcx */

	/*
	 * The old value is in %rax.  If the store succeeded it will be the
	 * value we expected (old) from before the store, otherwise it will
	 * be the current value.
	 */

	movq	%rax,PCB_ONFAULT(%r8)
/*
 * Fetch (load) a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit
 * byte from user memory.
 * addr = %rdi, valp = %rsi
 */
ENTRY(fueword_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)

ENTRY(fueword_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)

ENTRY(fueword32_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)

END(fueword32_nosmap)
ENTRY(fueword32_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address is valid */

	movq	%rax,PCB_ONFAULT(%rcx)

ENTRY(fuword16_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax

	movq	$0,PCB_ONFAULT(%rcx)

ENTRY(fuword16_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax

	movq	$0,PCB_ONFAULT(%rcx)

ENTRY(fubyte_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax

	movq	$0,PCB_ONFAULT(%rcx)

ENTRY(fubyte_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax

	movq	$0,PCB_ONFAULT(%rcx)
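/*
 * Caller-side view of the fetch family (a sketch; the kernel prototypes
 * live in <sys/systm.h>): the fuword()/fueword() routines return -1 on
 * fault, so fetched values travel through a pointer rather than the
 * return value, e.g.
 *
 *	long val;
 *
 *	if (fueword(uaddr, &val) == -1)
 *		return (EFAULT);
 */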
/*
 * Store a 64-bit word, a 32-bit word, a 16-bit word, or an 8-bit byte to
 * user memory.
 * addr = %rdi, value = %rsi
 */
ENTRY(suword_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)

ENTRY(suword_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-8,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)
ENTRY(suword32_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)

ENTRY(suword32_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-4,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx
	movq	%rax,PCB_ONFAULT(%rcx)
ENTRY(suword16_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)

ENTRY(suword16_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-2,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)
ENTRY(subyte_nosmap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)

ENTRY(subyte_smap)
	movq	PCPU(CURPCB),%rcx
	movq	$fusufault,PCB_ONFAULT(%rcx)

	movq	$VM_MAXUSER_ADDRESS-1,%rax
	cmpq	%rax,%rdi			/* verify address validity */

	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
	movq	%rax,PCB_ONFAULT(%rcx)
	/* Fault entry clears PSL.AC */
fusufault:
	movq	PCPU(CURPCB),%rcx

	movq	%rax,PCB_ONFAULT(%rcx)
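/*
 * fusufault is the common landing pad for the fetch/store family: the
 * trap handler transfers here on a user-memory fault, pcb_onfault is
 * cleared, and the routine returns -1 so the caller can distinguish a
 * fault from a successful access.
 */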
/*
 * copyinstr(from, to, maxlen, int *lencopied)
 *           %rdi,  %rsi, %rdx, %rcx
 *
 * Copy a string from 'from' to 'to', stopping when a 0 character is
 * reached.  Return ENAMETOOLONG if the string is longer than maxlen, and
 * EFAULT on protection violations.  If lencopied is non-zero, return the
 * actual length in *lencopied.
 */
.macro	COPYINSTR smap
	movq	%rdx,%r8			/* %r8 = maxlen */
	movq	PCPU(CURPCB),%r9
	movq	$cpystrflt,PCB_ONFAULT(%r9)

	movq	$VM_MAXUSER_ADDRESS,%rax

	/* make sure 'from' is within bounds */

	/* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */

	jz	copyinstr_toolong

	jz	copyinstr_toolong_smap

	/* Success -- 0 byte reached */

	/* set *lencopied and return %eax */
	movq	%rax,PCB_ONFAULT(%r9)
ENTRY(copyinstr_nosmap)
	COPYINSTR smap=0
END(copyinstr_nosmap)

ENTRY(copyinstr_smap)
	COPYINSTR smap=1
END(copyinstr_smap)

	/* Fault entry clears PSL.AC */
cpystrflt:

	/* set *lencopied and return %eax */
	movq	$0,PCB_ONFAULT(%r9)

copyinstr_toolong_smap:

	/* rdx is zero - return ENAMETOOLONG or EFAULT */
	movq	$VM_MAXUSER_ADDRESS,%rax

	movl	$ENAMETOOLONG,%eax
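/*
 * The toolong path has to disambiguate two cases: if maxlen was clamped
 * because the user buffer ran up against VM_MAXUSER_ADDRESS, exhausting
 * the count really means the scan hit the end of user space, so EFAULT is
 * the right error; otherwise the string is simply longer than the
 * caller's limit and ENAMETOOLONG is returned.
 */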
/*
 * copystr(from, to, maxlen, int *lencopied)
 *         %rdi, %rsi, %rdx, %rcx
 */
	movq	%rdx,%r8			/* %r8 = maxlen */

	/* Success -- 0 byte reached */

	/* set *lencopied and return %rax */

	/* rdx is zero -- return ENAMETOOLONG */
	movl	$ENAMETOOLONG,%eax
/*
 * Handling of special amd64 registers and descriptor tables etc.
 */
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
	/* reload the descriptor table */

	/* flush the prefetch queue */

	movl	%eax,%fs	/* Beware, use wrmsr to set 64 bit base */

	/* reload code selector by turning return into intersegmental return */
/*****************************************************************************/
/* setjump, longjump                                                         */
/*****************************************************************************/

ENTRY(setjmp)
	movq	%rbx,0(%rdi)			/* save rbx */
	movq	%rsp,8(%rdi)			/* save rsp */
	movq	%rbp,16(%rdi)			/* save rbp */
	movq	%r12,24(%rdi)			/* save r12 */
	movq	%r13,32(%rdi)			/* save r13 */
	movq	%r14,40(%rdi)			/* save r14 */
	movq	%r15,48(%rdi)			/* save r15 */
	movq	0(%rsp),%rdx			/* get rta */
	movq	%rdx,56(%rdi)			/* save rip */
	xorl	%eax,%eax			/* return(0); */
ENTRY(longjmp)
	movq	0(%rdi),%rbx			/* restore rbx */
	movq	8(%rdi),%rsp			/* restore rsp */
	movq	16(%rdi),%rbp			/* restore rbp */
	movq	24(%rdi),%r12			/* restore r12 */
	movq	32(%rdi),%r13			/* restore r13 */
	movq	40(%rdi),%r14			/* restore r14 */
	movq	48(%rdi),%r15			/* restore r15 */
	movq	56(%rdi),%rdx			/* get rta */
	movq	%rdx,0(%rsp)			/* put in return frame */
	xorl	%eax,%eax			/* return(1); */
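/*
 * This is the kernel's minimal setjmp/longjmp: only the callee-saved
 * registers and the return address go into the jump buffer, and longjmp
 * always produces a return value of 1 (%eax is zeroed, then incremented
 * in the tail).  Usage, sketched with the familiar userland-style API
 * (the kernel flavor takes a struct _jmp_buf pointer and has no value
 * argument, since the return value is fixed at 1):
 *
 *	jmp_buf env;
 *
 *	if (setjmp(env) == 0) {
 *		// run code that may bail out via longjmp(env)
 *	} else {
 *		// resumed here, with setjmp returning 1
 *	}
 */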
/*
 * Support for reading MSRs in the safe manner.  (Instead of panic on #gp,
 * an error is returned.)
 */
/* int rdmsr_safe(u_int msr, uint64_t *data) */
ENTRY(rdmsr_safe)
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)

	rdmsr			/* Read MSR pointed to by %ecx.  Returns
				   high 32 bits in %edx, low in %eax */
	salq	$32,%rdx	/* shift the high half into bits 63:32 */
	movl	%eax,%eax	/* zero-extend %eax -> %rax */

	movq	%rax,PCB_ONFAULT(%r8)
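/*
 * The 64-bit result is assembled as (%rdx << 32) | %eax.  Caller-side
 * use (a sketch; MSR_EFER stands in for any MSR of interest):
 *
 *	uint64_t v;
 *
 *	if (rdmsr_safe(MSR_EFER, &v) != 0)
 *		return (ENXIO);		// #GP: MSR not implemented here
 */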
/*
 * Support for writing MSRs in the safe manner.  (Instead of panic on #gp,
 * an error is returned.)
 */
/* int wrmsr_safe(u_int msr, uint64_t data) */
ENTRY(wrmsr_safe)
	movq	PCPU(CURPCB),%r8
	movq	$msr_onfault,PCB_ONFAULT(%r8)

	wrmsr			/* Write MSR pointed to by %ecx.  Accepts
				   high 32 bits in %edx, low in %eax. */

	movq	%rax,PCB_ONFAULT(%r8)
/*
 * MSR operations fault handler
 */
msr_onfault:
	movq	$0,PCB_ONFAULT(%r8)
/*
 * void pmap_pti_pcid_invalidate(uint64_t ucr3, uint64_t kcr3);
 * Invalidates address space addressed by ucr3, then returns to kcr3.
 * Done in assembler to ensure no other memory accesses happen while
 * on the user page table.
 */
ENTRY(pmap_pti_pcid_invalidate)
	movq	%rdi,%cr3	/* to user page table */
	movq	%rsi,%cr3	/* back to kernel */
/*
 * void pmap_pti_pcid_invlpg(uint64_t ucr3, uint64_t kcr3, vm_offset_t va);
 * Invalidates virtual address va in address space ucr3, then returns to kcr3.
 */
ENTRY(pmap_pti_pcid_invlpg)
	movq	%rdi,%cr3	/* to user page table */
	movq	%rsi,%cr3	/* back to kernel */

/*
 * void pmap_pti_pcid_invlrng(uint64_t ucr3, uint64_t kcr3, vm_offset_t sva,
 *     vm_offset_t eva);
 * Invalidates virtual addresses between sva and eva in address space ucr3,
 * then returns to kcr3.
 */
ENTRY(pmap_pti_pcid_invlrng)
	movq	%rdi,%cr3	/* to user page table */

	addq	$PAGE_SIZE,%rdx

	movq	%rsi,%cr3	/* back to kernel */
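/*
 * With PTI and PCID, user and kernel run on separate page-table roots
 * tagged by PCID, and a %cr3 write with the no-flush bit (bit 63) set
 * does not flush the TLB.  To invalidate user-tagged entries the CPU
 * must actually be on the user root while invlpg runs, which is why
 * these helpers are written in assembly with nothing between the two
 * %cr3 moves except the invalidation itself: any other memory access
 * while on ucr3 could fault or pollute the user-tagged TLB.
 */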
.macro	ibrs_seq_label l

.macro	ibrs_call_label l

.macro	ibrs_seq count
	ibrs_call_label	%(ll)

	ibrs_seq_label	%(ll)
/* all callers already saved %rax, %rdx, and %rcx */
ENTRY(handle_ibrs_entry)
	cmpb	$0,hw_ibrs_active(%rip)

	movl	$MSR_IA32_SPEC_CTRL,%ecx

	orl	$(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	orl	$(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx

	movb	$1,PCPU(IBPB_SET)
	testl	$CPUID_STDEXT_SMEP,cpu_stdext_feature(%rip)

END(handle_ibrs_entry)

ENTRY(handle_ibrs_exit)
	cmpb	$0,PCPU(IBPB_SET)

	movl	$MSR_IA32_SPEC_CTRL,%ecx

	andl	$~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	andl	$~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx

	movb	$0,PCPU(IBPB_SET)
END(handle_ibrs_exit)
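/*
 * IBRS (Indirect Branch Restricted Speculation) is engaged on kernel
 * entry and released on exit by setting and clearing the IBRS/STIBP bits
 * of MSR_IA32_SPEC_CTRL with a read-modify-write rdmsr/wrmsr sequence.
 * PCPU(IBPB_SET) records the armed state per CPU, so the exit path only
 * touches the MSR when entry actually set it; MSR writes here are
 * expensive, and the hw_ibrs_active check short-circuits the whole
 * sequence when the mitigation is disabled.
 */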
/* register-neutral version, but needs stack */
ENTRY(handle_ibrs_exit_rs)
	cmpb	$0,PCPU(IBPB_SET)

	movl	$MSR_IA32_SPEC_CTRL,%ecx

	andl	$~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax
	andl	$~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx

	movb	$0,PCPU(IBPB_SET)
END(handle_ibrs_exit_rs)
/*
 * Flush L1D cache.  Load enough of the data from the kernel text
 * to flush the existing L1D content.
 *
 * N.B.  The function does not follow ABI calling conventions; it corrupts
 * %rbx.  The vmm.ko caller expects that only %rax, %rdx, %rbx, %rcx, %r9,
 * and %rflags registers are clobbered.  The NMI handler caller only needs
 * %r13 preserved.
 */

#define	L1D_FLUSH_SIZE	(64 * 1024)

	movq	$-L1D_FLUSH_SIZE, %rcx
	/*
	 * pass 1: Preload TLB.
	 * Kernel text is mapped using superpages.  TLB preload is
	 * done for the benefit of older CPUs which split a 2M page
	 * into 4K TLB entries.
	 */
1:	movb	L1D_FLUSH_SIZE(%r9, %rcx), %al
	addq	$PAGE_SIZE, %rcx

	movq	$-L1D_FLUSH_SIZE, %rcx
	/* pass 2: Read each cache line. */
2:	movb	L1D_FLUSH_SIZE(%r9, %rcx), %al

#undef	L1D_FLUSH_SIZE
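/*
 * Two passes are used so the flush pass does not take TLB misses, whose
 * page-table walks would themselves touch memory and perturb the L1D.
 * Pass 1 walks the 64KB window one page at a time purely to populate the
 * TLB; pass 2 then reads one byte per cache line, stepping %rcx by the
 * line size, which is sufficient to pull each line into L1D and displace
 * whatever was resident there.
 */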