2 * Copyright (c) 1993 The Regents of the University of California.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <machine/asmacros.h>
33 #include <machine/cputypes.h>
34 #include <machine/pmap.h>
35 #include <machine/specialreg.h>
45 * void bzero(void *buf, u_int len)
71 * The loop takes 14 bytes. Ensure that it doesn't cross a 16-byte
136 /* fillw(pat, base, cnt) */
149 * memmove(dst, src, cnt) (return dst)
150 * bcopy(src, dst, cnt)
151 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
/*
 * NOTE(review): non-contiguous excerpt of the memmove()/bcopy() body.
 * Interior lines are elided, so adjacency between consecutive lines
 * below must NOT be assumed (branches, rep-string ops, and the
 * backward-copy setup are not visible in this view).
 */
174 cmpl %ecx,%eax /* overlapping && src < dst? */
177 shrl $2,%ecx /* copy by 32-bit words */
181 andl $3,%ecx /* any bytes left? */
186 movl 8(%ebp),%eax /* return dst for memmove */
/* backward-copy path (taken when regions overlap with src < dst) */
192 addl %ecx,%edi /* copy backwards */
196 andl $3,%ecx /* any fractional bytes? */
200 movl 16(%ebp),%ecx /* copy remainder by 32-bit words */
209 movl 8(%ebp),%eax /* return dst for memmove */
/*
 * NOTE(review): excerpt of the memcpy() body (forward copy only; the
 * surrounding prologue/epilogue and the rep-string instructions between
 * these lines are elided in this view).
 */
215 * Note: memcpy does not support overlapping copies
224 shrl $2,%ecx /* copy by 32-bit words */
228 andl $3,%ecx /* any bytes left? */
/*
 * NOTE(review): excerpt of copystr().  The copy loop between the
 * argument loads and the exit paths is elided here; only the argument
 * setup and the two visible termination cases are annotated.
 */
237 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
243 movl 12(%esp),%esi /* %esi = from */
244 movl 16(%esp),%edi /* %edi = to */
245 movl 20(%esp),%edx /* %edx = maxlen */
255 /* Success -- 0 byte reached */
260 /* edx is zero -- return ENAMETOOLONG */
261 movl $ENAMETOOLONG,%eax /* maxlen exhausted before NUL */
264 /* set *lencopied and return %eax */
303 * Handling of special 386 registers and descriptor tables etc
305 /* void lgdt(struct region_descriptor *rdp); */
307 /* reload the descriptor table */
311 /* flush the prefetch q */
315 /* reload "stale" selectors */
324 /* reload code selector by turning return into intersegmental return */
332 /* ssdtosd(*ssdp,*sdp) */
/*
 * NOTE(review): fragment of reset_dbregs(); the instruction that zeroes
 * %eax before this store, and the clearing of %dr0-%dr3/%dr6, are
 * elided in this view.
 */
354 /* void reset_dbregs() */
357 movl %eax,%dr7 /* disable all breakpoints first */
366 /*****************************************************************************/
367 /* setjump, longjump */
368 /*****************************************************************************/
/*
 * setjmp() body (ENTRY label and the load of the jmp_buf pointer into
 * %eax are elided in this excerpt).  Saves the i386 callee-saved
 * registers plus %esp and the caller's return address into the buffer
 * at (%eax), then returns 0.
 */
372 movl %ebx,(%eax) /* save ebx */
373 movl %esp,4(%eax) /* save esp */
374 movl %ebp,8(%eax) /* save ebp */
375 movl %esi,12(%eax) /* save esi */
376 movl %edi,16(%eax) /* save edi */
377 movl (%esp),%edx /* get rta (return address at top of stack) */
378 movl %edx,20(%eax) /* save eip */
379 xorl %eax,%eax /* return(0); */
/*
 * longjmp() body (ENTRY label and jmp_buf pointer load elided).
 * Restores the registers saved by setjmp above, rewrites the return
 * address on the restored stack, and returns.
 * NOTE(review): the visible "xorl %eax,%eax" contradicts the
 * "return(1)" comment; presumably an elided increment follows — the
 * classic idiom is xorl/incl to return 1.  Confirm against full source.
 */
385 movl (%eax),%ebx /* restore ebx */
386 movl 4(%eax),%esp /* restore esp */
387 movl 8(%eax),%ebp /* restore ebp */
388 movl 12(%eax),%esi /* restore esi */
389 movl 16(%eax),%edi /* restore edi */
390 movl 20(%eax),%edx /* get rta */
391 movl %edx,(%esp) /* put in return frame */
392 xorl %eax,%eax /* return(1); */
/*
 * NOTE(review): fragment of rdmsr_safe().  The rdmsr instruction, the
 * store of edx:eax into *data, and the return are elided between the
 * two visible pairs.  Pattern: arm pcb_onfault with msr_onfault so a
 * #GP from a bad MSR is recovered instead of panicking, then disarm.
 */
398 * Support for reading MSRs in the safe manner. (Instead of panic on #gp,
402 /* int rdmsr_safe(u_int msr, uint64_t *data) */
403 movl PCPU(CURPCB),%ecx
404 movl $msr_onfault,PCB_ONFAULT(%ecx) /* arm fault recovery */
413 movl PCPU(CURPCB),%ecx
414 movl %eax,PCB_ONFAULT(%ecx) /* restore/clear onfault handler */
/*
 * NOTE(review): fragment of wrmsr_safe(); mirrors rdmsr_safe — the
 * wrmsr instruction between the two visible pairs is elided.  Arms
 * pcb_onfault with msr_onfault before the MSR write, disarms after.
 */
419 * Support for writing MSRs in the safe manner. (Instead of panic on #gp,
423 /* int wrmsr_safe(u_int msr, uint64_t data) */
424 movl PCPU(CURPCB),%ecx
425 movl $msr_onfault,PCB_ONFAULT(%ecx) /* arm fault recovery */
433 movl PCPU(CURPCB),%ecx
434 movl %eax,PCB_ONFAULT(%ecx) /* restore/clear onfault handler */
/*
 * NOTE(review): fragment of the msr_onfault recovery handler reached
 * via pcb_onfault on a #GP from rdmsr/wrmsr.  Clears pcb_onfault; the
 * error-code load and return are elided in this view.
 */
439 * MSR operations fault handler
443 movl PCPU(CURPCB),%ecx
444 movl $0,PCB_ONFAULT(%ecx) /* disarm fault recovery */
/*
 * handle_ibrs_entry — kernel-entry side of the IBRS/STIBP speculation
 * mitigation.  NOTE(review): interior lines are elided (the branch
 * taken when the knob is 0, the rdmsr/wrmsr pair, and the return).
 */
448 ENTRY(handle_ibrs_entry)
449 cmpb $0,hw_ibrs_ibpb_active /* mitigation knob set? (branch elided) */
451 movl $MSR_IA32_SPEC_CTRL,%ecx /* MSR index for rdmsr/wrmsr (elided) */
453 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax /* set bits, MSR low half */
454 orl $(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32,%edx /* high half in %edx */
456 movb $1,PCPU(IBPB_SET) /* remember per-CPU that IBRS is engaged */
458 * i386 does not implement SMEP, but the 4/4 split makes this not
462 END(handle_ibrs_entry)
/*
 * handle_ibrs_exit — user-return side: clears the IBRS/STIBP bits set
 * by handle_ibrs_entry when the per-CPU flag says they were engaged.
 * NOTE(review): branch target, rdmsr/wrmsr and return are elided.
 */
464 ENTRY(handle_ibrs_exit)
465 cmpb $0,PCPU(IBPB_SET) /* engaged on entry? (branch elided) */
467 movl $MSR_IA32_SPEC_CTRL,%ecx
469 andl $~(IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP),%eax /* clear bits, low half */
470 andl $~((IA32_SPEC_CTRL_IBRS|IA32_SPEC_CTRL_STIBP)>>32),%edx /* high half */
472 movb $0,PCPU(IBPB_SET) /* mark disengaged */
474 END(handle_ibrs_exit)
/*
 * mds_handler_void — no-op MDS handler used when no CPU-buffer flush
 * is required (body is a bare return; the ret line is elided here).
 */
476 ENTRY(mds_handler_void)
478 END(mds_handler_void)
/*
 * mds_handler_verw — MDS handler for CPUs whose microcode makes VERW
 * overwrite the affected CPU buffers.  NOTE(review): the verw
 * instruction and its memory operand are elided in this excerpt.
 */
480 ENTRY(mds_handler_verw)
486 END(mds_handler_verw)
/*
 * mds_handler_ivb — software MDS buffer-flush sequence for Ivy Bridge
 * class CPUs.  Saves %xmm0 to per-CPU scratch, then fills the per-CPU
 * MDS buffer with non-temporal stores and restores %xmm0.
 * NOTE(review): loop counters, branches and END() are elided.
 */
488 ENTRY(mds_handler_ivb)
493 1: movl PCPU(MDS_BUF), %edx /* %edx = per-CPU flush buffer */
494 movdqa %xmm0, PCPU(MDS_TMP) /* preserve caller's %xmm0 */
503 2: movntdq %xmm0, (%edx) /* non-temporal store into buffer */
509 movdqa PCPU(MDS_TMP),%xmm0 /* restore %xmm0 */
/*
 * mds_handler_bdw — MDS flush sequence for Broadwell-class CPUs; same
 * save/fill/restore shape as the IVB handler but with the buffer
 * pointer in %ebx.  NOTE(review): loop control and END() are elided.
 */
516 ENTRY(mds_handler_bdw)
521 1: movl PCPU(MDS_BUF), %ebx /* %ebx = per-CPU flush buffer */
522 movdqa %xmm0, PCPU(MDS_TMP) /* preserve caller's %xmm0 */
528 2: movntdq %xmm0, (%ebx) /* non-temporal store into buffer */
537 movdqa PCPU(MDS_TMP),%xmm0 /* restore %xmm0 */
/*
 * mds_handler_skl_sse — Skylake MDS flush, SSE variant: preserve
 * %xmm0, run the load/flush sequence over the per-CPU buffers, restore.
 * NOTE(review): the load loop between lines 551 and 558, the loop
 * control, and the return are elided in this excerpt.
 */
544 ENTRY(mds_handler_skl_sse)
549 1: movl PCPU(MDS_BUF), %edi /* primary flush buffer */
550 movl PCPU(MDS_BUF64), %edx /* 64-byte-aligned load buffer */
551 movdqa %xmm0, PCPU(MDS_TMP) /* preserve caller's %xmm0 */
558 2: clflushopt 5376(%edi, %eax, 8) /* flush buffer cache lines */
568 movdqa PCPU(MDS_TMP), %xmm0 /* restore %xmm0 */
573 END(mds_handler_skl_sse)
/*
 * mds_handler_skl_avx — Skylake MDS flush, AVX variant.  Preserves
 * %ymm0 in per-CPU scratch, zeroes it, performs the paired reads from
 * the 64-byte buffer (the back-to-back vorpd loads are intentional),
 * flushes the buffer with clflushopt, and restores %ymm0.
 * NOTE(review): loop control and the return are elided.
 */
575 ENTRY(mds_handler_skl_avx)
580 1: movl PCPU(MDS_BUF), %edi /* primary flush buffer */
581 movl PCPU(MDS_BUF64), %edx /* 64-byte-aligned load buffer */
582 vmovdqa %ymm0, PCPU(MDS_TMP) /* preserve caller's %ymm0 */
583 vpxor %ymm0, %ymm0, %ymm0 /* zero %ymm0 */
586 vorpd (%edx), %ymm0, %ymm0 /* paired load from buffer */
587 vorpd (%edx), %ymm0, %ymm0 /* second load, deliberately repeated */
589 2: clflushopt 5376(%edi, %eax, 8) /* flush buffer cache lines */
599 vmovdqa PCPU(MDS_TMP), %ymm0 /* restore %ymm0 */
604 END(mds_handler_skl_avx)
/*
 * mds_handler_skl_avx512 — Skylake MDS flush, AVX-512 variant; same
 * structure as the AVX handler using %zmm0 and the EVEX-encoded
 * save/zero/restore instructions.  The repeated vorpd load is
 * intentional.  NOTE(review): loop control and the return are elided.
 */
606 ENTRY(mds_handler_skl_avx512)
611 1: movl PCPU(MDS_BUF), %edi /* primary flush buffer */
612 movl PCPU(MDS_BUF64), %edx /* 64-byte-aligned load buffer */
613 vmovdqa64 %zmm0, PCPU(MDS_TMP) /* preserve caller's %zmm0 */
614 vpxord %zmm0, %zmm0, %zmm0 /* zero %zmm0 */
617 vorpd (%edx), %zmm0, %zmm0 /* paired load from buffer */
618 vorpd (%edx), %zmm0, %zmm0 /* second load, deliberately repeated */
620 2: clflushopt 5376(%edi, %eax, 8) /* flush buffer cache lines */
630 vmovdqa64 PCPU(MDS_TMP), %zmm0 /* restore %zmm0 */
635 END(mds_handler_skl_avx512)
/*
 * mds_handler_silvermont — MDS flush for Silvermont-class Atom CPUs;
 * same save/non-temporal-fill/restore shape as the IVB handler.
 * NOTE(review): loop counters and branches are elided in this excerpt.
 */
637 ENTRY(mds_handler_silvermont)
642 1: movl PCPU(MDS_BUF), %edx /* %edx = per-CPU flush buffer */
643 movdqa %xmm0, PCPU(MDS_TMP) /* preserve caller's %xmm0 */
647 2: movntdq %xmm0, (%edx) /* non-temporal store into buffer */
653 movdqa PCPU(MDS_TMP),%xmm0 /* restore %xmm0 */
658 END(mds_handler_silvermont)