2 * Copyright (c) 1993 The Regents of the University of California.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. All advertising materials mentioning features or use of this software
14 * must display the following acknowledgement:
15 * This product includes software developed by the University of
16 * California, Berkeley and its contributors.
17 * 4. Neither the name of the University nor the names of its contributors
18 * may be used to endorse or promote products derived from this software
19 * without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 #include <machine/asmacros.h>
39 #include <machine/cputypes.h>
40 #include <machine/pmap.h>
41 #include <machine/specialreg.h>
57 .globl _copyout_vector
59 .long _generic_copyout
60 .globl _ovbcopy_vector
63 #if defined(I586_CPU) && NNPX > 0
73 * void bzero(void *buf, u_int len)
98 * do 64 byte chunks first
100 * XXX this is probably over-unrolled at least for DX2's
157 * a jump table seems to be faster than a loop or more range reductions
159 * XXX need a const section for non-text
194 #if defined(I586_CPU) && NNPX > 0
200 * The FPU register method is twice as fast as the integer register
201 * method unless the target is in the L1 cache and we pre-allocate a
202 * cache line for it (then the integer register method is 4-5 times
203 * faster). However, we never pre-allocate cache lines, since that
204 * would make the integer method 25% or more slower for the common
205 * case when the target isn't in either the L1 cache or the L2 cache.
206 * Thus we normally use the FPU register method unless the overhead
207 * would be too large.
209 cmpl $256,%ecx /* empirical; clts, fninit, smsw cost a lot */
213 * The FPU registers may belong to an application or to fastmove()
214 * or to another invocation of bcopy() or ourself in a higher level
215 * interrupt or trap handler. Preserving the registers is
216 * complicated since we avoid it if possible at all levels. We
217 * want to localize the complications even when that increases them.
218 * Here the extra work involves preserving CR0_TS in TS.
219 * `npxproc != NULL' is supposed to be the condition that all the
220 * FPU resources belong to an application, but npxproc and CR0_TS
221 * aren't set atomically enough for this condition to work in
222 * interrupt handlers.
224 * Case 1: FPU registers belong to the application: we must preserve
225 * the registers if we use them, so we only use the FPU register
226 * method if the target size is large enough to amortize the extra
227 * overhead for preserving them. CR0_TS must be preserved although
228 * it is very likely to end up as set.
230 * Case 2: FPU registers belong to fastmove(): fastmove() currently
231 * makes the registers look like they belong to an application so
232 * that cpu_switch() and savectx() don't have to know about it, so
233 * this case reduces to case 1.
235 * Case 3: FPU registers belong to the kernel: don't use the FPU
236 * register method. This case is unlikely, and supporting it would
237 * be more complicated and might take too much stack.
239 * Case 4: FPU registers don't belong to anyone: the FPU registers
240 * don't need to be preserved, so we always use the FPU register
241 * method. CR0_TS must be preserved although it is very likely to
242 * always end up as clear.
246 cmpl $256+184,%ecx /* empirical; not quite 2*108 more */
248 sarb $1,kernel_fpu_lock
257 sarb $1,kernel_fpu_lock
261 fninit /* XXX should avoid needing this */
266 * Align to an 8 byte boundary (misalignment in the main loop would
267 * cost a factor of >= 2). Avoid jumps (at little cost if it is
268 * already aligned) by always zeroing 8 bytes and using the part up
269 * to the _next_ alignment position.
272 addl %edx,%ecx /* part of %ecx -= new_%edx - %edx */
278 * Similarly align `len' to a multiple of 8.
285 * This wouldn't be any faster if it were unrolled, since the loop
286 * control instructions are much faster than the fstl and/or done
287 * in parallel with it so their overhead is insignificant.
289 fpureg_i586_bzero_loop:
294 jae fpureg_i586_bzero_loop
301 movb $0xfe,kernel_fpu_lock
307 movb $0xfe,kernel_fpu_lock
312 * `rep stos' seems to be the best method in practice for small
313 * counts. Fancy methods usually take too long to start up due
314 * to cache and BTB misses.
334 #endif /* I586_CPU && NNPX > 0 */
386 /* fillw(pat, base, cnt) */
406 cmpl %ecx,%eax /* overlapping && src < dst? */
408 cld /* nope, copy forwards */
417 addl %ecx,%edi /* copy backwards. */
438 * generic_bcopy(src, dst, cnt)
439 * ws@tools.de (Wolfgang Solfrank, TooLs GmbH) +49-228-985800
450 cmpl %ecx,%eax /* overlapping && src < dst? */
453 shrl $2,%ecx /* copy by 32-bit words */
454 cld /* nope, copy forwards */
458 andl $3,%ecx /* any bytes left? */
467 addl %ecx,%edi /* copy backwards */
471 andl $3,%ecx /* any fractional bytes? */
475 movl 20(%esp),%ecx /* copy remainder by 32-bit words */
486 #if defined(I586_CPU) && NNPX > 0
496 cmpl %ecx,%eax /* overlapping && src < dst? */
502 sarb $1,kernel_fpu_lock
515 fninit /* XXX should avoid needing this */
520 #define DCACHE_SIZE 8192
521 cmpl $(DCACHE_SIZE-512)/2,%ecx
523 movl $(DCACHE_SIZE-512)/2,%ecx
527 jb 5f /* XXX should prefetch if %ecx >= 32 */
548 large_i586_bcopy_loop:
569 jae large_i586_bcopy_loop
581 movb $0xfe,kernel_fpu_lock
584 * This is a duplicate of the main part of generic_bcopy. See the comments
585 * there. Jumping into generic_bcopy would cost a whole 0-1 cycles and
586 * would mess up high resolution profiling.
622 #endif /* I586_CPU && NNPX > 0 */
625 * Note: memcpy does not support overlapping copies
634 shrl $2,%ecx /* copy by 32-bit words */
635 cld /* nope, copy forwards */
639 andl $3,%ecx /* any bytes left? */
647 /*****************************************************************************/
648 /* copyout and fubyte family */
649 /*****************************************************************************/
651 * Access user memory from inside the kernel. These routines and possibly
652 * the math- and DOS emulators should be the only places that do this.
654 * We have to access the memory with user's permissions, so use a segment
655 * selector with RPL 3. For writes to user space we have to additionally
656 * check the PTE for write permission, because the 386 does not check
657 * write permissions when we are executing with EPL 0. The 486 does check
658 * this if the WP bit is set in CR0, so we can use a simpler version here.
660 * These routines set curpcb->onfault for the time they execute. When a
661 * protection violation occurs inside the functions, the trap handler
662 * returns to *curpcb->onfault instead of the function.
666 * copyout(from_kernel, to_user, len) - MP SAFE (if not I386_CPU)
672 ENTRY(generic_copyout)
674 movl $copyout_fault,PCB_ONFAULT(%eax)
681 testl %ebx,%ebx /* anything to do? */
685 * Check explicitly for non-user addresses. If 486 write protection
686 * is being used, this check is essential because we are in kernel
687 * mode so the h/w does not provide any protection against writing
692 * First, prevent address wrapping.
698 * XXX STOP USING VM_MAXUSER_ADDRESS.
699 * It is an end address, not a max, so every time it is used correctly it
700 * looks like there is an off by one error, and of course it caused an off
701 * by one error in several places.
703 cmpl $VM_MAXUSER_ADDRESS,%eax
706 #if defined(I386_CPU)
708 #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
709 cmpl $CPUCLASS_386,_cpu_class
713 * We have to check each PTE for user write permission.
714 * The checking may cause a page fault, so it is important to set
715 * up everything for return via copyout_fault before here.
717 /* compute number of pages */
722 shrl $IDXSHIFT+2,%ecx
725 /* compute PTE offset for start address */
731 /* check PTE for each page */
732 leal _PTmap(%edx),%eax
735 testb $PG_V,_PTmap(%eax) /* PTE page must be valid */
737 movb _PTmap(%edx),%al
738 andb $PG_V|PG_RW|PG_U,%al /* page must be valid and user writable */
739 cmpb $PG_V|PG_RW|PG_U,%al
743 /* simulate a trap */
748 call _trapwrite /* trapwrite(addr) */
753 testl %eax,%eax /* if not ok, return EFAULT */
759 jnz 1b /* check next page */
760 #endif /* I386_CPU */
762 /* bcopy(%esi, %edi, %ebx) */
766 #if defined(I586_CPU) && NNPX > 0
785 movl %eax,PCB_ONFAULT(%edx)
794 movl $0,PCB_ONFAULT(%edx)
798 #if defined(I586_CPU) && NNPX > 0
801 * Duplicated from generic_copyout. Could be done a bit better.
804 movl $copyout_fault,PCB_ONFAULT(%eax)
811 testl %ebx,%ebx /* anything to do? */
815 * Check explicitly for non-user addresses. If 486 write protection
816 * is being used, this check is essential because we are in kernel
817 * mode so the h/w does not provide any protection against writing
822 * First, prevent address wrapping.
828 * XXX STOP USING VM_MAXUSER_ADDRESS.
829 * It is an end address, not a max, so every time it is used correctly it
830 * looks like there is an off by one error, and of course it caused an off
831 * by one error in several places.
833 cmpl $VM_MAXUSER_ADDRESS,%eax
836 /* bcopy(%esi, %edi, %ebx) */
840 * End of duplicated code.
850 #endif /* I586_CPU && NNPX > 0 */
853 * copyin(from_user, to_kernel, len) - MP SAFE
859 ENTRY(generic_copyin)
861 movl $copyin_fault,PCB_ONFAULT(%eax)
864 movl 12(%esp),%esi /* caddr_t from */
865 movl 16(%esp),%edi /* caddr_t to */
866 movl 20(%esp),%ecx /* size_t len */
869 * make sure address is valid
874 cmpl $VM_MAXUSER_ADDRESS,%edx
877 #if defined(I586_CPU) && NNPX > 0
882 shrl $2,%ecx /* copy longword-wise */
887 andb $3,%cl /* copy remaining bytes */
891 #if defined(I586_CPU) && NNPX > 0
899 movl %eax,PCB_ONFAULT(%edx)
907 movl $0,PCB_ONFAULT(%edx)
911 #if defined(I586_CPU) && NNPX > 0
914 * Duplicated from generic_copyin. Could be done a bit better.
917 movl $copyin_fault,PCB_ONFAULT(%eax)
920 movl 12(%esp),%esi /* caddr_t from */
921 movl 16(%esp),%edi /* caddr_t to */
922 movl 20(%esp),%ecx /* size_t len */
925 * make sure address is valid
930 cmpl $VM_MAXUSER_ADDRESS,%edx
933 * End of duplicated code.
939 pushl %ebx /* XXX prepare for fastmove_fault */
944 #endif /* I586_CPU && NNPX > 0 */
946 #if defined(I586_CPU) && NNPX > 0
947 /* fastmove(src, dst, len)
950 len in %ecx XXX changed to on stack for profiling
951 uses %eax and %edx for tmp. storage
953 /* XXX use ENTRY() to get profiling. fastmove() is actually a non-entry. */
957 subl $PCB_SAVEFPU_SIZE+3*4,%esp
963 testl $7,%esi /* check if src addr is multiple of 8 */
966 testl $7,%edi /* check if dst addr is multiple of 8 */
969 /* if (npxproc != NULL) { */
972 /* fnsave(&curpcb->pcb_savefpu); */
974 fnsave PCB_SAVEFPU(%eax)
975 /* npxproc = NULL; */
979 /* now we own the FPU. */
982 * The process' FP state is saved in the pcb, but if we get
983 * switched, the cpu_switch() will store our FP state in the
984 * pcb. It should be possible to avoid all the copying for
985 * this, e.g., by setting a flag to tell cpu_switch() to
986 * save the state somewhere else.
988 /* tmp = curpcb->pcb_savefpu; */
994 addl $PCB_SAVEFPU,%esi
996 movl $PCB_SAVEFPU_SIZE>>2,%ecx
1002 /* stop_emulating(); */
1004 /* npxproc = curproc; */
1008 movl $fastmove_fault,PCB_ONFAULT(%eax)
1065 /* curpcb->pcb_savefpu = tmp; */
1070 addl $PCB_SAVEFPU,%edi
1073 movl $PCB_SAVEFPU_SIZE>>2,%ecx
1080 /* start_emulating(); */
1084 /* npxproc = NULL; */
1090 movl $fastmove_tail_fault,PCB_ONFAULT(%eax)
1093 shrl $2,%ecx /* copy longword-wise */
1098 andb $3,%cl /* copy remaining bytes */
1109 addl $PCB_SAVEFPU,%edi
1112 movl $PCB_SAVEFPU_SIZE>>2,%ecx
1121 fastmove_tail_fault:
1129 movl $0,PCB_ONFAULT(%edx)
1132 #endif /* I586_CPU && NNPX > 0 */
1135 * fu{byte,sword,word} - MP SAFE
1137 * Fetch a byte (sword, word) from user memory
1141 movl $fusufault,PCB_ONFAULT(%ecx)
1142 movl 4(%esp),%edx /* from */
1144 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address is valid */
1148 movl $0,PCB_ONFAULT(%ecx)
1152 * These two routines are called from the profiling code, potentially
1153 * at interrupt time. If they fail, that's okay, good things will
1154 * happen later. Fail all the time for now - until the trap code is
1155 * able to deal with this.
1167 movl $fusufault,PCB_ONFAULT(%ecx)
1170 cmpl $VM_MAXUSER_ADDRESS-2,%edx
1174 movl $0,PCB_ONFAULT(%ecx)
1182 movl $fusufault,PCB_ONFAULT(%ecx)
1185 cmpl $VM_MAXUSER_ADDRESS-1,%edx
1189 movl $0,PCB_ONFAULT(%ecx)
1196 movl %eax,PCB_ONFAULT(%ecx)
1201 * su{byte,sword,word} - MP SAFE (if not I386_CPU)
1203 * Write a byte (word, longword) to user memory
1207 movl $fusufault,PCB_ONFAULT(%ecx)
1210 #if defined(I386_CPU)
1212 #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1213 cmpl $CPUCLASS_386,_cpu_class
1214 jne 2f /* we only have to set the right segment selector */
1215 #endif /* I486_CPU || I586_CPU || I686_CPU */
1217 /* XXX - page boundary crossing is still not handled */
1222 leal _PTmap(%edx),%ecx
1225 testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
1227 movb _PTmap(%edx),%dl
1228 andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
1229 cmpb $PG_V|PG_RW|PG_U,%dl
1233 /* simulate a trap */
1236 popl %edx /* remove junk parameter from stack */
1244 cmpl $VM_MAXUSER_ADDRESS-4,%edx /* verify address validity */
1251 movl %eax,PCB_ONFAULT(%ecx)
1255 * susword - MP SAFE (if not I386_CPU)
1259 movl $fusufault,PCB_ONFAULT(%ecx)
1262 #if defined(I386_CPU)
1264 #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1265 cmpl $CPUCLASS_386,_cpu_class
1267 #endif /* I486_CPU || I586_CPU || I686_CPU */
1269 /* XXX - page boundary crossing is still not handled */
1274 leal _PTmap(%edx),%ecx
1277 testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
1279 movb _PTmap(%edx),%dl
1280 andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
1281 cmpb $PG_V|PG_RW|PG_U,%dl
1285 /* simulate a trap */
1288 popl %edx /* remove junk parameter from stack */
1296 cmpl $VM_MAXUSER_ADDRESS-2,%edx /* verify address validity */
1302 movl _curpcb,%ecx /* restore trashed register */
1303 movl %eax,PCB_ONFAULT(%ecx)
1307 * su[i]byte - MP SAFE (if not I386_CPU)
1312 movl $fusufault,PCB_ONFAULT(%ecx)
1315 #if defined(I386_CPU)
1317 #if defined(I486_CPU) || defined(I586_CPU) || defined(I686_CPU)
1318 cmpl $CPUCLASS_386,_cpu_class
1320 #endif /* I486_CPU || I586_CPU || I686_CPU */
1326 leal _PTmap(%edx),%ecx
1329 testb $PG_V,_PTmap(%ecx) /* PTE page must be valid */
1331 movb _PTmap(%edx),%dl
1332 andb $PG_V|PG_RW|PG_U,%dl /* page must be valid and user writable */
1333 cmpb $PG_V|PG_RW|PG_U,%dl
1337 /* simulate a trap */
1340 popl %edx /* remove junk parameter from stack */
1348 cmpl $VM_MAXUSER_ADDRESS-1,%edx /* verify address validity */
1354 movl _curpcb,%ecx /* restore trashed register */
1355 movl %eax,PCB_ONFAULT(%ecx)
1359 * copyinstr(from, to, maxlen, int *lencopied) - MP SAFE
1361 * copy a string from from to to, stop when a 0 character is reached.
1362 * return ENAMETOOLONG if string is longer than maxlen, and
1363 * EFAULT on protection violations. If lencopied is non-zero,
1364 * return the actual length in *lencopied.
1370 movl $cpystrflt,PCB_ONFAULT(%ecx)
1372 movl 12(%esp),%esi /* %esi = from */
1373 movl 16(%esp),%edi /* %edi = to */
1374 movl 20(%esp),%edx /* %edx = maxlen */
1376 movl $VM_MAXUSER_ADDRESS,%eax
1378 /* make sure 'from' is within bounds */
1382 /* restrict maxlen to <= VM_MAXUSER_ADDRESS-from */
1400 /* Success -- 0 byte reached */
1405 /* edx is zero - return ENAMETOOLONG or EFAULT */
1406 cmpl $VM_MAXUSER_ADDRESS,%esi
1409 movl $ENAMETOOLONG,%eax
1416 /* set *lencopied and return %eax */
1418 movl $0,PCB_ONFAULT(%ecx)
1432 * copystr(from, to, maxlen, int *lencopied) - MP SAFE
1438 movl 12(%esp),%esi /* %esi = from */
1439 movl 16(%esp),%edi /* %edi = to */
1440 movl 20(%esp),%edx /* %edx = maxlen */
1451 /* Success -- 0 byte reached */
1456 /* edx is zero -- return ENAMETOOLONG */
1457 movl $ENAMETOOLONG,%eax
1460 /* set *lencopied and return %eax */
1482 cld /* compare forwards */
1501 * Handling of special 386 registers and descriptor tables etc
1503 /* void lgdt(struct region_descriptor *rdp); */
1505 /* reload the descriptor table */
1509 /* flush the prefetch q */
1513 /* reload "stale" selectors */
1524 /* reload code selector by turning return into intersegmental return */
1531 * void lidt(struct region_descriptor *rdp);
1539 * void lldt(u_short sel)
1546 * void ltr(u_short sel)
1552 /* ssdtosd(*ssdp,*sdp) */
1589 /* void load_cr3(caddr_t cr3) */
1591 #if defined(SWTCH_OPTIM_STATS)
1592 incl _tlb_flush_count
1603 /* void load_cr4(caddr_t cr4) */
1609 /* void load_dr6(u_int dr6) */
1615 /* void reset_dbregs() */
1618 movl %eax,%dr7 /* disable all breakpoints first */
1626 /*****************************************************************************/
1627 /* setjump, longjump */
1628 /*****************************************************************************/
1632 movl %ebx,(%eax) /* save ebx */
1633 movl %esp,4(%eax) /* save esp */
1634 movl %ebp,8(%eax) /* save ebp */
1635 movl %esi,12(%eax) /* save esi */
1636 movl %edi,16(%eax) /* save edi */
1637 movl (%esp),%edx /* get rta */
1638 movl %edx,20(%eax) /* save eip */
1639 xorl %eax,%eax /* return(0); */
1644 movl (%eax),%ebx /* restore ebx */
1645 movl 4(%eax),%esp /* restore esp */
1646 movl 8(%eax),%ebp /* restore ebp */
1647 movl 12(%eax),%esi /* restore esi */
1648 movl 16(%eax),%edi /* restore edi */
1649 movl 20(%eax),%edx /* get rta */
1650 movl %edx,(%esp) /* put in return frame */
1651 xorl %eax,%eax /* return(1); */
1656 * Support for BB-profiling (gcc -a). The kernbb program will extract
1657 * the data from the kernel.
1667 NON_GPROF_ENTRY(__bb_init_func)
1673 .byte 0xc3 /* avoid macro for `ret' */